column              dtype    values
problem_id          string   lengths 18 to 22
source              string   1 distinct value
task_type           string   1 distinct value
in_source_id        string   lengths 13 to 58
prompt              string   lengths 1.71k to 18.9k
golden_diff         string   lengths 145 to 5.13k
verification_info   string   lengths 465 to 23.6k
num_tokens_prompt   int64    556 to 4.1k
num_tokens_diff     int64    47 to 1.02k
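The rows below follow this schema and can be read with the `datasets` library. The sketch that follows is illustrative only: the Hub dataset ID is assumed to match the `source` value ("rasdani/github-patches") and the split name "train" is an assumption; both may differ for the actual dataset.

```python
# Minimal sketch for inspecting one record of the schema above.
# Assumptions: Hub ID taken from the `source` column, and a "train" split exists.
from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")

row = ds[0]
print(row["problem_id"], row["in_source_id"], row["num_tokens_prompt"])
print(row["prompt"][:300])             # issue statement plus partial code base
print(row["golden_diff"][:300])        # reference patch for the issue
print(row["verification_info"][:300])  # JSON with golden_diff, issue, before_files
```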

problem_id: gh_patches_debug_19529
source: rasdani/github-patches
task_type: git_diff
in_source_id: Parsl__parsl-127
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Do not raise `NotImplemented` In a few places we raise `NotImplemented`, which itself raises a `SyntaxError`; this should be `NotImplementedError`. </issue> <code> [start of parsl/app/app.py] 1 ''' 2 Parsl Apps 3 ========== 4 5 Here lies the definitions for the @App decorator and the APP classes. 6 The APP class encapsulates a generic leaf task that can be executed asynchronously. 7 8 ''' 9 import logging 10 from inspect import signature, Parameter 11 12 # Logging moved here in the PEP8 conformance fixes. 13 logger = logging.getLogger(__name__) 14 15 16 class AppBase (object): 17 """ 18 This is the base class that defines the two external facing functions that an App must define. 19 The __init__ () which is called when the interpretor sees the definition of the decorated 20 function, and the __call__ () which is invoked when a decorated function is called by the user. 21 22 """ 23 24 def __init__(self, func, executor, walltime=60, sites='all', cache=False, exec_type="bash"): 25 ''' Constructor for the APP object. 26 27 Args: 28 - func (function): Takes the function to be made into an App 29 - executor (executor): Executor for the execution resource 30 31 Kwargs: 32 - walltime (int) : Walltime in seconds for the app execution 33 - sites (str|list) : List of site names that this app could execute over. default is 'all' 34 - exec_type (string) : App type (bash|python) 35 - cache (Bool) : Enable caching of this app ? 36 37 Returns: 38 - APP object. 39 40 ''' 41 self.__name__ = func.__name__ 42 self.func = func 43 self.executor = executor 44 self.exec_type = exec_type 45 self.status = 'created' 46 self.sites = sites 47 self.cache = cache 48 49 sig = signature(func) 50 self.kwargs = {} 51 for s in sig.parameters: 52 if sig.parameters[s].default != Parameter.empty: 53 self.kwargs[s] = sig.parameters[s].default 54 55 self.stdout = sig.parameters['stdout'].default if 'stdout' in sig.parameters else None 56 self.stderr = sig.parameters['stderr'].default if 'stderr' in sig.parameters else None 57 self.inputs = sig.parameters['inputs'].default if 'inputs' in sig.parameters else [] 58 self.outputs = sig.parameters['outputs'].default if 'outputs' in sig.parameters else [] 59 60 def __call__(self, *args, **kwargs): 61 ''' The __call__ function must be implemented in the subclasses 62 ''' 63 raise NotImplemented 64 65 66 def app_wrapper(func): 67 68 def wrapper(*args, **kwargs): 69 logger.debug("App wrapper begins") 70 x = func(*args, **kwargs) 71 logger.debug("App wrapper ends") 72 return x 73 74 return wrapper 75 76 77 def App(apptype, executor, walltime=60, cache=False, sites='all'): 78 ''' The App decorator function 79 80 Args: 81 - apptype (string) : Apptype can be bash|python 82 - executor (Executor) : Executor object wrapping threads/process pools etc. 83 84 Kwargs: 85 - walltime (int) : Walltime for app in seconds, 86 default=60 87 - sites (str|List) : List of site names on which the app could execute 88 default='all' 89 - cache (Bool) : Enable caching of the app call 90 default=False 91 92 Returns: 93 An AppFactory object, which when called runs the apps through the executor. 
94 ''' 95 96 from parsl import APP_FACTORY_FACTORY 97 98 def Exec(f): 99 return APP_FACTORY_FACTORY.make(apptype, executor, f, 100 sites=sites, 101 cache=cache, 102 walltime=walltime) 103 104 return Exec 105 [end of parsl/app/app.py] [start of parsl/executors/threads.py] 1 import logging 2 import sys 3 import concurrent.futures as cf 4 from parsl.executors.base import ParslExecutor 5 6 logger = logging.getLogger(__name__) 7 8 9 class ThreadPoolExecutor(ParslExecutor): 10 ''' The thread pool executor 11 ''' 12 13 def __init__(self, max_workers=2, thread_name_prefix='', 14 execution_provider=None, config=None, **kwargs): 15 ''' Initialize the thread pool 16 Config options that are really used are : 17 18 config.sites.site.execution.options = {"maxThreads" : <int>, 19 "threadNamePrefix" : <string>} 20 21 Kwargs: 22 - max_workers (int) : Number of threads (Default=2) (keeping name workers/threads for backward compatibility) 23 - thread_name_prefix (string) : Thread name prefix (Only supported in python v3.6+ 24 - execution_provider (ep object) : This is ignored here 25 - config (dict): The config dict object for the site: 26 27 28 ''' 29 30 self._scaling_enabled = False 31 if not config: 32 config = {"execution": {}} 33 if "maxThreads" not in config["execution"]: 34 config["execution"]["maxThreads"] = max_workers 35 if "threadNamePrefix" not in config["execution"]: 36 config["execution"]["threadNamePrefix"] = thread_name_prefix 37 38 self.config = config 39 40 if sys.version_info > (3, 6): 41 self.executor = cf.ThreadPoolExecutor(max_workers=config["execution"]["maxThreads"], 42 thread_name_prefix=config["execution"]["threadNamePrefix"]) 43 else: 44 self.executor = cf.ThreadPoolExecutor(max_workers=config["execution"]["maxThreads"]) 45 46 @property 47 def scaling_enabled(self): 48 return self._scaling_enabled 49 50 def submit(self, *args, **kwargs): 51 ''' Submits work to the thread pool 52 This method is simply pass through and behaves like a submit call as described 53 here `Python docs: <https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor>`_ 54 55 Returns: 56 Future 57 ''' 58 59 return self.executor.submit(*args, **kwargs) 60 61 def scale_out(self, workers=1): 62 ''' Scales out the number of active workers by 1 63 This method is notImplemented for threads and will raise the error if called. 64 65 Raises: 66 NotImplemented exception 67 ''' 68 69 raise NotImplemented 70 71 def scale_in(self, workers=1): 72 ''' Scale in the number of active workers by 1 73 This method is notImplemented for threads and will raise the error if called. 74 75 Raises: 76 NotImplemented exception 77 ''' 78 79 raise NotImplemented 80 81 def shutdown(self, block=False): 82 ''' Shutdown the ThreadPool 83 This method is notImplemented for threads and will raise the error if called. 84 The interface documentation for IPP is `here <http://ipyparallel.readthedocs.io/en/latest/api/ipyparallel.html#ipyparallel.Client.shutdown>`_ 85 86 Kwargs: 87 - block (Bool): To block for confirmations or not 88 89 ''' 90 x = self.executor.shutdown(wait=block) 91 logger.debug("Done with executor shutdown") 92 return x 93 [end of parsl/executors/threads.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/parsl/app/app.py b/parsl/app/app.py --- a/parsl/app/app.py +++ b/parsl/app/app.py @@ -60,7 +60,7 @@ def __call__(self, *args, **kwargs): ''' The __call__ function must be implemented in the subclasses ''' - raise NotImplemented + raise NotImplementedError def app_wrapper(func): diff --git a/parsl/executors/threads.py b/parsl/executors/threads.py --- a/parsl/executors/threads.py +++ b/parsl/executors/threads.py @@ -66,7 +66,7 @@ NotImplemented exception ''' - raise NotImplemented + raise NotImplementedError def scale_in(self, workers=1): ''' Scale in the number of active workers by 1 @@ -76,7 +76,7 @@ NotImplemented exception ''' - raise NotImplemented + raise NotImplementedError def shutdown(self, block=False): ''' Shutdown the ThreadPool
{"golden_diff": "diff --git a/parsl/app/app.py b/parsl/app/app.py\n--- a/parsl/app/app.py\n+++ b/parsl/app/app.py\n@@ -60,7 +60,7 @@\n def __call__(self, *args, **kwargs):\n ''' The __call__ function must be implemented in the subclasses\n '''\n- raise NotImplemented\n+ raise NotImplementedError\n \n \n def app_wrapper(func):\ndiff --git a/parsl/executors/threads.py b/parsl/executors/threads.py\n--- a/parsl/executors/threads.py\n+++ b/parsl/executors/threads.py\n@@ -66,7 +66,7 @@\n NotImplemented exception\n '''\n \n- raise NotImplemented\n+ raise NotImplementedError\n \n def scale_in(self, workers=1):\n ''' Scale in the number of active workers by 1\n@@ -76,7 +76,7 @@\n NotImplemented exception\n '''\n \n- raise NotImplemented\n+ raise NotImplementedError\n \n def shutdown(self, block=False):\n ''' Shutdown the ThreadPool\n", "issue": "Do not raise `NotImplemented`\nIn a few places we raise `NotImplemented`, which itself raises a `SyntaxError`; this should be `NotImplementedError`.\n", "before_files": [{"content": "'''\nParsl Apps\n==========\n\nHere lies the definitions for the @App decorator and the APP classes.\nThe APP class encapsulates a generic leaf task that can be executed asynchronously.\n\n'''\nimport logging\nfrom inspect import signature, Parameter\n\n# Logging moved here in the PEP8 conformance fixes.\nlogger = logging.getLogger(__name__)\n\n\nclass AppBase (object):\n \"\"\"\n This is the base class that defines the two external facing functions that an App must define.\n The __init__ () which is called when the interpretor sees the definition of the decorated\n function, and the __call__ () which is invoked when a decorated function is called by the user.\n\n \"\"\"\n\n def __init__(self, func, executor, walltime=60, sites='all', cache=False, exec_type=\"bash\"):\n ''' Constructor for the APP object.\n\n Args:\n - func (function): Takes the function to be made into an App\n - executor (executor): Executor for the execution resource\n\n Kwargs:\n - walltime (int) : Walltime in seconds for the app execution\n - sites (str|list) : List of site names that this app could execute over. 
default is 'all'\n - exec_type (string) : App type (bash|python)\n - cache (Bool) : Enable caching of this app ?\n\n Returns:\n - APP object.\n\n '''\n self.__name__ = func.__name__\n self.func = func\n self.executor = executor\n self.exec_type = exec_type\n self.status = 'created'\n self.sites = sites\n self.cache = cache\n\n sig = signature(func)\n self.kwargs = {}\n for s in sig.parameters:\n if sig.parameters[s].default != Parameter.empty:\n self.kwargs[s] = sig.parameters[s].default\n\n self.stdout = sig.parameters['stdout'].default if 'stdout' in sig.parameters else None\n self.stderr = sig.parameters['stderr'].default if 'stderr' in sig.parameters else None\n self.inputs = sig.parameters['inputs'].default if 'inputs' in sig.parameters else []\n self.outputs = sig.parameters['outputs'].default if 'outputs' in sig.parameters else []\n\n def __call__(self, *args, **kwargs):\n ''' The __call__ function must be implemented in the subclasses\n '''\n raise NotImplemented\n\n\ndef app_wrapper(func):\n\n def wrapper(*args, **kwargs):\n logger.debug(\"App wrapper begins\")\n x = func(*args, **kwargs)\n logger.debug(\"App wrapper ends\")\n return x\n\n return wrapper\n\n\ndef App(apptype, executor, walltime=60, cache=False, sites='all'):\n ''' The App decorator function\n\n Args:\n - apptype (string) : Apptype can be bash|python\n - executor (Executor) : Executor object wrapping threads/process pools etc.\n\n Kwargs:\n - walltime (int) : Walltime for app in seconds,\n default=60\n - sites (str|List) : List of site names on which the app could execute\n default='all'\n - cache (Bool) : Enable caching of the app call\n default=False\n\n Returns:\n An AppFactory object, which when called runs the apps through the executor.\n '''\n\n from parsl import APP_FACTORY_FACTORY\n\n def Exec(f):\n return APP_FACTORY_FACTORY.make(apptype, executor, f,\n sites=sites,\n cache=cache,\n walltime=walltime)\n\n return Exec\n", "path": "parsl/app/app.py"}, {"content": "import logging\nimport sys\nimport concurrent.futures as cf\nfrom parsl.executors.base import ParslExecutor\n\nlogger = logging.getLogger(__name__)\n\n\nclass ThreadPoolExecutor(ParslExecutor):\n ''' The thread pool executor\n '''\n\n def __init__(self, max_workers=2, thread_name_prefix='',\n execution_provider=None, config=None, **kwargs):\n ''' Initialize the thread pool\n Config options that are really used are :\n\n config.sites.site.execution.options = {\"maxThreads\" : <int>,\n \"threadNamePrefix\" : <string>}\n\n Kwargs:\n - max_workers (int) : Number of threads (Default=2) (keeping name workers/threads for backward compatibility)\n - thread_name_prefix (string) : Thread name prefix (Only supported in python v3.6+\n - execution_provider (ep object) : This is ignored here\n - config (dict): The config dict object for the site:\n\n\n '''\n\n self._scaling_enabled = False\n if not config:\n config = {\"execution\": {}}\n if \"maxThreads\" not in config[\"execution\"]:\n config[\"execution\"][\"maxThreads\"] = max_workers\n if \"threadNamePrefix\" not in config[\"execution\"]:\n config[\"execution\"][\"threadNamePrefix\"] = thread_name_prefix\n\n self.config = config\n\n if sys.version_info > (3, 6):\n self.executor = cf.ThreadPoolExecutor(max_workers=config[\"execution\"][\"maxThreads\"],\n thread_name_prefix=config[\"execution\"][\"threadNamePrefix\"])\n else:\n self.executor = cf.ThreadPoolExecutor(max_workers=config[\"execution\"][\"maxThreads\"])\n\n @property\n def scaling_enabled(self):\n return self._scaling_enabled\n\n def submit(self, 
*args, **kwargs):\n ''' Submits work to the thread pool\n This method is simply pass through and behaves like a submit call as described\n here `Python docs: <https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor>`_\n\n Returns:\n Future\n '''\n\n return self.executor.submit(*args, **kwargs)\n\n def scale_out(self, workers=1):\n ''' Scales out the number of active workers by 1\n This method is notImplemented for threads and will raise the error if called.\n\n Raises:\n NotImplemented exception\n '''\n\n raise NotImplemented\n\n def scale_in(self, workers=1):\n ''' Scale in the number of active workers by 1\n This method is notImplemented for threads and will raise the error if called.\n\n Raises:\n NotImplemented exception\n '''\n\n raise NotImplemented\n\n def shutdown(self, block=False):\n ''' Shutdown the ThreadPool\n This method is notImplemented for threads and will raise the error if called.\n The interface documentation for IPP is `here <http://ipyparallel.readthedocs.io/en/latest/api/ipyparallel.html#ipyparallel.Client.shutdown>`_\n\n Kwargs:\n - block (Bool): To block for confirmations or not\n\n '''\n x = self.executor.shutdown(wait=block)\n logger.debug(\"Done with executor shutdown\")\n return x\n", "path": "parsl/executors/threads.py"}]}
num_tokens_prompt: 2,418
num_tokens_diff: 232

problem_id: gh_patches_debug_32571
source: rasdani/github-patches
task_type: git_diff
in_source_id: encode__httpx-1138
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Document the exception hierarchy We've put a stack of great work into this for 0.14 - let's also show it off. </issue> <code> [start of httpx/_exceptions.py] 1 """ 2 Our exception hierarchy: 3 4 * HTTPError 5 x RequestError 6 + TransportError 7 - TimeoutException 8 · ConnectTimeout 9 · ReadTimeout 10 · WriteTimeout 11 · PoolTimeout 12 - NetworkError 13 · ConnectError 14 · ReadError 15 · WriteError 16 · CloseError 17 - ProtocolError 18 · LocalProtocolError 19 · RemoteProtocolError 20 - ProxyError 21 - UnsupportedProtocol 22 + DecodingError 23 + TooManyRedirects 24 + RequestBodyUnavailable 25 x HTTPStatusError 26 * NotRedirectResponse 27 * CookieConflict 28 * StreamError 29 x StreamConsumed 30 x ResponseNotRead 31 x RequestNotRead 32 x ResponseClosed 33 """ 34 import contextlib 35 import typing 36 37 import httpcore 38 39 if typing.TYPE_CHECKING: 40 from ._models import Request, Response # pragma: nocover 41 42 43 class HTTPError(Exception): 44 """ 45 Base class for `RequestError` and `HTTPStatusError`. 46 47 Useful for `try...except` blocks when issuing a request, 48 and then calling .raise_for_status(). 49 50 For example: 51 52 try: 53 response = httpx.get("https://www.example.com") 54 response.raise_for_status() 55 except httpx.HTTPError as exc: 56 print(f"HTTP Exception for {exc.request.url} - {exc.message}") 57 """ 58 59 def __init__(self, message: str, *, request: "Request") -> None: 60 super().__init__(message) 61 self.request = request 62 63 64 class RequestError(HTTPError): 65 """ 66 Base class for all exceptions that may occur when issuing a `.request()`. 67 """ 68 69 def __init__(self, message: str, *, request: "Request") -> None: 70 super().__init__(message, request=request) 71 72 73 class TransportError(RequestError): 74 """ 75 Base class for all exceptions that are mapped from the httpcore API. 76 """ 77 78 79 # Timeout exceptions... 80 81 82 class TimeoutException(TransportError): 83 """ 84 The base class for timeout errors. 85 86 An operation has timed out. 87 """ 88 89 90 class ConnectTimeout(TimeoutException): 91 """ 92 Timed out while connecting to the host. 93 """ 94 95 96 class ReadTimeout(TimeoutException): 97 """ 98 Timed out while receiving data from the host. 99 """ 100 101 102 class WriteTimeout(TimeoutException): 103 """ 104 Timed out while sending data to the host. 105 """ 106 107 108 class PoolTimeout(TimeoutException): 109 """ 110 Timed out waiting to acquire a connection from the pool. 111 """ 112 113 114 # Core networking exceptions... 115 116 117 class NetworkError(TransportError): 118 """ 119 The base class for network-related errors. 120 121 An error occurred while interacting with the network. 122 """ 123 124 125 class ReadError(NetworkError): 126 """ 127 Failed to receive data from the network. 128 """ 129 130 131 class WriteError(NetworkError): 132 """ 133 Failed to send data through the network. 134 """ 135 136 137 class ConnectError(NetworkError): 138 """ 139 Failed to establish a connection. 140 """ 141 142 143 class CloseError(NetworkError): 144 """ 145 Failed to close a connection. 146 """ 147 148 149 # Other transport exceptions... 150 151 152 class ProxyError(TransportError): 153 """ 154 An error occurred while proxying a request. 155 """ 156 157 158 class UnsupportedProtocol(TransportError): 159 """ 160 Attempted to make a request to an unsupported protocol. 161 162 For example issuing a request to `ftp://www.example.com`. 
163 """ 164 165 166 class ProtocolError(TransportError): 167 """ 168 The protocol was violated. 169 """ 170 171 172 class LocalProtocolError(ProtocolError): 173 """ 174 A protocol was violated by the client. 175 176 For example if the user instantiated a `Request` instance explicitly, 177 failed to include the mandatory `Host:` header, and then issued it directly 178 using `client.send()`. 179 """ 180 181 182 class RemoteProtocolError(ProtocolError): 183 """ 184 The protocol was violated by the server. 185 186 For exaample, returning malformed HTTP. 187 """ 188 189 190 # Other request exceptions... 191 192 193 class DecodingError(RequestError): 194 """ 195 Decoding of the response failed. 196 """ 197 198 199 class TooManyRedirects(RequestError): 200 """ 201 Too many redirects. 202 """ 203 204 205 class RequestBodyUnavailable(RequestError): 206 """ 207 Had to send the request again, but the request body was streaming, and is 208 no longer available. 209 """ 210 211 212 # Client errors 213 214 215 class HTTPStatusError(HTTPError): 216 """ 217 Response sent an error HTTP status. 218 219 May be raised when calling `response.raise_for_status()` 220 """ 221 222 def __init__( 223 self, message: str, *, request: "Request", response: "Response" 224 ) -> None: 225 super().__init__(message, request=request) 226 self.response = response 227 228 229 class NotRedirectResponse(Exception): 230 """ 231 Response was not a redirect response. 232 233 May be raised if `response.next()` is called without first 234 properly checking `response.is_redirect`. 235 """ 236 237 def __init__(self, message: str) -> None: 238 super().__init__(message) 239 240 241 class CookieConflict(Exception): 242 """ 243 Attempted to lookup a cookie by name, but multiple cookies existed. 244 245 Can occur when calling `response.cookies.get(...)`. 246 """ 247 248 def __init__(self, message: str) -> None: 249 super().__init__(message) 250 251 252 # Stream exceptions... 253 254 # These may occur as the result of a programming error, by accessing 255 # the request/response stream in an invalid manner. 256 257 258 class StreamError(Exception): 259 """ 260 The base class for stream exceptions. 261 262 The developer made an error in accessing the request stream in 263 an invalid way. 264 """ 265 266 def __init__(self, message: str) -> None: 267 super().__init__(message) 268 269 270 class StreamConsumed(StreamError): 271 """ 272 Attempted to read or stream response content, but the content has already 273 been streamed. 274 """ 275 276 def __init__(self) -> None: 277 message = ( 278 "Attempted to read or stream response content, but the content has " 279 "already been streamed." 280 ) 281 super().__init__(message) 282 283 284 class ResponseNotRead(StreamError): 285 """ 286 Attempted to access response content, without having called `read()` 287 after a streaming response. 288 """ 289 290 def __init__(self) -> None: 291 message = ( 292 "Attempted to access response content, without having called `read()` " 293 "after a streaming response." 294 ) 295 super().__init__(message) 296 297 298 class RequestNotRead(StreamError): 299 """ 300 Attempted to access request content, without having called `read()`. 301 """ 302 303 def __init__(self) -> None: 304 message = "Attempted to access request content, without having called `read()`." 305 super().__init__(message) 306 307 308 class ResponseClosed(StreamError): 309 """ 310 Attempted to read or stream response content, but the request has been 311 closed. 
312 """ 313 314 def __init__(self) -> None: 315 message = ( 316 "Attempted to read or stream response content, but the request has " 317 "been closed." 318 ) 319 super().__init__(message) 320 321 322 # The `InvalidURL` class is no longer required. It was being used to enforce only 323 # 'http'/'https' URLs being requested, but is now treated instead at the 324 # transport layer using `UnsupportedProtocol()`.` 325 326 # We are currently still exposing this class, but it will be removed in 1.0. 327 InvalidURL = UnsupportedProtocol 328 329 330 @contextlib.contextmanager 331 def map_exceptions( 332 mapping: typing.Mapping[typing.Type[Exception], typing.Type[Exception]], 333 **kwargs: typing.Any, 334 ) -> typing.Iterator[None]: 335 try: 336 yield 337 except Exception as exc: 338 mapped_exc = None 339 340 for from_exc, to_exc in mapping.items(): 341 if not isinstance(exc, from_exc): 342 continue 343 # We want to map to the most specific exception we can find. 344 # Eg if `exc` is an `httpcore.ReadTimeout`, we want to map to 345 # `httpx.ReadTimeout`, not just `httpx.TimeoutException`. 346 if mapped_exc is None or issubclass(to_exc, mapped_exc): 347 mapped_exc = to_exc 348 349 if mapped_exc is None: 350 raise 351 352 message = str(exc) 353 raise mapped_exc(message, **kwargs) from None # type: ignore 354 355 356 HTTPCORE_EXC_MAP = { 357 httpcore.TimeoutException: TimeoutException, 358 httpcore.ConnectTimeout: ConnectTimeout, 359 httpcore.ReadTimeout: ReadTimeout, 360 httpcore.WriteTimeout: WriteTimeout, 361 httpcore.PoolTimeout: PoolTimeout, 362 httpcore.NetworkError: NetworkError, 363 httpcore.ConnectError: ConnectError, 364 httpcore.ReadError: ReadError, 365 httpcore.WriteError: WriteError, 366 httpcore.CloseError: CloseError, 367 httpcore.ProxyError: ProxyError, 368 httpcore.UnsupportedProtocol: UnsupportedProtocol, 369 httpcore.ProtocolError: ProtocolError, 370 httpcore.LocalProtocolError: LocalProtocolError, 371 httpcore.RemoteProtocolError: RemoteProtocolError, 372 } 373 [end of httpx/_exceptions.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/httpx/_exceptions.py b/httpx/_exceptions.py --- a/httpx/_exceptions.py +++ b/httpx/_exceptions.py @@ -45,15 +45,17 @@ Base class for `RequestError` and `HTTPStatusError`. Useful for `try...except` blocks when issuing a request, - and then calling .raise_for_status(). + and then calling `.raise_for_status()`. For example: + ``` try: response = httpx.get("https://www.example.com") response.raise_for_status() except httpx.HTTPError as exc: print(f"HTTP Exception for {exc.request.url} - {exc.message}") + ``` """ def __init__(self, message: str, *, request: "Request") -> None: @@ -72,7 +74,9 @@ class TransportError(RequestError): """ - Base class for all exceptions that are mapped from the httpcore API. + Base class for all exceptions that occur at the level of the Transport API. + + All of these exceptions also have an equivelent mapping in `httpcore`. """ @@ -151,7 +155,7 @@ class ProxyError(TransportError): """ - An error occurred while proxying a request. + An error occurred while establishing a proxy connection. """ @@ -192,7 +196,7 @@ class DecodingError(RequestError): """ - Decoding of the response failed. + Decoding of the response failed, due to a malformed encoding. """ @@ -214,7 +218,7 @@ class HTTPStatusError(HTTPError): """ - Response sent an error HTTP status. + The response had an error HTTP status of 4xx or 5xx. May be raised when calling `response.raise_for_status()` """
{"golden_diff": "diff --git a/httpx/_exceptions.py b/httpx/_exceptions.py\n--- a/httpx/_exceptions.py\n+++ b/httpx/_exceptions.py\n@@ -45,15 +45,17 @@\n Base class for `RequestError` and `HTTPStatusError`.\n \n Useful for `try...except` blocks when issuing a request,\n- and then calling .raise_for_status().\n+ and then calling `.raise_for_status()`.\n \n For example:\n \n+ ```\n try:\n response = httpx.get(\"https://www.example.com\")\n response.raise_for_status()\n except httpx.HTTPError as exc:\n print(f\"HTTP Exception for {exc.request.url} - {exc.message}\")\n+ ```\n \"\"\"\n \n def __init__(self, message: str, *, request: \"Request\") -> None:\n@@ -72,7 +74,9 @@\n \n class TransportError(RequestError):\n \"\"\"\n- Base class for all exceptions that are mapped from the httpcore API.\n+ Base class for all exceptions that occur at the level of the Transport API.\n+\n+ All of these exceptions also have an equivelent mapping in `httpcore`.\n \"\"\"\n \n \n@@ -151,7 +155,7 @@\n \n class ProxyError(TransportError):\n \"\"\"\n- An error occurred while proxying a request.\n+ An error occurred while establishing a proxy connection.\n \"\"\"\n \n \n@@ -192,7 +196,7 @@\n \n class DecodingError(RequestError):\n \"\"\"\n- Decoding of the response failed.\n+ Decoding of the response failed, due to a malformed encoding.\n \"\"\"\n \n \n@@ -214,7 +218,7 @@\n \n class HTTPStatusError(HTTPError):\n \"\"\"\n- Response sent an error HTTP status.\n+ The response had an error HTTP status of 4xx or 5xx.\n \n May be raised when calling `response.raise_for_status()`\n \"\"\"\n", "issue": "Document the exception hierarchy \nWe've put a stack of great work into this for 0.14 - let's also show it off.\n", "before_files": [{"content": "\"\"\"\nOur exception hierarchy:\n\n* HTTPError\n x RequestError\n + TransportError\n - TimeoutException\n \u00b7 ConnectTimeout\n \u00b7 ReadTimeout\n \u00b7 WriteTimeout\n \u00b7 PoolTimeout\n - NetworkError\n \u00b7 ConnectError\n \u00b7 ReadError\n \u00b7 WriteError\n \u00b7 CloseError\n - ProtocolError\n \u00b7 LocalProtocolError\n \u00b7 RemoteProtocolError\n - ProxyError\n - UnsupportedProtocol\n + DecodingError\n + TooManyRedirects\n + RequestBodyUnavailable\n x HTTPStatusError\n* NotRedirectResponse\n* CookieConflict\n* StreamError\n x StreamConsumed\n x ResponseNotRead\n x RequestNotRead\n x ResponseClosed\n\"\"\"\nimport contextlib\nimport typing\n\nimport httpcore\n\nif typing.TYPE_CHECKING:\n from ._models import Request, Response # pragma: nocover\n\n\nclass HTTPError(Exception):\n \"\"\"\n Base class for `RequestError` and `HTTPStatusError`.\n\n Useful for `try...except` blocks when issuing a request,\n and then calling .raise_for_status().\n\n For example:\n\n try:\n response = httpx.get(\"https://www.example.com\")\n response.raise_for_status()\n except httpx.HTTPError as exc:\n print(f\"HTTP Exception for {exc.request.url} - {exc.message}\")\n \"\"\"\n\n def __init__(self, message: str, *, request: \"Request\") -> None:\n super().__init__(message)\n self.request = request\n\n\nclass RequestError(HTTPError):\n \"\"\"\n Base class for all exceptions that may occur when issuing a `.request()`.\n \"\"\"\n\n def __init__(self, message: str, *, request: \"Request\") -> None:\n super().__init__(message, request=request)\n\n\nclass TransportError(RequestError):\n \"\"\"\n Base class for all exceptions that are mapped from the httpcore API.\n \"\"\"\n\n\n# Timeout exceptions...\n\n\nclass TimeoutException(TransportError):\n \"\"\"\n The base class for timeout errors.\n\n An 
operation has timed out.\n \"\"\"\n\n\nclass ConnectTimeout(TimeoutException):\n \"\"\"\n Timed out while connecting to the host.\n \"\"\"\n\n\nclass ReadTimeout(TimeoutException):\n \"\"\"\n Timed out while receiving data from the host.\n \"\"\"\n\n\nclass WriteTimeout(TimeoutException):\n \"\"\"\n Timed out while sending data to the host.\n \"\"\"\n\n\nclass PoolTimeout(TimeoutException):\n \"\"\"\n Timed out waiting to acquire a connection from the pool.\n \"\"\"\n\n\n# Core networking exceptions...\n\n\nclass NetworkError(TransportError):\n \"\"\"\n The base class for network-related errors.\n\n An error occurred while interacting with the network.\n \"\"\"\n\n\nclass ReadError(NetworkError):\n \"\"\"\n Failed to receive data from the network.\n \"\"\"\n\n\nclass WriteError(NetworkError):\n \"\"\"\n Failed to send data through the network.\n \"\"\"\n\n\nclass ConnectError(NetworkError):\n \"\"\"\n Failed to establish a connection.\n \"\"\"\n\n\nclass CloseError(NetworkError):\n \"\"\"\n Failed to close a connection.\n \"\"\"\n\n\n# Other transport exceptions...\n\n\nclass ProxyError(TransportError):\n \"\"\"\n An error occurred while proxying a request.\n \"\"\"\n\n\nclass UnsupportedProtocol(TransportError):\n \"\"\"\n Attempted to make a request to an unsupported protocol.\n\n For example issuing a request to `ftp://www.example.com`.\n \"\"\"\n\n\nclass ProtocolError(TransportError):\n \"\"\"\n The protocol was violated.\n \"\"\"\n\n\nclass LocalProtocolError(ProtocolError):\n \"\"\"\n A protocol was violated by the client.\n\n For example if the user instantiated a `Request` instance explicitly,\n failed to include the mandatory `Host:` header, and then issued it directly\n using `client.send()`.\n \"\"\"\n\n\nclass RemoteProtocolError(ProtocolError):\n \"\"\"\n The protocol was violated by the server.\n\n For exaample, returning malformed HTTP.\n \"\"\"\n\n\n# Other request exceptions...\n\n\nclass DecodingError(RequestError):\n \"\"\"\n Decoding of the response failed.\n \"\"\"\n\n\nclass TooManyRedirects(RequestError):\n \"\"\"\n Too many redirects.\n \"\"\"\n\n\nclass RequestBodyUnavailable(RequestError):\n \"\"\"\n Had to send the request again, but the request body was streaming, and is\n no longer available.\n \"\"\"\n\n\n# Client errors\n\n\nclass HTTPStatusError(HTTPError):\n \"\"\"\n Response sent an error HTTP status.\n\n May be raised when calling `response.raise_for_status()`\n \"\"\"\n\n def __init__(\n self, message: str, *, request: \"Request\", response: \"Response\"\n ) -> None:\n super().__init__(message, request=request)\n self.response = response\n\n\nclass NotRedirectResponse(Exception):\n \"\"\"\n Response was not a redirect response.\n\n May be raised if `response.next()` is called without first\n properly checking `response.is_redirect`.\n \"\"\"\n\n def __init__(self, message: str) -> None:\n super().__init__(message)\n\n\nclass CookieConflict(Exception):\n \"\"\"\n Attempted to lookup a cookie by name, but multiple cookies existed.\n\n Can occur when calling `response.cookies.get(...)`.\n \"\"\"\n\n def __init__(self, message: str) -> None:\n super().__init__(message)\n\n\n# Stream exceptions...\n\n# These may occur as the result of a programming error, by accessing\n# the request/response stream in an invalid manner.\n\n\nclass StreamError(Exception):\n \"\"\"\n The base class for stream exceptions.\n\n The developer made an error in accessing the request stream in\n an invalid way.\n \"\"\"\n\n def __init__(self, message: str) -> None:\n 
super().__init__(message)\n\n\nclass StreamConsumed(StreamError):\n \"\"\"\n Attempted to read or stream response content, but the content has already\n been streamed.\n \"\"\"\n\n def __init__(self) -> None:\n message = (\n \"Attempted to read or stream response content, but the content has \"\n \"already been streamed.\"\n )\n super().__init__(message)\n\n\nclass ResponseNotRead(StreamError):\n \"\"\"\n Attempted to access response content, without having called `read()`\n after a streaming response.\n \"\"\"\n\n def __init__(self) -> None:\n message = (\n \"Attempted to access response content, without having called `read()` \"\n \"after a streaming response.\"\n )\n super().__init__(message)\n\n\nclass RequestNotRead(StreamError):\n \"\"\"\n Attempted to access request content, without having called `read()`.\n \"\"\"\n\n def __init__(self) -> None:\n message = \"Attempted to access request content, without having called `read()`.\"\n super().__init__(message)\n\n\nclass ResponseClosed(StreamError):\n \"\"\"\n Attempted to read or stream response content, but the request has been\n closed.\n \"\"\"\n\n def __init__(self) -> None:\n message = (\n \"Attempted to read or stream response content, but the request has \"\n \"been closed.\"\n )\n super().__init__(message)\n\n\n# The `InvalidURL` class is no longer required. It was being used to enforce only\n# 'http'/'https' URLs being requested, but is now treated instead at the\n# transport layer using `UnsupportedProtocol()`.`\n\n# We are currently still exposing this class, but it will be removed in 1.0.\nInvalidURL = UnsupportedProtocol\n\n\[email protected]\ndef map_exceptions(\n mapping: typing.Mapping[typing.Type[Exception], typing.Type[Exception]],\n **kwargs: typing.Any,\n) -> typing.Iterator[None]:\n try:\n yield\n except Exception as exc:\n mapped_exc = None\n\n for from_exc, to_exc in mapping.items():\n if not isinstance(exc, from_exc):\n continue\n # We want to map to the most specific exception we can find.\n # Eg if `exc` is an `httpcore.ReadTimeout`, we want to map to\n # `httpx.ReadTimeout`, not just `httpx.TimeoutException`.\n if mapped_exc is None or issubclass(to_exc, mapped_exc):\n mapped_exc = to_exc\n\n if mapped_exc is None:\n raise\n\n message = str(exc)\n raise mapped_exc(message, **kwargs) from None # type: ignore\n\n\nHTTPCORE_EXC_MAP = {\n httpcore.TimeoutException: TimeoutException,\n httpcore.ConnectTimeout: ConnectTimeout,\n httpcore.ReadTimeout: ReadTimeout,\n httpcore.WriteTimeout: WriteTimeout,\n httpcore.PoolTimeout: PoolTimeout,\n httpcore.NetworkError: NetworkError,\n httpcore.ConnectError: ConnectError,\n httpcore.ReadError: ReadError,\n httpcore.WriteError: WriteError,\n httpcore.CloseError: CloseError,\n httpcore.ProxyError: ProxyError,\n httpcore.UnsupportedProtocol: UnsupportedProtocol,\n httpcore.ProtocolError: ProtocolError,\n httpcore.LocalProtocolError: LocalProtocolError,\n httpcore.RemoteProtocolError: RemoteProtocolError,\n}\n", "path": "httpx/_exceptions.py"}]}
num_tokens_prompt: 3,534
num_tokens_diff: 422

problem_id: gh_patches_debug_15149
source: rasdani/github-patches
task_type: git_diff
in_source_id: apache__airflow-29518
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> KubernetesExecutor leaves failed pods due to deepcopy issue with Google providers ### Apache Airflow version Other Airflow 2 version (please specify below) ### What happened With Airflow 2.3 and 2.4 there appears to be a bug in the KubernetesExecutor when used in conjunction with the Google airflow providers. This bug does not affect Airflow 2.2 due to the pip version requirements. The bug specifically presents itself when using nearly any Google provider operator. During the pod lifecycle, all is well until the executor in the pod starts to clean up following a successful run. Airflow itself still see's the task marked as a success, but in Kubernetes, while the task is finishing up after reporting status, it actually crashes and puts the pod into a Failed state silently: ``` Traceback (most recent call last): File "/home/airflow/.local/bin/airflow", line 8, in <module> sys.exit(main()) File "/home/airflow/.local/lib/python3.9/site-packages/airflow/__main__.py", line 39, in main args.func(args) File "/home/airflow/.local/lib/python3.9/site-packages/airflow/cli/cli_parser.py", line 52, in command return func(*args, **kwargs) File "/home/airflow/.local/lib/python3.9/site-packages/airflow/utils/cli.py", line 103, in wrapper return f(*args, **kwargs) File "/home/airflow/.local/lib/python3.9/site-packages/airflow/cli/commands/task_command.py", line 382, in task_run _run_task_by_selected_method(args, dag, ti) File "/home/airflow/.local/lib/python3.9/site-packages/airflow/cli/commands/task_command.py", line 189, in _run_task_by_selected_method _run_task_by_local_task_job(args, ti) File "/home/airflow/.local/lib/python3.9/site-packages/airflow/cli/commands/task_command.py", line 247, in _run_task_by_local_task_job run_job.run() File "/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/base_job.py", line 247, in run self._execute() File "/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/local_task_job.py", line 137, in _execute self.handle_task_exit(return_code) File "/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/local_task_job.py", line 168, in handle_task_exit self._run_mini_scheduler_on_child_tasks() File "/home/airflow/.local/lib/python3.9/site-packages/airflow/utils/session.py", line 75, in wrapper return func(*args, session=session, **kwargs) File "/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/local_task_job.py", line 253, in _run_mini_scheduler_on_child_tasks partial_dag = task.dag.partial_subset( File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/dag.py", line 2188, in partial_subset dag.task_dict = { File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/dag.py", line 2189, in <dictcomp> t.task_id: _deepcopy_task(t) File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/dag.py", line 2186, in _deepcopy_task return copy.deepcopy(t, memo) File "/usr/local/lib/python3.9/copy.py", line 153, in deepcopy y = copier(memo) File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/baseoperator.py", line 1163, in __deepcopy__ setattr(result, k, copy.deepcopy(v, memo)) File "/usr/local/lib/python3.9/copy.py", line 172, in deepcopy y = _reconstruct(x, memo, *rv) File "/usr/local/lib/python3.9/copy.py", line 264, in _reconstruct y = func(*args) File "/usr/local/lib/python3.9/enum.py", line 384, in __call__ return cls.__new__(cls, value) File "/usr/local/lib/python3.9/enum.py", line 
702, in __new__ raise ve_exc ValueError: <object object at 0x7f570181a3c0> is not a valid _MethodDefault ``` Based on a quick look, it appears to be related to the default argument that Google is using in its operators which happens to be an Enum, and fails during a deepcopy at the end of the task. Example operator that is affected: https://github.com/apache/airflow/blob/403ed7163f3431deb7fc21108e1743385e139907/airflow/providers/google/cloud/hooks/dataproc.py#L753 Reference to the Google Python API core which has the Enum causing the problem: https://github.com/googleapis/python-api-core/blob/main/google/api_core/gapic_v1/method.py#L31 ### What you think should happen instead Kubernetes pods should succeed, be marked as `Completed`, and then be gracefully terminated. ### How to reproduce Use any `apache-airflow-providers-google` >= 7.0.0 which includes `google-api-core` >= 2.2.2. Run a DAG with a task which uses any of the Google operators which have `_MethodDefault` as a default argument. ### Operating System Debian GNU/Linux 11 (bullseye) ### Versions of Apache Airflow Providers apache-airflow-providers-amazon==6.0.0 apache-airflow-providers-apache-hive==5.0.0 apache-airflow-providers-celery==3.0.0 apache-airflow-providers-cncf-kubernetes==4.4.0 apache-airflow-providers-common-sql==1.3.1 apache-airflow-providers-docker==3.2.0 apache-airflow-providers-elasticsearch==4.2.1 apache-airflow-providers-ftp==3.1.0 apache-airflow-providers-google==8.4.0 apache-airflow-providers-grpc==3.0.0 apache-airflow-providers-hashicorp==3.1.0 apache-airflow-providers-http==4.0.0 apache-airflow-providers-imap==3.0.0 apache-airflow-providers-microsoft-azure==4.3.0 apache-airflow-providers-mysql==3.2.1 apache-airflow-providers-odbc==3.1.2 apache-airflow-providers-postgres==5.2.2 apache-airflow-providers-presto==4.2.0 apache-airflow-providers-redis==3.0.0 apache-airflow-providers-sendgrid==3.0.0 apache-airflow-providers-sftp==4.1.0 apache-airflow-providers-slack==6.0.0 apache-airflow-providers-sqlite==3.2.1 apache-airflow-providers-ssh==3.2.0 ### Deployment Other 3rd-party Helm chart ### Deployment details _No response_ ### Anything else _No response_ ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md) </issue> <code> [start of airflow/providers/google/cloud/operators/cloud_base.py] 1 # 2 # Licensed to the Apache Software Foundation (ASF) under one 3 # or more contributor license agreements. See the NOTICE file 4 # distributed with this work for additional information 5 # regarding copyright ownership. The ASF licenses this file 6 # to you under the Apache License, Version 2.0 (the 7 # "License"); you may not use this file except in compliance 8 # with the License. You may obtain a copy of the License at 9 # 10 # http://www.apache.org/licenses/LICENSE-2.0 11 # 12 # Unless required by applicable law or agreed to in writing, 13 # software distributed under the License is distributed on an 14 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 15 # KIND, either express or implied. See the License for the 16 # specific language governing permissions and limitations 17 # under the License. 
18 """This module contains a Google API base operator.""" 19 from __future__ import annotations 20 21 from airflow.models import BaseOperator 22 23 24 class GoogleCloudBaseOperator(BaseOperator): 25 """ 26 Abstract base class that takes care of common specifics of the operators built 27 on top of Google API client libraries. 28 """ 29 30 pass 31 [end of airflow/providers/google/cloud/operators/cloud_base.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/airflow/providers/google/cloud/operators/cloud_base.py b/airflow/providers/google/cloud/operators/cloud_base.py --- a/airflow/providers/google/cloud/operators/cloud_base.py +++ b/airflow/providers/google/cloud/operators/cloud_base.py @@ -18,6 +18,8 @@ """This module contains a Google API base operator.""" from __future__ import annotations +from google.api_core.gapic_v1.method import DEFAULT + from airflow.models import BaseOperator @@ -27,4 +29,11 @@ on top of Google API client libraries. """ - pass + def __deepcopy__(self, memo): + """ + Updating the memo to fix the non-copyable global constant. + This constant can be specified in operator parameters as a retry configuration to indicate a default. + See https://github.com/apache/airflow/issues/28751 for details. + """ + memo[id(DEFAULT)] = DEFAULT + return super().__deepcopy__(memo)
{"golden_diff": "diff --git a/airflow/providers/google/cloud/operators/cloud_base.py b/airflow/providers/google/cloud/operators/cloud_base.py\n--- a/airflow/providers/google/cloud/operators/cloud_base.py\n+++ b/airflow/providers/google/cloud/operators/cloud_base.py\n@@ -18,6 +18,8 @@\n \"\"\"This module contains a Google API base operator.\"\"\"\n from __future__ import annotations\n \n+from google.api_core.gapic_v1.method import DEFAULT\n+\n from airflow.models import BaseOperator\n \n \n@@ -27,4 +29,11 @@\n on top of Google API client libraries.\n \"\"\"\n \n- pass\n+ def __deepcopy__(self, memo):\n+ \"\"\"\n+ Updating the memo to fix the non-copyable global constant.\n+ This constant can be specified in operator parameters as a retry configuration to indicate a default.\n+ See https://github.com/apache/airflow/issues/28751 for details.\n+ \"\"\"\n+ memo[id(DEFAULT)] = DEFAULT\n+ return super().__deepcopy__(memo)\n", "issue": "KubernetesExecutor leaves failed pods due to deepcopy issue with Google providers\n### Apache Airflow version\r\n\r\nOther Airflow 2 version (please specify below)\r\n\r\n### What happened\r\n\r\nWith Airflow 2.3 and 2.4 there appears to be a bug in the KubernetesExecutor when used in conjunction with the Google airflow providers. This bug does not affect Airflow 2.2 due to the pip version requirements.\r\n\r\nThe bug specifically presents itself when using nearly any Google provider operator. During the pod lifecycle, all is well until the executor in the pod starts to clean up following a successful run. Airflow itself still see's the task marked as a success, but in Kubernetes, while the task is finishing up after reporting status, it actually crashes and puts the pod into a Failed state silently:\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/airflow/.local/bin/airflow\", line 8, in <module>\r\n sys.exit(main())\r\n File \"/home/airflow/.local/lib/python3.9/site-packages/airflow/__main__.py\", line 39, in main\r\n args.func(args)\r\n File \"/home/airflow/.local/lib/python3.9/site-packages/airflow/cli/cli_parser.py\", line 52, in command\r\n return func(*args, **kwargs)\r\n File \"/home/airflow/.local/lib/python3.9/site-packages/airflow/utils/cli.py\", line 103, in wrapper\r\n return f(*args, **kwargs)\r\n File \"/home/airflow/.local/lib/python3.9/site-packages/airflow/cli/commands/task_command.py\", line 382, in task_run\r\n _run_task_by_selected_method(args, dag, ti)\r\n File \"/home/airflow/.local/lib/python3.9/site-packages/airflow/cli/commands/task_command.py\", line 189, in _run_task_by_selected_method\r\n _run_task_by_local_task_job(args, ti)\r\n File \"/home/airflow/.local/lib/python3.9/site-packages/airflow/cli/commands/task_command.py\", line 247, in _run_task_by_local_task_job\r\n run_job.run()\r\n File \"/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/base_job.py\", line 247, in run\r\n self._execute()\r\n File \"/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/local_task_job.py\", line 137, in _execute\r\n self.handle_task_exit(return_code)\r\n File \"/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/local_task_job.py\", line 168, in handle_task_exit\r\n self._run_mini_scheduler_on_child_tasks()\r\n File \"/home/airflow/.local/lib/python3.9/site-packages/airflow/utils/session.py\", line 75, in wrapper\r\n return func(*args, session=session, **kwargs)\r\n File \"/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/local_task_job.py\", line 253, in _run_mini_scheduler_on_child_tasks\r\n 
partial_dag = task.dag.partial_subset(\r\n File \"/home/airflow/.local/lib/python3.9/site-packages/airflow/models/dag.py\", line 2188, in partial_subset\r\n dag.task_dict = {\r\n File \"/home/airflow/.local/lib/python3.9/site-packages/airflow/models/dag.py\", line 2189, in <dictcomp>\r\n t.task_id: _deepcopy_task(t)\r\n File \"/home/airflow/.local/lib/python3.9/site-packages/airflow/models/dag.py\", line 2186, in _deepcopy_task\r\n return copy.deepcopy(t, memo)\r\n File \"/usr/local/lib/python3.9/copy.py\", line 153, in deepcopy\r\n y = copier(memo)\r\n File \"/home/airflow/.local/lib/python3.9/site-packages/airflow/models/baseoperator.py\", line 1163, in __deepcopy__\r\n setattr(result, k, copy.deepcopy(v, memo))\r\n File \"/usr/local/lib/python3.9/copy.py\", line 172, in deepcopy\r\n y = _reconstruct(x, memo, *rv)\r\n File \"/usr/local/lib/python3.9/copy.py\", line 264, in _reconstruct\r\n y = func(*args)\r\n File \"/usr/local/lib/python3.9/enum.py\", line 384, in __call__\r\n return cls.__new__(cls, value)\r\n File \"/usr/local/lib/python3.9/enum.py\", line 702, in __new__\r\n raise ve_exc\r\nValueError: <object object at 0x7f570181a3c0> is not a valid _MethodDefault\r\n```\r\n\r\nBased on a quick look, it appears to be related to the default argument that Google is using in its operators which happens to be an Enum, and fails during a deepcopy at the end of the task.\r\n\r\nExample operator that is affected: https://github.com/apache/airflow/blob/403ed7163f3431deb7fc21108e1743385e139907/airflow/providers/google/cloud/hooks/dataproc.py#L753\r\nReference to the Google Python API core which has the Enum causing the problem: https://github.com/googleapis/python-api-core/blob/main/google/api_core/gapic_v1/method.py#L31\r\n\r\n### What you think should happen instead\r\n\r\nKubernetes pods should succeed, be marked as `Completed`, and then be gracefully terminated.\r\n\r\n### How to reproduce\r\n\r\nUse any `apache-airflow-providers-google` >= 7.0.0 which includes `google-api-core` >= 2.2.2. 
Run a DAG with a task which uses any of the Google operators which have `_MethodDefault` as a default argument.\r\n\r\n### Operating System\r\n\r\nDebian GNU/Linux 11 (bullseye)\r\n\r\n### Versions of Apache Airflow Providers\r\n\r\napache-airflow-providers-amazon==6.0.0\r\napache-airflow-providers-apache-hive==5.0.0\r\napache-airflow-providers-celery==3.0.0\r\napache-airflow-providers-cncf-kubernetes==4.4.0\r\napache-airflow-providers-common-sql==1.3.1\r\napache-airflow-providers-docker==3.2.0\r\napache-airflow-providers-elasticsearch==4.2.1\r\napache-airflow-providers-ftp==3.1.0\r\napache-airflow-providers-google==8.4.0\r\napache-airflow-providers-grpc==3.0.0\r\napache-airflow-providers-hashicorp==3.1.0\r\napache-airflow-providers-http==4.0.0\r\napache-airflow-providers-imap==3.0.0\r\napache-airflow-providers-microsoft-azure==4.3.0\r\napache-airflow-providers-mysql==3.2.1\r\napache-airflow-providers-odbc==3.1.2\r\napache-airflow-providers-postgres==5.2.2\r\napache-airflow-providers-presto==4.2.0\r\napache-airflow-providers-redis==3.0.0\r\napache-airflow-providers-sendgrid==3.0.0\r\napache-airflow-providers-sftp==4.1.0\r\napache-airflow-providers-slack==6.0.0\r\napache-airflow-providers-sqlite==3.2.1\r\napache-airflow-providers-ssh==3.2.0\r\n\r\n### Deployment\r\n\r\nOther 3rd-party Helm chart\r\n\r\n### Deployment details\r\n\r\n_No response_\r\n\r\n### Anything else\r\n\r\n_No response_\r\n\r\n### Are you willing to submit PR?\r\n\r\n- [X] Yes I am willing to submit a PR!\r\n\r\n### Code of Conduct\r\n\r\n- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)\r\n\n", "before_files": [{"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"This module contains a Google API base operator.\"\"\"\nfrom __future__ import annotations\n\nfrom airflow.models import BaseOperator\n\n\nclass GoogleCloudBaseOperator(BaseOperator):\n \"\"\"\n Abstract base class that takes care of common specifics of the operators built\n on top of Google API client libraries.\n \"\"\"\n\n pass\n", "path": "airflow/providers/google/cloud/operators/cloud_base.py"}]}
num_tokens_prompt: 2,597
num_tokens_diff: 223

problem_id: gh_patches_debug_19372
source: rasdani/github-patches
task_type: git_diff
in_source_id: aws-powertools__powertools-lambda-python-1534
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Static typing: aws_lambda_powertools.logging.utils.copy_config_to_registered_loggers argument log_level should accept int ### Static type checker used mypy (project's standard) ### AWS Lambda function runtime 3.9 ### AWS Lambda Powertools for Python version latest ### Static type checker info ``` $ mypy repro.py repro.py:5: error: Argument "log_level" to "copy_config_to_registered_loggers" has incompatible type "int"; expected "Optional[str]" Found 1 error in 1 file (checked 1 source file) ``` ``` mypy --version mypy 0.971 (compiled: yes) ``` ### Code snippet ```python from aws_lambda_powertools.logging import utils from aws_lambda_powertools import Logger logger = Logger() utils.copy_config_to_registered_loggers(source_logger=logger, log_level=30) ``` ### Possible Solution Update signature to accept `Union[str, int]` </issue> <code> [start of aws_lambda_powertools/logging/utils.py] 1 import logging 2 from typing import Callable, List, Optional, Set, Union 3 4 from .logger import Logger 5 6 PACKAGE_LOGGER = "aws_lambda_powertools" 7 8 9 def copy_config_to_registered_loggers( 10 source_logger: Logger, 11 log_level: Optional[str] = None, 12 exclude: Optional[Set[str]] = None, 13 include: Optional[Set[str]] = None, 14 ) -> None: 15 16 """Copies source Logger level and handler to all registered loggers for consistent formatting. 17 18 Parameters 19 ---------- 20 source_logger : Logger 21 Powertools Logger to copy configuration from 22 log_level : str, optional 23 Logging level to set to registered loggers, by default uses source_logger logging level 24 include : Optional[Set[str]], optional 25 List of logger names to include, by default all registered loggers are included 26 exclude : Optional[Set[str]], optional 27 List of logger names to exclude, by default None 28 """ 29 level = log_level or source_logger.level 30 31 # Assumptions: Only take parent loggers not children (dot notation rule) 32 # Steps: 33 # 1. Default operation: Include all registered loggers 34 # 2. Only include set? Only add Loggers in the list and ignore all else 35 # 3. Include and exclude set? Add Logger if it’s in include and not in exclude 36 # 4. Only exclude set? Ignore Logger in the excluding list 37 38 # Exclude source and powertools package logger by default 39 # If source logger is a child ensure we exclude parent logger to not break child logger 40 # from receiving/pushing updates to keys being added/removed 41 source_logger_name = source_logger.name.split(".")[0] 42 43 if exclude: 44 exclude.update([source_logger_name, PACKAGE_LOGGER]) 45 else: 46 exclude = {source_logger_name, PACKAGE_LOGGER} 47 48 # Prepare loggers set 49 if include: 50 loggers = include.difference(exclude) 51 filter_func = _include_registered_loggers_filter 52 else: 53 loggers = exclude 54 filter_func = _exclude_registered_loggers_filter 55 56 registered_loggers = _find_registered_loggers(source_logger, loggers, filter_func) 57 for logger in registered_loggers: 58 _configure_logger(source_logger, logger, level) 59 60 61 def _include_registered_loggers_filter(loggers: Set[str]): 62 return [logging.getLogger(name) for name in logging.root.manager.loggerDict if "." not in name and name in loggers] 63 64 65 def _exclude_registered_loggers_filter(loggers: Set[str]) -> List[logging.Logger]: 66 return [ 67 logging.getLogger(name) for name in logging.root.manager.loggerDict if "." 
not in name and name not in loggers 68 ] 69 70 71 def _find_registered_loggers( 72 source_logger: Logger, loggers: Set[str], filter_func: Callable[[Set[str]], List[logging.Logger]] 73 ) -> List[logging.Logger]: 74 """Filter root loggers based on provided parameters.""" 75 root_loggers = filter_func(loggers) 76 source_logger.debug(f"Filtered root loggers: {root_loggers}") 77 return root_loggers 78 79 80 def _configure_logger(source_logger: Logger, logger: logging.Logger, level: Union[int, str]) -> None: 81 logger.handlers = [] 82 logger.setLevel(level) 83 logger.propagate = False # ensure we don't propagate logs to existing loggers, #1073 84 source_logger.debug(f"Logger {logger} reconfigured to use logging level {level}") 85 for source_handler in source_logger.handlers: 86 logger.addHandler(source_handler) 87 source_logger.debug(f"Logger {logger} reconfigured to use {source_handler}") 88 [end of aws_lambda_powertools/logging/utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/aws_lambda_powertools/logging/utils.py b/aws_lambda_powertools/logging/utils.py --- a/aws_lambda_powertools/logging/utils.py +++ b/aws_lambda_powertools/logging/utils.py @@ -8,7 +8,7 @@ def copy_config_to_registered_loggers( source_logger: Logger, - log_level: Optional[str] = None, + log_level: Optional[Union[int, str]] = None, exclude: Optional[Set[str]] = None, include: Optional[Set[str]] = None, ) -> None: @@ -19,7 +19,7 @@ ---------- source_logger : Logger Powertools Logger to copy configuration from - log_level : str, optional + log_level : Union[int, str], optional Logging level to set to registered loggers, by default uses source_logger logging level include : Optional[Set[str]], optional List of logger names to include, by default all registered loggers are included
{"golden_diff": "diff --git a/aws_lambda_powertools/logging/utils.py b/aws_lambda_powertools/logging/utils.py\n--- a/aws_lambda_powertools/logging/utils.py\n+++ b/aws_lambda_powertools/logging/utils.py\n@@ -8,7 +8,7 @@\n \n def copy_config_to_registered_loggers(\n source_logger: Logger,\n- log_level: Optional[str] = None,\n+ log_level: Optional[Union[int, str]] = None,\n exclude: Optional[Set[str]] = None,\n include: Optional[Set[str]] = None,\n ) -> None:\n@@ -19,7 +19,7 @@\n ----------\n source_logger : Logger\n Powertools Logger to copy configuration from\n- log_level : str, optional\n+ log_level : Union[int, str], optional\n Logging level to set to registered loggers, by default uses source_logger logging level\n include : Optional[Set[str]], optional\n List of logger names to include, by default all registered loggers are included\n", "issue": "Static typing: aws_lambda_powertools.logging.utils.copy_config_to_registered_loggers argument log_level should accept int\n### Static type checker used\r\n\r\nmypy (project's standard)\r\n\r\n### AWS Lambda function runtime\r\n\r\n3.9\r\n\r\n### AWS Lambda Powertools for Python version\r\n\r\nlatest\r\n\r\n### Static type checker info\r\n\r\n```\r\n$ mypy repro.py\r\nrepro.py:5: error: Argument \"log_level\" to \"copy_config_to_registered_loggers\" has incompatible type \"int\"; expected \"Optional[str]\"\r\nFound 1 error in 1 file (checked 1 source file)\r\n```\r\n\r\n\r\n```\r\nmypy --version\r\nmypy 0.971 (compiled: yes)\r\n```\r\n\r\n### Code snippet\r\n\r\n```python\r\nfrom aws_lambda_powertools.logging import utils\r\nfrom aws_lambda_powertools import Logger\r\n\r\nlogger = Logger()\r\nutils.copy_config_to_registered_loggers(source_logger=logger, log_level=30)\r\n```\r\n\r\n\r\n### Possible Solution\r\n\r\nUpdate signature to accept `Union[str, int]`\n", "before_files": [{"content": "import logging\nfrom typing import Callable, List, Optional, Set, Union\n\nfrom .logger import Logger\n\nPACKAGE_LOGGER = \"aws_lambda_powertools\"\n\n\ndef copy_config_to_registered_loggers(\n source_logger: Logger,\n log_level: Optional[str] = None,\n exclude: Optional[Set[str]] = None,\n include: Optional[Set[str]] = None,\n) -> None:\n\n \"\"\"Copies source Logger level and handler to all registered loggers for consistent formatting.\n\n Parameters\n ----------\n source_logger : Logger\n Powertools Logger to copy configuration from\n log_level : str, optional\n Logging level to set to registered loggers, by default uses source_logger logging level\n include : Optional[Set[str]], optional\n List of logger names to include, by default all registered loggers are included\n exclude : Optional[Set[str]], optional\n List of logger names to exclude, by default None\n \"\"\"\n level = log_level or source_logger.level\n\n # Assumptions: Only take parent loggers not children (dot notation rule)\n # Steps:\n # 1. Default operation: Include all registered loggers\n # 2. Only include set? Only add Loggers in the list and ignore all else\n # 3. Include and exclude set? Add Logger if it\u2019s in include and not in exclude\n # 4. Only exclude set? 
Ignore Logger in the excluding list\n\n # Exclude source and powertools package logger by default\n # If source logger is a child ensure we exclude parent logger to not break child logger\n # from receiving/pushing updates to keys being added/removed\n source_logger_name = source_logger.name.split(\".\")[0]\n\n if exclude:\n exclude.update([source_logger_name, PACKAGE_LOGGER])\n else:\n exclude = {source_logger_name, PACKAGE_LOGGER}\n\n # Prepare loggers set\n if include:\n loggers = include.difference(exclude)\n filter_func = _include_registered_loggers_filter\n else:\n loggers = exclude\n filter_func = _exclude_registered_loggers_filter\n\n registered_loggers = _find_registered_loggers(source_logger, loggers, filter_func)\n for logger in registered_loggers:\n _configure_logger(source_logger, logger, level)\n\n\ndef _include_registered_loggers_filter(loggers: Set[str]):\n return [logging.getLogger(name) for name in logging.root.manager.loggerDict if \".\" not in name and name in loggers]\n\n\ndef _exclude_registered_loggers_filter(loggers: Set[str]) -> List[logging.Logger]:\n return [\n logging.getLogger(name) for name in logging.root.manager.loggerDict if \".\" not in name and name not in loggers\n ]\n\n\ndef _find_registered_loggers(\n source_logger: Logger, loggers: Set[str], filter_func: Callable[[Set[str]], List[logging.Logger]]\n) -> List[logging.Logger]:\n \"\"\"Filter root loggers based on provided parameters.\"\"\"\n root_loggers = filter_func(loggers)\n source_logger.debug(f\"Filtered root loggers: {root_loggers}\")\n return root_loggers\n\n\ndef _configure_logger(source_logger: Logger, logger: logging.Logger, level: Union[int, str]) -> None:\n logger.handlers = []\n logger.setLevel(level)\n logger.propagate = False # ensure we don't propagate logs to existing loggers, #1073\n source_logger.debug(f\"Logger {logger} reconfigured to use logging level {level}\")\n for source_handler in source_logger.handlers:\n logger.addHandler(source_handler)\n source_logger.debug(f\"Logger {logger} reconfigured to use {source_handler}\")\n", "path": "aws_lambda_powertools/logging/utils.py"}]}
1,690
212
gh_patches_debug_40727
rasdani/github-patches
git_diff
kserve__kserve-1472
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [AWS] Support IAM Role for Service Account in KFServing /kind feature **Describe the solution you'd like** [A clear and concise description of what you want to happen.] Currently, it needs a S3 credential to download model. We need more fine grain control ways. [IRSA](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) is a better option. This helps us deploy model without credentials **Anything else you would like to add:** [Miscellaneous information that will assist in solving the issue.] </issue> <code> [start of python/kfserving/kfserving/storage.py] 1 # Copyright 2020 kubeflow.org. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import glob 16 import logging 17 import tempfile 18 import mimetypes 19 import os 20 import re 21 import json 22 import shutil 23 import tarfile 24 import zipfile 25 import gzip 26 from urllib.parse import urlparse 27 import requests 28 from azure.storage.blob import BlockBlobService 29 from google.auth import exceptions 30 from google.cloud import storage 31 from minio import Minio 32 from kfserving.kfmodel_repository import MODEL_MOUNT_DIRS 33 34 _GCS_PREFIX = "gs://" 35 _S3_PREFIX = "s3://" 36 _BLOB_RE = "https://(.+?).blob.core.windows.net/(.+)" 37 _LOCAL_PREFIX = "file://" 38 _URI_RE = "https?://(.+)/(.+)" 39 _HTTP_PREFIX = "http(s)://" 40 _HEADERS_SUFFIX = "-headers" 41 42 43 class Storage(object): # pylint: disable=too-few-public-methods 44 @staticmethod 45 def download(uri: str, out_dir: str = None) -> str: 46 logging.info("Copying contents of %s to local", uri) 47 48 is_local = False 49 if uri.startswith(_LOCAL_PREFIX) or os.path.exists(uri): 50 is_local = True 51 52 if out_dir is None: 53 if is_local: 54 # noop if out_dir is not set and the path is local 55 return Storage._download_local(uri) 56 out_dir = tempfile.mkdtemp() 57 elif not os.path.exists(out_dir): 58 os.mkdir(out_dir) 59 60 if uri.startswith(_GCS_PREFIX): 61 Storage._download_gcs(uri, out_dir) 62 elif uri.startswith(_S3_PREFIX): 63 Storage._download_s3(uri, out_dir) 64 elif re.search(_BLOB_RE, uri): 65 Storage._download_blob(uri, out_dir) 66 elif is_local: 67 return Storage._download_local(uri, out_dir) 68 elif re.search(_URI_RE, uri): 69 return Storage._download_from_uri(uri, out_dir) 70 elif uri.startswith(MODEL_MOUNT_DIRS): 71 # Don't need to download models if this InferenceService is running in the multi-model 72 # serving mode. The model agent will download models. 73 return out_dir 74 else: 75 raise Exception("Cannot recognize storage type for " + uri + 76 "\n'%s', '%s', '%s', and '%s' are the current available storage type." 
% 77 (_GCS_PREFIX, _S3_PREFIX, _LOCAL_PREFIX, _HTTP_PREFIX)) 78 79 logging.info("Successfully copied %s to %s", uri, out_dir) 80 return out_dir 81 82 @staticmethod 83 def _download_s3(uri, temp_dir: str): 84 client = Storage._create_minio_client() 85 bucket_args = uri.replace(_S3_PREFIX, "", 1).split("/", 1) 86 bucket_name = bucket_args[0] 87 bucket_path = bucket_args[1] if len(bucket_args) > 1 else "" 88 objects = client.list_objects(bucket_name, prefix=bucket_path, recursive=True) 89 count = 0 90 for obj in objects: 91 # Replace any prefix from the object key with temp_dir 92 subdir_object_key = obj.object_name.replace(bucket_path, "", 1).strip("/") 93 # fget_object handles directory creation if does not exist 94 if not obj.is_dir: 95 if subdir_object_key == "": 96 subdir_object_key = obj.object_name 97 client.fget_object(bucket_name, obj.object_name, 98 os.path.join(temp_dir, subdir_object_key)) 99 count = count + 1 100 if count == 0: 101 raise RuntimeError("Failed to fetch model. \ 102 The path or model %s does not exist." % (uri)) 103 104 @staticmethod 105 def _download_gcs(uri, temp_dir: str): 106 try: 107 storage_client = storage.Client() 108 except exceptions.DefaultCredentialsError: 109 storage_client = storage.Client.create_anonymous_client() 110 bucket_args = uri.replace(_GCS_PREFIX, "", 1).split("/", 1) 111 bucket_name = bucket_args[0] 112 bucket_path = bucket_args[1] if len(bucket_args) > 1 else "" 113 bucket = storage_client.bucket(bucket_name) 114 prefix = bucket_path 115 if not prefix.endswith("/"): 116 prefix = prefix + "/" 117 blobs = bucket.list_blobs(prefix=prefix) 118 count = 0 119 for blob in blobs: 120 # Replace any prefix from the object key with temp_dir 121 subdir_object_key = blob.name.replace(bucket_path, "", 1).strip("/") 122 123 # Create necessary subdirectory to store the object locally 124 if "/" in subdir_object_key: 125 local_object_dir = os.path.join(temp_dir, subdir_object_key.rsplit("/", 1)[0]) 126 if not os.path.isdir(local_object_dir): 127 os.makedirs(local_object_dir, exist_ok=True) 128 if subdir_object_key.strip() != "": 129 dest_path = os.path.join(temp_dir, subdir_object_key) 130 logging.info("Downloading: %s", dest_path) 131 blob.download_to_filename(dest_path) 132 count = count + 1 133 if count == 0: 134 raise RuntimeError("Failed to fetch model. \ 135 The path or model %s does not exist." 
% uri) 136 137 @staticmethod 138 def _download_blob(uri, out_dir: str): # pylint: disable=too-many-locals 139 match = re.search(_BLOB_RE, uri) 140 account_name = match.group(1) 141 storage_url = match.group(2) 142 container_name, prefix = storage_url.split("/", 1) 143 144 logging.info("Connecting to BLOB account: [%s], container: [%s], prefix: [%s]", 145 account_name, 146 container_name, 147 prefix) 148 try: 149 block_blob_service = BlockBlobService(account_name=account_name) 150 blobs = block_blob_service.list_blobs(container_name, prefix=prefix) 151 except Exception: # pylint: disable=broad-except 152 token = Storage._get_azure_storage_token() 153 if token is None: 154 logging.warning("Azure credentials not found, retrying anonymous access") 155 block_blob_service = BlockBlobService(account_name=account_name, token_credential=token) 156 blobs = block_blob_service.list_blobs(container_name, prefix=prefix) 157 count = 0 158 for blob in blobs: 159 dest_path = os.path.join(out_dir, blob.name) 160 if "/" in blob.name: 161 head, tail = os.path.split(blob.name) 162 if prefix is not None: 163 head = head[len(prefix):] 164 if head.startswith('/'): 165 head = head[1:] 166 dir_path = os.path.join(out_dir, head) 167 dest_path = os.path.join(dir_path, tail) 168 if not os.path.isdir(dir_path): 169 os.makedirs(dir_path) 170 171 logging.info("Downloading: %s to %s", blob.name, dest_path) 172 block_blob_service.get_blob_to_path(container_name, blob.name, dest_path) 173 count = count + 1 174 if count == 0: 175 raise RuntimeError("Failed to fetch model. \ 176 The path or model %s does not exist." % (uri)) 177 178 @staticmethod 179 def _get_azure_storage_token(): 180 tenant_id = os.getenv("AZ_TENANT_ID", "") 181 client_id = os.getenv("AZ_CLIENT_ID", "") 182 client_secret = os.getenv("AZ_CLIENT_SECRET", "") 183 subscription_id = os.getenv("AZ_SUBSCRIPTION_ID", "") 184 185 if tenant_id == "" or client_id == "" or client_secret == "" or subscription_id == "": 186 return None 187 188 # note the SP must have "Storage Blob Data Owner" perms for this to work 189 import adal 190 from azure.storage.common import TokenCredential 191 192 authority_url = "https://login.microsoftonline.com/" + tenant_id 193 194 context = adal.AuthenticationContext(authority_url) 195 196 token = context.acquire_token_with_client_credentials( 197 "https://storage.azure.com/", 198 client_id, 199 client_secret) 200 201 token_credential = TokenCredential(token["accessToken"]) 202 203 logging.info("Retrieved SP token credential for client_id: %s", client_id) 204 205 return token_credential 206 207 @staticmethod 208 def _download_local(uri, out_dir=None): 209 local_path = uri.replace(_LOCAL_PREFIX, "", 1) 210 if not os.path.exists(local_path): 211 raise RuntimeError("Local path %s does not exist." 
% (uri)) 212 213 if out_dir is None: 214 return local_path 215 elif not os.path.isdir(out_dir): 216 os.makedirs(out_dir) 217 218 if os.path.isdir(local_path): 219 local_path = os.path.join(local_path, "*") 220 221 for src in glob.glob(local_path): 222 _, tail = os.path.split(src) 223 dest_path = os.path.join(out_dir, tail) 224 logging.info("Linking: %s to %s", src, dest_path) 225 os.symlink(src, dest_path) 226 return out_dir 227 228 @staticmethod 229 def _download_from_uri(uri, out_dir=None): 230 url = urlparse(uri) 231 filename = os.path.basename(url.path) 232 mimetype, encoding = mimetypes.guess_type(url.path) 233 local_path = os.path.join(out_dir, filename) 234 235 if filename == '': 236 raise ValueError('No filename contained in URI: %s' % (uri)) 237 238 # Get header information from host url 239 headers = {} 240 host_uri = url.hostname 241 242 headers_json = os.getenv(host_uri + _HEADERS_SUFFIX, "{}") 243 headers = json.loads(headers_json) 244 245 with requests.get(uri, stream=True, headers=headers) as response: 246 if response.status_code != 200: 247 raise RuntimeError("URI: %s returned a %s response code." % (uri, response.status_code)) 248 if mimetype == 'application/zip' and not response.headers.get('Content-Type', '')\ 249 .startswith('application/zip'): 250 raise RuntimeError("URI: %s did not respond with \'Content-Type\': \'application/zip\'" % uri) 251 if mimetype == 'application/x-tar' and not response.headers.get('Content-Type', '')\ 252 .startswith('application/x-tar'): 253 raise RuntimeError("URI: %s did not respond with \'Content-Type\': \'application/x-tar\'" % uri) 254 if (mimetype != 'application/zip' and mimetype != 'application/x-tar') and \ 255 not response.headers.get('Content-Type', '').startswith('application/octet-stream'): 256 raise RuntimeError("URI: %s did not respond with \'Content-Type\': \'application/octet-stream\'" 257 % uri) 258 259 if encoding == 'gzip': 260 stream = gzip.GzipFile(fileobj=response.raw) 261 local_path = os.path.join(out_dir, f'{filename}.tar') 262 else: 263 stream = response.raw 264 with open(local_path, 'wb') as out: 265 shutil.copyfileobj(stream, out) 266 267 if mimetype in ["application/x-tar", "application/zip"]: 268 if mimetype == "application/x-tar": 269 archive = tarfile.open(local_path, 'r', encoding='utf-8') 270 else: 271 archive = zipfile.ZipFile(local_path, 'r') 272 archive.extractall(out_dir) 273 archive.close() 274 os.remove(local_path) 275 276 return out_dir 277 278 @staticmethod 279 def _create_minio_client(): 280 # Adding prefixing "http" in urlparse is necessary for it to be the netloc 281 url = urlparse(os.getenv("AWS_ENDPOINT_URL", "http://s3.amazonaws.com")) 282 use_ssl = url.scheme == 'https' if url.scheme else bool(os.getenv("S3_USE_HTTPS", "true")) 283 return Minio(url.netloc, 284 access_key=os.getenv("AWS_ACCESS_KEY_ID", ""), 285 secret_key=os.getenv("AWS_SECRET_ACCESS_KEY", ""), 286 region=os.getenv("AWS_REGION", ""), 287 secure=use_ssl) 288 [end of python/kfserving/kfserving/storage.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/python/kfserving/kfserving/storage.py b/python/kfserving/kfserving/storage.py --- a/python/kfserving/kfserving/storage.py +++ b/python/kfserving/kfserving/storage.py @@ -13,22 +13,24 @@ # limitations under the License. import glob +import gzip import logging -import tempfile import mimetypes import os import re import json import shutil import tarfile +import tempfile import zipfile -import gzip from urllib.parse import urlparse + +import boto3 import requests from azure.storage.blob import BlockBlobService from google.auth import exceptions from google.cloud import storage -from minio import Minio + from kfserving.kfmodel_repository import MODEL_MOUNT_DIRS _GCS_PREFIX = "gs://" @@ -81,25 +83,27 @@ @staticmethod def _download_s3(uri, temp_dir: str): - client = Storage._create_minio_client() - bucket_args = uri.replace(_S3_PREFIX, "", 1).split("/", 1) - bucket_name = bucket_args[0] - bucket_path = bucket_args[1] if len(bucket_args) > 1 else "" - objects = client.list_objects(bucket_name, prefix=bucket_path, recursive=True) - count = 0 - for obj in objects: - # Replace any prefix from the object key with temp_dir - subdir_object_key = obj.object_name.replace(bucket_path, "", 1).strip("/") - # fget_object handles directory creation if does not exist - if not obj.is_dir: - if subdir_object_key == "": - subdir_object_key = obj.object_name - client.fget_object(bucket_name, obj.object_name, - os.path.join(temp_dir, subdir_object_key)) - count = count + 1 - if count == 0: - raise RuntimeError("Failed to fetch model. \ -The path or model %s does not exist." % (uri)) + s3 = boto3.resource('s3', endpoint_url=os.getenv("AWS_ENDPOINT_URL", "http://s3.amazonaws.com")) + parsed = urlparse(uri, scheme='s3') + bucket_name = parsed.netloc + bucket_path = parsed.path.lstrip('/') + + bucket = s3.Bucket(bucket_name) + for obj in bucket.objects.filter(Prefix=bucket_path): + # Skip where boto3 lists the directory as an object + if obj.key.endswith("/"): + continue + # In the case where bucket_path points to a single object, set the target key to bucket_path + # Otherwise, remove the bucket_path prefix, strip any extra slashes, then prepend the target_dir + target_key = ( + obj.key + if bucket_path == obj.key + else obj.key.replace(bucket_path, "", 1).lstrip("/") + ) + target = f"{temp_dir}/{target_key}" + if not os.path.exists(os.path.dirname(target)): + os.makedirs(os.path.dirname(target), exist_ok=True) + bucket.download_file(obj.key, target) @staticmethod def _download_gcs(uri, temp_dir: str): @@ -274,14 +278,3 @@ os.remove(local_path) return out_dir - - @staticmethod - def _create_minio_client(): - # Adding prefixing "http" in urlparse is necessary for it to be the netloc - url = urlparse(os.getenv("AWS_ENDPOINT_URL", "http://s3.amazonaws.com")) - use_ssl = url.scheme == 'https' if url.scheme else bool(os.getenv("S3_USE_HTTPS", "true")) - return Minio(url.netloc, - access_key=os.getenv("AWS_ACCESS_KEY_ID", ""), - secret_key=os.getenv("AWS_SECRET_ACCESS_KEY", ""), - region=os.getenv("AWS_REGION", ""), - secure=use_ssl)
{"golden_diff": "diff --git a/python/kfserving/kfserving/storage.py b/python/kfserving/kfserving/storage.py\n--- a/python/kfserving/kfserving/storage.py\n+++ b/python/kfserving/kfserving/storage.py\n@@ -13,22 +13,24 @@\n # limitations under the License.\n \n import glob\n+import gzip\n import logging\n-import tempfile\n import mimetypes\n import os\n import re\n import json\n import shutil\n import tarfile\n+import tempfile\n import zipfile\n-import gzip\n from urllib.parse import urlparse\n+\n+import boto3\n import requests\n from azure.storage.blob import BlockBlobService\n from google.auth import exceptions\n from google.cloud import storage\n-from minio import Minio\n+\n from kfserving.kfmodel_repository import MODEL_MOUNT_DIRS\n \n _GCS_PREFIX = \"gs://\"\n@@ -81,25 +83,27 @@\n \n @staticmethod\n def _download_s3(uri, temp_dir: str):\n- client = Storage._create_minio_client()\n- bucket_args = uri.replace(_S3_PREFIX, \"\", 1).split(\"/\", 1)\n- bucket_name = bucket_args[0]\n- bucket_path = bucket_args[1] if len(bucket_args) > 1 else \"\"\n- objects = client.list_objects(bucket_name, prefix=bucket_path, recursive=True)\n- count = 0\n- for obj in objects:\n- # Replace any prefix from the object key with temp_dir\n- subdir_object_key = obj.object_name.replace(bucket_path, \"\", 1).strip(\"/\")\n- # fget_object handles directory creation if does not exist\n- if not obj.is_dir:\n- if subdir_object_key == \"\":\n- subdir_object_key = obj.object_name\n- client.fget_object(bucket_name, obj.object_name,\n- os.path.join(temp_dir, subdir_object_key))\n- count = count + 1\n- if count == 0:\n- raise RuntimeError(\"Failed to fetch model. \\\n-The path or model %s does not exist.\" % (uri))\n+ s3 = boto3.resource('s3', endpoint_url=os.getenv(\"AWS_ENDPOINT_URL\", \"http://s3.amazonaws.com\"))\n+ parsed = urlparse(uri, scheme='s3')\n+ bucket_name = parsed.netloc\n+ bucket_path = parsed.path.lstrip('/')\n+\n+ bucket = s3.Bucket(bucket_name)\n+ for obj in bucket.objects.filter(Prefix=bucket_path):\n+ # Skip where boto3 lists the directory as an object\n+ if obj.key.endswith(\"/\"):\n+ continue\n+ # In the case where bucket_path points to a single object, set the target key to bucket_path\n+ # Otherwise, remove the bucket_path prefix, strip any extra slashes, then prepend the target_dir\n+ target_key = (\n+ obj.key\n+ if bucket_path == obj.key\n+ else obj.key.replace(bucket_path, \"\", 1).lstrip(\"/\")\n+ )\n+ target = f\"{temp_dir}/{target_key}\"\n+ if not os.path.exists(os.path.dirname(target)):\n+ os.makedirs(os.path.dirname(target), exist_ok=True)\n+ bucket.download_file(obj.key, target)\n \n @staticmethod\n def _download_gcs(uri, temp_dir: str):\n@@ -274,14 +278,3 @@\n os.remove(local_path)\n \n return out_dir\n-\n- @staticmethod\n- def _create_minio_client():\n- # Adding prefixing \"http\" in urlparse is necessary for it to be the netloc\n- url = urlparse(os.getenv(\"AWS_ENDPOINT_URL\", \"http://s3.amazonaws.com\"))\n- use_ssl = url.scheme == 'https' if url.scheme else bool(os.getenv(\"S3_USE_HTTPS\", \"true\"))\n- return Minio(url.netloc,\n- access_key=os.getenv(\"AWS_ACCESS_KEY_ID\", \"\"),\n- secret_key=os.getenv(\"AWS_SECRET_ACCESS_KEY\", \"\"),\n- region=os.getenv(\"AWS_REGION\", \"\"),\n- secure=use_ssl)\n", "issue": "[AWS] Support IAM Role for Service Account in KFServing\n/kind feature\r\n\r\n**Describe the solution you'd like**\r\n[A clear and concise description of what you want to happen.]\r\nCurrently, it needs a S3 credential to download model. We need more fine grain control ways. 
[IRSA](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) is a better option. This helps us deploy model without credentials\r\n \r\n**Anything else you would like to add:**\r\n[Miscellaneous information that will assist in solving the issue.]\n", "before_files": [{"content": "# Copyright 2020 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport glob\nimport logging\nimport tempfile\nimport mimetypes\nimport os\nimport re\nimport json\nimport shutil\nimport tarfile\nimport zipfile\nimport gzip\nfrom urllib.parse import urlparse\nimport requests\nfrom azure.storage.blob import BlockBlobService\nfrom google.auth import exceptions\nfrom google.cloud import storage\nfrom minio import Minio\nfrom kfserving.kfmodel_repository import MODEL_MOUNT_DIRS\n\n_GCS_PREFIX = \"gs://\"\n_S3_PREFIX = \"s3://\"\n_BLOB_RE = \"https://(.+?).blob.core.windows.net/(.+)\"\n_LOCAL_PREFIX = \"file://\"\n_URI_RE = \"https?://(.+)/(.+)\"\n_HTTP_PREFIX = \"http(s)://\"\n_HEADERS_SUFFIX = \"-headers\"\n\n\nclass Storage(object): # pylint: disable=too-few-public-methods\n @staticmethod\n def download(uri: str, out_dir: str = None) -> str:\n logging.info(\"Copying contents of %s to local\", uri)\n\n is_local = False\n if uri.startswith(_LOCAL_PREFIX) or os.path.exists(uri):\n is_local = True\n\n if out_dir is None:\n if is_local:\n # noop if out_dir is not set and the path is local\n return Storage._download_local(uri)\n out_dir = tempfile.mkdtemp()\n elif not os.path.exists(out_dir):\n os.mkdir(out_dir)\n\n if uri.startswith(_GCS_PREFIX):\n Storage._download_gcs(uri, out_dir)\n elif uri.startswith(_S3_PREFIX):\n Storage._download_s3(uri, out_dir)\n elif re.search(_BLOB_RE, uri):\n Storage._download_blob(uri, out_dir)\n elif is_local:\n return Storage._download_local(uri, out_dir)\n elif re.search(_URI_RE, uri):\n return Storage._download_from_uri(uri, out_dir)\n elif uri.startswith(MODEL_MOUNT_DIRS):\n # Don't need to download models if this InferenceService is running in the multi-model\n # serving mode. 
The model agent will download models.\n return out_dir\n else:\n raise Exception(\"Cannot recognize storage type for \" + uri +\n \"\\n'%s', '%s', '%s', and '%s' are the current available storage type.\" %\n (_GCS_PREFIX, _S3_PREFIX, _LOCAL_PREFIX, _HTTP_PREFIX))\n\n logging.info(\"Successfully copied %s to %s\", uri, out_dir)\n return out_dir\n\n @staticmethod\n def _download_s3(uri, temp_dir: str):\n client = Storage._create_minio_client()\n bucket_args = uri.replace(_S3_PREFIX, \"\", 1).split(\"/\", 1)\n bucket_name = bucket_args[0]\n bucket_path = bucket_args[1] if len(bucket_args) > 1 else \"\"\n objects = client.list_objects(bucket_name, prefix=bucket_path, recursive=True)\n count = 0\n for obj in objects:\n # Replace any prefix from the object key with temp_dir\n subdir_object_key = obj.object_name.replace(bucket_path, \"\", 1).strip(\"/\")\n # fget_object handles directory creation if does not exist\n if not obj.is_dir:\n if subdir_object_key == \"\":\n subdir_object_key = obj.object_name\n client.fget_object(bucket_name, obj.object_name,\n os.path.join(temp_dir, subdir_object_key))\n count = count + 1\n if count == 0:\n raise RuntimeError(\"Failed to fetch model. \\\nThe path or model %s does not exist.\" % (uri))\n\n @staticmethod\n def _download_gcs(uri, temp_dir: str):\n try:\n storage_client = storage.Client()\n except exceptions.DefaultCredentialsError:\n storage_client = storage.Client.create_anonymous_client()\n bucket_args = uri.replace(_GCS_PREFIX, \"\", 1).split(\"/\", 1)\n bucket_name = bucket_args[0]\n bucket_path = bucket_args[1] if len(bucket_args) > 1 else \"\"\n bucket = storage_client.bucket(bucket_name)\n prefix = bucket_path\n if not prefix.endswith(\"/\"):\n prefix = prefix + \"/\"\n blobs = bucket.list_blobs(prefix=prefix)\n count = 0\n for blob in blobs:\n # Replace any prefix from the object key with temp_dir\n subdir_object_key = blob.name.replace(bucket_path, \"\", 1).strip(\"/\")\n\n # Create necessary subdirectory to store the object locally\n if \"/\" in subdir_object_key:\n local_object_dir = os.path.join(temp_dir, subdir_object_key.rsplit(\"/\", 1)[0])\n if not os.path.isdir(local_object_dir):\n os.makedirs(local_object_dir, exist_ok=True)\n if subdir_object_key.strip() != \"\":\n dest_path = os.path.join(temp_dir, subdir_object_key)\n logging.info(\"Downloading: %s\", dest_path)\n blob.download_to_filename(dest_path)\n count = count + 1\n if count == 0:\n raise RuntimeError(\"Failed to fetch model. 
\\\nThe path or model %s does not exist.\" % uri)\n\n @staticmethod\n def _download_blob(uri, out_dir: str): # pylint: disable=too-many-locals\n match = re.search(_BLOB_RE, uri)\n account_name = match.group(1)\n storage_url = match.group(2)\n container_name, prefix = storage_url.split(\"/\", 1)\n\n logging.info(\"Connecting to BLOB account: [%s], container: [%s], prefix: [%s]\",\n account_name,\n container_name,\n prefix)\n try:\n block_blob_service = BlockBlobService(account_name=account_name)\n blobs = block_blob_service.list_blobs(container_name, prefix=prefix)\n except Exception: # pylint: disable=broad-except\n token = Storage._get_azure_storage_token()\n if token is None:\n logging.warning(\"Azure credentials not found, retrying anonymous access\")\n block_blob_service = BlockBlobService(account_name=account_name, token_credential=token)\n blobs = block_blob_service.list_blobs(container_name, prefix=prefix)\n count = 0\n for blob in blobs:\n dest_path = os.path.join(out_dir, blob.name)\n if \"/\" in blob.name:\n head, tail = os.path.split(blob.name)\n if prefix is not None:\n head = head[len(prefix):]\n if head.startswith('/'):\n head = head[1:]\n dir_path = os.path.join(out_dir, head)\n dest_path = os.path.join(dir_path, tail)\n if not os.path.isdir(dir_path):\n os.makedirs(dir_path)\n\n logging.info(\"Downloading: %s to %s\", blob.name, dest_path)\n block_blob_service.get_blob_to_path(container_name, blob.name, dest_path)\n count = count + 1\n if count == 0:\n raise RuntimeError(\"Failed to fetch model. \\\nThe path or model %s does not exist.\" % (uri))\n\n @staticmethod\n def _get_azure_storage_token():\n tenant_id = os.getenv(\"AZ_TENANT_ID\", \"\")\n client_id = os.getenv(\"AZ_CLIENT_ID\", \"\")\n client_secret = os.getenv(\"AZ_CLIENT_SECRET\", \"\")\n subscription_id = os.getenv(\"AZ_SUBSCRIPTION_ID\", \"\")\n\n if tenant_id == \"\" or client_id == \"\" or client_secret == \"\" or subscription_id == \"\":\n return None\n\n # note the SP must have \"Storage Blob Data Owner\" perms for this to work\n import adal\n from azure.storage.common import TokenCredential\n\n authority_url = \"https://login.microsoftonline.com/\" + tenant_id\n\n context = adal.AuthenticationContext(authority_url)\n\n token = context.acquire_token_with_client_credentials(\n \"https://storage.azure.com/\",\n client_id,\n client_secret)\n\n token_credential = TokenCredential(token[\"accessToken\"])\n\n logging.info(\"Retrieved SP token credential for client_id: %s\", client_id)\n\n return token_credential\n\n @staticmethod\n def _download_local(uri, out_dir=None):\n local_path = uri.replace(_LOCAL_PREFIX, \"\", 1)\n if not os.path.exists(local_path):\n raise RuntimeError(\"Local path %s does not exist.\" % (uri))\n\n if out_dir is None:\n return local_path\n elif not os.path.isdir(out_dir):\n os.makedirs(out_dir)\n\n if os.path.isdir(local_path):\n local_path = os.path.join(local_path, \"*\")\n\n for src in glob.glob(local_path):\n _, tail = os.path.split(src)\n dest_path = os.path.join(out_dir, tail)\n logging.info(\"Linking: %s to %s\", src, dest_path)\n os.symlink(src, dest_path)\n return out_dir\n\n @staticmethod\n def _download_from_uri(uri, out_dir=None):\n url = urlparse(uri)\n filename = os.path.basename(url.path)\n mimetype, encoding = mimetypes.guess_type(url.path)\n local_path = os.path.join(out_dir, filename)\n\n if filename == '':\n raise ValueError('No filename contained in URI: %s' % (uri))\n\n # Get header information from host url\n headers = {}\n host_uri = url.hostname\n\n headers_json = 
os.getenv(host_uri + _HEADERS_SUFFIX, \"{}\")\n headers = json.loads(headers_json)\n\n with requests.get(uri, stream=True, headers=headers) as response:\n if response.status_code != 200:\n raise RuntimeError(\"URI: %s returned a %s response code.\" % (uri, response.status_code))\n if mimetype == 'application/zip' and not response.headers.get('Content-Type', '')\\\n .startswith('application/zip'):\n raise RuntimeError(\"URI: %s did not respond with \\'Content-Type\\': \\'application/zip\\'\" % uri)\n if mimetype == 'application/x-tar' and not response.headers.get('Content-Type', '')\\\n .startswith('application/x-tar'):\n raise RuntimeError(\"URI: %s did not respond with \\'Content-Type\\': \\'application/x-tar\\'\" % uri)\n if (mimetype != 'application/zip' and mimetype != 'application/x-tar') and \\\n not response.headers.get('Content-Type', '').startswith('application/octet-stream'):\n raise RuntimeError(\"URI: %s did not respond with \\'Content-Type\\': \\'application/octet-stream\\'\"\n % uri)\n\n if encoding == 'gzip':\n stream = gzip.GzipFile(fileobj=response.raw)\n local_path = os.path.join(out_dir, f'{filename}.tar')\n else:\n stream = response.raw\n with open(local_path, 'wb') as out:\n shutil.copyfileobj(stream, out)\n\n if mimetype in [\"application/x-tar\", \"application/zip\"]:\n if mimetype == \"application/x-tar\":\n archive = tarfile.open(local_path, 'r', encoding='utf-8')\n else:\n archive = zipfile.ZipFile(local_path, 'r')\n archive.extractall(out_dir)\n archive.close()\n os.remove(local_path)\n\n return out_dir\n\n @staticmethod\n def _create_minio_client():\n # Adding prefixing \"http\" in urlparse is necessary for it to be the netloc\n url = urlparse(os.getenv(\"AWS_ENDPOINT_URL\", \"http://s3.amazonaws.com\"))\n use_ssl = url.scheme == 'https' if url.scheme else bool(os.getenv(\"S3_USE_HTTPS\", \"true\"))\n return Minio(url.netloc,\n access_key=os.getenv(\"AWS_ACCESS_KEY_ID\", \"\"),\n secret_key=os.getenv(\"AWS_SECRET_ACCESS_KEY\", \"\"),\n region=os.getenv(\"AWS_REGION\", \"\"),\n secure=use_ssl)\n", "path": "python/kfserving/kfserving/storage.py"}]}
4,057
868
gh_patches_debug_374
rasdani/github-patches
git_diff
cupy__cupy-2615
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> cupy.where fails for complex arrays The function cupy.where does not work for complex arrays (numpy.where does): ``` import cupy as cp a = cp.arange(5).astype(cp.complex128) b = cp.arange(5).astype(cp.complex128) c = cp.where(a==b,a,b) ``` fails with the error message > TypeError: Wrong type ((<class 'numpy.bool_'>, <class 'numpy.complex128'>, <class 'numpy.complex128'>)) of arguments for cupy_where For `cp.float64`, everything works fine. CuPy Version : 6.4.0 CUDA Root : /usr CUDA Build Version : 8000 CUDA Driver Version : 10020 CUDA Runtime Version : 8000 cuDNN Build Version : 7102 cuDNN Version : 7102 NCCL Build Version : 2213 NCCL Runtime Version : (unknown) </issue> <code> [start of cupy/sorting/search.py] 1 from cupy import core 2 from cupy.core import fusion 3 4 5 def argmax(a, axis=None, dtype=None, out=None, keepdims=False): 6 """Returns the indices of the maximum along an axis. 7 8 Args: 9 a (cupy.ndarray): Array to take argmax. 10 axis (int): Along which axis to find the maximum. ``a`` is flattened by 11 default. 12 dtype: Data type specifier. 13 out (cupy.ndarray): Output array. 14 keepdims (bool): If ``True``, the axis ``axis`` is preserved as an axis 15 of length one. 16 17 Returns: 18 cupy.ndarray: The indices of the maximum of ``a`` along an axis. 19 20 .. seealso:: :func:`numpy.argmax` 21 22 """ 23 # TODO(okuta): check type 24 return a.argmax(axis=axis, dtype=dtype, out=out, keepdims=keepdims) 25 26 27 # TODO(okuta): Implement nanargmax 28 29 30 def argmin(a, axis=None, dtype=None, out=None, keepdims=False): 31 """Returns the indices of the minimum along an axis. 32 33 Args: 34 a (cupy.ndarray): Array to take argmin. 35 axis (int): Along which axis to find the minimum. ``a`` is flattened by 36 default. 37 dtype: Data type specifier. 38 out (cupy.ndarray): Output array. 39 keepdims (bool): If ``True``, the axis ``axis`` is preserved as an axis 40 of length one. 41 42 Returns: 43 cupy.ndarray: The indices of the minimum of ``a`` along an axis. 44 45 .. seealso:: :func:`numpy.argmin` 46 47 """ 48 # TODO(okuta): check type 49 return a.argmin(axis=axis, dtype=dtype, out=out, keepdims=keepdims) 50 51 52 # TODO(okuta): Implement nanargmin 53 54 55 # TODO(okuta): Implement argwhere 56 57 58 def nonzero(a): 59 """Return the indices of the elements that are non-zero. 60 61 Returns a tuple of arrays, one for each dimension of a, 62 containing the indices of the non-zero elements in that dimension. 63 64 Args: 65 a (cupy.ndarray): array 66 67 Returns: 68 tuple of arrays: Indices of elements that are non-zero. 69 70 .. seealso:: :func:`numpy.nonzero` 71 72 """ 73 assert isinstance(a, core.ndarray) 74 return a.nonzero() 75 76 77 def flatnonzero(a): 78 """Return indices that are non-zero in the flattened version of a. 79 80 This is equivalent to a.ravel().nonzero()[0]. 81 82 Args: 83 a (cupy.ndarray): input array 84 85 Returns: 86 cupy.ndarray: Output array, 87 containing the indices of the elements of a.ravel() that are non-zero. 88 89 .. seealso:: :func:`numpy.flatnonzero` 90 """ 91 assert isinstance(a, core.ndarray) 92 return a.ravel().nonzero()[0] 93 94 95 _where_ufunc = core.create_ufunc( 96 'cupy_where', 97 ('???->?', '?bb->b', '?BB->B', '?hh->h', '?HH->H', '?ii->i', '?II->I', 98 '?ll->l', '?LL->L', '?qq->q', '?QQ->Q', '?ee->e', '?ff->f', 99 # On CUDA 6.5 these combinations don't work correctly (on CUDA >=7.0, it 100 # works). 101 # See issue #551. 
102 '?hd->d', '?Hd->d', 103 '?dd->d'), 104 'out0 = in0 ? in1 : in2') 105 106 107 def where(condition, x=None, y=None): 108 """Return elements, either from x or y, depending on condition. 109 110 If only condition is given, return ``condition.nonzero()``. 111 112 Args: 113 condition (cupy.ndarray): When True, take x, otherwise take y. 114 x (cupy.ndarray): Values from which to choose on ``True``. 115 y (cupy.ndarray): Values from which to choose on ``False``. 116 117 Returns: 118 cupy.ndarray: Each element of output contains elements of ``x`` when 119 ``condition`` is ``True``, otherwise elements of ``y``. If only 120 ``condition`` is given, return the tuple ``condition.nonzero()``, 121 the indices where ``condition`` is True. 122 123 .. seealso:: :func:`numpy.where` 124 125 """ 126 127 missing = (x is None, y is None).count(True) 128 129 if missing == 1: 130 raise ValueError('Must provide both \'x\' and \'y\' or neither.') 131 if missing == 2: 132 return nonzero(condition) 133 134 if fusion._is_fusing(): 135 return fusion._call_ufunc(_where_ufunc, condition, x, y) 136 return _where_ufunc(condition.astype('?'), x, y) 137 138 139 # TODO(okuta): Implement searchsorted 140 141 142 # TODO(okuta): Implement extract 143 [end of cupy/sorting/search.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/cupy/sorting/search.py b/cupy/sorting/search.py --- a/cupy/sorting/search.py +++ b/cupy/sorting/search.py @@ -100,7 +100,7 @@ # works). # See issue #551. '?hd->d', '?Hd->d', - '?dd->d'), + '?dd->d', '?FF->F', '?DD->D'), 'out0 = in0 ? in1 : in2')
{"golden_diff": "diff --git a/cupy/sorting/search.py b/cupy/sorting/search.py\n--- a/cupy/sorting/search.py\n+++ b/cupy/sorting/search.py\n@@ -100,7 +100,7 @@\n # works).\n # See issue #551.\n '?hd->d', '?Hd->d',\n- '?dd->d'),\n+ '?dd->d', '?FF->F', '?DD->D'),\n 'out0 = in0 ? in1 : in2')\n", "issue": "cupy.where fails for complex arrays\nThe function cupy.where does not work for complex arrays (numpy.where does):\r\n\r\n```\r\nimport cupy as cp\r\na = cp.arange(5).astype(cp.complex128)\r\nb = cp.arange(5).astype(cp.complex128)\r\nc = cp.where(a==b,a,b)\r\n```\r\n\r\nfails with the error message\r\n\r\n> TypeError: Wrong type ((<class 'numpy.bool_'>, <class 'numpy.complex128'>, <class 'numpy.complex128'>)) of arguments for cupy_where\r\n\r\nFor `cp.float64`, everything works fine.\r\n\r\nCuPy Version : 6.4.0\r\nCUDA Root : /usr\r\nCUDA Build Version : 8000\r\nCUDA Driver Version : 10020\r\nCUDA Runtime Version : 8000\r\ncuDNN Build Version : 7102\r\ncuDNN Version : 7102\r\nNCCL Build Version : 2213\r\nNCCL Runtime Version : (unknown)\n", "before_files": [{"content": "from cupy import core\nfrom cupy.core import fusion\n\n\ndef argmax(a, axis=None, dtype=None, out=None, keepdims=False):\n \"\"\"Returns the indices of the maximum along an axis.\n\n Args:\n a (cupy.ndarray): Array to take argmax.\n axis (int): Along which axis to find the maximum. ``a`` is flattened by\n default.\n dtype: Data type specifier.\n out (cupy.ndarray): Output array.\n keepdims (bool): If ``True``, the axis ``axis`` is preserved as an axis\n of length one.\n\n Returns:\n cupy.ndarray: The indices of the maximum of ``a`` along an axis.\n\n .. seealso:: :func:`numpy.argmax`\n\n \"\"\"\n # TODO(okuta): check type\n return a.argmax(axis=axis, dtype=dtype, out=out, keepdims=keepdims)\n\n\n# TODO(okuta): Implement nanargmax\n\n\ndef argmin(a, axis=None, dtype=None, out=None, keepdims=False):\n \"\"\"Returns the indices of the minimum along an axis.\n\n Args:\n a (cupy.ndarray): Array to take argmin.\n axis (int): Along which axis to find the minimum. ``a`` is flattened by\n default.\n dtype: Data type specifier.\n out (cupy.ndarray): Output array.\n keepdims (bool): If ``True``, the axis ``axis`` is preserved as an axis\n of length one.\n\n Returns:\n cupy.ndarray: The indices of the minimum of ``a`` along an axis.\n\n .. seealso:: :func:`numpy.argmin`\n\n \"\"\"\n # TODO(okuta): check type\n return a.argmin(axis=axis, dtype=dtype, out=out, keepdims=keepdims)\n\n\n# TODO(okuta): Implement nanargmin\n\n\n# TODO(okuta): Implement argwhere\n\n\ndef nonzero(a):\n \"\"\"Return the indices of the elements that are non-zero.\n\n Returns a tuple of arrays, one for each dimension of a,\n containing the indices of the non-zero elements in that dimension.\n\n Args:\n a (cupy.ndarray): array\n\n Returns:\n tuple of arrays: Indices of elements that are non-zero.\n\n .. seealso:: :func:`numpy.nonzero`\n\n \"\"\"\n assert isinstance(a, core.ndarray)\n return a.nonzero()\n\n\ndef flatnonzero(a):\n \"\"\"Return indices that are non-zero in the flattened version of a.\n\n This is equivalent to a.ravel().nonzero()[0].\n\n Args:\n a (cupy.ndarray): input array\n\n Returns:\n cupy.ndarray: Output array,\n containing the indices of the elements of a.ravel() that are non-zero.\n\n .. 
seealso:: :func:`numpy.flatnonzero`\n \"\"\"\n assert isinstance(a, core.ndarray)\n return a.ravel().nonzero()[0]\n\n\n_where_ufunc = core.create_ufunc(\n 'cupy_where',\n ('???->?', '?bb->b', '?BB->B', '?hh->h', '?HH->H', '?ii->i', '?II->I',\n '?ll->l', '?LL->L', '?qq->q', '?QQ->Q', '?ee->e', '?ff->f',\n # On CUDA 6.5 these combinations don't work correctly (on CUDA >=7.0, it\n # works).\n # See issue #551.\n '?hd->d', '?Hd->d',\n '?dd->d'),\n 'out0 = in0 ? in1 : in2')\n\n\ndef where(condition, x=None, y=None):\n \"\"\"Return elements, either from x or y, depending on condition.\n\n If only condition is given, return ``condition.nonzero()``.\n\n Args:\n condition (cupy.ndarray): When True, take x, otherwise take y.\n x (cupy.ndarray): Values from which to choose on ``True``.\n y (cupy.ndarray): Values from which to choose on ``False``.\n\n Returns:\n cupy.ndarray: Each element of output contains elements of ``x`` when\n ``condition`` is ``True``, otherwise elements of ``y``. If only\n ``condition`` is given, return the tuple ``condition.nonzero()``,\n the indices where ``condition`` is True.\n\n .. seealso:: :func:`numpy.where`\n\n \"\"\"\n\n missing = (x is None, y is None).count(True)\n\n if missing == 1:\n raise ValueError('Must provide both \\'x\\' and \\'y\\' or neither.')\n if missing == 2:\n return nonzero(condition)\n\n if fusion._is_fusing():\n return fusion._call_ufunc(_where_ufunc, condition, x, y)\n return _where_ufunc(condition.astype('?'), x, y)\n\n\n# TODO(okuta): Implement searchsorted\n\n\n# TODO(okuta): Implement extract\n", "path": "cupy/sorting/search.py"}]}
2,176
113
gh_patches_debug_30952
rasdani/github-patches
git_diff
apluslms__a-plus-576
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> LTI: remove request.host from LTI parameters Remove `request.get_host()` from all LTI parameter calculations. Use `settings.BASE_URL` instead. `request.get_host` comes from the browser, thus user can use it to manipulate the data send as part of the post. These related issues can be fixed in connection or should be created as another issues: - [x] `tool_consumer_instance_guid` this field doesn't seem to include relevant information. it should be created from settings.BASE_URL - [x] `launch_presentation_return_url` this should include link to the page in A+, which will render the launch button (basically to the page this link is rendered at). - [ ] Interesting second part is to include or keep query parameters, so we can request reauthentication by the tool provider (e.g. koodisäilö), which would be sending browser to `{launch_presentation_return_url}?tc_return_url=<ulr>` or such. TODO: raphendyr needs to write down better explanation (i.e. create an issue) - **Moved to an issue #431** </issue> <code> [start of external_services/lti.py] 1 from hashlib import md5 2 from django.conf import settings 3 from django.core.exceptions import PermissionDenied 4 from django.utils.translation import get_language 5 from rest_framework.reverse import reverse 6 from rest_framework.settings import api_settings 7 from oauthlib.common import urldecode 8 from oauthlib.oauth1 import Client, SIGNATURE_HMAC, SIGNATURE_TYPE_BODY, \ 9 SIGNATURE_TYPE_QUERY 10 import json 11 12 from aplus.api import api_reverse 13 from lib.helpers import update_url_params 14 from course.models import Enrollment 15 16 17 class LTIRequest(object): 18 19 def __init__(self, service, user, instance, request, title, context_id=None, link_id=None, add=None, exercise=None): 20 self.service = service 21 course = instance.course 22 # Context and resource parameters. 23 context_id = context_id or (request.get_host() + instance.get_absolute_url()) 24 link_id = link_id or "aplus{:d}".format(service.pk) 25 title = title or link_id 26 27 # Gather user information 28 user_id, given_name, family_name, full_name, email = self.user_info(instance, user) 29 30 # Determine user role. 31 role = "Learner,Student" 32 # Student is not a standard role name, but it has been used here before 33 if course.is_teacher(user): 34 role = "Instructor" 35 elif instance.is_assistant(user): 36 role = "TA,TeachingAssistant" # "TA" is not a standard role 37 38 self.parameters = add or {} 39 self.parameters.update({ 40 41 "lti_version": "LTI-1p0", 42 "lti_message_type": "basic-lti-launch-request", 43 44 "resource_link_id": link_id, 45 "resource_link_title": title, 46 47 # User. 48 "user_id": user_id, 49 "roles": role, 50 "lis_person_name_full": full_name, 51 "lis_person_name_given": given_name, 52 "lis_person_name_family": family_name, 53 "lis_person_contact_email_primary": email, 54 55 # Selected course. 
56 "context_id": context_id, 57 "context_title": course.name, 58 "context_label": course.code, 59 60 "launch_presentation_locale": get_language(), 61 "launch_presentation_document_target": 62 "iframe" if exercise and exercise.open_in_iframe else "window", 63 "launch_presentation_return_url": request.scheme + '://' + request.get_host() + instance.get_absolute_url(), 64 65 "tool_consumer_instance_guid": request.get_host() + "/aplus", 66 "tool_consumer_instance_name": "A+ LMS", 67 }) 68 69 if service.api_access: 70 self.parameters.update({ 71 'custom_context_api': settings.BASE_URL + api_reverse("course-detail", kwargs={'course_id': instance.id}), 72 'custom_context_api_id': str(instance.id), 73 'custom_user_api_token': user.userprofile.api_token, 74 }) 75 76 if exercise: 77 # LTI 1.1 Tool Provider may return grades to A+ (Tool Consumer) 78 self.parameters.update({ 79 # Outcome Service requests from the LTI Tool Provider include the 80 # sourcedid from the launch request. It is used to create new submissions 81 # for storing the points of the user. 82 "lis_result_sourcedid": "{}-{}".format(exercise.pk, user_id), 83 # The LTI Tool Provider posts Outcome Service requests to this URL (i.e., points for a submission) 84 "lis_outcome_service_url": reverse('lti-outcomes', request=request, 85 kwargs={'version': api_settings.DEFAULT_VERSION}), 86 }) 87 88 def user_info(self, course_instance, user): 89 if self.service.is_anonymous: 90 # Anonymize user information 91 enrollment = Enrollment.objects.filter(course_instance=course_instance, user_profile=user.userprofile).first() 92 if not enrollment: 93 raise PermissionDenied() 94 # Creates anon name and id for pre-pseudonymisation Enrollments 95 if not (enrollment.anon_name or enrollment.anon_id): 96 # the model's post_save functions take care of the creation 97 enrollment.save() 98 user_id = "a" + enrollment.anon_id # a for anonymous 99 full_name = enrollment.anon_name 100 given_name, sep, family_name = full_name.rpartition(" ") 101 if not given_name: 102 given_name = "Anonymous" 103 email = "anonymous-{}@aplus.invalid".format(enrollment.anon_id) 104 else: 105 user_id = "i" + str(user.pk) # i for internal 106 full_name = "{} {}".format(user.first_name, user.last_name) 107 given_name = user.first_name 108 family_name = user.last_name 109 email = user.email 110 return user_id, given_name, family_name, full_name, email 111 112 def get_checksum_of_parameters(self, only_user_and_course_level_params=False): 113 if only_user_and_course_level_params: 114 # do not include parameters that change between different exercises for the same LTI service 115 included_keys = ( 116 "lti_version", 117 "lti_message_type", 118 "user_id", 119 "lis_person_name_full", 120 "lis_person_contact_email_primary", 121 "context_id", 122 "context_label", 123 "tool_consumer_instance_guid", 124 ) 125 params = [(key, value) for key, value in self.parameters.items() if key in included_keys] 126 else: 127 params = self.parameters.items() 128 sum = md5() 129 for key, value in sorted(params): 130 sum.update("{}={};".format(key, value).encode('utf-8')) 131 return sum.hexdigest() 132 133 def sign_post_parameters(self, url=None): 134 client = Client(self.service.consumer_key, 135 client_secret=self.service.consumer_secret, 136 signature_method=SIGNATURE_HMAC, 137 signature_type=SIGNATURE_TYPE_BODY) 138 uri, headers, body = client.sign(self._get_url(url), 139 http_method="POST", 140 body=self.parameters, 141 headers={"Content-Type": "application/x-www-form-urlencoded"}) 142 return urldecode(body) 
143 144 def sign_get_query(self, url=None): 145 client = Client(self.service.consumer_key, 146 client_secret=self.service.consumer_secret, 147 signature_method=SIGNATURE_HMAC, 148 signature_type=SIGNATURE_TYPE_QUERY) 149 uri = update_url_params(self._get_url(url), self.parameters) 150 try: 151 query, headers, body = client.sign(uri, http_method="GET") 152 except ValueError as e: 153 raise ValueError("Invalid url %r for %r: %s" % (uri, self.service, e)) 154 return query 155 156 def _get_url(self, url=None): 157 if url and url.startswith('//') or '://' in url: 158 return url 159 return self.service.get_final_url(url) 160 161 162 class CustomStudentInfoLTIRequest(LTIRequest): 163 164 def __init__(self, service, user, profiles, instance, request, title, context_id=None, link_id=None, add=None, exercise=None): 165 self.service = service 166 self.course_instance = instance 167 parameters = add or {} 168 if not service.is_anonymous: 169 parameters['custom_student_id'] = self.true_student_id(user.userprofile) 170 if len(profiles) > 1: 171 parameters['custom_group_members'] = self.group_json(profiles) 172 super().__init__(service, user, instance, request, title, context_id, link_id, parameters, exercise) 173 174 def true_student_id(self, profile): 175 return profile.student_id or "A{:d}".format(profile.id) 176 177 def group_json(self, profiles): 178 data = [] 179 for profile in profiles: 180 user_id, given_name, family_name, full_name, email = self.user_info(self.course_instance, profile.user) 181 d = { 182 'user': user_id, 183 'given_name': given_name, 184 'family_name': family_name, 185 'full_name': full_name, 186 'email': email, 187 } 188 if not self.service.is_anonymous: 189 d['student_id'] = self.true_student_id(profile) 190 data.append(d) 191 return json.dumps(data) 192 [end of external_services/lti.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/external_services/lti.py b/external_services/lti.py --- a/external_services/lti.py +++ b/external_services/lti.py @@ -1,4 +1,6 @@ from hashlib import md5 +from urllib.parse import urlsplit, urljoin + from django.conf import settings from django.core.exceptions import PermissionDenied from django.utils.translation import get_language @@ -19,8 +21,9 @@ def __init__(self, service, user, instance, request, title, context_id=None, link_id=None, add=None, exercise=None): self.service = service course = instance.course + base_url_parts = urlsplit(settings.BASE_URL) # Context and resource parameters. - context_id = context_id or (request.get_host() + instance.get_absolute_url()) + context_id = context_id or (base_url_parts.netloc + instance.get_absolute_url()) link_id = link_id or "aplus{:d}".format(service.pk) title = title or link_id @@ -60,9 +63,9 @@ "launch_presentation_locale": get_language(), "launch_presentation_document_target": "iframe" if exercise and exercise.open_in_iframe else "window", - "launch_presentation_return_url": request.scheme + '://' + request.get_host() + instance.get_absolute_url(), + "launch_presentation_return_url": urljoin(settings.BASE_URL, instance.get_absolute_url()), - "tool_consumer_instance_guid": request.get_host() + "/aplus", + "tool_consumer_instance_guid": base_url_parts.netloc + "/aplus", "tool_consumer_instance_name": "A+ LMS", })
{"golden_diff": "diff --git a/external_services/lti.py b/external_services/lti.py\n--- a/external_services/lti.py\n+++ b/external_services/lti.py\n@@ -1,4 +1,6 @@\n from hashlib import md5\n+from urllib.parse import urlsplit, urljoin\n+\n from django.conf import settings\n from django.core.exceptions import PermissionDenied\n from django.utils.translation import get_language\n@@ -19,8 +21,9 @@\n def __init__(self, service, user, instance, request, title, context_id=None, link_id=None, add=None, exercise=None):\n self.service = service\n course = instance.course\n+ base_url_parts = urlsplit(settings.BASE_URL)\n # Context and resource parameters.\n- context_id = context_id or (request.get_host() + instance.get_absolute_url())\n+ context_id = context_id or (base_url_parts.netloc + instance.get_absolute_url())\n link_id = link_id or \"aplus{:d}\".format(service.pk)\n title = title or link_id\n \n@@ -60,9 +63,9 @@\n \"launch_presentation_locale\": get_language(),\n \"launch_presentation_document_target\":\n \"iframe\" if exercise and exercise.open_in_iframe else \"window\",\n- \"launch_presentation_return_url\": request.scheme + '://' + request.get_host() + instance.get_absolute_url(),\n+ \"launch_presentation_return_url\": urljoin(settings.BASE_URL, instance.get_absolute_url()),\n \n- \"tool_consumer_instance_guid\": request.get_host() + \"/aplus\",\n+ \"tool_consumer_instance_guid\": base_url_parts.netloc + \"/aplus\",\n \"tool_consumer_instance_name\": \"A+ LMS\",\n })\n", "issue": "LTI: remove request.host from LTI parameters\nRemove `request.get_host()` from all LTI parameter calculations. Use `settings.BASE_URL` instead. `request.get_host` comes from the browser, thus user can use it to manipulate the data send as part of the post.\r\n\r\nThese related issues can be fixed in connection or should be created as another issues:\r\n\r\n- [x] `tool_consumer_instance_guid` this field doesn't seem to include relevant information. it should be created from settings.BASE_URL\r\n- [x] `launch_presentation_return_url` this should include link to the page in A+, which will render the launch button (basically to the page this link is rendered at).\r\n- [ ] Interesting second part is to include or keep query parameters, so we can request reauthentication by the tool provider (e.g. koodis\u00e4il\u00f6), which would be sending browser to `{launch_presentation_return_url}?tc_return_url=<ulr>` or such. TODO: raphendyr needs to write down better explanation (i.e. 
create an issue) - **Moved to an issue #431**\n", "before_files": [{"content": "from hashlib import md5\nfrom django.conf import settings\nfrom django.core.exceptions import PermissionDenied\nfrom django.utils.translation import get_language\nfrom rest_framework.reverse import reverse\nfrom rest_framework.settings import api_settings\nfrom oauthlib.common import urldecode\nfrom oauthlib.oauth1 import Client, SIGNATURE_HMAC, SIGNATURE_TYPE_BODY, \\\n SIGNATURE_TYPE_QUERY\nimport json\n\nfrom aplus.api import api_reverse\nfrom lib.helpers import update_url_params\nfrom course.models import Enrollment\n\n\nclass LTIRequest(object):\n\n def __init__(self, service, user, instance, request, title, context_id=None, link_id=None, add=None, exercise=None):\n self.service = service\n course = instance.course\n # Context and resource parameters.\n context_id = context_id or (request.get_host() + instance.get_absolute_url())\n link_id = link_id or \"aplus{:d}\".format(service.pk)\n title = title or link_id\n\n # Gather user information\n user_id, given_name, family_name, full_name, email = self.user_info(instance, user)\n\n # Determine user role.\n role = \"Learner,Student\"\n # Student is not a standard role name, but it has been used here before\n if course.is_teacher(user):\n role = \"Instructor\"\n elif instance.is_assistant(user):\n role = \"TA,TeachingAssistant\" # \"TA\" is not a standard role\n\n self.parameters = add or {}\n self.parameters.update({\n\n \"lti_version\": \"LTI-1p0\",\n \"lti_message_type\": \"basic-lti-launch-request\",\n\n \"resource_link_id\": link_id,\n \"resource_link_title\": title,\n\n # User.\n \"user_id\": user_id,\n \"roles\": role,\n \"lis_person_name_full\": full_name,\n \"lis_person_name_given\": given_name,\n \"lis_person_name_family\": family_name,\n \"lis_person_contact_email_primary\": email,\n\n # Selected course.\n \"context_id\": context_id,\n \"context_title\": course.name,\n \"context_label\": course.code,\n\n \"launch_presentation_locale\": get_language(),\n \"launch_presentation_document_target\":\n \"iframe\" if exercise and exercise.open_in_iframe else \"window\",\n \"launch_presentation_return_url\": request.scheme + '://' + request.get_host() + instance.get_absolute_url(),\n\n \"tool_consumer_instance_guid\": request.get_host() + \"/aplus\",\n \"tool_consumer_instance_name\": \"A+ LMS\",\n })\n\n if service.api_access:\n self.parameters.update({\n 'custom_context_api': settings.BASE_URL + api_reverse(\"course-detail\", kwargs={'course_id': instance.id}),\n 'custom_context_api_id': str(instance.id),\n 'custom_user_api_token': user.userprofile.api_token,\n })\n\n if exercise:\n # LTI 1.1 Tool Provider may return grades to A+ (Tool Consumer)\n self.parameters.update({\n # Outcome Service requests from the LTI Tool Provider include the\n # sourcedid from the launch request. 
It is used to create new submissions\n # for storing the points of the user.\n \"lis_result_sourcedid\": \"{}-{}\".format(exercise.pk, user_id),\n # The LTI Tool Provider posts Outcome Service requests to this URL (i.e., points for a submission)\n \"lis_outcome_service_url\": reverse('lti-outcomes', request=request,\n kwargs={'version': api_settings.DEFAULT_VERSION}),\n })\n\n def user_info(self, course_instance, user):\n if self.service.is_anonymous:\n # Anonymize user information\n enrollment = Enrollment.objects.filter(course_instance=course_instance, user_profile=user.userprofile).first()\n if not enrollment:\n raise PermissionDenied()\n # Creates anon name and id for pre-pseudonymisation Enrollments\n if not (enrollment.anon_name or enrollment.anon_id):\n # the model's post_save functions take care of the creation\n enrollment.save()\n user_id = \"a\" + enrollment.anon_id # a for anonymous\n full_name = enrollment.anon_name\n given_name, sep, family_name = full_name.rpartition(\" \")\n if not given_name:\n given_name = \"Anonymous\"\n email = \"anonymous-{}@aplus.invalid\".format(enrollment.anon_id)\n else:\n user_id = \"i\" + str(user.pk) # i for internal\n full_name = \"{} {}\".format(user.first_name, user.last_name)\n given_name = user.first_name\n family_name = user.last_name\n email = user.email\n return user_id, given_name, family_name, full_name, email\n\n def get_checksum_of_parameters(self, only_user_and_course_level_params=False):\n if only_user_and_course_level_params:\n # do not include parameters that change between different exercises for the same LTI service\n included_keys = (\n \"lti_version\",\n \"lti_message_type\",\n \"user_id\",\n \"lis_person_name_full\",\n \"lis_person_contact_email_primary\",\n \"context_id\",\n \"context_label\",\n \"tool_consumer_instance_guid\",\n )\n params = [(key, value) for key, value in self.parameters.items() if key in included_keys]\n else:\n params = self.parameters.items()\n sum = md5()\n for key, value in sorted(params):\n sum.update(\"{}={};\".format(key, value).encode('utf-8'))\n return sum.hexdigest()\n\n def sign_post_parameters(self, url=None):\n client = Client(self.service.consumer_key,\n client_secret=self.service.consumer_secret,\n signature_method=SIGNATURE_HMAC,\n signature_type=SIGNATURE_TYPE_BODY)\n uri, headers, body = client.sign(self._get_url(url),\n http_method=\"POST\",\n body=self.parameters,\n headers={\"Content-Type\": \"application/x-www-form-urlencoded\"})\n return urldecode(body)\n\n def sign_get_query(self, url=None):\n client = Client(self.service.consumer_key,\n client_secret=self.service.consumer_secret,\n signature_method=SIGNATURE_HMAC,\n signature_type=SIGNATURE_TYPE_QUERY)\n uri = update_url_params(self._get_url(url), self.parameters)\n try:\n query, headers, body = client.sign(uri, http_method=\"GET\")\n except ValueError as e:\n raise ValueError(\"Invalid url %r for %r: %s\" % (uri, self.service, e))\n return query\n\n def _get_url(self, url=None):\n if url and url.startswith('//') or '://' in url:\n return url\n return self.service.get_final_url(url)\n\n\nclass CustomStudentInfoLTIRequest(LTIRequest):\n\n def __init__(self, service, user, profiles, instance, request, title, context_id=None, link_id=None, add=None, exercise=None):\n self.service = service\n self.course_instance = instance\n parameters = add or {}\n if not service.is_anonymous:\n parameters['custom_student_id'] = self.true_student_id(user.userprofile)\n if len(profiles) > 1:\n parameters['custom_group_members'] = 
self.group_json(profiles)\n super().__init__(service, user, instance, request, title, context_id, link_id, parameters, exercise)\n\n def true_student_id(self, profile):\n return profile.student_id or \"A{:d}\".format(profile.id)\n\n def group_json(self, profiles):\n data = []\n for profile in profiles:\n user_id, given_name, family_name, full_name, email = self.user_info(self.course_instance, profile.user)\n d = {\n 'user': user_id,\n 'given_name': given_name,\n 'family_name': family_name,\n 'full_name': full_name,\n 'email': email,\n }\n if not self.service.is_anonymous:\n d['student_id'] = self.true_student_id(profile)\n data.append(d)\n return json.dumps(data)\n", "path": "external_services/lti.py"}]}
2,945
370
gh_patches_debug_162
rasdani/github-patches
git_diff
CTFd__CTFd-796
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Not possible to download files as anonymous user **Environment**: - CTFd Version/Commit: 2.0.0 from master - Operating System: Debian Stretch, Ubuntu 18.04, ... - Web Browser and Version: Firefox 63 **What happened?** * In admin, set visibility of challenges to public * Logout * Open challenge view (`/challenges`) * Click on a challenge with a file * Try to download the file (`/files/c378d661d2c9e103c4409cd4c92d801f/alice_bob.py` * => Error 403 **What did you expect to happen?** * ... * Click on a challenge with a file * Try to download the file * File downloads **How to reproduce your issue** _see above_ **Any associated stack traces or error logs** _none_ </issue> <code> [start of CTFd/__init__.py] 1 import sys 2 import os 3 4 from distutils.version import StrictVersion 5 from flask import Flask 6 from werkzeug.contrib.fixers import ProxyFix 7 from jinja2 import FileSystemLoader 8 from jinja2.sandbox import SandboxedEnvironment 9 from six.moves import input 10 11 from CTFd import utils 12 from CTFd.utils.migrations import migrations, migrate, upgrade, stamp, create_database 13 from CTFd.utils.sessions import CachingSessionInterface 14 from CTFd.utils.updates import update_check 15 from CTFd.utils.initialization import init_request_processors, init_template_filters, init_template_globals 16 from CTFd.utils.events import socketio 17 from CTFd.plugins import init_plugins 18 19 # Hack to support Unicode in Python 2 properly 20 if sys.version_info[0] < 3: 21 reload(sys) 22 sys.setdefaultencoding("utf-8") 23 24 __version__ = '2.0.0' 25 26 27 class CTFdFlask(Flask): 28 def __init__(self, *args, **kwargs): 29 """Overriden Jinja constructor setting a custom jinja_environment""" 30 self.jinja_environment = SandboxedBaseEnvironment 31 self.session_interface = CachingSessionInterface(key_prefix='session') 32 Flask.__init__(self, *args, **kwargs) 33 34 def create_jinja_environment(self): 35 """Overridden jinja environment constructor""" 36 return super(CTFdFlask, self).create_jinja_environment() 37 38 39 class SandboxedBaseEnvironment(SandboxedEnvironment): 40 """SandboxEnvironment that mimics the Flask BaseEnvironment""" 41 def __init__(self, app, **options): 42 if 'loader' not in options: 43 options['loader'] = app.create_global_jinja_loader() 44 # Disable cache entirely so that themes can be switched (#662) 45 # If the cache is enabled, switching themes will cause odd rendering errors 46 SandboxedEnvironment.__init__(self, cache_size=0, **options) 47 self.app = app 48 49 50 class ThemeLoader(FileSystemLoader): 51 """Custom FileSystemLoader that switches themes based on the configuration value""" 52 def __init__(self, searchpath, encoding='utf-8', followlinks=False): 53 super(ThemeLoader, self).__init__(searchpath, encoding, followlinks) 54 self.overriden_templates = {} 55 56 def get_source(self, environment, template): 57 # Check if the template has been overriden 58 if template in self.overriden_templates: 59 return self.overriden_templates[template], template, True 60 61 # Check if the template requested is for the admin panel 62 if template.startswith('admin/'): 63 template = template[6:] # Strip out admin/ 64 template = "/".join(['admin', 'templates', template]) 65 return super(ThemeLoader, self).get_source(environment, template) 66 67 # Load regular theme data 68 theme = utils.get_config('ctf_theme') 69 template = "/".join([theme, 'templates', template]) 70 return super(ThemeLoader, 
self).get_source(environment, template) 71 72 73 def confirm_upgrade(): 74 if sys.stdin.isatty(): 75 print("/*\\ CTFd has updated and must update the database! /*\\") 76 print("/*\\ Please backup your database before proceeding! /*\\") 77 print("/*\\ CTFd maintainers are not responsible for any data loss! /*\\") 78 if input('Run database migrations (Y/N)').lower().strip() == 'y': 79 return True 80 else: 81 print('/*\\ Ignored database migrations... /*\\') 82 return False 83 else: 84 return True 85 86 87 def run_upgrade(): 88 upgrade() 89 utils.set_config('ctf_version', __version__) 90 91 92 def create_app(config='CTFd.config.Config'): 93 app = CTFdFlask(__name__) 94 with app.app_context(): 95 app.config.from_object(config) 96 97 theme_loader = ThemeLoader(os.path.join(app.root_path, 'themes'), followlinks=True) 98 app.jinja_loader = theme_loader 99 100 from CTFd.models import db, Teams, Solves, Challenges, Fails, Flags, Tags, Files, Tracking 101 102 url = create_database() 103 104 # This allows any changes to the SQLALCHEMY_DATABASE_URI to get pushed back in 105 # This is mostly so we can force MySQL's charset 106 app.config['SQLALCHEMY_DATABASE_URI'] = str(url) 107 108 # Register database 109 db.init_app(app) 110 111 # Register Flask-Migrate 112 migrations.init_app(app, db) 113 114 # Alembic sqlite support is lacking so we should just create_all anyway 115 if url.drivername.startswith('sqlite'): 116 db.create_all() 117 stamp() 118 else: 119 # This creates tables instead of db.create_all() 120 # Allows migrations to happen properly 121 upgrade() 122 123 from CTFd.models import ma 124 125 ma.init_app(app) 126 127 app.db = db 128 app.VERSION = __version__ 129 130 from CTFd.cache import cache 131 132 cache.init_app(app) 133 app.cache = cache 134 135 # If you have multiple workers you must have a shared cache 136 socketio.init_app( 137 app, 138 async_mode=app.config.get('SOCKETIO_ASYNC_MODE'), 139 message_queue=app.config.get('CACHE_REDIS_URL') 140 ) 141 142 if app.config.get('REVERSE_PROXY'): 143 app.wsgi_app = ProxyFix(app.wsgi_app) 144 145 version = utils.get_config('ctf_version') 146 147 # Upgrading from an older version of CTFd 148 if version and (StrictVersion(version) < StrictVersion(__version__)): 149 if confirm_upgrade(): 150 run_upgrade() 151 else: 152 exit() 153 154 if not version: 155 utils.set_config('ctf_version', __version__) 156 157 if not utils.get_config('ctf_theme'): 158 utils.set_config('ctf_theme', 'core') 159 160 update_check(force=True) 161 162 init_request_processors(app) 163 init_template_filters(app) 164 init_template_globals(app) 165 166 # Importing here allows tests to use sensible names (e.g. 
api instead of api_bp) 167 from CTFd.views import views 168 from CTFd.teams import teams 169 from CTFd.users import users 170 from CTFd.challenges import challenges 171 from CTFd.scoreboard import scoreboard 172 from CTFd.auth import auth 173 from CTFd.admin import admin 174 from CTFd.api import api 175 from CTFd.events import events 176 from CTFd.errors import page_not_found, forbidden, general_error, gateway_error 177 178 app.register_blueprint(views) 179 app.register_blueprint(teams) 180 app.register_blueprint(users) 181 app.register_blueprint(challenges) 182 app.register_blueprint(scoreboard) 183 app.register_blueprint(auth) 184 app.register_blueprint(api) 185 app.register_blueprint(events) 186 187 app.register_blueprint(admin) 188 189 app.register_error_handler(404, page_not_found) 190 app.register_error_handler(403, forbidden) 191 app.register_error_handler(500, general_error) 192 app.register_error_handler(502, gateway_error) 193 194 init_plugins(app) 195 196 return app 197 [end of CTFd/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/CTFd/__init__.py b/CTFd/__init__.py --- a/CTFd/__init__.py +++ b/CTFd/__init__.py @@ -21,7 +21,7 @@ reload(sys) sys.setdefaultencoding("utf-8") -__version__ = '2.0.0' +__version__ = '2.0.1' class CTFdFlask(Flask):
{"golden_diff": "diff --git a/CTFd/__init__.py b/CTFd/__init__.py\n--- a/CTFd/__init__.py\n+++ b/CTFd/__init__.py\n@@ -21,7 +21,7 @@\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n \n-__version__ = '2.0.0'\n+__version__ = '2.0.1'\n \n \n class CTFdFlask(Flask):\n", "issue": "Not possible to download files as anonymous user\n**Environment**:\r\n\r\n - CTFd Version/Commit: 2.0.0 from master\r\n - Operating System: Debian Stretch, Ubuntu 18.04, ...\r\n - Web Browser and Version: Firefox 63\r\n\r\n**What happened?**\r\n\r\n* In admin, set visibility of challenges to public\r\n* Logout\r\n* Open challenge view (`/challenges`)\r\n* Click on a challenge with a file\r\n* Try to download the file (`/files/c378d661d2c9e103c4409cd4c92d801f/alice_bob.py`\r\n* => Error 403\r\n\r\n**What did you expect to happen?**\r\n\r\n* ...\r\n* Click on a challenge with a file\r\n* Try to download the file\r\n* File downloads\r\n\r\n**How to reproduce your issue**\r\n\r\n_see above_\r\n\r\n**Any associated stack traces or error logs**\r\n\r\n_none_\n", "before_files": [{"content": "import sys\nimport os\n\nfrom distutils.version import StrictVersion\nfrom flask import Flask\nfrom werkzeug.contrib.fixers import ProxyFix\nfrom jinja2 import FileSystemLoader\nfrom jinja2.sandbox import SandboxedEnvironment\nfrom six.moves import input\n\nfrom CTFd import utils\nfrom CTFd.utils.migrations import migrations, migrate, upgrade, stamp, create_database\nfrom CTFd.utils.sessions import CachingSessionInterface\nfrom CTFd.utils.updates import update_check\nfrom CTFd.utils.initialization import init_request_processors, init_template_filters, init_template_globals\nfrom CTFd.utils.events import socketio\nfrom CTFd.plugins import init_plugins\n\n# Hack to support Unicode in Python 2 properly\nif sys.version_info[0] < 3:\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n\n__version__ = '2.0.0'\n\n\nclass CTFdFlask(Flask):\n def __init__(self, *args, **kwargs):\n \"\"\"Overriden Jinja constructor setting a custom jinja_environment\"\"\"\n self.jinja_environment = SandboxedBaseEnvironment\n self.session_interface = CachingSessionInterface(key_prefix='session')\n Flask.__init__(self, *args, **kwargs)\n\n def create_jinja_environment(self):\n \"\"\"Overridden jinja environment constructor\"\"\"\n return super(CTFdFlask, self).create_jinja_environment()\n\n\nclass SandboxedBaseEnvironment(SandboxedEnvironment):\n \"\"\"SandboxEnvironment that mimics the Flask BaseEnvironment\"\"\"\n def __init__(self, app, **options):\n if 'loader' not in options:\n options['loader'] = app.create_global_jinja_loader()\n # Disable cache entirely so that themes can be switched (#662)\n # If the cache is enabled, switching themes will cause odd rendering errors\n SandboxedEnvironment.__init__(self, cache_size=0, **options)\n self.app = app\n\n\nclass ThemeLoader(FileSystemLoader):\n \"\"\"Custom FileSystemLoader that switches themes based on the configuration value\"\"\"\n def __init__(self, searchpath, encoding='utf-8', followlinks=False):\n super(ThemeLoader, self).__init__(searchpath, encoding, followlinks)\n self.overriden_templates = {}\n\n def get_source(self, environment, template):\n # Check if the template has been overriden\n if template in self.overriden_templates:\n return self.overriden_templates[template], template, True\n\n # Check if the template requested is for the admin panel\n if template.startswith('admin/'):\n template = template[6:] # Strip out admin/\n template = \"/\".join(['admin', 'templates', template])\n return 
super(ThemeLoader, self).get_source(environment, template)\n\n # Load regular theme data\n theme = utils.get_config('ctf_theme')\n template = \"/\".join([theme, 'templates', template])\n return super(ThemeLoader, self).get_source(environment, template)\n\n\ndef confirm_upgrade():\n if sys.stdin.isatty():\n print(\"/*\\\\ CTFd has updated and must update the database! /*\\\\\")\n print(\"/*\\\\ Please backup your database before proceeding! /*\\\\\")\n print(\"/*\\\\ CTFd maintainers are not responsible for any data loss! /*\\\\\")\n if input('Run database migrations (Y/N)').lower().strip() == 'y':\n return True\n else:\n print('/*\\\\ Ignored database migrations... /*\\\\')\n return False\n else:\n return True\n\n\ndef run_upgrade():\n upgrade()\n utils.set_config('ctf_version', __version__)\n\n\ndef create_app(config='CTFd.config.Config'):\n app = CTFdFlask(__name__)\n with app.app_context():\n app.config.from_object(config)\n\n theme_loader = ThemeLoader(os.path.join(app.root_path, 'themes'), followlinks=True)\n app.jinja_loader = theme_loader\n\n from CTFd.models import db, Teams, Solves, Challenges, Fails, Flags, Tags, Files, Tracking\n\n url = create_database()\n\n # This allows any changes to the SQLALCHEMY_DATABASE_URI to get pushed back in\n # This is mostly so we can force MySQL's charset\n app.config['SQLALCHEMY_DATABASE_URI'] = str(url)\n\n # Register database\n db.init_app(app)\n\n # Register Flask-Migrate\n migrations.init_app(app, db)\n\n # Alembic sqlite support is lacking so we should just create_all anyway\n if url.drivername.startswith('sqlite'):\n db.create_all()\n stamp()\n else:\n # This creates tables instead of db.create_all()\n # Allows migrations to happen properly\n upgrade()\n\n from CTFd.models import ma\n\n ma.init_app(app)\n\n app.db = db\n app.VERSION = __version__\n\n from CTFd.cache import cache\n\n cache.init_app(app)\n app.cache = cache\n\n # If you have multiple workers you must have a shared cache\n socketio.init_app(\n app,\n async_mode=app.config.get('SOCKETIO_ASYNC_MODE'),\n message_queue=app.config.get('CACHE_REDIS_URL')\n )\n\n if app.config.get('REVERSE_PROXY'):\n app.wsgi_app = ProxyFix(app.wsgi_app)\n\n version = utils.get_config('ctf_version')\n\n # Upgrading from an older version of CTFd\n if version and (StrictVersion(version) < StrictVersion(__version__)):\n if confirm_upgrade():\n run_upgrade()\n else:\n exit()\n\n if not version:\n utils.set_config('ctf_version', __version__)\n\n if not utils.get_config('ctf_theme'):\n utils.set_config('ctf_theme', 'core')\n\n update_check(force=True)\n\n init_request_processors(app)\n init_template_filters(app)\n init_template_globals(app)\n\n # Importing here allows tests to use sensible names (e.g. 
api instead of api_bp)\n from CTFd.views import views\n from CTFd.teams import teams\n from CTFd.users import users\n from CTFd.challenges import challenges\n from CTFd.scoreboard import scoreboard\n from CTFd.auth import auth\n from CTFd.admin import admin\n from CTFd.api import api\n from CTFd.events import events\n from CTFd.errors import page_not_found, forbidden, general_error, gateway_error\n\n app.register_blueprint(views)\n app.register_blueprint(teams)\n app.register_blueprint(users)\n app.register_blueprint(challenges)\n app.register_blueprint(scoreboard)\n app.register_blueprint(auth)\n app.register_blueprint(api)\n app.register_blueprint(events)\n\n app.register_blueprint(admin)\n\n app.register_error_handler(404, page_not_found)\n app.register_error_handler(403, forbidden)\n app.register_error_handler(500, general_error)\n app.register_error_handler(502, gateway_error)\n\n init_plugins(app)\n\n return app\n", "path": "CTFd/__init__.py"}]}
2,754
100
gh_patches_debug_26189
rasdani/github-patches
git_diff
pytorch__vision-870
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Invalid hash error on ImageNet dataset In my environment, md5 value of `meta.bin` used in ImageNet dataset is different from the value defined in `imagenet.py`. `meta.bin` is generated by `torch.save` in the code. I found python2 and3 generate different files. md5sum hashes are as follows. - (defined) `7e0d3cf156177e4fc47011cdd30ce706` - (Python 2.7.16, Ubuntu) `a36fd93cf3900286d99e24ad0a73ce04` - (Python 3.7.3, Ubuntu) `ca981e8aac175178e80e7949d90ee85c` https://github.com/pytorch/vision/blob/9a481d0bec2700763a799ff148fe2e083b575441/torchvision/datasets/imagenet.py#L23-L26 https://github.com/pytorch/vision/blob/9a481d0bec2700763a799ff148fe2e083b575441/torchvision/datasets/imagenet.py#L117-L118 </issue> <code> [start of torchvision/datasets/imagenet.py] 1 from __future__ import print_function 2 import os 3 import shutil 4 import torch 5 from .folder import ImageFolder 6 from .utils import check_integrity, download_url 7 8 ARCHIVE_DICT = { 9 'train': { 10 'url': 'http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_img_train.tar', 11 'md5': '1d675b47d978889d74fa0da5fadfb00e', 12 }, 13 'val': { 14 'url': 'http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_img_val.tar', 15 'md5': '29b22e2961454d5413ddabcf34fc5622', 16 }, 17 'devkit': { 18 'url': 'http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_devkit_t12.tar.gz', 19 'md5': 'fa75699e90414af021442c21a62c3abf', 20 } 21 } 22 23 META_DICT = { 24 'filename': 'meta.bin', 25 'md5': '7e0d3cf156177e4fc47011cdd30ce706', 26 } 27 28 29 class ImageNet(ImageFolder): 30 """`ImageNet <http://image-net.org/>`_ 2012 Classification Dataset. 31 32 Args: 33 root (string): Root directory of the ImageNet Dataset. 34 split (string, optional): The dataset split, supports ``train``, or ``val``. 35 download (bool, optional): If true, downloads the dataset from the internet and 36 puts it in root directory. If dataset is already downloaded, it is not 37 downloaded again. 38 transform (callable, optional): A function/transform that takes in an PIL image 39 and returns a transformed version. E.g, ``transforms.RandomCrop`` 40 target_transform (callable, optional): A function/transform that takes in the 41 target and transforms it. 42 loader (callable, optional): A function to load an image given its path. 43 44 Attributes: 45 classes (list): List of the class names. 46 class_to_idx (dict): Dict with items (class_name, class_index). 47 wnids (list): List of the WordNet IDs. 48 wnid_to_idx (dict): Dict with items (wordnet_id, class_index). 
49 imgs (list): List of (image path, class_index) tuples 50 targets (list): The class_index value for each image in the dataset 51 """ 52 53 def __init__(self, root, split='train', download=False, **kwargs): 54 root = self.root = os.path.expanduser(root) 55 self.split = self._verify_split(split) 56 57 if download: 58 self.download() 59 wnid_to_classes = self._load_meta_file()[0] 60 61 super(ImageNet, self).__init__(self.split_folder, **kwargs) 62 self.root = root 63 64 idcs = [idx for _, idx in self.imgs] 65 self.wnids = self.classes 66 self.wnid_to_idx = {wnid: idx for idx, wnid in zip(idcs, self.wnids)} 67 self.classes = [wnid_to_classes[wnid] for wnid in self.wnids] 68 self.class_to_idx = {cls: idx 69 for clss, idx in zip(self.classes, idcs) 70 for cls in clss} 71 72 def download(self): 73 if not self._check_meta_file_integrity(): 74 tmpdir = os.path.join(self.root, 'tmp') 75 76 archive_dict = ARCHIVE_DICT['devkit'] 77 download_and_extract_tar(archive_dict['url'], self.root, 78 extract_root=tmpdir, 79 md5=archive_dict['md5']) 80 devkit_folder = _splitexts(os.path.basename(archive_dict['url']))[0] 81 meta = parse_devkit(os.path.join(tmpdir, devkit_folder)) 82 self._save_meta_file(*meta) 83 84 shutil.rmtree(tmpdir) 85 86 if not os.path.isdir(self.split_folder): 87 archive_dict = ARCHIVE_DICT[self.split] 88 download_and_extract_tar(archive_dict['url'], self.root, 89 extract_root=self.split_folder, 90 md5=archive_dict['md5']) 91 92 if self.split == 'train': 93 prepare_train_folder(self.split_folder) 94 elif self.split == 'val': 95 val_wnids = self._load_meta_file()[1] 96 prepare_val_folder(self.split_folder, val_wnids) 97 else: 98 msg = ("You set download=True, but a folder '{}' already exist in " 99 "the root directory. If you want to re-download or re-extract the " 100 "archive, delete the folder.") 101 print(msg.format(self.split)) 102 103 @property 104 def meta_file(self): 105 return os.path.join(self.root, META_DICT['filename']) 106 107 def _check_meta_file_integrity(self): 108 return check_integrity(self.meta_file, META_DICT['md5']) 109 110 def _load_meta_file(self): 111 if self._check_meta_file_integrity(): 112 return torch.load(self.meta_file) 113 else: 114 raise RuntimeError("Meta file not found or corrupted.", 115 "You can use download=True to create it.") 116 117 def _save_meta_file(self, wnid_to_class, val_wnids): 118 torch.save((wnid_to_class, val_wnids), self.meta_file) 119 120 def _verify_split(self, split): 121 if split not in self.valid_splits: 122 msg = "Unknown split {} .".format(split) 123 msg += "Valid splits are {{}}.".format(", ".join(self.valid_splits)) 124 raise ValueError(msg) 125 return split 126 127 @property 128 def valid_splits(self): 129 return 'train', 'val' 130 131 @property 132 def split_folder(self): 133 return os.path.join(self.root, self.split) 134 135 def extra_repr(self): 136 return "Split: {split}".format(**self.__dict__) 137 138 139 def extract_tar(src, dest=None, gzip=None, delete=False): 140 import tarfile 141 142 if dest is None: 143 dest = os.path.dirname(src) 144 if gzip is None: 145 gzip = src.lower().endswith('.gz') 146 147 mode = 'r:gz' if gzip else 'r' 148 with tarfile.open(src, mode) as tarfh: 149 tarfh.extractall(path=dest) 150 151 if delete: 152 os.remove(src) 153 154 155 def download_and_extract_tar(url, download_root, extract_root=None, filename=None, 156 md5=None, **kwargs): 157 download_root = os.path.expanduser(download_root) 158 if extract_root is None: 159 extract_root = download_root 160 if filename is None: 161 filename = 
os.path.basename(url) 162 163 if not check_integrity(os.path.join(download_root, filename), md5): 164 download_url(url, download_root, filename=filename, md5=md5) 165 166 extract_tar(os.path.join(download_root, filename), extract_root, **kwargs) 167 168 169 def parse_devkit(root): 170 idx_to_wnid, wnid_to_classes = parse_meta(root) 171 val_idcs = parse_val_groundtruth(root) 172 val_wnids = [idx_to_wnid[idx] for idx in val_idcs] 173 return wnid_to_classes, val_wnids 174 175 176 def parse_meta(devkit_root, path='data', filename='meta.mat'): 177 import scipy.io as sio 178 179 metafile = os.path.join(devkit_root, path, filename) 180 meta = sio.loadmat(metafile, squeeze_me=True)['synsets'] 181 nums_children = list(zip(*meta))[4] 182 meta = [meta[idx] for idx, num_children in enumerate(nums_children) 183 if num_children == 0] 184 idcs, wnids, classes = list(zip(*meta))[:3] 185 classes = [tuple(clss.split(', ')) for clss in classes] 186 idx_to_wnid = {idx: wnid for idx, wnid in zip(idcs, wnids)} 187 wnid_to_classes = {wnid: clss for wnid, clss in zip(wnids, classes)} 188 return idx_to_wnid, wnid_to_classes 189 190 191 def parse_val_groundtruth(devkit_root, path='data', 192 filename='ILSVRC2012_validation_ground_truth.txt'): 193 with open(os.path.join(devkit_root, path, filename), 'r') as txtfh: 194 val_idcs = txtfh.readlines() 195 return [int(val_idx) for val_idx in val_idcs] 196 197 198 def prepare_train_folder(folder): 199 for archive in [os.path.join(folder, archive) for archive in os.listdir(folder)]: 200 extract_tar(archive, os.path.splitext(archive)[0], delete=True) 201 202 203 def prepare_val_folder(folder, wnids): 204 img_files = sorted([os.path.join(folder, file) for file in os.listdir(folder)]) 205 206 for wnid in set(wnids): 207 os.mkdir(os.path.join(folder, wnid)) 208 209 for wnid, img_file in zip(wnids, img_files): 210 shutil.move(img_file, os.path.join(folder, wnid, os.path.basename(img_file))) 211 212 213 def _splitexts(root): 214 exts = [] 215 ext = '.' 216 while ext: 217 root, ext = os.path.splitext(root) 218 exts.append(ext) 219 return root, ''.join(reversed(exts)) 220 [end of torchvision/datasets/imagenet.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/torchvision/datasets/imagenet.py b/torchvision/datasets/imagenet.py --- a/torchvision/datasets/imagenet.py +++ b/torchvision/datasets/imagenet.py @@ -20,11 +20,6 @@ } } -META_DICT = { - 'filename': 'meta.bin', - 'md5': '7e0d3cf156177e4fc47011cdd30ce706', -} - class ImageNet(ImageFolder): """`ImageNet <http://image-net.org/>`_ 2012 Classification Dataset. @@ -70,7 +65,7 @@ for cls in clss} def download(self): - if not self._check_meta_file_integrity(): + if not check_integrity(self.meta_file): tmpdir = os.path.join(self.root, 'tmp') archive_dict = ARCHIVE_DICT['devkit'] @@ -102,13 +97,10 @@ @property def meta_file(self): - return os.path.join(self.root, META_DICT['filename']) - - def _check_meta_file_integrity(self): - return check_integrity(self.meta_file, META_DICT['md5']) + return os.path.join(self.root, 'meta.bin') def _load_meta_file(self): - if self._check_meta_file_integrity(): + if check_integrity(self.meta_file): return torch.load(self.meta_file) else: raise RuntimeError("Meta file not found or corrupted.",
{"golden_diff": "diff --git a/torchvision/datasets/imagenet.py b/torchvision/datasets/imagenet.py\n--- a/torchvision/datasets/imagenet.py\n+++ b/torchvision/datasets/imagenet.py\n@@ -20,11 +20,6 @@\n }\n }\n \n-META_DICT = {\n- 'filename': 'meta.bin',\n- 'md5': '7e0d3cf156177e4fc47011cdd30ce706',\n-}\n-\n \n class ImageNet(ImageFolder):\n \"\"\"`ImageNet <http://image-net.org/>`_ 2012 Classification Dataset.\n@@ -70,7 +65,7 @@\n for cls in clss}\n \n def download(self):\n- if not self._check_meta_file_integrity():\n+ if not check_integrity(self.meta_file):\n tmpdir = os.path.join(self.root, 'tmp')\n \n archive_dict = ARCHIVE_DICT['devkit']\n@@ -102,13 +97,10 @@\n \n @property\n def meta_file(self):\n- return os.path.join(self.root, META_DICT['filename'])\n-\n- def _check_meta_file_integrity(self):\n- return check_integrity(self.meta_file, META_DICT['md5'])\n+ return os.path.join(self.root, 'meta.bin')\n \n def _load_meta_file(self):\n- if self._check_meta_file_integrity():\n+ if check_integrity(self.meta_file):\n return torch.load(self.meta_file)\n else:\n raise RuntimeError(\"Meta file not found or corrupted.\",\n", "issue": "Invalid hash error on ImageNet dataset\nIn my environment, md5 value of `meta.bin` used in ImageNet dataset is different from the value defined in `imagenet.py`.\r\n\r\n`meta.bin` is generated by `torch.save` in the code. I found python2 and3 generate different files.\r\n\r\nmd5sum hashes are as follows.\r\n\r\n- (defined) `7e0d3cf156177e4fc47011cdd30ce706`\r\n- (Python 2.7.16, Ubuntu) `a36fd93cf3900286d99e24ad0a73ce04`\r\n- (Python 3.7.3, Ubuntu) `ca981e8aac175178e80e7949d90ee85c`\r\n\r\nhttps://github.com/pytorch/vision/blob/9a481d0bec2700763a799ff148fe2e083b575441/torchvision/datasets/imagenet.py#L23-L26\r\n\r\nhttps://github.com/pytorch/vision/blob/9a481d0bec2700763a799ff148fe2e083b575441/torchvision/datasets/imagenet.py#L117-L118\n", "before_files": [{"content": "from __future__ import print_function\nimport os\nimport shutil\nimport torch\nfrom .folder import ImageFolder\nfrom .utils import check_integrity, download_url\n\nARCHIVE_DICT = {\n 'train': {\n 'url': 'http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_img_train.tar',\n 'md5': '1d675b47d978889d74fa0da5fadfb00e',\n },\n 'val': {\n 'url': 'http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_img_val.tar',\n 'md5': '29b22e2961454d5413ddabcf34fc5622',\n },\n 'devkit': {\n 'url': 'http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_devkit_t12.tar.gz',\n 'md5': 'fa75699e90414af021442c21a62c3abf',\n }\n}\n\nMETA_DICT = {\n 'filename': 'meta.bin',\n 'md5': '7e0d3cf156177e4fc47011cdd30ce706',\n}\n\n\nclass ImageNet(ImageFolder):\n \"\"\"`ImageNet <http://image-net.org/>`_ 2012 Classification Dataset.\n\n Args:\n root (string): Root directory of the ImageNet Dataset.\n split (string, optional): The dataset split, supports ``train``, or ``val``.\n download (bool, optional): If true, downloads the dataset from the internet and\n puts it in root directory. If dataset is already downloaded, it is not\n downloaded again.\n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. 
E.g, ``transforms.RandomCrop``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n loader (callable, optional): A function to load an image given its path.\n\n Attributes:\n classes (list): List of the class names.\n class_to_idx (dict): Dict with items (class_name, class_index).\n wnids (list): List of the WordNet IDs.\n wnid_to_idx (dict): Dict with items (wordnet_id, class_index).\n imgs (list): List of (image path, class_index) tuples\n targets (list): The class_index value for each image in the dataset\n \"\"\"\n\n def __init__(self, root, split='train', download=False, **kwargs):\n root = self.root = os.path.expanduser(root)\n self.split = self._verify_split(split)\n\n if download:\n self.download()\n wnid_to_classes = self._load_meta_file()[0]\n\n super(ImageNet, self).__init__(self.split_folder, **kwargs)\n self.root = root\n\n idcs = [idx for _, idx in self.imgs]\n self.wnids = self.classes\n self.wnid_to_idx = {wnid: idx for idx, wnid in zip(idcs, self.wnids)}\n self.classes = [wnid_to_classes[wnid] for wnid in self.wnids]\n self.class_to_idx = {cls: idx\n for clss, idx in zip(self.classes, idcs)\n for cls in clss}\n\n def download(self):\n if not self._check_meta_file_integrity():\n tmpdir = os.path.join(self.root, 'tmp')\n\n archive_dict = ARCHIVE_DICT['devkit']\n download_and_extract_tar(archive_dict['url'], self.root,\n extract_root=tmpdir,\n md5=archive_dict['md5'])\n devkit_folder = _splitexts(os.path.basename(archive_dict['url']))[0]\n meta = parse_devkit(os.path.join(tmpdir, devkit_folder))\n self._save_meta_file(*meta)\n\n shutil.rmtree(tmpdir)\n\n if not os.path.isdir(self.split_folder):\n archive_dict = ARCHIVE_DICT[self.split]\n download_and_extract_tar(archive_dict['url'], self.root,\n extract_root=self.split_folder,\n md5=archive_dict['md5'])\n\n if self.split == 'train':\n prepare_train_folder(self.split_folder)\n elif self.split == 'val':\n val_wnids = self._load_meta_file()[1]\n prepare_val_folder(self.split_folder, val_wnids)\n else:\n msg = (\"You set download=True, but a folder '{}' already exist in \"\n \"the root directory. 
If you want to re-download or re-extract the \"\n \"archive, delete the folder.\")\n print(msg.format(self.split))\n\n @property\n def meta_file(self):\n return os.path.join(self.root, META_DICT['filename'])\n\n def _check_meta_file_integrity(self):\n return check_integrity(self.meta_file, META_DICT['md5'])\n\n def _load_meta_file(self):\n if self._check_meta_file_integrity():\n return torch.load(self.meta_file)\n else:\n raise RuntimeError(\"Meta file not found or corrupted.\",\n \"You can use download=True to create it.\")\n\n def _save_meta_file(self, wnid_to_class, val_wnids):\n torch.save((wnid_to_class, val_wnids), self.meta_file)\n\n def _verify_split(self, split):\n if split not in self.valid_splits:\n msg = \"Unknown split {} .\".format(split)\n msg += \"Valid splits are {{}}.\".format(\", \".join(self.valid_splits))\n raise ValueError(msg)\n return split\n\n @property\n def valid_splits(self):\n return 'train', 'val'\n\n @property\n def split_folder(self):\n return os.path.join(self.root, self.split)\n\n def extra_repr(self):\n return \"Split: {split}\".format(**self.__dict__)\n\n\ndef extract_tar(src, dest=None, gzip=None, delete=False):\n import tarfile\n\n if dest is None:\n dest = os.path.dirname(src)\n if gzip is None:\n gzip = src.lower().endswith('.gz')\n\n mode = 'r:gz' if gzip else 'r'\n with tarfile.open(src, mode) as tarfh:\n tarfh.extractall(path=dest)\n\n if delete:\n os.remove(src)\n\n\ndef download_and_extract_tar(url, download_root, extract_root=None, filename=None,\n md5=None, **kwargs):\n download_root = os.path.expanduser(download_root)\n if extract_root is None:\n extract_root = download_root\n if filename is None:\n filename = os.path.basename(url)\n\n if not check_integrity(os.path.join(download_root, filename), md5):\n download_url(url, download_root, filename=filename, md5=md5)\n\n extract_tar(os.path.join(download_root, filename), extract_root, **kwargs)\n\n\ndef parse_devkit(root):\n idx_to_wnid, wnid_to_classes = parse_meta(root)\n val_idcs = parse_val_groundtruth(root)\n val_wnids = [idx_to_wnid[idx] for idx in val_idcs]\n return wnid_to_classes, val_wnids\n\n\ndef parse_meta(devkit_root, path='data', filename='meta.mat'):\n import scipy.io as sio\n\n metafile = os.path.join(devkit_root, path, filename)\n meta = sio.loadmat(metafile, squeeze_me=True)['synsets']\n nums_children = list(zip(*meta))[4]\n meta = [meta[idx] for idx, num_children in enumerate(nums_children)\n if num_children == 0]\n idcs, wnids, classes = list(zip(*meta))[:3]\n classes = [tuple(clss.split(', ')) for clss in classes]\n idx_to_wnid = {idx: wnid for idx, wnid in zip(idcs, wnids)}\n wnid_to_classes = {wnid: clss for wnid, clss in zip(wnids, classes)}\n return idx_to_wnid, wnid_to_classes\n\n\ndef parse_val_groundtruth(devkit_root, path='data',\n filename='ILSVRC2012_validation_ground_truth.txt'):\n with open(os.path.join(devkit_root, path, filename), 'r') as txtfh:\n val_idcs = txtfh.readlines()\n return [int(val_idx) for val_idx in val_idcs]\n\n\ndef prepare_train_folder(folder):\n for archive in [os.path.join(folder, archive) for archive in os.listdir(folder)]:\n extract_tar(archive, os.path.splitext(archive)[0], delete=True)\n\n\ndef prepare_val_folder(folder, wnids):\n img_files = sorted([os.path.join(folder, file) for file in os.listdir(folder)])\n\n for wnid in set(wnids):\n os.mkdir(os.path.join(folder, wnid))\n\n for wnid, img_file in zip(wnids, img_files):\n shutil.move(img_file, os.path.join(folder, wnid, os.path.basename(img_file)))\n\n\ndef _splitexts(root):\n exts = 
[]\n ext = '.'\n while ext:\n root, ext = os.path.splitext(root)\n exts.append(ext)\n return root, ''.join(reversed(exts))\n", "path": "torchvision/datasets/imagenet.py"}]}
3,499
353
gh_patches_debug_29960
rasdani/github-patches
git_diff
tough-dev-school__education-backend-222
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Переписать интеграцию с телеграмом Сейчас используем какой-то [мутный сервис](https://github.com/f213/education-backend/blob/master/src/app/integrations/tg.py#L4) для отправки сообщения в телегу. Так вышло потому, что когда мы запускались, API телеграма было раскомнадзорено. Надо отказаться от использования этого сервиса. Заодно, сделать возможность указывать отдельные каналы уведомлений о новых заказах для разных курсов. </issue> <code> [start of src/app/integrations/tg.py] 1 import requests 2 3 4 def send_happiness_message(text): 5 response = requests.post('https://timepad.f213.in/msg/', json={ 6 'text': text, 7 }) 8 9 assert response.status_code == 200, 'TG proxy should return 200' 10 assert response.json()['ok'] is True, 'TG proxy should say msg is ok' 11 [end of src/app/integrations/tg.py] [start of src/app/settings.py] 1 import environ 2 import os 3 from celery.schedules import crontab 4 5 root = environ.Path(__file__) - 2 # three folder back (/a/b/c/ - 3 = /) 6 env = environ.Env(DEBUG=(bool, False)) # set default values and casting 7 environ.Env.read_env() # reading .env file 8 SITE_ROOT = root() 9 10 USE_L10N = True 11 USE_i18N = True 12 13 LANGUAGE_CODE = 'ru' 14 LOCALE_PATHS = ['locale'] 15 16 INTERNAL_IPS = [ 17 '127.0.0.1', 18 ] 19 FRONTEND_URL = 'https://education.borshev.com' 20 21 USE_TZ = False 22 TIME_ZONE = env('TIME_ZONE', cast=str, default='Europe/Moscow') 23 24 # Build paths inside the project like this: os.path.join(BASE_DIR, ...) 25 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 26 TEST_RUNNER = 'app.test.disable_test_command_runner.DisableTestCommandRunner' 27 28 29 # Quick-start development settings - unsuitable for production 30 # See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/ 31 32 # SECURITY WARNING: keep the secret key used in production secret! 33 SECRET_KEY = env('SECRET_KEY', cast=str, default='s3cr3t') 34 35 # SECURITY WARNING: don't run with debug turned on in production! 
36 DEBUG = env('DEBUG', cast=bool, default=False) 37 CI = env('CI', cast=bool, default=False) 38 ANONYMIZE_ENABLED = DEBUG 39 40 ABSOLUTE_HOST = env('ABSOLUTE_HOST', cast=str, default='https://edu-app.borshev.com') 41 ALLOWED_HOSTS = [ 42 'edu-app.borshev.com', 43 'localhost', 44 'localhost:8000', 45 'education.borshev.com', 46 ABSOLUTE_HOST.replace('https://', ''), 47 ] 48 49 CORS_ORIGIN_WHITELIST = [ 50 'https://pmdaily.ru', 51 'https://education.borshev.com', 52 ] 53 54 CSRF_TRUSTED_ORIGINS = [ 55 'pmdaily.ru', 56 'education.borshev.com', 57 'borshev.com', 58 ] 59 60 61 # Application definition 62 63 INSTALLED_APPS = [ 64 'app', 65 'users', 66 'orders', 67 'products', 68 'shipping', 69 'tinkoff', 70 'triggers', 71 'magnets', 72 'banking', 73 74 'corsheaders', 75 'hattori', 76 'anymail', 77 'rest_framework', 78 'rest_framework.authtoken', 79 'drf_recaptcha', 80 'django_filters', 81 82 'axes', 83 'django.contrib.admin', 84 'django.contrib.auth', 85 'django.contrib.contenttypes', 86 'django.contrib.sessions', 87 'django.contrib.messages', 88 'django.contrib.staticfiles', 89 90 'debug_toolbar', 91 ] 92 93 MIDDLEWARE = [ 94 'django.middleware.security.SecurityMiddleware', 95 96 'django.contrib.sessions.middleware.SessionMiddleware', 97 'corsheaders.middleware.CorsMiddleware', 98 'django.middleware.common.CommonMiddleware', 99 # 'django.middleware.csrf.CsrfViewMiddleware', 100 'django.contrib.auth.middleware.AuthenticationMiddleware', 101 'django.contrib.auth.middleware.RemoteUserMiddleware', 102 'django.contrib.messages.middleware.MessageMiddleware', 103 'django.middleware.clickjacking.XFrameOptionsMiddleware', 104 'app.middleware.real_ip.real_ip_middleware', 105 'axes.middleware.AxesMiddleware', 106 'debug_toolbar.middleware.DebugToolbarMiddleware', 107 ] 108 109 if not DEBUG and not CI: 110 MIDDLEWARE.insert(1, 'whitenoise.middleware.WhiteNoiseMiddleware') 111 112 113 REST_FRAMEWORK = { 114 'DEFAULT_PERMISSION_CLASSES': ( 115 'rest_framework.permissions.IsAuthenticated', 116 ), 117 'DEFAULT_AUTHENTICATION_CLASSES': ( 118 'rest_framework.authentication.TokenAuthentication', 119 ), 120 'DEFAULT_RENDERER_CLASSES': [ 121 'app.renderers.AppJSONRenderer', 122 ], 123 'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.URLPathVersioning', 124 'DEFAULT_PAGINATION_CLASS': 'app.pagination.AppPagination', 125 'PAGE_SIZE': 20, 126 } 127 128 ROOT_URLCONF = 'app.urls' 129 130 TEMPLATES = [ 131 { 132 'BACKEND': 'django.template.backends.django.DjangoTemplates', 133 'DIRS': [], 134 'APP_DIRS': True, 135 'OPTIONS': { 136 'context_processors': [ 137 'django.template.context_processors.debug', 138 'django.template.context_processors.request', 139 'django.contrib.auth.context_processors.auth', 140 'django.contrib.messages.context_processors.messages', 141 ], 142 }, 143 }, 144 ] 145 146 WSGI_APPLICATION = 'app.wsgi.application' 147 148 149 # Database 150 # https://docs.djangoproject.com/en/2.2/ref/settings/#databases 151 DATABASES = { 152 'default': env.db(), # Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ 153 } 154 AUTH_USER_MODEL = 'users.User' 155 AUTHENTICATION_BACKENDS = [ 156 'axes.backends.AxesBackend', 157 'django.contrib.auth.backends.ModelBackend', 158 'django.contrib.auth.backends.RemoteUserBackend', 159 ] 160 161 HEALTH_CHECKS_ERROR_CODE = 503 162 HEALTH_CHECKS = { 163 'db': 'django_healthchecks.contrib.check_database', 164 } 165 166 MEDIA_URL = env('MEDIA_URL', default='/media/') 167 168 STATIC_URL = env('STATIC_URL', default='/static/') 169 STATIC_ROOT = 
env('STATIC_ROOT') 170 171 SENTRY_DSN = env('SENTRY_DSN', cast=str, default='') 172 173 if not DEBUG and SENTRY_DSN: 174 import sentry_sdk 175 from sentry_sdk.integrations.celery import CeleryIntegration 176 from sentry_sdk.integrations.django import DjangoIntegration 177 from sentry_sdk.integrations.redis import RedisIntegration 178 179 sentry_sdk.init( 180 dsn=SENTRY_DSN, 181 integrations=[DjangoIntegration(), CeleryIntegration(), RedisIntegration()], 182 ) 183 184 BROKER_URL = env('CELERY_BACKEND') 185 CELERY_ALWAYS_EAGER = env('CELERY_ALWAYS_EAGER', cast=bool, default=DEBUG) # by default in debug mode we run all celery tasks in foregroud. 186 CELERY_TIMEZONE = TIME_ZONE 187 CELERY_ENABLE_UTC = False 188 CELERYBEAT_SCHEDULE = { 189 'run_started_purchase_trigger': { 190 'task': 'triggers.tasks.check_for_started_purchase_triggers', 191 'schedule': crontab(hour='*', minute=15), 192 }, 193 'run_record_feedback_trigger': { 194 'task': 'triggers.tasks.check_for_record_feedback_triggers', 195 'schedule': crontab(hour='*', minute=15), 196 }, 197 'ship_unshipped_orders': { 198 'task': 'orders.tasks.ship_unshipped_orders', 199 'schedule': crontab(hour='*', minute='*/2'), 200 }, 201 } 202 203 204 AWS_ACCESS_KEY_ID = env('AWS_ACCESS_KEY_ID', default=None) 205 AWS_SECRET_ACCESS_KEY = env('AWS_SECRET_ACCESS_KEY', default=None) 206 AWS_STORAGE_BUCKET_NAME = env('AWS_STORAGE_BUCKET_NAME', default=None) 207 AWS_S3_REGION_NAME = env('AWS_S3_REGION_NAME', default=None) 208 AWS_S3_ENDPOINT_URL = env('AWS_S3_ENDPOINT_URL', default=None) 209 210 EMAIL_ENABLED = env('EMAIL_ENABLED', cast=bool, default=False) 211 212 EMAIL_BACKEND = env('EMAIL_BACKEND', default='django.core.mail.backends.console.EmailBackend') 213 214 MAILCHIMP_API_KEY = env('MAILCHIMP_API_KEY', default='') 215 MAILCHIMP_CONTACT_LIST_ID = env('MAILCHIMP_CONTACT_LIST_ID', cast=str, default=None) 216 217 DEFAULT_FROM_EMAIL = env('EMAIL_FROM', cast=str, default='') 218 ANYMAIL = { 219 'POSTMARK_SERVER_TOKEN': env('POSTMARK_SERVER_TOKEN', cast=str, default=''), 220 'DEBUG_API_REQUESTS': env('DEBUG'), 221 } 222 223 CLICKMEETING_API_KEY = env('CLICKMEETING_API_KEY', default=None, cast=str) 224 225 ZOOMUS_API_KEY = env('ZOOMUS_API_KEY', default=None, cast=str) 226 ZOOMUS_API_SECRET = env('ZOOMUS_API_SECRET', default=None, cast=str) 227 228 TINKOFF_TERMINAL_KEY = env('TINKOFF_TERMINAL_KEY', default=None) 229 TINKOFF_TERMINAL_PASSWORD = env('TINKOFF_TERMINAL_PASSWORD', default=None) 230 TINKOFF_CREDIT_SHOP_ID = env('TINKOFF_CREDIT_SHOP_ID', default=None) 231 TINKOFF_CREDIT_SHOWCASE_ID = env('TINKOFF_CREDIT_SHOWCASE_ID', default=None) 232 233 TINKOFF_CREDIT_DEMO_MODE = env('TINKOFF_CREDIT_DEMO_MODE', default=DEBUG) 234 235 SEND_HAPPINESS_MESSAGES = env('SEND_HAPPINESS_MESSAGES', cast=bool, default=False) 236 237 DRF_RECAPTCHA_SECRET_KEY = env('RECAPTCHA_SECRET_KEY', cast=str, default='') 238 DRF_RECAPTCHA_TESTING = DRF_RECAPTCHA_TESTING_PASS = not env('RECAPTCHA_ENABLED', cast=bool, default=True) 239 [end of src/app/settings.py] [start of src/orders/services/order_shipper.py] 1 from django.conf import settings 2 from django.utils import timezone 3 4 from app.tasks import send_happiness_message, send_mail 5 from orders.models import Order 6 7 8 class Pigwidgeon: 9 """Ship the order (actualy calls item ship() method)""" 10 def __init__(self, order: Order, silent: bool = False): 11 self.order = order 12 self.silent = silent 13 14 def __call__(self): 15 if self.ship(): 16 self.mark_order_as_shipped() 17 18 if not self.order.notification_to_giver_is_sent: 19 
self.send_notification_to_giver() 20 21 if not self.silent: 22 self.send_happiness_message() 23 24 def ship(self) -> bool: 25 """Ship the order. Returns true if order is shipped""" 26 desired_date = self.order.desired_shipment_date 27 if desired_date is None or desired_date <= timezone.now(): 28 self.order.item.ship(to=self.order.user, order=self.order) 29 30 return True 31 32 return False 33 34 def mark_order_as_shipped(self): 35 self.order.shipped = timezone.now() 36 self.order.save() 37 38 def send_happiness_message(self): 39 if not settings.SEND_HAPPINESS_MESSAGES: 40 return 41 42 send_happiness_message.delay(text='💰+{sum} ₽, {user}, {reason}'.format( 43 sum=str(self.order.price).replace('.00', ''), 44 user=str(self.order.user), 45 reason=str(self.order.item) if self.order.giver is None else f'{self.order.item} (подарок)', 46 )) 47 48 def send_notification_to_giver(self): 49 if self.order.giver is None: 50 return 51 52 if self.order.desired_shipment_date is None: 53 return 54 55 send_mail.delay( 56 to=self.order.giver.email, 57 template_id='gift-notification-for-giver', # postmark 58 disable_antispam=True, 59 ctx={ 60 'item_name': self.order.item.full_name, 61 'receiver_name': str(self.order.user), 62 'receiver_email': self.order.user.email, 63 'desired_shipment_date': self.order.desired_shipment_date.strftime('%d.%m.%Y'), 64 }, 65 ) 66 67 self.order.notification_to_giver_is_sent = True 68 self.order.save() 69 [end of src/orders/services/order_shipper.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/app/integrations/tg.py b/src/app/integrations/tg.py --- a/src/app/integrations/tg.py +++ b/src/app/integrations/tg.py @@ -1,10 +1,18 @@ import requests +from django.conf import settings -def send_happiness_message(text): - response = requests.post('https://timepad.f213.in/msg/', json={ +def send_message(channel, text): + url = f'https://api.telegram.org/bot{settings.BOT_TOKEN}/sendMessage' + response = requests.post(url, data={ + 'chat_id': channel, 'text': text, + 'parse_mode': 'markdown', + 'disable_web_page_preview': True, }) - assert response.status_code == 200, 'TG proxy should return 200' - assert response.json()['ok'] is True, 'TG proxy should say msg is ok' + assert response.status_code == 200, 'TG should return 200' + + +def send_happiness_message(text): + send_message(settings.HAPPINESS_MESSAGES_CHAT_ID, text) diff --git a/src/app/settings.py b/src/app/settings.py --- a/src/app/settings.py +++ b/src/app/settings.py @@ -232,7 +232,8 @@ TINKOFF_CREDIT_DEMO_MODE = env('TINKOFF_CREDIT_DEMO_MODE', default=DEBUG) -SEND_HAPPINESS_MESSAGES = env('SEND_HAPPINESS_MESSAGES', cast=bool, default=False) +BOT_TOKEN = env('BOT_TOKEN', cast=str, default=None) +HAPPINESS_MESSAGES_CHAT_ID = env('HAPPINESS_MESSAGES_CHAT_ID', cast=str, default=None) DRF_RECAPTCHA_SECRET_KEY = env('RECAPTCHA_SECRET_KEY', cast=str, default='') DRF_RECAPTCHA_TESTING = DRF_RECAPTCHA_TESTING_PASS = not env('RECAPTCHA_ENABLED', cast=bool, default=True) diff --git a/src/orders/services/order_shipper.py b/src/orders/services/order_shipper.py --- a/src/orders/services/order_shipper.py +++ b/src/orders/services/order_shipper.py @@ -36,7 +36,7 @@ self.order.save() def send_happiness_message(self): - if not settings.SEND_HAPPINESS_MESSAGES: + if not settings.HAPPINESS_MESSAGES_CHAT_ID: return send_happiness_message.delay(text='💰+{sum} ₽, {user}, {reason}'.format(
{"golden_diff": "diff --git a/src/app/integrations/tg.py b/src/app/integrations/tg.py\n--- a/src/app/integrations/tg.py\n+++ b/src/app/integrations/tg.py\n@@ -1,10 +1,18 @@\n import requests\n+from django.conf import settings\n \n \n-def send_happiness_message(text):\n- response = requests.post('https://timepad.f213.in/msg/', json={\n+def send_message(channel, text):\n+ url = f'https://api.telegram.org/bot{settings.BOT_TOKEN}/sendMessage'\n+ response = requests.post(url, data={\n+ 'chat_id': channel,\n 'text': text,\n+ 'parse_mode': 'markdown',\n+ 'disable_web_page_preview': True,\n })\n \n- assert response.status_code == 200, 'TG proxy should return 200'\n- assert response.json()['ok'] is True, 'TG proxy should say msg is ok'\n+ assert response.status_code == 200, 'TG should return 200'\n+\n+\n+def send_happiness_message(text):\n+ send_message(settings.HAPPINESS_MESSAGES_CHAT_ID, text)\ndiff --git a/src/app/settings.py b/src/app/settings.py\n--- a/src/app/settings.py\n+++ b/src/app/settings.py\n@@ -232,7 +232,8 @@\n \n TINKOFF_CREDIT_DEMO_MODE = env('TINKOFF_CREDIT_DEMO_MODE', default=DEBUG)\n \n-SEND_HAPPINESS_MESSAGES = env('SEND_HAPPINESS_MESSAGES', cast=bool, default=False)\n+BOT_TOKEN = env('BOT_TOKEN', cast=str, default=None)\n+HAPPINESS_MESSAGES_CHAT_ID = env('HAPPINESS_MESSAGES_CHAT_ID', cast=str, default=None)\n \n DRF_RECAPTCHA_SECRET_KEY = env('RECAPTCHA_SECRET_KEY', cast=str, default='')\n DRF_RECAPTCHA_TESTING = DRF_RECAPTCHA_TESTING_PASS = not env('RECAPTCHA_ENABLED', cast=bool, default=True)\ndiff --git a/src/orders/services/order_shipper.py b/src/orders/services/order_shipper.py\n--- a/src/orders/services/order_shipper.py\n+++ b/src/orders/services/order_shipper.py\n@@ -36,7 +36,7 @@\n self.order.save()\n \n def send_happiness_message(self):\n- if not settings.SEND_HAPPINESS_MESSAGES:\n+ if not settings.HAPPINESS_MESSAGES_CHAT_ID:\n return\n \n send_happiness_message.delay(text='\ud83d\udcb0+{sum} \u20bd, {user}, {reason}'.format(\n", "issue": "\u041f\u0435\u0440\u0435\u043f\u0438\u0441\u0430\u0442\u044c \u0438\u043d\u0442\u0435\u0433\u0440\u0430\u0446\u0438\u044e \u0441 \u0442\u0435\u043b\u0435\u0433\u0440\u0430\u043c\u043e\u043c\n\u0421\u0435\u0439\u0447\u0430\u0441 \u0438\u0441\u043f\u043e\u043b\u044c\u0437\u0443\u0435\u043c \u043a\u0430\u043a\u043e\u0439-\u0442\u043e [\u043c\u0443\u0442\u043d\u044b\u0439 \u0441\u0435\u0440\u0432\u0438\u0441](https://github.com/f213/education-backend/blob/master/src/app/integrations/tg.py#L4) \u0434\u043b\u044f \u043e\u0442\u043f\u0440\u0430\u0432\u043a\u0438 \u0441\u043e\u043e\u0431\u0449\u0435\u043d\u0438\u044f \u0432 \u0442\u0435\u043b\u0435\u0433\u0443. \u0422\u0430\u043a \u0432\u044b\u0448\u043b\u043e \u043f\u043e\u0442\u043e\u043c\u0443, \u0447\u0442\u043e \u043a\u043e\u0433\u0434\u0430 \u043c\u044b \u0437\u0430\u043f\u0443\u0441\u043a\u0430\u043b\u0438\u0441\u044c, API \u0442\u0435\u043b\u0435\u0433\u0440\u0430\u043c\u0430 \u0431\u044b\u043b\u043e \u0440\u0430\u0441\u043a\u043e\u043c\u043d\u0430\u0434\u0437\u043e\u0440\u0435\u043d\u043e.\r\n\r\n\u041d\u0430\u0434\u043e \u043e\u0442\u043a\u0430\u0437\u0430\u0442\u044c\u0441\u044f \u043e\u0442 \u0438\u0441\u043f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u043d\u0438\u044f \u044d\u0442\u043e\u0433\u043e \u0441\u0435\u0440\u0432\u0438\u0441\u0430. 
\u0417\u0430\u043e\u0434\u043d\u043e, \u0441\u0434\u0435\u043b\u0430\u0442\u044c \u0432\u043e\u0437\u043c\u043e\u0436\u043d\u043e\u0441\u0442\u044c \u0443\u043a\u0430\u0437\u044b\u0432\u0430\u0442\u044c \u043e\u0442\u0434\u0435\u043b\u044c\u043d\u044b\u0435 \u043a\u0430\u043d\u0430\u043b\u044b \u0443\u0432\u0435\u0434\u043e\u043c\u043b\u0435\u043d\u0438\u0439 \u043e \u043d\u043e\u0432\u044b\u0445 \u0437\u0430\u043a\u0430\u0437\u0430\u0445 \u0434\u043b\u044f \u0440\u0430\u0437\u043d\u044b\u0445 \u043a\u0443\u0440\u0441\u043e\u0432.\n", "before_files": [{"content": "import requests\n\n\ndef send_happiness_message(text):\n response = requests.post('https://timepad.f213.in/msg/', json={\n 'text': text,\n })\n\n assert response.status_code == 200, 'TG proxy should return 200'\n assert response.json()['ok'] is True, 'TG proxy should say msg is ok'\n", "path": "src/app/integrations/tg.py"}, {"content": "import environ\nimport os\nfrom celery.schedules import crontab\n\nroot = environ.Path(__file__) - 2 # three folder back (/a/b/c/ - 3 = /)\nenv = environ.Env(DEBUG=(bool, False)) # set default values and casting\nenviron.Env.read_env() # reading .env file\nSITE_ROOT = root()\n\nUSE_L10N = True\nUSE_i18N = True\n\nLANGUAGE_CODE = 'ru'\nLOCALE_PATHS = ['locale']\n\nINTERNAL_IPS = [\n '127.0.0.1',\n]\nFRONTEND_URL = 'https://education.borshev.com'\n\nUSE_TZ = False\nTIME_ZONE = env('TIME_ZONE', cast=str, default='Europe/Moscow')\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nTEST_RUNNER = 'app.test.disable_test_command_runner.DisableTestCommandRunner'\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = env('SECRET_KEY', cast=str, default='s3cr3t')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = env('DEBUG', cast=bool, default=False)\nCI = env('CI', cast=bool, default=False)\nANONYMIZE_ENABLED = DEBUG\n\nABSOLUTE_HOST = env('ABSOLUTE_HOST', cast=str, default='https://edu-app.borshev.com')\nALLOWED_HOSTS = [\n 'edu-app.borshev.com',\n 'localhost',\n 'localhost:8000',\n 'education.borshev.com',\n ABSOLUTE_HOST.replace('https://', ''),\n]\n\nCORS_ORIGIN_WHITELIST = [\n 'https://pmdaily.ru',\n 'https://education.borshev.com',\n]\n\nCSRF_TRUSTED_ORIGINS = [\n 'pmdaily.ru',\n 'education.borshev.com',\n 'borshev.com',\n]\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'app',\n 'users',\n 'orders',\n 'products',\n 'shipping',\n 'tinkoff',\n 'triggers',\n 'magnets',\n 'banking',\n\n 'corsheaders',\n 'hattori',\n 'anymail',\n 'rest_framework',\n 'rest_framework.authtoken',\n 'drf_recaptcha',\n 'django_filters',\n\n 'axes',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n\n 'debug_toolbar',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'corsheaders.middleware.CorsMiddleware',\n 'django.middleware.common.CommonMiddleware',\n # 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.RemoteUserMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 
'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'app.middleware.real_ip.real_ip_middleware',\n 'axes.middleware.AxesMiddleware',\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n]\n\nif not DEBUG and not CI:\n MIDDLEWARE.insert(1, 'whitenoise.middleware.WhiteNoiseMiddleware')\n\n\nREST_FRAMEWORK = {\n 'DEFAULT_PERMISSION_CLASSES': (\n 'rest_framework.permissions.IsAuthenticated',\n ),\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'rest_framework.authentication.TokenAuthentication',\n ),\n 'DEFAULT_RENDERER_CLASSES': [\n 'app.renderers.AppJSONRenderer',\n ],\n 'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.URLPathVersioning',\n 'DEFAULT_PAGINATION_CLASS': 'app.pagination.AppPagination',\n 'PAGE_SIZE': 20,\n}\n\nROOT_URLCONF = 'app.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'app.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/2.2/ref/settings/#databases\nDATABASES = {\n 'default': env.db(), # Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ\n}\nAUTH_USER_MODEL = 'users.User'\nAUTHENTICATION_BACKENDS = [\n 'axes.backends.AxesBackend',\n 'django.contrib.auth.backends.ModelBackend',\n 'django.contrib.auth.backends.RemoteUserBackend',\n]\n\nHEALTH_CHECKS_ERROR_CODE = 503\nHEALTH_CHECKS = {\n 'db': 'django_healthchecks.contrib.check_database',\n}\n\nMEDIA_URL = env('MEDIA_URL', default='/media/')\n\nSTATIC_URL = env('STATIC_URL', default='/static/')\nSTATIC_ROOT = env('STATIC_ROOT')\n\nSENTRY_DSN = env('SENTRY_DSN', cast=str, default='')\n\nif not DEBUG and SENTRY_DSN:\n import sentry_sdk\n from sentry_sdk.integrations.celery import CeleryIntegration\n from sentry_sdk.integrations.django import DjangoIntegration\n from sentry_sdk.integrations.redis import RedisIntegration\n\n sentry_sdk.init(\n dsn=SENTRY_DSN,\n integrations=[DjangoIntegration(), CeleryIntegration(), RedisIntegration()],\n )\n\nBROKER_URL = env('CELERY_BACKEND')\nCELERY_ALWAYS_EAGER = env('CELERY_ALWAYS_EAGER', cast=bool, default=DEBUG) # by default in debug mode we run all celery tasks in foregroud.\nCELERY_TIMEZONE = TIME_ZONE\nCELERY_ENABLE_UTC = False\nCELERYBEAT_SCHEDULE = {\n 'run_started_purchase_trigger': {\n 'task': 'triggers.tasks.check_for_started_purchase_triggers',\n 'schedule': crontab(hour='*', minute=15),\n },\n 'run_record_feedback_trigger': {\n 'task': 'triggers.tasks.check_for_record_feedback_triggers',\n 'schedule': crontab(hour='*', minute=15),\n },\n 'ship_unshipped_orders': {\n 'task': 'orders.tasks.ship_unshipped_orders',\n 'schedule': crontab(hour='*', minute='*/2'),\n },\n}\n\n\nAWS_ACCESS_KEY_ID = env('AWS_ACCESS_KEY_ID', default=None)\nAWS_SECRET_ACCESS_KEY = env('AWS_SECRET_ACCESS_KEY', default=None)\nAWS_STORAGE_BUCKET_NAME = env('AWS_STORAGE_BUCKET_NAME', default=None)\nAWS_S3_REGION_NAME = env('AWS_S3_REGION_NAME', default=None)\nAWS_S3_ENDPOINT_URL = env('AWS_S3_ENDPOINT_URL', default=None)\n\nEMAIL_ENABLED = env('EMAIL_ENABLED', cast=bool, default=False)\n\nEMAIL_BACKEND = env('EMAIL_BACKEND', default='django.core.mail.backends.console.EmailBackend')\n\nMAILCHIMP_API_KEY = env('MAILCHIMP_API_KEY', default='')\nMAILCHIMP_CONTACT_LIST_ID = env('MAILCHIMP_CONTACT_LIST_ID', 
cast=str, default=None)\n\nDEFAULT_FROM_EMAIL = env('EMAIL_FROM', cast=str, default='')\nANYMAIL = {\n 'POSTMARK_SERVER_TOKEN': env('POSTMARK_SERVER_TOKEN', cast=str, default=''),\n 'DEBUG_API_REQUESTS': env('DEBUG'),\n}\n\nCLICKMEETING_API_KEY = env('CLICKMEETING_API_KEY', default=None, cast=str)\n\nZOOMUS_API_KEY = env('ZOOMUS_API_KEY', default=None, cast=str)\nZOOMUS_API_SECRET = env('ZOOMUS_API_SECRET', default=None, cast=str)\n\nTINKOFF_TERMINAL_KEY = env('TINKOFF_TERMINAL_KEY', default=None)\nTINKOFF_TERMINAL_PASSWORD = env('TINKOFF_TERMINAL_PASSWORD', default=None)\nTINKOFF_CREDIT_SHOP_ID = env('TINKOFF_CREDIT_SHOP_ID', default=None)\nTINKOFF_CREDIT_SHOWCASE_ID = env('TINKOFF_CREDIT_SHOWCASE_ID', default=None)\n\nTINKOFF_CREDIT_DEMO_MODE = env('TINKOFF_CREDIT_DEMO_MODE', default=DEBUG)\n\nSEND_HAPPINESS_MESSAGES = env('SEND_HAPPINESS_MESSAGES', cast=bool, default=False)\n\nDRF_RECAPTCHA_SECRET_KEY = env('RECAPTCHA_SECRET_KEY', cast=str, default='')\nDRF_RECAPTCHA_TESTING = DRF_RECAPTCHA_TESTING_PASS = not env('RECAPTCHA_ENABLED', cast=bool, default=True)\n", "path": "src/app/settings.py"}, {"content": "from django.conf import settings\nfrom django.utils import timezone\n\nfrom app.tasks import send_happiness_message, send_mail\nfrom orders.models import Order\n\n\nclass Pigwidgeon:\n \"\"\"Ship the order (actualy calls item ship() method)\"\"\"\n def __init__(self, order: Order, silent: bool = False):\n self.order = order\n self.silent = silent\n\n def __call__(self):\n if self.ship():\n self.mark_order_as_shipped()\n\n if not self.order.notification_to_giver_is_sent:\n self.send_notification_to_giver()\n\n if not self.silent:\n self.send_happiness_message()\n\n def ship(self) -> bool:\n \"\"\"Ship the order. Returns true if order is shipped\"\"\"\n desired_date = self.order.desired_shipment_date\n if desired_date is None or desired_date <= timezone.now():\n self.order.item.ship(to=self.order.user, order=self.order)\n\n return True\n\n return False\n\n def mark_order_as_shipped(self):\n self.order.shipped = timezone.now()\n self.order.save()\n\n def send_happiness_message(self):\n if not settings.SEND_HAPPINESS_MESSAGES:\n return\n\n send_happiness_message.delay(text='\ud83d\udcb0+{sum} \u20bd, {user}, {reason}'.format(\n sum=str(self.order.price).replace('.00', ''),\n user=str(self.order.user),\n reason=str(self.order.item) if self.order.giver is None else f'{self.order.item} (\u043f\u043e\u0434\u0430\u0440\u043e\u043a)',\n ))\n\n def send_notification_to_giver(self):\n if self.order.giver is None:\n return\n\n if self.order.desired_shipment_date is None:\n return\n\n send_mail.delay(\n to=self.order.giver.email,\n template_id='gift-notification-for-giver', # postmark\n disable_antispam=True,\n ctx={\n 'item_name': self.order.item.full_name,\n 'receiver_name': str(self.order.user),\n 'receiver_email': self.order.user.email,\n 'desired_shipment_date': self.order.desired_shipment_date.strftime('%d.%m.%Y'),\n },\n )\n\n self.order.notification_to_giver_is_sent = True\n self.order.save()\n", "path": "src/orders/services/order_shipper.py"}]}
3,900
553
gh_patches_debug_42439
rasdani/github-patches
git_diff
paperless-ngx__paperless-ngx-1858
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [BUG] Barcode seperator processing fails with password protected PDFs ### Description With PAPERLESS_CONSUMER_ENABLE_BARCODES=true uploading a password protected PDF causes the file task to fail. It appears that the barcode separation stage fails with an error rather than skipping the document. The same document imports correctly with the above settings commented out. Traceback from the failed task: ```python /tmp/paperless/paperless-upload-0bj7vn9g: invalid password : Traceback (most recent call last): File "/usr/src/paperless/src/src/django-q/django_q/cluster.py", line 454, in worker res = f(*task["args"], **task["kwargs"]) File "/usr/src/paperless/src/documents/tasks.py", line 99, in consume_file pdf_filepath, separators = barcodes.scan_file_for_separating_barcodes(path) File "/usr/src/paperless/src/documents/barcodes.py", line 121, in scan_file_for_separating_barcodes pdf = Pdf.open(pdf_filepath) File "/usr/local/lib/python3.9/site-packages/pikepdf/_methods.py", line 791, in open pdf = Pdf._open( pikepdf._qpdf.PasswordError: /tmp/paperless/paperless-upload-0bj7vn9g: invalid password ``` ### Steps to reproduce 1. Launch paperless with PAPERLESS_CONSUMER_ENABLE_BARCODES=true 2. Upload password protected PDF 3. Check the file tasks menu for failed jobs ### Webserver logs _No response_ ### Paperless-ngx version 1.9.2 ### Host OS Fedora Server 36 ### Installation method Other (please describe above) ### Browser _No response_ ### Configuration changes _No response_ ### Other Official docker image running under root podman </issue> <code> [start of src/documents/barcodes.py] 1 import logging 2 import os 3 import shutil 4 import tempfile 5 from functools import lru_cache 6 from typing import List 7 from typing import Optional 8 from typing import Tuple 9 10 import magic 11 from django.conf import settings 12 from pdf2image import convert_from_path 13 from pikepdf import Page 14 from pikepdf import Pdf 15 from pikepdf import PdfImage 16 from PIL import Image 17 from PIL import ImageSequence 18 from pyzbar import pyzbar 19 20 logger = logging.getLogger("paperless.barcodes") 21 22 23 class BarcodeImageFormatError(Exception): 24 pass 25 26 27 @lru_cache(maxsize=8) 28 def supported_file_type(mime_type) -> bool: 29 """ 30 Determines if the file is valid for barcode 31 processing, based on MIME type and settings 32 33 :return: True if the file is supported, False otherwise 34 """ 35 supported_mime = ["application/pdf"] 36 if settings.CONSUMER_BARCODE_TIFF_SUPPORT: 37 supported_mime += ["image/tiff"] 38 39 return mime_type in supported_mime 40 41 42 def barcode_reader(image: Image) -> List[str]: 43 """ 44 Read any barcodes contained in image 45 Returns a list containing all found barcodes 46 """ 47 barcodes = [] 48 # Decode the barcode image 49 detected_barcodes = pyzbar.decode(image) 50 51 if detected_barcodes: 52 # Traverse through all the detected barcodes in image 53 for barcode in detected_barcodes: 54 if barcode.data: 55 decoded_barcode = barcode.data.decode("utf-8") 56 barcodes.append(decoded_barcode) 57 logger.debug( 58 f"Barcode of type {str(barcode.type)} found: {decoded_barcode}", 59 ) 60 return barcodes 61 62 63 def get_file_mime_type(path: str) -> str: 64 """ 65 Determines the file type, based on MIME type. 66 67 Returns the MIME type. 
68 """ 69 mime_type = magic.from_file(path, mime=True) 70 logger.debug(f"Detected mime type: {mime_type}") 71 return mime_type 72 73 74 def convert_from_tiff_to_pdf(filepath: str) -> str: 75 """ 76 converts a given TIFF image file to pdf into a temporary directory. 77 78 Returns the new pdf file. 79 """ 80 file_name = os.path.splitext(os.path.basename(filepath))[0] 81 mime_type = get_file_mime_type(filepath) 82 tempdir = tempfile.mkdtemp(prefix="paperless-", dir=settings.SCRATCH_DIR) 83 # use old file name with pdf extension 84 if mime_type == "image/tiff": 85 newpath = os.path.join(tempdir, file_name + ".pdf") 86 else: 87 logger.warning( 88 f"Cannot convert mime type {str(mime_type)} from {str(filepath)} to pdf.", 89 ) 90 return None 91 with Image.open(filepath) as image: 92 images = [] 93 for i, page in enumerate(ImageSequence.Iterator(image)): 94 page = page.convert("RGB") 95 images.append(page) 96 try: 97 if len(images) == 1: 98 images[0].save(newpath) 99 else: 100 images[0].save(newpath, save_all=True, append_images=images[1:]) 101 except OSError as e: 102 logger.warning( 103 f"Could not save the file as pdf. Error: {str(e)}", 104 ) 105 return None 106 return newpath 107 108 109 def scan_file_for_separating_barcodes(filepath: str) -> Tuple[Optional[str], List[int]]: 110 """ 111 Scan the provided pdf file for page separating barcodes 112 Returns a PDF filepath and a list of pagenumbers, 113 which separate the file into new files 114 """ 115 116 def _pikepdf_barcode_scan(pdf_filepath: str): 117 with Pdf.open(pdf_filepath) as pdf: 118 for page_num, page in enumerate(pdf.pages): 119 for image_key in page.images: 120 pdfimage = PdfImage(page.images[image_key]) 121 122 if "/CCITTFaxDecode" in pdfimage.filters: 123 raise BarcodeImageFormatError() 124 125 # Not all images can be transcoded to a PIL image, which 126 # is what pyzbar expects to receive 127 pillow_img = pdfimage.as_pil_image() 128 129 detected_barcodes = barcode_reader(pillow_img) 130 131 if settings.CONSUMER_BARCODE_STRING in detected_barcodes: 132 separator_page_numbers.append(page_num) 133 134 def _pdf2image_barcode_scan(pdf_filepath: str): 135 # use a temporary directory in case the file os too big to handle in memory 136 with tempfile.TemporaryDirectory() as path: 137 pages_from_path = convert_from_path(pdf_filepath, output_folder=path) 138 for current_page_number, page in enumerate(pages_from_path): 139 current_barcodes = barcode_reader(page) 140 if settings.CONSUMER_BARCODE_STRING in current_barcodes: 141 separator_page_numbers.append(current_page_number) 142 143 separator_page_numbers = [] 144 pdf_filepath = None 145 146 mime_type = get_file_mime_type(filepath) 147 148 if supported_file_type(mime_type): 149 pdf_filepath = filepath 150 if mime_type == "image/tiff": 151 pdf_filepath = convert_from_tiff_to_pdf(filepath) 152 153 if settings.CONSUMER_USE_LEGACY_DETECTION: 154 _pdf2image_barcode_scan(pdf_filepath) 155 else: 156 try: 157 _pikepdf_barcode_scan(pdf_filepath) 158 except Exception as e: 159 160 logger.warning( 161 f"Exception using pikepdf for barcodes," 162 f" falling back to pdf2image: {e}", 163 ) 164 # Reset this incase pikepdf got part way through 165 separator_page_numbers = [] 166 _pdf2image_barcode_scan(pdf_filepath) 167 168 else: 169 logger.warning( 170 f"Unsupported file format for barcode reader: {str(mime_type)}", 171 ) 172 return pdf_filepath, separator_page_numbers 173 174 175 def separate_pages(filepath: str, pages_to_split_on: List[int]) -> List[str]: 176 """ 177 Separate the provided pdf file on the 
pages_to_split_on. 178 The pages which are defined by page_numbers will be removed. 179 Returns a list of (temporary) filepaths to consume. 180 These will need to be deleted later. 181 """ 182 183 document_paths = [] 184 185 if not pages_to_split_on: 186 logger.warning("No pages to split on!") 187 return document_paths 188 189 os.makedirs(settings.SCRATCH_DIR, exist_ok=True) 190 tempdir = tempfile.mkdtemp(prefix="paperless-", dir=settings.SCRATCH_DIR) 191 fname = os.path.splitext(os.path.basename(filepath))[0] 192 pdf = Pdf.open(filepath) 193 194 # A list of documents, ie a list of lists of pages 195 documents: List[List[Page]] = [] 196 # A single document, ie a list of pages 197 document: List[Page] = [] 198 199 for idx, page in enumerate(pdf.pages): 200 # Keep building the new PDF as long as it is not a 201 # separator index 202 if idx not in pages_to_split_on: 203 document.append(page) 204 # Make sure to append the very last document to the documents 205 if idx == (len(pdf.pages) - 1): 206 documents.append(document) 207 document = [] 208 else: 209 # This is a split index, save the current PDF pages, and restart 210 # a new destination page listing 211 logger.debug(f"Starting new document at idx {idx}") 212 documents.append(document) 213 document = [] 214 215 documents = [x for x in documents if len(x)] 216 217 logger.debug(f"Split into {len(documents)} new documents") 218 219 # Write the new documents out 220 for doc_idx, document in enumerate(documents): 221 dst = Pdf.new() 222 dst.pages.extend(document) 223 224 output_filename = f"{fname}_document_{doc_idx}.pdf" 225 226 logger.debug(f"pdf no:{doc_idx} has {len(dst.pages)} pages") 227 savepath = os.path.join(tempdir, output_filename) 228 with open(savepath, "wb") as out: 229 dst.save(out) 230 document_paths.append(savepath) 231 232 return document_paths 233 234 235 def save_to_dir( 236 filepath: str, 237 newname: str = None, 238 target_dir: str = settings.CONSUMPTION_DIR, 239 ): 240 """ 241 Copies filepath to target_dir. 242 Optionally rename the file. 243 """ 244 if os.path.isfile(filepath) and os.path.isdir(target_dir): 245 dst = shutil.copy(filepath, target_dir) 246 logging.debug(f"saved {str(filepath)} to {str(dst)}") 247 if newname: 248 dst_new = os.path.join(target_dir, newname) 249 logger.debug(f"moving {str(dst)} to {str(dst_new)}") 250 os.rename(dst, dst_new) 251 else: 252 logger.warning(f"{str(filepath)} or {str(target_dir)} don't exist.") 253 [end of src/documents/barcodes.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/documents/barcodes.py b/src/documents/barcodes.py --- a/src/documents/barcodes.py +++ b/src/documents/barcodes.py @@ -10,9 +10,12 @@ import magic from django.conf import settings from pdf2image import convert_from_path +from pdf2image.exceptions import PDFPageCountError from pikepdf import Page +from pikepdf import PasswordError from pikepdf import Pdf from pikepdf import PdfImage +from pikepdf.models.image import HifiPrintImageNotTranscodableError from PIL import Image from PIL import ImageSequence from pyzbar import pyzbar @@ -120,7 +123,9 @@ pdfimage = PdfImage(page.images[image_key]) if "/CCITTFaxDecode" in pdfimage.filters: - raise BarcodeImageFormatError() + raise BarcodeImageFormatError( + "Unable to decode CCITTFaxDecode images", + ) # Not all images can be transcoded to a PIL image, which # is what pyzbar expects to receive @@ -132,7 +137,7 @@ separator_page_numbers.append(page_num) def _pdf2image_barcode_scan(pdf_filepath: str): - # use a temporary directory in case the file os too big to handle in memory + # use a temporary directory in case the file is too big to handle in memory with tempfile.TemporaryDirectory() as path: pages_from_path = convert_from_path(pdf_filepath, output_folder=path) for current_page_number, page in enumerate(pages_from_path): @@ -150,20 +155,42 @@ if mime_type == "image/tiff": pdf_filepath = convert_from_tiff_to_pdf(filepath) + # Chose the scanner if settings.CONSUMER_USE_LEGACY_DETECTION: - _pdf2image_barcode_scan(pdf_filepath) + logger.debug("Using pdf2image for barcodes") + scanner_function = _pdf2image_barcode_scan else: - try: - _pikepdf_barcode_scan(pdf_filepath) - except Exception as e: + logger.debug("Using pikepdf for barcodes") + scanner_function = _pikepdf_barcode_scan - logger.warning( - f"Exception using pikepdf for barcodes," - f" falling back to pdf2image: {e}", - ) - # Reset this incase pikepdf got part way through + # Run the scanner + try: + scanner_function(pdf_filepath) + # Neither method can handle password protected PDFs without it being + # provided. Log it and continue + except (PasswordError, PDFPageCountError) as e: + logger.warning( + f"File is likely password protected, not splitting: {e}", + ) + # Handle pikepdf related image decoding issues with a fallback + except (BarcodeImageFormatError, HifiPrintImageNotTranscodableError) as e: + logger.warning( + f"Falling back to pdf2image because: {e}", + ) + try: separator_page_numbers = [] _pdf2image_barcode_scan(pdf_filepath) + # This file is really borked, allow the consumption to continue + # but it may fail further on + except Exception as e: # pragma: no cover + logger.warning( + f"Exception during barcode scanning: {e}", + ) + # We're not sure what happened, but allow the consumption to continue + except Exception as e: # pragma: no cover + logger.warning( + f"Exception during barcode scanning: {e}", + ) else: logger.warning(
{"golden_diff": "diff --git a/src/documents/barcodes.py b/src/documents/barcodes.py\n--- a/src/documents/barcodes.py\n+++ b/src/documents/barcodes.py\n@@ -10,9 +10,12 @@\n import magic\n from django.conf import settings\n from pdf2image import convert_from_path\n+from pdf2image.exceptions import PDFPageCountError\n from pikepdf import Page\n+from pikepdf import PasswordError\n from pikepdf import Pdf\n from pikepdf import PdfImage\n+from pikepdf.models.image import HifiPrintImageNotTranscodableError\n from PIL import Image\n from PIL import ImageSequence\n from pyzbar import pyzbar\n@@ -120,7 +123,9 @@\n pdfimage = PdfImage(page.images[image_key])\n \n if \"/CCITTFaxDecode\" in pdfimage.filters:\n- raise BarcodeImageFormatError()\n+ raise BarcodeImageFormatError(\n+ \"Unable to decode CCITTFaxDecode images\",\n+ )\n \n # Not all images can be transcoded to a PIL image, which\n # is what pyzbar expects to receive\n@@ -132,7 +137,7 @@\n separator_page_numbers.append(page_num)\n \n def _pdf2image_barcode_scan(pdf_filepath: str):\n- # use a temporary directory in case the file os too big to handle in memory\n+ # use a temporary directory in case the file is too big to handle in memory\n with tempfile.TemporaryDirectory() as path:\n pages_from_path = convert_from_path(pdf_filepath, output_folder=path)\n for current_page_number, page in enumerate(pages_from_path):\n@@ -150,20 +155,42 @@\n if mime_type == \"image/tiff\":\n pdf_filepath = convert_from_tiff_to_pdf(filepath)\n \n+ # Chose the scanner\n if settings.CONSUMER_USE_LEGACY_DETECTION:\n- _pdf2image_barcode_scan(pdf_filepath)\n+ logger.debug(\"Using pdf2image for barcodes\")\n+ scanner_function = _pdf2image_barcode_scan\n else:\n- try:\n- _pikepdf_barcode_scan(pdf_filepath)\n- except Exception as e:\n+ logger.debug(\"Using pikepdf for barcodes\")\n+ scanner_function = _pikepdf_barcode_scan\n \n- logger.warning(\n- f\"Exception using pikepdf for barcodes,\"\n- f\" falling back to pdf2image: {e}\",\n- )\n- # Reset this incase pikepdf got part way through\n+ # Run the scanner\n+ try:\n+ scanner_function(pdf_filepath)\n+ # Neither method can handle password protected PDFs without it being\n+ # provided. Log it and continue\n+ except (PasswordError, PDFPageCountError) as e:\n+ logger.warning(\n+ f\"File is likely password protected, not splitting: {e}\",\n+ )\n+ # Handle pikepdf related image decoding issues with a fallback\n+ except (BarcodeImageFormatError, HifiPrintImageNotTranscodableError) as e:\n+ logger.warning(\n+ f\"Falling back to pdf2image because: {e}\",\n+ )\n+ try:\n separator_page_numbers = []\n _pdf2image_barcode_scan(pdf_filepath)\n+ # This file is really borked, allow the consumption to continue\n+ # but it may fail further on\n+ except Exception as e: # pragma: no cover\n+ logger.warning(\n+ f\"Exception during barcode scanning: {e}\",\n+ )\n+ # We're not sure what happened, but allow the consumption to continue\n+ except Exception as e: # pragma: no cover\n+ logger.warning(\n+ f\"Exception during barcode scanning: {e}\",\n+ )\n \n else:\n logger.warning(\n", "issue": "[BUG] Barcode seperator processing fails with password protected PDFs\n### Description\n\nWith PAPERLESS_CONSUMER_ENABLE_BARCODES=true uploading a password protected PDF causes the file task to fail. It appears that the barcode separation stage fails with an error rather than skipping the document. 
The same document imports correctly with the above settings commented out.\r\n\r\nTraceback from the failed task:\r\n```python\r\n/tmp/paperless/paperless-upload-0bj7vn9g: invalid password : Traceback (most recent call last):\r\n File \"/usr/src/paperless/src/src/django-q/django_q/cluster.py\", line 454, in worker\r\n res = f(*task[\"args\"], **task[\"kwargs\"])\r\n File \"/usr/src/paperless/src/documents/tasks.py\", line 99, in consume_file\r\n pdf_filepath, separators = barcodes.scan_file_for_separating_barcodes(path)\r\n File \"/usr/src/paperless/src/documents/barcodes.py\", line 121, in scan_file_for_separating_barcodes\r\n pdf = Pdf.open(pdf_filepath)\r\n File \"/usr/local/lib/python3.9/site-packages/pikepdf/_methods.py\", line 791, in open\r\n pdf = Pdf._open(\r\npikepdf._qpdf.PasswordError: /tmp/paperless/paperless-upload-0bj7vn9g: invalid password\r\n```\n\n### Steps to reproduce\n\n1. Launch paperless with PAPERLESS_CONSUMER_ENABLE_BARCODES=true\r\n2. Upload password protected PDF\r\n3. Check the file tasks menu for failed jobs\n\n### Webserver logs\n\n_No response_\n\n### Paperless-ngx version\n\n1.9.2\n\n### Host OS\n\nFedora Server 36\n\n### Installation method\n\nOther (please describe above)\n\n### Browser\n\n_No response_\n\n### Configuration changes\n\n_No response_\n\n### Other\n\nOfficial docker image running under root podman\n", "before_files": [{"content": "import logging\nimport os\nimport shutil\nimport tempfile\nfrom functools import lru_cache\nfrom typing import List\nfrom typing import Optional\nfrom typing import Tuple\n\nimport magic\nfrom django.conf import settings\nfrom pdf2image import convert_from_path\nfrom pikepdf import Page\nfrom pikepdf import Pdf\nfrom pikepdf import PdfImage\nfrom PIL import Image\nfrom PIL import ImageSequence\nfrom pyzbar import pyzbar\n\nlogger = logging.getLogger(\"paperless.barcodes\")\n\n\nclass BarcodeImageFormatError(Exception):\n pass\n\n\n@lru_cache(maxsize=8)\ndef supported_file_type(mime_type) -> bool:\n \"\"\"\n Determines if the file is valid for barcode\n processing, based on MIME type and settings\n\n :return: True if the file is supported, False otherwise\n \"\"\"\n supported_mime = [\"application/pdf\"]\n if settings.CONSUMER_BARCODE_TIFF_SUPPORT:\n supported_mime += [\"image/tiff\"]\n\n return mime_type in supported_mime\n\n\ndef barcode_reader(image: Image) -> List[str]:\n \"\"\"\n Read any barcodes contained in image\n Returns a list containing all found barcodes\n \"\"\"\n barcodes = []\n # Decode the barcode image\n detected_barcodes = pyzbar.decode(image)\n\n if detected_barcodes:\n # Traverse through all the detected barcodes in image\n for barcode in detected_barcodes:\n if barcode.data:\n decoded_barcode = barcode.data.decode(\"utf-8\")\n barcodes.append(decoded_barcode)\n logger.debug(\n f\"Barcode of type {str(barcode.type)} found: {decoded_barcode}\",\n )\n return barcodes\n\n\ndef get_file_mime_type(path: str) -> str:\n \"\"\"\n Determines the file type, based on MIME type.\n\n Returns the MIME type.\n \"\"\"\n mime_type = magic.from_file(path, mime=True)\n logger.debug(f\"Detected mime type: {mime_type}\")\n return mime_type\n\n\ndef convert_from_tiff_to_pdf(filepath: str) -> str:\n \"\"\"\n converts a given TIFF image file to pdf into a temporary directory.\n\n Returns the new pdf file.\n \"\"\"\n file_name = os.path.splitext(os.path.basename(filepath))[0]\n mime_type = get_file_mime_type(filepath)\n tempdir = tempfile.mkdtemp(prefix=\"paperless-\", dir=settings.SCRATCH_DIR)\n # use old file name with 
pdf extension\n if mime_type == \"image/tiff\":\n newpath = os.path.join(tempdir, file_name + \".pdf\")\n else:\n logger.warning(\n f\"Cannot convert mime type {str(mime_type)} from {str(filepath)} to pdf.\",\n )\n return None\n with Image.open(filepath) as image:\n images = []\n for i, page in enumerate(ImageSequence.Iterator(image)):\n page = page.convert(\"RGB\")\n images.append(page)\n try:\n if len(images) == 1:\n images[0].save(newpath)\n else:\n images[0].save(newpath, save_all=True, append_images=images[1:])\n except OSError as e:\n logger.warning(\n f\"Could not save the file as pdf. Error: {str(e)}\",\n )\n return None\n return newpath\n\n\ndef scan_file_for_separating_barcodes(filepath: str) -> Tuple[Optional[str], List[int]]:\n \"\"\"\n Scan the provided pdf file for page separating barcodes\n Returns a PDF filepath and a list of pagenumbers,\n which separate the file into new files\n \"\"\"\n\n def _pikepdf_barcode_scan(pdf_filepath: str):\n with Pdf.open(pdf_filepath) as pdf:\n for page_num, page in enumerate(pdf.pages):\n for image_key in page.images:\n pdfimage = PdfImage(page.images[image_key])\n\n if \"/CCITTFaxDecode\" in pdfimage.filters:\n raise BarcodeImageFormatError()\n\n # Not all images can be transcoded to a PIL image, which\n # is what pyzbar expects to receive\n pillow_img = pdfimage.as_pil_image()\n\n detected_barcodes = barcode_reader(pillow_img)\n\n if settings.CONSUMER_BARCODE_STRING in detected_barcodes:\n separator_page_numbers.append(page_num)\n\n def _pdf2image_barcode_scan(pdf_filepath: str):\n # use a temporary directory in case the file os too big to handle in memory\n with tempfile.TemporaryDirectory() as path:\n pages_from_path = convert_from_path(pdf_filepath, output_folder=path)\n for current_page_number, page in enumerate(pages_from_path):\n current_barcodes = barcode_reader(page)\n if settings.CONSUMER_BARCODE_STRING in current_barcodes:\n separator_page_numbers.append(current_page_number)\n\n separator_page_numbers = []\n pdf_filepath = None\n\n mime_type = get_file_mime_type(filepath)\n\n if supported_file_type(mime_type):\n pdf_filepath = filepath\n if mime_type == \"image/tiff\":\n pdf_filepath = convert_from_tiff_to_pdf(filepath)\n\n if settings.CONSUMER_USE_LEGACY_DETECTION:\n _pdf2image_barcode_scan(pdf_filepath)\n else:\n try:\n _pikepdf_barcode_scan(pdf_filepath)\n except Exception as e:\n\n logger.warning(\n f\"Exception using pikepdf for barcodes,\"\n f\" falling back to pdf2image: {e}\",\n )\n # Reset this incase pikepdf got part way through\n separator_page_numbers = []\n _pdf2image_barcode_scan(pdf_filepath)\n\n else:\n logger.warning(\n f\"Unsupported file format for barcode reader: {str(mime_type)}\",\n )\n return pdf_filepath, separator_page_numbers\n\n\ndef separate_pages(filepath: str, pages_to_split_on: List[int]) -> List[str]:\n \"\"\"\n Separate the provided pdf file on the pages_to_split_on.\n The pages which are defined by page_numbers will be removed.\n Returns a list of (temporary) filepaths to consume.\n These will need to be deleted later.\n \"\"\"\n\n document_paths = []\n\n if not pages_to_split_on:\n logger.warning(\"No pages to split on!\")\n return document_paths\n\n os.makedirs(settings.SCRATCH_DIR, exist_ok=True)\n tempdir = tempfile.mkdtemp(prefix=\"paperless-\", dir=settings.SCRATCH_DIR)\n fname = os.path.splitext(os.path.basename(filepath))[0]\n pdf = Pdf.open(filepath)\n\n # A list of documents, ie a list of lists of pages\n documents: List[List[Page]] = []\n # A single document, ie a list of pages\n 
document: List[Page] = []\n\n for idx, page in enumerate(pdf.pages):\n # Keep building the new PDF as long as it is not a\n # separator index\n if idx not in pages_to_split_on:\n document.append(page)\n # Make sure to append the very last document to the documents\n if idx == (len(pdf.pages) - 1):\n documents.append(document)\n document = []\n else:\n # This is a split index, save the current PDF pages, and restart\n # a new destination page listing\n logger.debug(f\"Starting new document at idx {idx}\")\n documents.append(document)\n document = []\n\n documents = [x for x in documents if len(x)]\n\n logger.debug(f\"Split into {len(documents)} new documents\")\n\n # Write the new documents out\n for doc_idx, document in enumerate(documents):\n dst = Pdf.new()\n dst.pages.extend(document)\n\n output_filename = f\"{fname}_document_{doc_idx}.pdf\"\n\n logger.debug(f\"pdf no:{doc_idx} has {len(dst.pages)} pages\")\n savepath = os.path.join(tempdir, output_filename)\n with open(savepath, \"wb\") as out:\n dst.save(out)\n document_paths.append(savepath)\n\n return document_paths\n\n\ndef save_to_dir(\n filepath: str,\n newname: str = None,\n target_dir: str = settings.CONSUMPTION_DIR,\n):\n \"\"\"\n Copies filepath to target_dir.\n Optionally rename the file.\n \"\"\"\n if os.path.isfile(filepath) and os.path.isdir(target_dir):\n dst = shutil.copy(filepath, target_dir)\n logging.debug(f\"saved {str(filepath)} to {str(dst)}\")\n if newname:\n dst_new = os.path.join(target_dir, newname)\n logger.debug(f\"moving {str(dst)} to {str(dst_new)}\")\n os.rename(dst, dst_new)\n else:\n logger.warning(f\"{str(filepath)} or {str(target_dir)} don't exist.\")\n", "path": "src/documents/barcodes.py"}]}
3,449
837
gh_patches_debug_24756
rasdani/github-patches
git_diff
netbox-community__netbox-2290
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> deficiency in new webhook implementation <!-- Before opening a new issue, please search through the existing issues to see if your topic has already been addressed. Note that you may need to remove the "is:open" filter from the search bar to include closed issues. Check the appropriate type for your issue below by placing an x between the brackets. For assistance with installation issues, or for any other issues other than those listed below, please raise your topic for discussion on our mailing list: https://groups.google.com/forum/#!forum/netbox-discuss Please note that issues which do not fall under any of the below categories will be closed. Due to an excessive backlog of feature requests, we are not currently accepting any proposals which extend NetBox's feature scope. Do not prepend any sort of tag to your issue's title. An administrator will review your issue and assign labels as appropriate. ---> ### Issue type [ ] Feature request <!-- An enhancement of existing functionality --> [X] Bug report <!-- Unexpected or erroneous behavior --> [ ] Documentation <!-- A modification to the documentation --> <!-- Please describe the environment in which you are running NetBox. (Be sure to verify that you are running the latest stable release of NetBox before submitting a bug report.) If you are submitting a bug report and have made any changes to the code base, please first validate that your bug can be recreated while running an official release. --> ### Environment * Python version: python 2.7.5 * NetBox version: develop-2.4 <!-- BUG REPORTS must include: * A list of the steps needed for someone else to reproduce the bug * A description of the expected and observed behavior * Any relevant error messages (screenshots may also help) FEATURE REQUESTS must include: * A detailed description of the proposed functionality * A use case for the new feature * A rough description of any necessary changes to the database schema * Any relevant third-party libraries which would be needed --> ### Description Testing out the webhook implementation and discovered the following bug: when a model contains a custom field of type date the worker is unable to serialize the data for transmission ``` Traceback (most recent call last): File "/usr/lib/python2.7/site-packages/rq/worker.py", line 793, in perform_job rv = job.perform() File "/usr/lib/python2.7/site-packages/rq/job.py", line 599, in perform self._result = self._execute() File "/usr/lib/python2.7/site-packages/rq/job.py", line 605, in _execute return self.func(*self.args, **self.kwargs) File "/opt/netbox/netbox/extras/webhooks_worker.py", line 44, in process_webhook prepared_request = requests.Request(**params).prepare() File "/usr/lib/python2.7/site-packages/requests/models.py", line 259, in prepare hooks=self.hooks, File "/usr/lib/python2.7/site-packages/requests/models.py", line 307, in prepare self.prepare_body(data, files, json) File "/usr/lib/python2.7/site-packages/requests/models.py", line 427, in prepare_body body = json_dumps(json) File "/usr/lib64/python2.7/site-packages/simplejson/__init__.py", line 382, in dumps return _default_encoder.encode(obj) File "/usr/lib64/python2.7/site-packages/simplejson/encoder.py", line 291, in encode chunks = self.iterencode(o, _one_shot=True) File "/usr/lib64/python2.7/site-packages/simplejson/encoder.py", line 373, in iterencode return _iterencode(o, 0) File 
"/usr/lib64/python2.7/site-packages/simplejson/encoder.py", line 268, in default o.__class__.__name__) TypeError: Object of type date is not JSON serializable ``` </issue> <code> [start of netbox/extras/webhooks_worker.py] 1 import hashlib 2 import hmac 3 4 import requests 5 from django_rq import job 6 7 from extras.constants import WEBHOOK_CT_JSON, WEBHOOK_CT_X_WWW_FORM_ENCODED, OBJECTCHANGE_ACTION_CHOICES 8 9 10 @job('default') 11 def process_webhook(webhook, data, model_class, event, timestamp): 12 """ 13 Make a POST request to the defined Webhook 14 """ 15 payload = { 16 'event': dict(OBJECTCHANGE_ACTION_CHOICES)[event], 17 'timestamp': timestamp, 18 'model': model_class.__name__, 19 'data': data 20 } 21 headers = { 22 'Content-Type': webhook.get_http_content_type_display(), 23 } 24 params = { 25 'method': 'POST', 26 'url': webhook.payload_url, 27 'headers': headers 28 } 29 30 if webhook.http_content_type == WEBHOOK_CT_JSON: 31 params.update({'json': payload}) 32 elif webhook.http_content_type == WEBHOOK_CT_X_WWW_FORM_ENCODED: 33 params.update({'data': payload}) 34 35 prepared_request = requests.Request(**params).prepare() 36 37 if webhook.secret != '': 38 # sign the request with the secret 39 hmac_prep = hmac.new(bytearray(webhook.secret, 'utf8'), prepared_request.body, digestmod=hashlib.sha512) 40 prepared_request.headers['X-Hook-Signature'] = hmac_prep.hexdigest() 41 42 with requests.Session() as session: 43 session.verify = webhook.ssl_verification 44 response = session.send(prepared_request) 45 46 if response.status_code >= 200 and response.status_code <= 299: 47 return 'Status {} returned, webhook successfully processed.'.format(response.status_code) 48 else: 49 raise requests.exceptions.RequestException( 50 "Status {} returned, webhook FAILED to process.".format(response.status_code) 51 ) 52 [end of netbox/extras/webhooks_worker.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/netbox/extras/webhooks_worker.py b/netbox/extras/webhooks_worker.py --- a/netbox/extras/webhooks_worker.py +++ b/netbox/extras/webhooks_worker.py @@ -1,8 +1,10 @@ import hashlib import hmac - import requests +import json + from django_rq import job +from rest_framework.utils.encoders import JSONEncoder from extras.constants import WEBHOOK_CT_JSON, WEBHOOK_CT_X_WWW_FORM_ENCODED, OBJECTCHANGE_ACTION_CHOICES @@ -13,9 +15,9 @@ Make a POST request to the defined Webhook """ payload = { - 'event': dict(OBJECTCHANGE_ACTION_CHOICES)[event], + 'event': dict(OBJECTCHANGE_ACTION_CHOICES)[event].lower(), 'timestamp': timestamp, - 'model': model_class.__name__, + 'model': model_class._meta.model_name, 'data': data } headers = { @@ -28,7 +30,7 @@ } if webhook.http_content_type == WEBHOOK_CT_JSON: - params.update({'json': payload}) + params.update({'data': json.dumps(payload, cls=JSONEncoder)}) elif webhook.http_content_type == WEBHOOK_CT_X_WWW_FORM_ENCODED: params.update({'data': payload})
{"golden_diff": "diff --git a/netbox/extras/webhooks_worker.py b/netbox/extras/webhooks_worker.py\n--- a/netbox/extras/webhooks_worker.py\n+++ b/netbox/extras/webhooks_worker.py\n@@ -1,8 +1,10 @@\n import hashlib\n import hmac\n-\n import requests\n+import json\n+\n from django_rq import job\n+from rest_framework.utils.encoders import JSONEncoder\n \n from extras.constants import WEBHOOK_CT_JSON, WEBHOOK_CT_X_WWW_FORM_ENCODED, OBJECTCHANGE_ACTION_CHOICES\n \n@@ -13,9 +15,9 @@\n Make a POST request to the defined Webhook\n \"\"\"\n payload = {\n- 'event': dict(OBJECTCHANGE_ACTION_CHOICES)[event],\n+ 'event': dict(OBJECTCHANGE_ACTION_CHOICES)[event].lower(),\n 'timestamp': timestamp,\n- 'model': model_class.__name__,\n+ 'model': model_class._meta.model_name,\n 'data': data\n }\n headers = {\n@@ -28,7 +30,7 @@\n }\n \n if webhook.http_content_type == WEBHOOK_CT_JSON:\n- params.update({'json': payload})\n+ params.update({'data': json.dumps(payload, cls=JSONEncoder)})\n elif webhook.http_content_type == WEBHOOK_CT_X_WWW_FORM_ENCODED:\n params.update({'data': payload})\n", "issue": "deficiency in new webhook implementation\n<!--\r\n Before opening a new issue, please search through the existing issues to\r\n see if your topic has already been addressed. Note that you may need to\r\n remove the \"is:open\" filter from the search bar to include closed issues.\r\n\r\n Check the appropriate type for your issue below by placing an x between the\r\n brackets. For assistance with installation issues, or for any other issues\r\n other than those listed below, please raise your topic for discussion on\r\n our mailing list:\r\n\r\n https://groups.google.com/forum/#!forum/netbox-discuss\r\n\r\n Please note that issues which do not fall under any of the below categories\r\n will be closed. Due to an excessive backlog of feature requests, we are\r\n not currently accepting any proposals which extend NetBox's feature scope.\r\n\r\n Do not prepend any sort of tag to your issue's title. An administrator will\r\n review your issue and assign labels as appropriate.\r\n--->\r\n### Issue type\r\n[ ] Feature request <!-- An enhancement of existing functionality -->\r\n[X] Bug report <!-- Unexpected or erroneous behavior -->\r\n[ ] Documentation <!-- A modification to the documentation -->\r\n\r\n<!--\r\n Please describe the environment in which you are running NetBox. (Be sure\r\n to verify that you are running the latest stable release of NetBox before\r\n submitting a bug report.) 
If you are submitting a bug report and have made\r\n any changes to the code base, please first validate that your bug can be\r\n recreated while running an official release.\r\n-->\r\n### Environment\r\n* Python version: python 2.7.5\r\n* NetBox version: develop-2.4\r\n\r\n<!--\r\n BUG REPORTS must include:\r\n * A list of the steps needed for someone else to reproduce the bug\r\n * A description of the expected and observed behavior\r\n * Any relevant error messages (screenshots may also help)\r\n\r\n FEATURE REQUESTS must include:\r\n * A detailed description of the proposed functionality\r\n * A use case for the new feature\r\n * A rough description of any necessary changes to the database schema\r\n * Any relevant third-party libraries which would be needed\r\n-->\r\n### Description\r\nTesting out the webhook implementation and discovered the following bug:\r\n when a model contains a custom field of type date the worker is unable to serialize the data for transmission\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python2.7/site-packages/rq/worker.py\", line 793, in perform_job\r\n rv = job.perform()\r\n File \"/usr/lib/python2.7/site-packages/rq/job.py\", line 599, in perform\r\n self._result = self._execute()\r\n File \"/usr/lib/python2.7/site-packages/rq/job.py\", line 605, in _execute\r\n return self.func(*self.args, **self.kwargs)\r\n File \"/opt/netbox/netbox/extras/webhooks_worker.py\", line 44, in process_webhook\r\n prepared_request = requests.Request(**params).prepare()\r\n File \"/usr/lib/python2.7/site-packages/requests/models.py\", line 259, in prepare\r\n hooks=self.hooks,\r\n File \"/usr/lib/python2.7/site-packages/requests/models.py\", line 307, in prepare\r\n self.prepare_body(data, files, json)\r\n File \"/usr/lib/python2.7/site-packages/requests/models.py\", line 427, in prepare_body\r\n body = json_dumps(json)\r\n File \"/usr/lib64/python2.7/site-packages/simplejson/__init__.py\", line 382, in dumps\r\n return _default_encoder.encode(obj)\r\n File \"/usr/lib64/python2.7/site-packages/simplejson/encoder.py\", line 291, in encode\r\n chunks = self.iterencode(o, _one_shot=True)\r\n File \"/usr/lib64/python2.7/site-packages/simplejson/encoder.py\", line 373, in iterencode\r\n return _iterencode(o, 0)\r\n File \"/usr/lib64/python2.7/site-packages/simplejson/encoder.py\", line 268, in default\r\n o.__class__.__name__)\r\nTypeError: Object of type date is not JSON serializable\r\n```\n", "before_files": [{"content": "import hashlib\nimport hmac\n\nimport requests\nfrom django_rq import job\n\nfrom extras.constants import WEBHOOK_CT_JSON, WEBHOOK_CT_X_WWW_FORM_ENCODED, OBJECTCHANGE_ACTION_CHOICES\n\n\n@job('default')\ndef process_webhook(webhook, data, model_class, event, timestamp):\n \"\"\"\n Make a POST request to the defined Webhook\n \"\"\"\n payload = {\n 'event': dict(OBJECTCHANGE_ACTION_CHOICES)[event],\n 'timestamp': timestamp,\n 'model': model_class.__name__,\n 'data': data\n }\n headers = {\n 'Content-Type': webhook.get_http_content_type_display(),\n }\n params = {\n 'method': 'POST',\n 'url': webhook.payload_url,\n 'headers': headers\n }\n\n if webhook.http_content_type == WEBHOOK_CT_JSON:\n params.update({'json': payload})\n elif webhook.http_content_type == WEBHOOK_CT_X_WWW_FORM_ENCODED:\n params.update({'data': payload})\n\n prepared_request = requests.Request(**params).prepare()\n\n if webhook.secret != '':\n # sign the request with the secret\n hmac_prep = hmac.new(bytearray(webhook.secret, 'utf8'), prepared_request.body, 
digestmod=hashlib.sha512)\n prepared_request.headers['X-Hook-Signature'] = hmac_prep.hexdigest()\n\n with requests.Session() as session:\n session.verify = webhook.ssl_verification\n response = session.send(prepared_request)\n\n if response.status_code >= 200 and response.status_code <= 299:\n return 'Status {} returned, webhook successfully processed.'.format(response.status_code)\n else:\n raise requests.exceptions.RequestException(\n \"Status {} returned, webhook FAILED to process.\".format(response.status_code)\n )\n", "path": "netbox/extras/webhooks_worker.py"}]}
1,916
287
gh_patches_debug_17598
rasdani/github-patches
git_diff
archlinux__archinstall-469
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> cmd_output is undeclared in luks.py but is used when raising an exception ``` if cmd_handle.exit_code != 0: raise DiskError(f'Could not encrypt volume "{partition.path}": {cmd_output}') ``` This will need to be written get the output from the cmd_handle instance of the SysCommand class, or else if this is thrown the output won't be displayed, and we'll instead get an undeclared variable error. </issue> <code> [start of archinstall/lib/luks.py] 1 import pathlib 2 3 from .disk import Partition 4 from .general import * 5 from .output import log 6 7 8 class luks2: 9 def __init__(self, partition, mountpoint, password, key_file=None, auto_unmount=False, *args, **kwargs): 10 self.password = password 11 self.partition = partition 12 self.mountpoint = mountpoint 13 self.args = args 14 self.kwargs = kwargs 15 self.key_file = key_file 16 self.auto_unmount = auto_unmount 17 self.filesystem = 'crypto_LUKS' 18 self.mapdev = None 19 20 def __enter__(self): 21 # if self.partition.allow_formatting: 22 # self.key_file = self.encrypt(self.partition, *self.args, **self.kwargs) 23 # else: 24 if not self.key_file: 25 self.key_file = f"/tmp/{os.path.basename(self.partition.path)}.disk_pw" # TODO: Make disk-pw-file randomly unique? 26 27 if type(self.password) != bytes: 28 self.password = bytes(self.password, 'UTF-8') 29 30 with open(self.key_file, 'wb') as fh: 31 fh.write(self.password) 32 33 return self.unlock(self.partition, self.mountpoint, self.key_file) 34 35 def __exit__(self, *args, **kwargs): 36 # TODO: https://stackoverflow.com/questions/28157929/how-to-safely-handle-an-exception-inside-a-context-manager 37 if self.auto_unmount: 38 self.close() 39 40 if len(args) >= 2 and args[1]: 41 raise args[1] 42 return True 43 44 def encrypt(self, partition, password=None, key_size=512, hash_type='sha512', iter_time=10000, key_file=None): 45 if not self.partition.allow_formatting: 46 raise DiskError(f'Could not encrypt volume {self.partition} due to it having a formatting lock.') 47 48 log(f'Encrypting {partition} (This might take a while)', level=logging.INFO) 49 50 if not key_file: 51 if self.key_file: 52 key_file = self.key_file 53 else: 54 key_file = f"/tmp/{os.path.basename(self.partition.path)}.disk_pw" # TODO: Make disk-pw-file randomly unique? 
55 56 if not password: 57 password = self.password 58 59 if type(password) != bytes: 60 password = bytes(password, 'UTF-8') 61 62 with open(key_file, 'wb') as fh: 63 fh.write(password) 64 65 cryptsetup_args = shlex.join([ 66 '/usr/bin/cryptsetup', 67 '--batch-mode', 68 '--verbose', 69 '--type', 'luks2', 70 '--pbkdf', 'argon2id', 71 '--hash', hash_type, 72 '--key-size', str(key_size), 73 '--iter-time', str(iter_time), 74 '--key-file', os.path.abspath(key_file), 75 '--use-urandom', 76 'luksFormat', partition.path, 77 ]) 78 79 try: 80 # Try to setup the crypt-device 81 cmd_handle = SysCommand(cryptsetup_args) 82 except SysCallError as err: 83 if err.exit_code == 256: 84 log(f'{partition} is being used, trying to unmount and crypt-close the device and running one more attempt at encrypting the device.', level=logging.DEBUG) 85 # Partition was in use, unmount it and try again 86 partition.unmount() 87 88 # Get crypt-information about the device by doing a reverse lookup starting with the partition path 89 # For instance: /dev/sda 90 devinfo = json.loads(b''.join(SysCommand(f"lsblk --fs -J {partition.path}")).decode('UTF-8'))['blockdevices'][0] 91 92 # For each child (sub-partition/sub-device) 93 if len(children := devinfo.get('children', [])): 94 for child in children: 95 # Unmount the child location 96 if child_mountpoint := child.get('mountpoint', None): 97 log(f'Unmounting {child_mountpoint}', level=logging.DEBUG) 98 SysCommand(f"umount -R {child_mountpoint}") 99 100 # And close it if possible. 101 log(f"Closing crypt device {child['name']}", level=logging.DEBUG) 102 SysCommand(f"cryptsetup close {child['name']}") 103 104 # Then try again to set up the crypt-device 105 cmd_handle = SysCommand(cryptsetup_args) 106 else: 107 raise err 108 109 if cmd_handle.exit_code != 0: 110 raise DiskError(f'Could not encrypt volume "{partition.path}": {cmd_output}') 111 112 return key_file 113 114 def unlock(self, partition, mountpoint, key_file): 115 """ 116 Mounts a luks2 compatible partition to a certain mountpoint. 117 Keyfile must be specified as there's no way to interact with the pw-prompt atm. 118 119 :param mountpoint: The name without absolute path, for instance "luksdev" will point to /dev/mapper/luksdev 120 :type mountpoint: str 121 """ 122 from .disk import get_filesystem_type 123 124 if '/' in mountpoint: 125 os.path.basename(mountpoint) # TODO: Raise exception instead? 
126 127 wait_timer = time.time() 128 while pathlib.Path(partition.path).exists() is False and time.time() - wait_timer < 10: 129 time.sleep(0.025) 130 131 SysCommand(f'/usr/bin/cryptsetup open {partition.path} {mountpoint} --key-file {os.path.abspath(key_file)} --type luks2') 132 if os.path.islink(f'/dev/mapper/{mountpoint}'): 133 self.mapdev = f'/dev/mapper/{mountpoint}' 134 unlocked_partition = Partition(self.mapdev, None, encrypted=True, filesystem=get_filesystem_type(self.mapdev), autodetect_filesystem=False) 135 unlocked_partition.allow_formatting = self.partition.allow_formatting 136 return unlocked_partition 137 138 def close(self, mountpoint=None): 139 if not mountpoint: 140 mountpoint = self.mapdev 141 142 SysCommand(f'/usr/bin/cryptsetup close {self.mapdev}') 143 return os.path.islink(self.mapdev) is False 144 145 def format(self, path): 146 if (handle := SysCommand(f"/usr/bin/cryptsetup -q -v luksErase {path}")).exit_code != 0: 147 raise DiskError(f'Could not format {path} with {self.filesystem} because: {b"".join(handle)}') 148 [end of archinstall/lib/luks.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/archinstall/lib/luks.py b/archinstall/lib/luks.py --- a/archinstall/lib/luks.py +++ b/archinstall/lib/luks.py @@ -43,7 +43,7 @@ def encrypt(self, partition, password=None, key_size=512, hash_type='sha512', iter_time=10000, key_file=None): if not self.partition.allow_formatting: - raise DiskError(f'Could not encrypt volume {self.partition} due to it having a formatting lock.') + raise DiskError(f'Could not encrypt volume {partition} due to it having a formatting lock.') log(f'Encrypting {partition} (This might take a while)', level=logging.INFO) @@ -107,7 +107,7 @@ raise err if cmd_handle.exit_code != 0: - raise DiskError(f'Could not encrypt volume "{partition.path}": {cmd_output}') + raise DiskError(f'Could not encrypt volume "{partition.path}": {b"".join(cmd_handle)}') return key_file
{"golden_diff": "diff --git a/archinstall/lib/luks.py b/archinstall/lib/luks.py\n--- a/archinstall/lib/luks.py\n+++ b/archinstall/lib/luks.py\n@@ -43,7 +43,7 @@\n \n \tdef encrypt(self, partition, password=None, key_size=512, hash_type='sha512', iter_time=10000, key_file=None):\n \t\tif not self.partition.allow_formatting:\n-\t\t\traise DiskError(f'Could not encrypt volume {self.partition} due to it having a formatting lock.')\n+\t\t\traise DiskError(f'Could not encrypt volume {partition} due to it having a formatting lock.')\n \n \t\tlog(f'Encrypting {partition} (This might take a while)', level=logging.INFO)\n \n@@ -107,7 +107,7 @@\n \t\t\t\traise err\n \n \t\tif cmd_handle.exit_code != 0:\n-\t\t\traise DiskError(f'Could not encrypt volume \"{partition.path}\": {cmd_output}')\n+\t\t\traise DiskError(f'Could not encrypt volume \"{partition.path}\": {b\"\".join(cmd_handle)}')\n \n \t\treturn key_file\n", "issue": "cmd_output is undeclared in luks.py but is used when raising an exception\n```\r\n\t\tif cmd_handle.exit_code != 0:\r\n\t\t\traise DiskError(f'Could not encrypt volume \"{partition.path}\": {cmd_output}')\r\n```\r\n\r\nThis will need to be written get the output from the cmd_handle instance of the SysCommand class, or else if this is thrown the output won't be displayed, and we'll instead get an undeclared variable error.\n", "before_files": [{"content": "import pathlib\n\nfrom .disk import Partition\nfrom .general import *\nfrom .output import log\n\n\nclass luks2:\n\tdef __init__(self, partition, mountpoint, password, key_file=None, auto_unmount=False, *args, **kwargs):\n\t\tself.password = password\n\t\tself.partition = partition\n\t\tself.mountpoint = mountpoint\n\t\tself.args = args\n\t\tself.kwargs = kwargs\n\t\tself.key_file = key_file\n\t\tself.auto_unmount = auto_unmount\n\t\tself.filesystem = 'crypto_LUKS'\n\t\tself.mapdev = None\n\n\tdef __enter__(self):\n\t\t# if self.partition.allow_formatting:\n\t\t# \tself.key_file = self.encrypt(self.partition, *self.args, **self.kwargs)\n\t\t# else:\n\t\tif not self.key_file:\n\t\t\tself.key_file = f\"/tmp/{os.path.basename(self.partition.path)}.disk_pw\" # TODO: Make disk-pw-file randomly unique?\n\n\t\tif type(self.password) != bytes:\n\t\t\tself.password = bytes(self.password, 'UTF-8')\n\n\t\twith open(self.key_file, 'wb') as fh:\n\t\t\tfh.write(self.password)\n\n\t\treturn self.unlock(self.partition, self.mountpoint, self.key_file)\n\n\tdef __exit__(self, *args, **kwargs):\n\t\t# TODO: https://stackoverflow.com/questions/28157929/how-to-safely-handle-an-exception-inside-a-context-manager\n\t\tif self.auto_unmount:\n\t\t\tself.close()\n\n\t\tif len(args) >= 2 and args[1]:\n\t\t\traise args[1]\n\t\treturn True\n\n\tdef encrypt(self, partition, password=None, key_size=512, hash_type='sha512', iter_time=10000, key_file=None):\n\t\tif not self.partition.allow_formatting:\n\t\t\traise DiskError(f'Could not encrypt volume {self.partition} due to it having a formatting lock.')\n\n\t\tlog(f'Encrypting {partition} (This might take a while)', level=logging.INFO)\n\n\t\tif not key_file:\n\t\t\tif self.key_file:\n\t\t\t\tkey_file = self.key_file\n\t\t\telse:\n\t\t\t\tkey_file = f\"/tmp/{os.path.basename(self.partition.path)}.disk_pw\" # TODO: Make disk-pw-file randomly unique?\n\n\t\tif not password:\n\t\t\tpassword = self.password\n\n\t\tif type(password) != bytes:\n\t\t\tpassword = bytes(password, 'UTF-8')\n\n\t\twith open(key_file, 'wb') as fh:\n\t\t\tfh.write(password)\n\n\t\tcryptsetup_args = 
shlex.join([\n\t\t\t'/usr/bin/cryptsetup',\n\t\t\t'--batch-mode',\n\t\t\t'--verbose',\n\t\t\t'--type', 'luks2',\n\t\t\t'--pbkdf', 'argon2id',\n\t\t\t'--hash', hash_type,\n\t\t\t'--key-size', str(key_size),\n\t\t\t'--iter-time', str(iter_time),\n\t\t\t'--key-file', os.path.abspath(key_file),\n\t\t\t'--use-urandom',\n\t\t\t'luksFormat', partition.path,\n\t\t])\n\n\t\ttry:\n\t\t\t# Try to setup the crypt-device\n\t\t\tcmd_handle = SysCommand(cryptsetup_args)\n\t\texcept SysCallError as err:\n\t\t\tif err.exit_code == 256:\n\t\t\t\tlog(f'{partition} is being used, trying to unmount and crypt-close the device and running one more attempt at encrypting the device.', level=logging.DEBUG)\n\t\t\t\t# Partition was in use, unmount it and try again\n\t\t\t\tpartition.unmount()\n\n\t\t\t\t# Get crypt-information about the device by doing a reverse lookup starting with the partition path\n\t\t\t\t# For instance: /dev/sda\n\t\t\t\tdevinfo = json.loads(b''.join(SysCommand(f\"lsblk --fs -J {partition.path}\")).decode('UTF-8'))['blockdevices'][0]\n\n\t\t\t\t# For each child (sub-partition/sub-device)\n\t\t\t\tif len(children := devinfo.get('children', [])):\n\t\t\t\t\tfor child in children:\n\t\t\t\t\t\t# Unmount the child location\n\t\t\t\t\t\tif child_mountpoint := child.get('mountpoint', None):\n\t\t\t\t\t\t\tlog(f'Unmounting {child_mountpoint}', level=logging.DEBUG)\n\t\t\t\t\t\t\tSysCommand(f\"umount -R {child_mountpoint}\")\n\n\t\t\t\t\t\t# And close it if possible.\n\t\t\t\t\t\tlog(f\"Closing crypt device {child['name']}\", level=logging.DEBUG)\n\t\t\t\t\t\tSysCommand(f\"cryptsetup close {child['name']}\")\n\n\t\t\t\t# Then try again to set up the crypt-device\n\t\t\t\tcmd_handle = SysCommand(cryptsetup_args)\n\t\t\telse:\n\t\t\t\traise err\n\n\t\tif cmd_handle.exit_code != 0:\n\t\t\traise DiskError(f'Could not encrypt volume \"{partition.path}\": {cmd_output}')\n\n\t\treturn key_file\n\n\tdef unlock(self, partition, mountpoint, key_file):\n\t\t\"\"\"\n\t\tMounts a luks2 compatible partition to a certain mountpoint.\n\t\tKeyfile must be specified as there's no way to interact with the pw-prompt atm.\n\n\t\t:param mountpoint: The name without absolute path, for instance \"luksdev\" will point to /dev/mapper/luksdev\n\t\t:type mountpoint: str\n\t\t\"\"\"\n\t\tfrom .disk import get_filesystem_type\n\n\t\tif '/' in mountpoint:\n\t\t\tos.path.basename(mountpoint) # TODO: Raise exception instead?\n\n\t\twait_timer = time.time()\n\t\twhile pathlib.Path(partition.path).exists() is False and time.time() - wait_timer < 10:\n\t\t\ttime.sleep(0.025)\n\n\t\tSysCommand(f'/usr/bin/cryptsetup open {partition.path} {mountpoint} --key-file {os.path.abspath(key_file)} --type luks2')\n\t\tif os.path.islink(f'/dev/mapper/{mountpoint}'):\n\t\t\tself.mapdev = f'/dev/mapper/{mountpoint}'\n\t\t\tunlocked_partition = Partition(self.mapdev, None, encrypted=True, filesystem=get_filesystem_type(self.mapdev), autodetect_filesystem=False)\n\t\t\tunlocked_partition.allow_formatting = self.partition.allow_formatting\n\t\t\treturn unlocked_partition\n\n\tdef close(self, mountpoint=None):\n\t\tif not mountpoint:\n\t\t\tmountpoint = self.mapdev\n\n\t\tSysCommand(f'/usr/bin/cryptsetup close {self.mapdev}')\n\t\treturn os.path.islink(self.mapdev) is False\n\n\tdef format(self, path):\n\t\tif (handle := SysCommand(f\"/usr/bin/cryptsetup -q -v luksErase {path}\")).exit_code != 0:\n\t\t\traise DiskError(f'Could not format {path} with {self.filesystem} because: {b\"\".join(handle)}')\n", "path": "archinstall/lib/luks.py"}]}
2,415
245
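The archinstall patch above replaces the undefined cmd_output with b"".join(cmd_handle), rebuilding the command output by iterating the SysCommand handle, the same pattern luks.py already uses in format(). The snippet below only illustrates that joining step; FakeHandle is a stand-in invented for the example, not archinstall's class, and the device path and error text are placeholders.

```python
class FakeHandle:
    """Stand-in mimicking the two SysCommand traits the fix relies on."""
    exit_code = 1

    def __iter__(self):
        # Iterating the handle yields the captured output as byte chunks.
        yield b'Device /dev/sda2 is in use.\n'

cmd_handle = FakeHandle()

if cmd_handle.exit_code != 0:
    output = b''.join(cmd_handle)                      # what the patch interpolates
    text = output.decode('utf-8', errors='replace')
    print('Could not encrypt volume "/dev/sda2": {}'.format(text.strip()))
```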
gh_patches_debug_31702
rasdani/github-patches
git_diff
napari__napari-6821
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Test vectors # Description This PR adds tests for the vectors layer, improves the doc strings and cleans up the code a tiny bit, but doesn't change any functionality. One question is - should the input parameters be `edge_width`, `edge_color`, and `length` for the width, color, and the multiplicative length factor for the vectors or should they be something else. They used to just be `width`, `color`, and `length` but I added `edge_` to make the parameters the same as for the `points` and `shapes` layer, though you could argue that for the `points` layer the parameters do different things and that in the vectors layer we don't have a `face` and an `edge` so it is just confusing. I'm open to suggestions - personally I like the consistency - but we can change it. Thoughts @bryantChhun @kevinyamauchi @jni? ## Type of change <!-- Please delete options that are not relevant. --> - [x] Bug-fix (non-breaking change which fixes an issue) # How has this been tested? <!-- Please describe the tests that you ran to verify your changes. --> - [x] adds `napari/layers/vectors/tests/test_vectors.py` ## Final checklist: - [x] My PR is the minimum possible work for the desired functionality - [x] I have commented my code, particularly in hard-to-understand areas - [x] I have made corresponding changes to the documentation - [x] I have added tests that prove my fix is effective or that my feature works </issue> <code> [start of napari/_app_model/actions/_view_actions.py] 1 """Actions related to the 'View' menu that do not require Qt. 2 3 View actions that do require Qt should go in 4 `napari/_qt/_qapp_model/qactions/_view.py`. 5 """ 6 7 from app_model.types import Action, ToggleRule 8 9 from napari._app_model.actions._toggle_action import ViewerToggleAction 10 from napari._app_model.constants import CommandId, MenuGroup, MenuId 11 from napari.settings import get_settings 12 13 VIEW_ACTIONS: list[Action] = [] 14 MENUID_DICT = {'axes': MenuId.VIEW_AXES, 'scale_bar': MenuId.VIEW_SCALEBAR} 15 16 for cmd, viewer_attr, sub_attr in ( 17 (CommandId.TOGGLE_VIEWER_AXES, 'axes', 'visible'), 18 (CommandId.TOGGLE_VIEWER_AXES_COLORED, 'axes', 'colored'), 19 (CommandId.TOGGLE_VIEWER_AXES_LABELS, 'axes', 'labels'), 20 (CommandId.TOGGLE_VIEWER_AXES_DASHED, 'axes', 'dashed'), 21 (CommandId.TOGGLE_VIEWER_AXES_ARROWS, 'axes', 'arrows'), 22 (CommandId.TOGGLE_VIEWER_SCALE_BAR, 'scale_bar', 'visible'), 23 (CommandId.TOGGLE_VIEWER_SCALE_BAR_COLORED, 'scale_bar', 'colored'), 24 (CommandId.TOGGLE_VIEWER_SCALE_BAR_TICKS, 'scale_bar', 'ticks'), 25 ): 26 VIEW_ACTIONS.append( 27 ViewerToggleAction( 28 id=cmd, 29 title=cmd.command_title, 30 viewer_attribute=viewer_attr, 31 sub_attribute=sub_attr, 32 menus=[{'id': MENUID_DICT[viewer_attr]}], 33 ) 34 ) 35 36 37 def _tooltip_visibility_toggle() -> None: 38 settings = get_settings().appearance 39 settings.layer_tooltip_visibility = not settings.layer_tooltip_visibility 40 41 42 def _get_current_tooltip_visibility() -> bool: 43 return get_settings().appearance.layer_tooltip_visibility 44 45 46 VIEW_ACTIONS.extend( 47 [ 48 # TODO: this could be made into a toggle setting Action subclass 49 # using a similar pattern to the above ViewerToggleAction classes 50 Action( 51 id=CommandId.TOGGLE_LAYER_TOOLTIPS, 52 title=CommandId.TOGGLE_LAYER_TOOLTIPS.command_title, 53 menus=[ 54 { 55 'id': MenuId.MENUBAR_VIEW, 56 'group': MenuGroup.RENDER, 57 'order': 10, 58 } 59 ], 60 
callback=_tooltip_visibility_toggle, 61 toggled=ToggleRule(get_current=_get_current_tooltip_visibility), 62 ), 63 ] 64 ) 65 [end of napari/_app_model/actions/_view_actions.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/napari/_app_model/actions/_view_actions.py b/napari/_app_model/actions/_view_actions.py deleted file mode 100644 --- a/napari/_app_model/actions/_view_actions.py +++ /dev/null @@ -1,64 +0,0 @@ -"""Actions related to the 'View' menu that do not require Qt. - -View actions that do require Qt should go in -`napari/_qt/_qapp_model/qactions/_view.py`. -""" - -from app_model.types import Action, ToggleRule - -from napari._app_model.actions._toggle_action import ViewerToggleAction -from napari._app_model.constants import CommandId, MenuGroup, MenuId -from napari.settings import get_settings - -VIEW_ACTIONS: list[Action] = [] -MENUID_DICT = {'axes': MenuId.VIEW_AXES, 'scale_bar': MenuId.VIEW_SCALEBAR} - -for cmd, viewer_attr, sub_attr in ( - (CommandId.TOGGLE_VIEWER_AXES, 'axes', 'visible'), - (CommandId.TOGGLE_VIEWER_AXES_COLORED, 'axes', 'colored'), - (CommandId.TOGGLE_VIEWER_AXES_LABELS, 'axes', 'labels'), - (CommandId.TOGGLE_VIEWER_AXES_DASHED, 'axes', 'dashed'), - (CommandId.TOGGLE_VIEWER_AXES_ARROWS, 'axes', 'arrows'), - (CommandId.TOGGLE_VIEWER_SCALE_BAR, 'scale_bar', 'visible'), - (CommandId.TOGGLE_VIEWER_SCALE_BAR_COLORED, 'scale_bar', 'colored'), - (CommandId.TOGGLE_VIEWER_SCALE_BAR_TICKS, 'scale_bar', 'ticks'), -): - VIEW_ACTIONS.append( - ViewerToggleAction( - id=cmd, - title=cmd.command_title, - viewer_attribute=viewer_attr, - sub_attribute=sub_attr, - menus=[{'id': MENUID_DICT[viewer_attr]}], - ) - ) - - -def _tooltip_visibility_toggle() -> None: - settings = get_settings().appearance - settings.layer_tooltip_visibility = not settings.layer_tooltip_visibility - - -def _get_current_tooltip_visibility() -> bool: - return get_settings().appearance.layer_tooltip_visibility - - -VIEW_ACTIONS.extend( - [ - # TODO: this could be made into a toggle setting Action subclass - # using a similar pattern to the above ViewerToggleAction classes - Action( - id=CommandId.TOGGLE_LAYER_TOOLTIPS, - title=CommandId.TOGGLE_LAYER_TOOLTIPS.command_title, - menus=[ - { - 'id': MenuId.MENUBAR_VIEW, - 'group': MenuGroup.RENDER, - 'order': 10, - } - ], - callback=_tooltip_visibility_toggle, - toggled=ToggleRule(get_current=_get_current_tooltip_visibility), - ), - ] -)
{"golden_diff": "diff --git a/napari/_app_model/actions/_view_actions.py b/napari/_app_model/actions/_view_actions.py\ndeleted file mode 100644\n--- a/napari/_app_model/actions/_view_actions.py\n+++ /dev/null\n@@ -1,64 +0,0 @@\n-\"\"\"Actions related to the 'View' menu that do not require Qt.\n-\n-View actions that do require Qt should go in\n-`napari/_qt/_qapp_model/qactions/_view.py`.\n-\"\"\"\n-\n-from app_model.types import Action, ToggleRule\n-\n-from napari._app_model.actions._toggle_action import ViewerToggleAction\n-from napari._app_model.constants import CommandId, MenuGroup, MenuId\n-from napari.settings import get_settings\n-\n-VIEW_ACTIONS: list[Action] = []\n-MENUID_DICT = {'axes': MenuId.VIEW_AXES, 'scale_bar': MenuId.VIEW_SCALEBAR}\n-\n-for cmd, viewer_attr, sub_attr in (\n- (CommandId.TOGGLE_VIEWER_AXES, 'axes', 'visible'),\n- (CommandId.TOGGLE_VIEWER_AXES_COLORED, 'axes', 'colored'),\n- (CommandId.TOGGLE_VIEWER_AXES_LABELS, 'axes', 'labels'),\n- (CommandId.TOGGLE_VIEWER_AXES_DASHED, 'axes', 'dashed'),\n- (CommandId.TOGGLE_VIEWER_AXES_ARROWS, 'axes', 'arrows'),\n- (CommandId.TOGGLE_VIEWER_SCALE_BAR, 'scale_bar', 'visible'),\n- (CommandId.TOGGLE_VIEWER_SCALE_BAR_COLORED, 'scale_bar', 'colored'),\n- (CommandId.TOGGLE_VIEWER_SCALE_BAR_TICKS, 'scale_bar', 'ticks'),\n-):\n- VIEW_ACTIONS.append(\n- ViewerToggleAction(\n- id=cmd,\n- title=cmd.command_title,\n- viewer_attribute=viewer_attr,\n- sub_attribute=sub_attr,\n- menus=[{'id': MENUID_DICT[viewer_attr]}],\n- )\n- )\n-\n-\n-def _tooltip_visibility_toggle() -> None:\n- settings = get_settings().appearance\n- settings.layer_tooltip_visibility = not settings.layer_tooltip_visibility\n-\n-\n-def _get_current_tooltip_visibility() -> bool:\n- return get_settings().appearance.layer_tooltip_visibility\n-\n-\n-VIEW_ACTIONS.extend(\n- [\n- # TODO: this could be made into a toggle setting Action subclass\n- # using a similar pattern to the above ViewerToggleAction classes\n- Action(\n- id=CommandId.TOGGLE_LAYER_TOOLTIPS,\n- title=CommandId.TOGGLE_LAYER_TOOLTIPS.command_title,\n- menus=[\n- {\n- 'id': MenuId.MENUBAR_VIEW,\n- 'group': MenuGroup.RENDER,\n- 'order': 10,\n- }\n- ],\n- callback=_tooltip_visibility_toggle,\n- toggled=ToggleRule(get_current=_get_current_tooltip_visibility),\n- ),\n- ]\n-)\n", "issue": "Test vectors\n# Description\r\nThis PR adds tests for the vectors layer, improves the doc strings and cleans up the code a tiny bit, but doesn't change any functionality.\r\n\r\nOne question is - should the input parameters be \r\n`edge_width`, `edge_color`, and `length` for the width, color, and the multiplicative length factor for the vectors or should they be something else. They used to just be `width`, `color`, and `length` but I added `edge_` to make the parameters the same as for the `points` and `shapes` layer, though you could argue that for the `points` layer the parameters do different things and that in the vectors layer we don't have a `face` and an `edge` so it is just confusing. I'm open to suggestions - personally I like the consistency - but we can change it. Thoughts @bryantChhun @kevinyamauchi @jni?\r\n \r\n## Type of change\r\n<!-- Please delete options that are not relevant. -->\r\n- [x] Bug-fix (non-breaking change which fixes an issue)\r\n\r\n# How has this been tested?\r\n<!-- Please describe the tests that you ran to verify your changes. 
-->\r\n- [x] adds `napari/layers/vectors/tests/test_vectors.py`\r\n\r\n## Final checklist:\r\n- [x] My PR is the minimum possible work for the desired functionality\r\n- [x] I have commented my code, particularly in hard-to-understand areas\r\n- [x] I have made corresponding changes to the documentation\r\n- [x] I have added tests that prove my fix is effective or that my feature works\r\n\n", "before_files": [{"content": "\"\"\"Actions related to the 'View' menu that do not require Qt.\n\nView actions that do require Qt should go in\n`napari/_qt/_qapp_model/qactions/_view.py`.\n\"\"\"\n\nfrom app_model.types import Action, ToggleRule\n\nfrom napari._app_model.actions._toggle_action import ViewerToggleAction\nfrom napari._app_model.constants import CommandId, MenuGroup, MenuId\nfrom napari.settings import get_settings\n\nVIEW_ACTIONS: list[Action] = []\nMENUID_DICT = {'axes': MenuId.VIEW_AXES, 'scale_bar': MenuId.VIEW_SCALEBAR}\n\nfor cmd, viewer_attr, sub_attr in (\n (CommandId.TOGGLE_VIEWER_AXES, 'axes', 'visible'),\n (CommandId.TOGGLE_VIEWER_AXES_COLORED, 'axes', 'colored'),\n (CommandId.TOGGLE_VIEWER_AXES_LABELS, 'axes', 'labels'),\n (CommandId.TOGGLE_VIEWER_AXES_DASHED, 'axes', 'dashed'),\n (CommandId.TOGGLE_VIEWER_AXES_ARROWS, 'axes', 'arrows'),\n (CommandId.TOGGLE_VIEWER_SCALE_BAR, 'scale_bar', 'visible'),\n (CommandId.TOGGLE_VIEWER_SCALE_BAR_COLORED, 'scale_bar', 'colored'),\n (CommandId.TOGGLE_VIEWER_SCALE_BAR_TICKS, 'scale_bar', 'ticks'),\n):\n VIEW_ACTIONS.append(\n ViewerToggleAction(\n id=cmd,\n title=cmd.command_title,\n viewer_attribute=viewer_attr,\n sub_attribute=sub_attr,\n menus=[{'id': MENUID_DICT[viewer_attr]}],\n )\n )\n\n\ndef _tooltip_visibility_toggle() -> None:\n settings = get_settings().appearance\n settings.layer_tooltip_visibility = not settings.layer_tooltip_visibility\n\n\ndef _get_current_tooltip_visibility() -> bool:\n return get_settings().appearance.layer_tooltip_visibility\n\n\nVIEW_ACTIONS.extend(\n [\n # TODO: this could be made into a toggle setting Action subclass\n # using a similar pattern to the above ViewerToggleAction classes\n Action(\n id=CommandId.TOGGLE_LAYER_TOOLTIPS,\n title=CommandId.TOGGLE_LAYER_TOOLTIPS.command_title,\n menus=[\n {\n 'id': MenuId.MENUBAR_VIEW,\n 'group': MenuGroup.RENDER,\n 'order': 10,\n }\n ],\n callback=_tooltip_visibility_toggle,\n toggled=ToggleRule(get_current=_get_current_tooltip_visibility),\n ),\n ]\n)\n", "path": "napari/_app_model/actions/_view_actions.py"}]}
1,548
660
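Purely as a sketch of the kind of vectors-layer test the napari issue above calls for, and assuming the public napari.layers.Vectors constructor accepts an (N, 2, D) array of positions and projections, a minimal check could look like the following; the shapes and assertions are examples, not code from the PR.

```python
import numpy as np
import napari

data = np.random.random((10, 2, 2))   # 10 vectors in 2D: (position, projection) pairs
layer = napari.layers.Vectors(data)

assert layer.data.shape == (10, 2, 2)
assert layer.ndim == 2
```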
gh_patches_debug_10596
rasdani/github-patches
git_diff
xonsh__xonsh-1630
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Autocomplete: don't display full path Using `prompt_kit`, when completing a path such as `/var/log/<TAB>`, the autocompletion menu shows fully prefixed entries. The problem is that when the path is very deep, the autocomplete panel starts to give unreadable results (see attached screenshot). The proposed solution is to only display the `os.path.basename` of the autocompleted path, suffixed by `/` if it is a directory. ![screenshot from 2016-05-19 10-33-09](https://cloud.githubusercontent.com/assets/14598/15397392/1170b2b8-1dae-11e6-85b4-ea31504f9346.png) </issue> <code> [start of xonsh/ptk/completer.py] 1 # -*- coding: utf-8 -*- 2 """Completer implementation to use with prompt_toolkit.""" 3 import os 4 import builtins 5 6 from prompt_toolkit.layout.dimension import LayoutDimension 7 from prompt_toolkit.completion import Completer, Completion 8 9 10 class PromptToolkitCompleter(Completer): 11 """Simple prompt_toolkit Completer object. 12 13 It just redirects requests to normal Xonsh completer. 14 """ 15 16 def __init__(self, completer, ctx): 17 """Takes instance of xonsh.completer.Completer and dict with context.""" 18 self.completer = completer 19 self.ctx = ctx 20 21 def get_completions(self, document, complete_event): 22 """Returns a generator for list of completions.""" 23 24 # Only generate completions when the user hits tab. 25 if complete_event.completion_requested: 26 if self.completer is None: 27 yield from [] 28 else: 29 line = document.current_line.lstrip() 30 endidx = document.cursor_position_col 31 begidx = line[:endidx].rfind(' ') + 1 if line[:endidx].rfind(' ') >= 0 else 0 32 prefix = line[begidx:endidx] 33 line = builtins.aliases.expand_alias(line) 34 completions, l = self.completer.complete(prefix, 35 line, 36 begidx, 37 endidx, 38 self.ctx) 39 if len(completions) <= 1: 40 pass 41 elif len(os.path.commonprefix(completions)) <= len(prefix): 42 self.reserve_space() 43 for comp in completions: 44 yield Completion(comp, -l) 45 46 def reserve_space(self): 47 cli = builtins.__xonsh_shell__.shell.prompter.cli 48 window = cli.application.layout.children[0].content.children[1] 49 50 if window and window.render_info: 51 h = window.render_info.content_height 52 r = builtins.__xonsh_env__.get('COMPLETIONS_MENU_ROWS') 53 size = h + r 54 55 def comp_height(cli): 56 # If there is an autocompletion menu to be shown, make sure that o 57 # layout has at least a minimal height in order to display it. 58 if not cli.is_done: 59 return LayoutDimension(min=size) 60 else: 61 return LayoutDimension() 62 window._height = comp_height 63 [end of xonsh/ptk/completer.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/xonsh/ptk/completer.py b/xonsh/ptk/completer.py --- a/xonsh/ptk/completer.py +++ b/xonsh/ptk/completer.py @@ -40,7 +40,11 @@ pass elif len(os.path.commonprefix(completions)) <= len(prefix): self.reserve_space() + prefix, _, compprefix = prefix.rpartition('.') for comp in completions: + if comp.rsplit('.', 1)[0] in prefix: + comp = comp.rsplit('.', 1)[-1] + l = len(compprefix) if compprefix in comp else 0 yield Completion(comp, -l) def reserve_space(self):
{"golden_diff": "diff --git a/xonsh/ptk/completer.py b/xonsh/ptk/completer.py\n--- a/xonsh/ptk/completer.py\n+++ b/xonsh/ptk/completer.py\n@@ -40,7 +40,11 @@\n pass\n elif len(os.path.commonprefix(completions)) <= len(prefix):\n self.reserve_space()\n+ prefix, _, compprefix = prefix.rpartition('.')\n for comp in completions:\n+ if comp.rsplit('.', 1)[0] in prefix:\n+ comp = comp.rsplit('.', 1)[-1]\n+ l = len(compprefix) if compprefix in comp else 0\n yield Completion(comp, -l)\n \n def reserve_space(self):\n", "issue": "Autocomplete: don't display full path\nUsing `prompt_kit`, when completing a path such as `/var/log/<TAB>`, the autocompletion menu shows fully prefixed entries. The problem is that when the path is very deep, the autocomplete panel starts to give unreadable results (see attached screenshot).\n\nThe proposed solution is to only display the `os.path.basename` of the autocompleted path, suffixed by `/` if it is a directory.\n\n![screenshot from 2016-05-19 10-33-09](https://cloud.githubusercontent.com/assets/14598/15397392/1170b2b8-1dae-11e6-85b4-ea31504f9346.png)\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Completer implementation to use with prompt_toolkit.\"\"\"\nimport os\nimport builtins\n\nfrom prompt_toolkit.layout.dimension import LayoutDimension\nfrom prompt_toolkit.completion import Completer, Completion\n\n\nclass PromptToolkitCompleter(Completer):\n \"\"\"Simple prompt_toolkit Completer object.\n\n It just redirects requests to normal Xonsh completer.\n \"\"\"\n\n def __init__(self, completer, ctx):\n \"\"\"Takes instance of xonsh.completer.Completer and dict with context.\"\"\"\n self.completer = completer\n self.ctx = ctx\n\n def get_completions(self, document, complete_event):\n \"\"\"Returns a generator for list of completions.\"\"\"\n\n # Only generate completions when the user hits tab.\n if complete_event.completion_requested:\n if self.completer is None:\n yield from []\n else:\n line = document.current_line.lstrip()\n endidx = document.cursor_position_col\n begidx = line[:endidx].rfind(' ') + 1 if line[:endidx].rfind(' ') >= 0 else 0\n prefix = line[begidx:endidx]\n line = builtins.aliases.expand_alias(line)\n completions, l = self.completer.complete(prefix,\n line,\n begidx,\n endidx,\n self.ctx)\n if len(completions) <= 1:\n pass\n elif len(os.path.commonprefix(completions)) <= len(prefix):\n self.reserve_space()\n for comp in completions:\n yield Completion(comp, -l)\n\n def reserve_space(self):\n cli = builtins.__xonsh_shell__.shell.prompter.cli\n window = cli.application.layout.children[0].content.children[1]\n\n if window and window.render_info:\n h = window.render_info.content_height\n r = builtins.__xonsh_env__.get('COMPLETIONS_MENU_ROWS')\n size = h + r\n\n def comp_height(cli):\n # If there is an autocompletion menu to be shown, make sure that o\n # layout has at least a minimal height in order to display it.\n if not cli.is_done:\n return LayoutDimension(min=size)\n else:\n return LayoutDimension()\n window._height = comp_height\n", "path": "xonsh/ptk/completer.py"}]}
1,337
170
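The xonsh patch above shortens what the completion menu displays by splitting the typed prefix on its last dot and adjusting the replacement offset for each completion. The helper below is not xonsh code; it just replays that trimming logic on plain strings so the effect is visible.

```python
def trim_completion(prefix, completion):
    # Mirror of the patched logic: drop the dotted head the user already typed.
    head, _, comp_prefix = prefix.rpartition('.')
    if completion.rsplit('.', 1)[0] in prefix:
        completion = completion.rsplit('.', 1)[-1]
    offset = len(comp_prefix) if comp_prefix in completion else 0
    return completion, offset

print(trim_completion('os.path.basena', 'os.path.basename'))  # ('basename', 6)
print(trim_completion('pri', 'print'))                        # ('print', 3)
```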
gh_patches_debug_33734
rasdani/github-patches
git_diff
3cn-ecn__nantralPlatform-484
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Problème avec les liens vers les auteur.ic.es des suggestions Quand quelqu'un fait une suggestion depuis le site, le lien pour avoir le nom de la personne ne fonctionne pas. </issue> <code> [start of server/apps/home/forms.py] 1 from django import forms 2 3 class SuggestionForm(forms.Form): 4 title = forms.CharField(max_length=50, required=True) 5 description = forms.CharField(widget=forms.Textarea) 6 [end of server/apps/home/forms.py] [start of server/apps/utils/github.py] 1 import requests 2 from django.conf import settings 3 4 5 def create_issue(title: str, body: str): 6 issue = { 7 'title': title, 8 'body': body 9 } 10 resp = requests.post(f'https://api.github.com/repos/{settings.GITHUB_REPO}/issues', 11 json=issue, auth=(settings.GITHUB_USER, settings.GITHUB_TOKEN)) 12 if resp.status_code != 201: 13 raise Exception(f'Error while posting issue to Github: {resp.reason}') 14 return resp.json()['number'] 15 16 17 def close_issue(number: int): 18 """Function to close an issue in the repo.""" 19 update = {'state': 'closed'} 20 requests.post(f'https://api.github.com/repos/{settings.GITHUB_REPO}/issues/{number}', 21 json=update, auth=(settings.GITHUB_USER, settings.GITHUB_TOKEN)) 22 [end of server/apps/utils/github.py] [start of server/apps/home/views.py] 1 from datetime import * 2 from typing import List 3 from django.contrib.sites.shortcuts import get_current_site 4 from django.db.models.query import QuerySet 5 from django.shortcuts import render, redirect 6 from django.views.generic import TemplateView, FormView 7 from django.contrib import messages 8 from django.contrib.auth.mixins import LoginRequiredMixin 9 10 from apps.event.models import BaseEvent 11 from apps.post.models import Post 12 from apps.utils.github import create_issue 13 14 from .forms import SuggestionForm 15 16 17 class HomeView(LoginRequiredMixin, TemplateView): 18 template_name = 'home/home.html' 19 20 def get_context_data(self, **kwargs): 21 # Call the base implementation first to get a context 22 context = super().get_context_data(**kwargs) 23 posts: List[Post] = Post.objects.filter( 24 publication_date__gte=date.today()-timedelta(days=10)).order_by('-publication_date') 25 context['posts'] = [ 26 post for post in posts if post.can_view(self.request.user)] 27 return context 28 29 30 class SuggestionView(LoginRequiredMixin, FormView): 31 template_name = 'home/suggestions.html' 32 form_class = SuggestionForm 33 34 def form_valid(self, form): 35 create_issue( 36 title=form.cleaned_data['title'], 37 body=f"{form.cleaned_data['description']} <br/> <a href='http://{get_current_site(self.request)}{self.request.user.student.get_absolute_url}'>Clique pour découvrir qui propose ça.</a>" 38 ) 39 messages.success( 40 self.request, 'Votre suggestion a été enregistrée merci') 41 return redirect('home:home') 42 43 44 def handler404(request, *args, **argv): 45 response = render(request, '404.html', context={}, status=404) 46 return response 47 48 49 def handler500(request, *args, **argv): 50 response = render(request, '500.html', context={}, 51 status=500) 52 return response 53 54 55 def event_sort(events, request): 56 tri = {} 57 jours = ["Lundi", "Mardi", "Mercredi", 58 "Jeudi", "Vendredi", "Samedi", "Dimanche"] 59 mois = ["Janvier", "Février", "Mars", "Avril", "Mai", "Juin", 60 "Juillet", "Août", "Septembre", "Octobre", "Novembre", "Décembre"] 61 for event in events: 62 if event.date.date() == date.today(): 63 if 
"Aujourd'hui" in tri: 64 tri["Aujourd'hui"].append( 65 (event, event.is_participating(request.user))) 66 else: 67 tri["Aujourd'hui"] = list() 68 tri["Aujourd'hui"].append( 69 (event, event.is_participating(request.user))) 70 elif event.date.date() == (date.today()+timedelta(days=1)): 71 if "Demain" in tri: 72 tri["Demain"].append( 73 (event, event.is_participating(request.user))) 74 else: 75 tri["Demain"] = list() 76 tri["Demain"].append( 77 (event, event.is_participating(request.user))) 78 else: 79 written_date = jours[event.date.weekday( 80 )] + " " + str(event.date.day) + " " + mois[event.date.month-1] 81 if written_date in tri: 82 tri[written_date].append( 83 (event, event.is_participating(request.user))) 84 else: 85 tri[written_date] = list() 86 tri[written_date].append( 87 (event, event.is_participating(request.user))) 88 return tri 89 [end of server/apps/home/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/server/apps/home/forms.py b/server/apps/home/forms.py --- a/server/apps/home/forms.py +++ b/server/apps/home/forms.py @@ -1,5 +1,14 @@ from django import forms +TYPE_CHOICES = ( + (1, ("Bug")), + (2, ("Suggestion")) +) + + class SuggestionForm(forms.Form): title = forms.CharField(max_length=50, required=True) description = forms.CharField(widget=forms.Textarea) + suggestionOrBug = forms.ChoiceField(label="Type", + choices=TYPE_CHOICES, + required=True) diff --git a/server/apps/home/views.py b/server/apps/home/views.py --- a/server/apps/home/views.py +++ b/server/apps/home/views.py @@ -34,7 +34,8 @@ def form_valid(self, form): create_issue( title=form.cleaned_data['title'], - body=f"{form.cleaned_data['description']} <br/> <a href='http://{get_current_site(self.request)}{self.request.user.student.get_absolute_url}'>Clique pour découvrir qui propose ça.</a>" + body=f"{form.cleaned_data['description']} <br/> [Clique pour découvrir qui propose ça.](http://{get_current_site(self.request)}{self.request.user.student.get_absolute_url()})", + label=form.cleaned_data['suggestionOrBug'] ) messages.success( self.request, 'Votre suggestion a été enregistrée merci') diff --git a/server/apps/utils/github.py b/server/apps/utils/github.py --- a/server/apps/utils/github.py +++ b/server/apps/utils/github.py @@ -2,15 +2,18 @@ from django.conf import settings -def create_issue(title: str, body: str): +def create_issue(title: str, body: str, label): + label = "bug" if int(label) == 1 else "suggestion" issue = { 'title': title, - 'body': body + 'body': body, + 'labels': [label] } resp = requests.post(f'https://api.github.com/repos/{settings.GITHUB_REPO}/issues', json=issue, auth=(settings.GITHUB_USER, settings.GITHUB_TOKEN)) if resp.status_code != 201: - raise Exception(f'Error while posting issue to Github: {resp.reason}') + raise Exception( + f'Error while posting issue to Github: {resp.reason}') return resp.json()['number']
{"golden_diff": "diff --git a/server/apps/home/forms.py b/server/apps/home/forms.py\n--- a/server/apps/home/forms.py\n+++ b/server/apps/home/forms.py\n@@ -1,5 +1,14 @@\n from django import forms\n \n+TYPE_CHOICES = (\n+ (1, (\"Bug\")),\n+ (2, (\"Suggestion\"))\n+)\n+\n+\n class SuggestionForm(forms.Form):\n title = forms.CharField(max_length=50, required=True)\n description = forms.CharField(widget=forms.Textarea)\n+ suggestionOrBug = forms.ChoiceField(label=\"Type\",\n+ choices=TYPE_CHOICES,\n+ required=True)\ndiff --git a/server/apps/home/views.py b/server/apps/home/views.py\n--- a/server/apps/home/views.py\n+++ b/server/apps/home/views.py\n@@ -34,7 +34,8 @@\n def form_valid(self, form):\n create_issue(\n title=form.cleaned_data['title'],\n- body=f\"{form.cleaned_data['description']} <br/> <a href='http://{get_current_site(self.request)}{self.request.user.student.get_absolute_url}'>Clique pour d\u00e9couvrir qui propose \u00e7a.</a>\"\n+ body=f\"{form.cleaned_data['description']} <br/> [Clique pour d\u00e9couvrir qui propose \u00e7a.](http://{get_current_site(self.request)}{self.request.user.student.get_absolute_url()})\",\n+ label=form.cleaned_data['suggestionOrBug']\n )\n messages.success(\n self.request, 'Votre suggestion a \u00e9t\u00e9 enregistr\u00e9e merci')\ndiff --git a/server/apps/utils/github.py b/server/apps/utils/github.py\n--- a/server/apps/utils/github.py\n+++ b/server/apps/utils/github.py\n@@ -2,15 +2,18 @@\n from django.conf import settings\n \n \n-def create_issue(title: str, body: str):\n+def create_issue(title: str, body: str, label):\n+ label = \"bug\" if int(label) == 1 else \"suggestion\"\n issue = {\n 'title': title,\n- 'body': body\n+ 'body': body,\n+ 'labels': [label]\n }\n resp = requests.post(f'https://api.github.com/repos/{settings.GITHUB_REPO}/issues',\n json=issue, auth=(settings.GITHUB_USER, settings.GITHUB_TOKEN))\n if resp.status_code != 201:\n- raise Exception(f'Error while posting issue to Github: {resp.reason}')\n+ raise Exception(\n+ f'Error while posting issue to Github: {resp.reason}')\n return resp.json()['number']\n", "issue": "Probl\u00e8me avec les liens vers les auteur.ic.es des suggestions\nQuand quelqu'un fait une suggestion depuis le site, le lien pour avoir le nom de la personne ne fonctionne pas.\n", "before_files": [{"content": "from django import forms\n\nclass SuggestionForm(forms.Form):\n title = forms.CharField(max_length=50, required=True)\n description = forms.CharField(widget=forms.Textarea)\n", "path": "server/apps/home/forms.py"}, {"content": "import requests\nfrom django.conf import settings\n\n\ndef create_issue(title: str, body: str):\n issue = {\n 'title': title,\n 'body': body\n }\n resp = requests.post(f'https://api.github.com/repos/{settings.GITHUB_REPO}/issues',\n json=issue, auth=(settings.GITHUB_USER, settings.GITHUB_TOKEN))\n if resp.status_code != 201:\n raise Exception(f'Error while posting issue to Github: {resp.reason}')\n return resp.json()['number']\n\n\ndef close_issue(number: int):\n \"\"\"Function to close an issue in the repo.\"\"\"\n update = {'state': 'closed'}\n requests.post(f'https://api.github.com/repos/{settings.GITHUB_REPO}/issues/{number}',\n json=update, auth=(settings.GITHUB_USER, settings.GITHUB_TOKEN))\n", "path": "server/apps/utils/github.py"}, {"content": "from datetime import *\nfrom typing import List\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.db.models.query import QuerySet\nfrom django.shortcuts import render, redirect\nfrom django.views.generic import 
TemplateView, FormView\nfrom django.contrib import messages\nfrom django.contrib.auth.mixins import LoginRequiredMixin\n\nfrom apps.event.models import BaseEvent\nfrom apps.post.models import Post\nfrom apps.utils.github import create_issue\n\nfrom .forms import SuggestionForm\n\n\nclass HomeView(LoginRequiredMixin, TemplateView):\n template_name = 'home/home.html'\n\n def get_context_data(self, **kwargs):\n # Call the base implementation first to get a context\n context = super().get_context_data(**kwargs)\n posts: List[Post] = Post.objects.filter(\n publication_date__gte=date.today()-timedelta(days=10)).order_by('-publication_date')\n context['posts'] = [\n post for post in posts if post.can_view(self.request.user)]\n return context\n\n\nclass SuggestionView(LoginRequiredMixin, FormView):\n template_name = 'home/suggestions.html'\n form_class = SuggestionForm\n\n def form_valid(self, form):\n create_issue(\n title=form.cleaned_data['title'],\n body=f\"{form.cleaned_data['description']} <br/> <a href='http://{get_current_site(self.request)}{self.request.user.student.get_absolute_url}'>Clique pour d\u00e9couvrir qui propose \u00e7a.</a>\"\n )\n messages.success(\n self.request, 'Votre suggestion a \u00e9t\u00e9 enregistr\u00e9e merci')\n return redirect('home:home')\n\n\ndef handler404(request, *args, **argv):\n response = render(request, '404.html', context={}, status=404)\n return response\n\n\ndef handler500(request, *args, **argv):\n response = render(request, '500.html', context={},\n status=500)\n return response\n\n\ndef event_sort(events, request):\n tri = {}\n jours = [\"Lundi\", \"Mardi\", \"Mercredi\",\n \"Jeudi\", \"Vendredi\", \"Samedi\", \"Dimanche\"]\n mois = [\"Janvier\", \"F\u00e9vrier\", \"Mars\", \"Avril\", \"Mai\", \"Juin\",\n \"Juillet\", \"Ao\u00fbt\", \"Septembre\", \"Octobre\", \"Novembre\", \"D\u00e9cembre\"]\n for event in events:\n if event.date.date() == date.today():\n if \"Aujourd'hui\" in tri:\n tri[\"Aujourd'hui\"].append(\n (event, event.is_participating(request.user)))\n else:\n tri[\"Aujourd'hui\"] = list()\n tri[\"Aujourd'hui\"].append(\n (event, event.is_participating(request.user)))\n elif event.date.date() == (date.today()+timedelta(days=1)):\n if \"Demain\" in tri:\n tri[\"Demain\"].append(\n (event, event.is_participating(request.user)))\n else:\n tri[\"Demain\"] = list()\n tri[\"Demain\"].append(\n (event, event.is_participating(request.user)))\n else:\n written_date = jours[event.date.weekday(\n )] + \" \" + str(event.date.day) + \" \" + mois[event.date.month-1]\n if written_date in tri:\n tri[written_date].append(\n (event, event.is_participating(request.user)))\n else:\n tri[written_date] = list()\n tri[written_date].append(\n (event, event.is_participating(request.user)))\n return tri\n", "path": "server/apps/home/views.py"}]}
1,812
538
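Two details carry the nantralPlatform fix above: the student URL must come from calling get_absolute_url() (the original string interpolated the bound method itself, which is why the link never worked), and the GitHub issue is created with a labels list so bugs and suggestions can be told apart. The sketch below shows that request shape; the repository name, credentials and body text are placeholders rather than project configuration.

```python
import requests

def create_issue(title, body, label, repo='owner/name', auth=('user', 'token')):
    payload = {'title': title, 'body': body, 'labels': [label]}
    resp = requests.post(
        'https://api.github.com/repos/{}/issues'.format(repo),
        json=payload,
        auth=auth,
    )
    resp.raise_for_status()                 # GitHub answers 201 on success
    return resp.json()['number']

# Body built with a Markdown link and an actual call to get_absolute_url():
# body = "{} <br/> [Clique pour découvrir qui propose ça.](http://{}{})".format(
#     description, site, request.user.student.get_absolute_url())
```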
gh_patches_debug_20575
rasdani/github-patches
git_diff
PokemonGoF__PokemonGo-Bot-2720
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> 'ascii' codec can't decode byte 0xc3 ``` 2016-08-07 10:51:36,268 [RecycleItems] [INFO] [item_discarded] Discarded 1x Razz Berry (maximum 20). 2016-08-07 10:51:36,875 [TransferPokemon] [INFO] [future_pokemon_release] Releasing Charmander (CP 172/IV 0.18) based on rule: CP < 9 OR IV < 0.97 2016-08-07 10:51:37,437 [TransferPokemon] [INFO] [pokemon_release] Exchanged Charmander [CP 172] [IV 0.18] for candy. 2016-08-07 10:51:37,953 [MoveToFort] [INFO] [moving_to_lured_fort] Moving towards pokestop Estátua Moore - 0.05km (attraction of lure 0.05km) 2016-08-07 10:51:37,953 [MoveToFort] [INFO] [arrived_at_fort] Arrived at fort. 2016-08-07 10:51:39,679 [PokemonCatchWorker] [INFO] [pokemon_appeared] A wild Magnemite appeared! [CP 422] [Potential 0.71] [S/A/D 10/11/11] 2016-08-07 10:51:42,526 [PokemonCatchWorker] [INFO] [threw_pokeball] Used Pokeball, with chance 35.29 (127 left) 2016-08-07 10:51:43,728 [PokemonCatchWorker] [INFO] [pokemon_caught] Captured Magnemite! [CP 422] [Potential 0.71] [10/11/11] [+210 exp] 2016-08-07 10:51:44,706 [PokemonCatchWorker] [INFO] [pokemon_evolve_fail] Failed to evolve Magnemite! 2016-08-07 10:51:50,245 [ cli] [INFO] 2016-08-07 10:51:50,245 [ cli] [INFO] Ran for 0:01:58 2016-08-07 10:51:50,245 [ cli] [INFO] Total XP Earned: 1450 Average: 44164.12/h 2016-08-07 10:51:50,245 [ cli] [INFO] Travelled 0.00km 2016-08-07 10:51:50,246 [ cli] [INFO] Visited 8 stops 2016-08-07 10:51:50,246 [ cli] [INFO] Encountered 5 pokemon, 5 caught, 0 released, 0 evolved, 0 never seen before 2016-08-07 10:51:50,246 [ cli] [INFO] Threw 5 poke balls 2016-08-07 10:51:50,246 [ cli] [INFO] Earned 500 Stardust 2016-08-07 10:51:50,246 [ cli] [INFO] 2016-08-07 10:51:50,246 [ cli] [INFO] Highest CP Pokemon: Machop [CP: 645] [IV: 9/15/11] Potential: 0.78 2016-08-07 10:51:50,246 [ cli] [INFO] Most Perfect Pokemon: Machop [CP: 645] [IV: 9/15/11] Potential: 0.78 Traceback (most recent call last): File "pokecli.py", line 499, in <module> main() File "pokecli.py", line 130, in main raise e UnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 3: ordinal not in range(128) ``` error appears only with movetofort with path config no error appears </issue> <code> [start of pokemongo_bot/cell_workers/catch_lured_pokemon.py] 1 # -*- coding: utf-8 -*- 2 from __future__ import unicode_literals 3 4 from pokemongo_bot.cell_workers.utils import fort_details 5 from pokemongo_bot.cell_workers.pokemon_catch_worker import PokemonCatchWorker 6 from pokemongo_bot.base_task import BaseTask 7 8 9 class CatchLuredPokemon(BaseTask): 10 def work(self): 11 lured_pokemon = self.get_lured_pokemon() 12 if lured_pokemon: 13 self.catch_pokemon(lured_pokemon) 14 15 def get_lured_pokemon(self): 16 forts = self.bot.get_forts(order_by_distance=True) 17 18 if len(forts) == 0: 19 return False 20 21 fort = forts[0] 22 details = fort_details(self.bot, fort_id=fort['id'], 23 latitude=fort['latitude'], 24 longitude=fort['longitude']) 25 fort_name = details.get('name', 'Unknown').encode('utf8', 'replace') 26 27 encounter_id = fort.get('lure_info', {}).get('encounter_id', None) 28 29 if encounter_id: 30 result = { 31 'encounter_id': encounter_id, 32 'fort_id': fort['id'], 33 'fort_name': fort_name, 34 'latitude': fort['latitude'], 35 'longitude': fort['longitude'] 36 } 37 38 self.emit_event( 39 'lured_pokemon_found', 40 formatted='Lured pokemon at fort {fort_name} ({fort_id})', 41 data=result 42 ) 43 return result 44 45 
return False 46 47 def catch_pokemon(self, pokemon): 48 worker = PokemonCatchWorker(pokemon, self.bot) 49 return_value = worker.work() 50 51 return return_value 52 [end of pokemongo_bot/cell_workers/catch_lured_pokemon.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pokemongo_bot/cell_workers/catch_lured_pokemon.py b/pokemongo_bot/cell_workers/catch_lured_pokemon.py --- a/pokemongo_bot/cell_workers/catch_lured_pokemon.py +++ b/pokemongo_bot/cell_workers/catch_lured_pokemon.py @@ -22,7 +22,7 @@ details = fort_details(self.bot, fort_id=fort['id'], latitude=fort['latitude'], longitude=fort['longitude']) - fort_name = details.get('name', 'Unknown').encode('utf8', 'replace') + fort_name = details.get('name', 'Unknown') encounter_id = fort.get('lure_info', {}).get('encounter_id', None) @@ -30,7 +30,7 @@ result = { 'encounter_id': encounter_id, 'fort_id': fort['id'], - 'fort_name': fort_name, + 'fort_name': u"{}".format(fort_name), 'latitude': fort['latitude'], 'longitude': fort['longitude'] }
{"golden_diff": "diff --git a/pokemongo_bot/cell_workers/catch_lured_pokemon.py b/pokemongo_bot/cell_workers/catch_lured_pokemon.py\n--- a/pokemongo_bot/cell_workers/catch_lured_pokemon.py\n+++ b/pokemongo_bot/cell_workers/catch_lured_pokemon.py\n@@ -22,7 +22,7 @@\n details = fort_details(self.bot, fort_id=fort['id'],\n latitude=fort['latitude'],\n longitude=fort['longitude'])\n- fort_name = details.get('name', 'Unknown').encode('utf8', 'replace')\n+ fort_name = details.get('name', 'Unknown')\n \n encounter_id = fort.get('lure_info', {}).get('encounter_id', None)\n \n@@ -30,7 +30,7 @@\n result = {\n 'encounter_id': encounter_id,\n 'fort_id': fort['id'],\n- 'fort_name': fort_name,\n+ 'fort_name': u\"{}\".format(fort_name),\n 'latitude': fort['latitude'],\n 'longitude': fort['longitude']\n }\n", "issue": "'ascii' codec can't decode byte 0xc3\n```\n2016-08-07 10:51:36,268 [RecycleItems] [INFO] [item_discarded] Discarded 1x Razz Berry (maximum 20).\n2016-08-07 10:51:36,875 [TransferPokemon] [INFO] [future_pokemon_release] Releasing Charmander (CP 172/IV 0.18) based on rule: CP < 9 OR IV < 0.97\n2016-08-07 10:51:37,437 [TransferPokemon] [INFO] [pokemon_release] Exchanged Charmander [CP 172] [IV 0.18] for candy.\n2016-08-07 10:51:37,953 [MoveToFort] [INFO] [moving_to_lured_fort] Moving towards pokestop Est\u00e1tua Moore - 0.05km (attraction of lure 0.05km)\n2016-08-07 10:51:37,953 [MoveToFort] [INFO] [arrived_at_fort] Arrived at fort.\n2016-08-07 10:51:39,679 [PokemonCatchWorker] [INFO] [pokemon_appeared] A wild Magnemite appeared! [CP 422] [Potential 0.71] [S/A/D 10/11/11]\n2016-08-07 10:51:42,526 [PokemonCatchWorker] [INFO] [threw_pokeball] Used Pokeball, with chance 35.29 (127 left)\n2016-08-07 10:51:43,728 [PokemonCatchWorker] [INFO] [pokemon_caught] Captured Magnemite! 
[CP 422] [Potential 0.71] [10/11/11] [+210 exp]\n2016-08-07 10:51:44,706 [PokemonCatchWorker] [INFO] [pokemon_evolve_fail] Failed to evolve Magnemite!\n2016-08-07 10:51:50,245 [ cli] [INFO] \n2016-08-07 10:51:50,245 [ cli] [INFO] Ran for 0:01:58\n2016-08-07 10:51:50,245 [ cli] [INFO] Total XP Earned: 1450 Average: 44164.12/h\n2016-08-07 10:51:50,245 [ cli] [INFO] Travelled 0.00km\n2016-08-07 10:51:50,246 [ cli] [INFO] Visited 8 stops\n2016-08-07 10:51:50,246 [ cli] [INFO] Encountered 5 pokemon, 5 caught, 0 released, 0 evolved, 0 never seen before\n2016-08-07 10:51:50,246 [ cli] [INFO] Threw 5 poke balls\n2016-08-07 10:51:50,246 [ cli] [INFO] Earned 500 Stardust\n2016-08-07 10:51:50,246 [ cli] [INFO] \n2016-08-07 10:51:50,246 [ cli] [INFO] Highest CP Pokemon: Machop [CP: 645] [IV: 9/15/11] Potential: 0.78 \n2016-08-07 10:51:50,246 [ cli] [INFO] Most Perfect Pokemon: Machop [CP: 645] [IV: 9/15/11] Potential: 0.78 \nTraceback (most recent call last):\n File \"pokecli.py\", line 499, in <module>\n main()\n File \"pokecli.py\", line 130, in main\n raise e\nUnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 3: ordinal not in range(128)\n```\n\nerror appears only with movetofort\nwith path config no error appears \n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom pokemongo_bot.cell_workers.utils import fort_details\nfrom pokemongo_bot.cell_workers.pokemon_catch_worker import PokemonCatchWorker\nfrom pokemongo_bot.base_task import BaseTask\n\n\nclass CatchLuredPokemon(BaseTask):\n def work(self):\n lured_pokemon = self.get_lured_pokemon()\n if lured_pokemon:\n self.catch_pokemon(lured_pokemon)\n\n def get_lured_pokemon(self):\n forts = self.bot.get_forts(order_by_distance=True)\n\n if len(forts) == 0:\n return False\n\n fort = forts[0]\n details = fort_details(self.bot, fort_id=fort['id'],\n latitude=fort['latitude'],\n longitude=fort['longitude'])\n fort_name = details.get('name', 'Unknown').encode('utf8', 'replace')\n\n encounter_id = fort.get('lure_info', {}).get('encounter_id', None)\n\n if encounter_id:\n result = {\n 'encounter_id': encounter_id,\n 'fort_id': fort['id'],\n 'fort_name': fort_name,\n 'latitude': fort['latitude'],\n 'longitude': fort['longitude']\n }\n\n self.emit_event(\n 'lured_pokemon_found',\n formatted='Lured pokemon at fort {fort_name} ({fort_id})',\n data=result\n )\n return result\n\n return False\n\n def catch_pokemon(self, pokemon):\n worker = PokemonCatchWorker(pokemon, self.bot)\n return_value = worker.work()\n\n return return_value\n", "path": "pokemongo_bot/cell_workers/catch_lured_pokemon.py"}]}
2,137
244
gh_patches_debug_17126
rasdani/github-patches
git_diff
rucio__rucio-5505
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Deprecation message in Paramiko Motivation ---------- Paramiko outputs a deprecation message due to an outdated security algorithm. We do not depend on that algorithm. https://github.com/paramiko/paramiko/pull/2039 Modification ------------ The paramiko team is aware of the problem. They opened a PR to fix it. </issue> <code> [start of lib/rucio/common/extra.py] 1 # -*- coding: utf-8 -*- 2 # Copyright 2021 CERN 3 # 4 # Licensed under the Apache License, Version 2.0 (the "License"); 5 # you may not use this file except in compliance with the License. 6 # You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License. 15 # 16 # Authors: 17 # - Benedikt Ziemons <[email protected]>, 2021 18 19 import importlib 20 21 22 def import_extras(module_list): 23 out = dict() 24 for mod in module_list: 25 out[mod] = None 26 try: 27 out[mod] = importlib.import_module(mod) 28 except ImportError: 29 pass 30 return out 31 [end of lib/rucio/common/extra.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lib/rucio/common/extra.py b/lib/rucio/common/extra.py --- a/lib/rucio/common/extra.py +++ b/lib/rucio/common/extra.py @@ -17,6 +17,7 @@ # - Benedikt Ziemons <[email protected]>, 2021 import importlib +import warnings def import_extras(module_list): @@ -24,7 +25,12 @@ for mod in module_list: out[mod] = None try: - out[mod] = importlib.import_module(mod) + with warnings.catch_warnings(): + # TODO: remove when https://github.com/paramiko/paramiko/issues/2038 is fixed + warnings.filterwarnings('ignore', 'Blowfish has been deprecated', module='paramiko') + # TODO: deprecated python 2 and 3.6 too ... + warnings.filterwarnings('ignore', 'Python .* is no longer supported', module='paramiko') + out[mod] = importlib.import_module(mod) except ImportError: pass return out
{"golden_diff": "diff --git a/lib/rucio/common/extra.py b/lib/rucio/common/extra.py\n--- a/lib/rucio/common/extra.py\n+++ b/lib/rucio/common/extra.py\n@@ -17,6 +17,7 @@\n # - Benedikt Ziemons <[email protected]>, 2021\n \n import importlib\n+import warnings\n \n \n def import_extras(module_list):\n@@ -24,7 +25,12 @@\n for mod in module_list:\n out[mod] = None\n try:\n- out[mod] = importlib.import_module(mod)\n+ with warnings.catch_warnings():\n+ # TODO: remove when https://github.com/paramiko/paramiko/issues/2038 is fixed\n+ warnings.filterwarnings('ignore', 'Blowfish has been deprecated', module='paramiko')\n+ # TODO: deprecated python 2 and 3.6 too ...\n+ warnings.filterwarnings('ignore', 'Python .* is no longer supported', module='paramiko')\n+ out[mod] = importlib.import_module(mod)\n except ImportError:\n pass\n return out\n", "issue": "Deprecation message in Paramiko\nMotivation\r\n----------\r\nParamiko outputs a deprecation message due to an outdated security algorithm. We do not depend on that algorithm.\r\nhttps://github.com/paramiko/paramiko/pull/2039\r\n\r\nModification\r\n------------\r\nThe paramiko team is aware of the problem. They opened a PR to fix it.\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2021 CERN\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Authors:\n# - Benedikt Ziemons <[email protected]>, 2021\n\nimport importlib\n\n\ndef import_extras(module_list):\n out = dict()\n for mod in module_list:\n out[mod] = None\n try:\n out[mod] = importlib.import_module(mod)\n except ImportError:\n pass\n return out\n", "path": "lib/rucio/common/extra.py"}]}
896
255
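A runnable distillation of the fix recorded in the rucio row above — the warning filters and the paramiko module name come straight from its golden diff; the `__main__` demo list is our own illustration, not part of the dataset:

```python
import importlib
import warnings


def import_extras(module_list):
    """Import optional modules, returning None for any that are missing,
    while silencing the known paramiko deprecation warnings on import."""
    out = {}
    for mod in module_list:
        out[mod] = None
        try:
            with warnings.catch_warnings():
                # Same filters as the golden diff: paramiko's Blowfish and
                # old-Python deprecation notices are expected and harmless here.
                warnings.filterwarnings(
                    'ignore', 'Blowfish has been deprecated', module='paramiko')
                warnings.filterwarnings(
                    'ignore', 'Python .* is no longer supported', module='paramiko')
                out[mod] = importlib.import_module(mod)
        except ImportError:
            pass
    return out


if __name__ == '__main__':
    # Hypothetical demo list; paramiko may or may not be installed locally.
    print(import_extras(['paramiko', 'definitely_not_installed']))
```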
gh_patches_debug_16935
rasdani/github-patches
git_diff
napari__napari-6546
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Keybinding to set a new label doesn't work consistently ### 🐛 Bug Report After creating a labels layer, I observed that the `m` keybinding does not behave consistently. ### 💡 Steps to Reproduce Create a labels layer. - If the layer is selected, the `m` keybinding will either do nothing or select another layer wiht a name that starts with the letter `m`; - If the paintbrush, fill bucket or polygon tools are selected, the `m` keybinding doesn't do anything; - If the paintbrush has been used in the canvas, the `m` keybinding correctly adds 1 to the label control and a new label is selected and can be used to paint. ### 💡 Expected Behavior The `m` keybinding should increase the label number in all those situations. ### 🌎 Environment napari: 0.5.0a2.dev486+g4d60a7ce Platform: Linux-6.1.64-1-MANJARO-x86_64-with-glibc2.38 System: Manjaro Linux Python: 3.11.6 | packaged by conda-forge | (main, Oct 3 2023, 10:40:35) [GCC 12.3.0] Qt: 5.15.2 PyQt5: 5.15.10 NumPy: 1.26.2 SciPy: 1.11.4 Dask: 2023.11.0 VisPy: 0.14.1 magicgui: 0.8.0 superqt: 0.6.1 in-n-out: 0.1.9 app-model: 0.2.2 npe2: 0.7.3 OpenGL: - GL version: 4.6 (Compatibility Profile) Mesa 23.1.9-manjaro1.1 - MAX_TEXTURE_SIZE: 16384 Screens: - screen 1: resolution 1920x1080, scale 1.0 - screen 2: resolution 1920x1080, scale 1.0 Settings path: - /home/melissa/.config/napari/napari-dev_f5bfbd9c5d96bcb503f816d91f8db95d3b6d554f/settings.yaml Plugins: - napari: 0.5.0a2.dev486+g4d60a7ce (77 contributions) - napari-console: 0.0.9 (0 contributions) - napari-svg: 0.1.10 (2 contributions) ### 💡 Additional Context _No response_ </issue> <code> [start of napari/layers/labels/_labels_key_bindings.py] 1 import numpy as np 2 from app_model.types import KeyCode, KeyMod 3 4 from napari.layers.labels._labels_constants import Mode 5 from napari.layers.labels.labels import Labels 6 from napari.layers.utils.layer_utils import ( 7 register_layer_action, 8 register_layer_attr_action, 9 ) 10 from napari.utils.notifications import show_info 11 from napari.utils.translations import trans 12 13 MIN_BRUSH_SIZE = 1 14 15 16 def register_label_action(description: str, repeatable: bool = False): 17 return register_layer_action(Labels, description, repeatable) 18 19 20 def register_label_mode_action(description): 21 return register_layer_attr_action(Labels, description, 'mode') 22 23 24 @register_label_mode_action(trans._('Transform')) 25 def activate_labels_transform_mode(layer: Labels): 26 layer.mode = Mode.TRANSFORM 27 28 29 @register_label_mode_action(trans._('Pan/zoom')) 30 def activate_labels_pan_zoom_mode(layer: Labels): 31 layer.mode = Mode.PAN_ZOOM 32 33 34 @register_label_mode_action(trans._("Activate the paint brush")) 35 def activate_labels_paint_mode(layer: Labels): 36 layer.mode = Mode.PAINT 37 38 39 @register_label_mode_action(trans._("Activate the polygon tool")) 40 def activate_labels_polygon_mode(layer: Labels): 41 layer.mode = Mode.POLYGON 42 43 44 @register_label_mode_action(trans._("Activate the fill bucket")) 45 def activate_labels_fill_mode(layer: Labels): 46 layer.mode = Mode.FILL 47 48 49 @register_label_mode_action(trans._('Pick mode')) 50 def activate_labels_picker_mode(layer: Labels): 51 """Activate the label picker.""" 52 layer.mode = Mode.PICK 53 54 55 @register_label_mode_action(trans._("Activate the label eraser")) 56 def activate_labels_erase_mode(layer: Labels): 57 layer.mode = Mode.ERASE 58 59 60 labels_fun_to_mode = [ 61 
(activate_labels_pan_zoom_mode, Mode.PAN_ZOOM), 62 (activate_labels_transform_mode, Mode.TRANSFORM), 63 (activate_labels_erase_mode, Mode.ERASE), 64 (activate_labels_paint_mode, Mode.PAINT), 65 (activate_labels_polygon_mode, Mode.POLYGON), 66 (activate_labels_fill_mode, Mode.FILL), 67 (activate_labels_picker_mode, Mode.PICK), 68 ] 69 70 71 @register_label_action( 72 trans._( 73 "Set the currently selected label to the largest used label plus one." 74 ), 75 ) 76 def new_label(layer: Labels): 77 """Set the currently selected label to the largest used label plus one.""" 78 if isinstance(layer.data, np.ndarray): 79 layer.selected_label = np.max(layer.data) + 1 80 else: 81 show_info( 82 "Calculating empty label on non-numpy array is not supported" 83 ) 84 85 86 @register_label_action( 87 trans._("Swap between the selected label and the background label."), 88 ) 89 def swap_selected_and_background_labels(layer: Labels): 90 """Swap between the selected label and the background label.""" 91 layer.swap_selected_and_background_labels() 92 93 94 @register_label_action( 95 trans._("Decrease the currently selected label by one."), 96 ) 97 def decrease_label_id(layer: Labels): 98 layer.selected_label -= 1 99 100 101 @register_label_action( 102 trans._("Increase the currently selected label by one."), 103 ) 104 def increase_label_id(layer: Labels): 105 layer.selected_label += 1 106 107 108 @register_label_action( 109 trans._("Decrease the paint brush size by one."), 110 repeatable=True, 111 ) 112 def decrease_brush_size(layer: Labels): 113 """Decrease the brush size""" 114 if ( 115 layer.brush_size > MIN_BRUSH_SIZE 116 ): # here we should probably add a non-hard-coded 117 # reference to the limit values of brush size? 118 layer.brush_size -= 1 119 120 121 @register_label_action( 122 trans._("Increase the paint brush size by one."), 123 repeatable=True, 124 ) 125 def increase_brush_size(layer: Labels): 126 """Increase the brush size""" 127 layer.brush_size += 1 128 129 130 @register_layer_attr_action( 131 Labels, trans._("Toggle preserve labels"), "preserve_labels" 132 ) 133 def toggle_preserve_labels(layer: Labels): 134 layer.preserve_labels = not layer.preserve_labels 135 136 137 @Labels.bind_key(KeyMod.CtrlCmd | KeyCode.KeyZ, overwrite=True) 138 def undo(layer: Labels): 139 """Undo the last paint or fill action since the view slice has changed.""" 140 layer.undo() 141 142 143 @Labels.bind_key(KeyMod.CtrlCmd | KeyMod.Shift | KeyCode.KeyZ, overwrite=True) 144 def redo(layer: Labels): 145 """Redo any previously undone actions.""" 146 layer.redo() 147 148 149 @register_label_action( 150 trans._("Reset the current polygon"), 151 ) 152 def reset_polygon(layer: Labels): 153 """Reset the drawing of the current polygon.""" 154 layer._overlays["polygon"].points = [] 155 156 157 @register_label_action( 158 trans._("Complete the current polygon"), 159 ) 160 def complete_polygon(layer: Labels): 161 """Complete the drawing of the current polygon.""" 162 # Because layer._overlays has type Overlay, mypy doesn't know that 163 # ._overlays["polygon"] has type LabelsPolygonOverlay, so type ignore for now 164 # TODO: Improve typing of layer._overlays to fix this 165 layer._overlays["polygon"].add_polygon_to_labels(layer) 166 [end of napari/layers/labels/_labels_key_bindings.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/napari/layers/labels/_labels_key_bindings.py b/napari/layers/labels/_labels_key_bindings.py --- a/napari/layers/labels/_labels_key_bindings.py +++ b/napari/layers/labels/_labels_key_bindings.py @@ -76,10 +76,21 @@ def new_label(layer: Labels): """Set the currently selected label to the largest used label plus one.""" if isinstance(layer.data, np.ndarray): - layer.selected_label = np.max(layer.data) + 1 + new_selected_label = np.max(layer.data) + 1 + if layer.selected_label == new_selected_label: + show_info( + trans._( + "Current selected label is not being used. You will need to use it first " + "to be able to set the current select label to the next one available", + ) + ) + else: + layer.selected_label = new_selected_label else: show_info( - "Calculating empty label on non-numpy array is not supported" + trans._( + "Calculating empty label on non-numpy array is not supported" + ) )
{"golden_diff": "diff --git a/napari/layers/labels/_labels_key_bindings.py b/napari/layers/labels/_labels_key_bindings.py\n--- a/napari/layers/labels/_labels_key_bindings.py\n+++ b/napari/layers/labels/_labels_key_bindings.py\n@@ -76,10 +76,21 @@\n def new_label(layer: Labels):\n \"\"\"Set the currently selected label to the largest used label plus one.\"\"\"\n if isinstance(layer.data, np.ndarray):\n- layer.selected_label = np.max(layer.data) + 1\n+ new_selected_label = np.max(layer.data) + 1\n+ if layer.selected_label == new_selected_label:\n+ show_info(\n+ trans._(\n+ \"Current selected label is not being used. You will need to use it first \"\n+ \"to be able to set the current select label to the next one available\",\n+ )\n+ )\n+ else:\n+ layer.selected_label = new_selected_label\n else:\n show_info(\n- \"Calculating empty label on non-numpy array is not supported\"\n+ trans._(\n+ \"Calculating empty label on non-numpy array is not supported\"\n+ )\n )\n", "issue": "Keybinding to set a new label doesn't work consistently\n### \ud83d\udc1b Bug Report\n\nAfter creating a labels layer, I observed that the `m` keybinding does not behave consistently.\r\n\r\n\n\n### \ud83d\udca1 Steps to Reproduce\n\nCreate a labels layer.\r\n\r\n- If the layer is selected, the `m` keybinding will either do nothing or select another layer wiht a name that starts with the letter `m`;\r\n- If the paintbrush, fill bucket or polygon tools are selected, the `m` keybinding doesn't do anything;\r\n- If the paintbrush has been used in the canvas, the `m` keybinding correctly adds 1 to the label control and a new label is selected and can be used to paint.\n\n### \ud83d\udca1 Expected Behavior\n\nThe `m` keybinding should increase the label number in all those situations.\n\n### \ud83c\udf0e Environment\n\nnapari: 0.5.0a2.dev486+g4d60a7ce\r\nPlatform: Linux-6.1.64-1-MANJARO-x86_64-with-glibc2.38\r\nSystem: Manjaro Linux\r\nPython: 3.11.6 | packaged by conda-forge | (main, Oct 3 2023, 10:40:35) [GCC 12.3.0]\r\nQt: 5.15.2\r\nPyQt5: 5.15.10\r\nNumPy: 1.26.2\r\nSciPy: 1.11.4\r\nDask: 2023.11.0\r\nVisPy: 0.14.1\r\nmagicgui: 0.8.0\r\nsuperqt: 0.6.1\r\nin-n-out: 0.1.9\r\napp-model: 0.2.2\r\nnpe2: 0.7.3\r\n\r\nOpenGL:\r\n - GL version: 4.6 (Compatibility Profile) Mesa 23.1.9-manjaro1.1\r\n - MAX_TEXTURE_SIZE: 16384\r\n\r\nScreens:\r\n - screen 1: resolution 1920x1080, scale 1.0\r\n - screen 2: resolution 1920x1080, scale 1.0\r\n\r\nSettings path:\r\n - /home/melissa/.config/napari/napari-dev_f5bfbd9c5d96bcb503f816d91f8db95d3b6d554f/settings.yaml\r\nPlugins:\r\n - napari: 0.5.0a2.dev486+g4d60a7ce (77 contributions)\r\n - napari-console: 0.0.9 (0 contributions)\r\n - napari-svg: 0.1.10 (2 contributions)\n\n### \ud83d\udca1 Additional Context\n\n_No response_\n", "before_files": [{"content": "import numpy as np\nfrom app_model.types import KeyCode, KeyMod\n\nfrom napari.layers.labels._labels_constants import Mode\nfrom napari.layers.labels.labels import Labels\nfrom napari.layers.utils.layer_utils import (\n register_layer_action,\n register_layer_attr_action,\n)\nfrom napari.utils.notifications import show_info\nfrom napari.utils.translations import trans\n\nMIN_BRUSH_SIZE = 1\n\n\ndef register_label_action(description: str, repeatable: bool = False):\n return register_layer_action(Labels, description, repeatable)\n\n\ndef register_label_mode_action(description):\n return register_layer_attr_action(Labels, description, 'mode')\n\n\n@register_label_mode_action(trans._('Transform'))\ndef activate_labels_transform_mode(layer: 
Labels):\n layer.mode = Mode.TRANSFORM\n\n\n@register_label_mode_action(trans._('Pan/zoom'))\ndef activate_labels_pan_zoom_mode(layer: Labels):\n layer.mode = Mode.PAN_ZOOM\n\n\n@register_label_mode_action(trans._(\"Activate the paint brush\"))\ndef activate_labels_paint_mode(layer: Labels):\n layer.mode = Mode.PAINT\n\n\n@register_label_mode_action(trans._(\"Activate the polygon tool\"))\ndef activate_labels_polygon_mode(layer: Labels):\n layer.mode = Mode.POLYGON\n\n\n@register_label_mode_action(trans._(\"Activate the fill bucket\"))\ndef activate_labels_fill_mode(layer: Labels):\n layer.mode = Mode.FILL\n\n\n@register_label_mode_action(trans._('Pick mode'))\ndef activate_labels_picker_mode(layer: Labels):\n \"\"\"Activate the label picker.\"\"\"\n layer.mode = Mode.PICK\n\n\n@register_label_mode_action(trans._(\"Activate the label eraser\"))\ndef activate_labels_erase_mode(layer: Labels):\n layer.mode = Mode.ERASE\n\n\nlabels_fun_to_mode = [\n (activate_labels_pan_zoom_mode, Mode.PAN_ZOOM),\n (activate_labels_transform_mode, Mode.TRANSFORM),\n (activate_labels_erase_mode, Mode.ERASE),\n (activate_labels_paint_mode, Mode.PAINT),\n (activate_labels_polygon_mode, Mode.POLYGON),\n (activate_labels_fill_mode, Mode.FILL),\n (activate_labels_picker_mode, Mode.PICK),\n]\n\n\n@register_label_action(\n trans._(\n \"Set the currently selected label to the largest used label plus one.\"\n ),\n)\ndef new_label(layer: Labels):\n \"\"\"Set the currently selected label to the largest used label plus one.\"\"\"\n if isinstance(layer.data, np.ndarray):\n layer.selected_label = np.max(layer.data) + 1\n else:\n show_info(\n \"Calculating empty label on non-numpy array is not supported\"\n )\n\n\n@register_label_action(\n trans._(\"Swap between the selected label and the background label.\"),\n)\ndef swap_selected_and_background_labels(layer: Labels):\n \"\"\"Swap between the selected label and the background label.\"\"\"\n layer.swap_selected_and_background_labels()\n\n\n@register_label_action(\n trans._(\"Decrease the currently selected label by one.\"),\n)\ndef decrease_label_id(layer: Labels):\n layer.selected_label -= 1\n\n\n@register_label_action(\n trans._(\"Increase the currently selected label by one.\"),\n)\ndef increase_label_id(layer: Labels):\n layer.selected_label += 1\n\n\n@register_label_action(\n trans._(\"Decrease the paint brush size by one.\"),\n repeatable=True,\n)\ndef decrease_brush_size(layer: Labels):\n \"\"\"Decrease the brush size\"\"\"\n if (\n layer.brush_size > MIN_BRUSH_SIZE\n ): # here we should probably add a non-hard-coded\n # reference to the limit values of brush size?\n layer.brush_size -= 1\n\n\n@register_label_action(\n trans._(\"Increase the paint brush size by one.\"),\n repeatable=True,\n)\ndef increase_brush_size(layer: Labels):\n \"\"\"Increase the brush size\"\"\"\n layer.brush_size += 1\n\n\n@register_layer_attr_action(\n Labels, trans._(\"Toggle preserve labels\"), \"preserve_labels\"\n)\ndef toggle_preserve_labels(layer: Labels):\n layer.preserve_labels = not layer.preserve_labels\n\n\[email protected]_key(KeyMod.CtrlCmd | KeyCode.KeyZ, overwrite=True)\ndef undo(layer: Labels):\n \"\"\"Undo the last paint or fill action since the view slice has changed.\"\"\"\n layer.undo()\n\n\[email protected]_key(KeyMod.CtrlCmd | KeyMod.Shift | KeyCode.KeyZ, overwrite=True)\ndef redo(layer: Labels):\n \"\"\"Redo any previously undone actions.\"\"\"\n layer.redo()\n\n\n@register_label_action(\n trans._(\"Reset the current polygon\"),\n)\ndef reset_polygon(layer: Labels):\n 
\"\"\"Reset the drawing of the current polygon.\"\"\"\n layer._overlays[\"polygon\"].points = []\n\n\n@register_label_action(\n trans._(\"Complete the current polygon\"),\n)\ndef complete_polygon(layer: Labels):\n \"\"\"Complete the drawing of the current polygon.\"\"\"\n # Because layer._overlays has type Overlay, mypy doesn't know that\n # ._overlays[\"polygon\"] has type LabelsPolygonOverlay, so type ignore for now\n # TODO: Improve typing of layer._overlays to fix this\n layer._overlays[\"polygon\"].add_polygon_to_labels(layer)\n", "path": "napari/layers/labels/_labels_key_bindings.py"}]}
2,683
262
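The napari row above guards the `m`/new-label action so it only advances once the current label has actually been painted; a small self-contained sketch of that guard (the function name and toy arrays are ours, the max-plus-one logic mirrors the golden diff):

```python
import numpy as np


def next_label(data: np.ndarray, selected_label: int) -> int:
    """Mirror of the guarded new_label logic: only advance past max(data) + 1
    once the currently selected label is actually present in the data."""
    new_selected = int(np.max(data)) + 1
    if selected_label == new_selected:
        # Current label is still unused; keep it instead of skipping ahead.
        return selected_label
    return new_selected


labels = np.zeros((4, 4), dtype=int)
assert next_label(labels, 1) == 1   # nothing painted yet -> stay on 1
labels[0, 0] = 1                    # paint once with label 1
assert next_label(labels, 1) == 2   # now the shortcut advances to 2
```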
gh_patches_debug_3406
rasdani/github-patches
git_diff
vllm-project__vllm-153
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Got OOM when using TP I got this when running OPT-13B on two A100s, with FP16. The error didn't occur when I decreased `gpu_memory_utilization` to 0.9. It seems our memory profiling is somehow inaccurate when using TP. Find the command and error msg below: ``` $ python benchmarks/benchmark_latency.py --model facebook/opt-13b -tp 2 Namespace(model='facebook/opt-13b', tensor_parallel_size=2, input_len=32, output_len=128, batch_size=8, n=1, use_beam_search=False, num_iters=3, profile=False) 2023-06-09 09:17:47,945 INFO worker.py:1625 -- Started a local Ray instance. INFO 06-09 09:17:48 llm_server.py:60] Initializing an LLM server with config: model='facebook/opt-13b', dtype=torch.float16, use_dummy_weights=False, download_dir=None, use_np_weights=False, tensor_parallel_size=2, seed=0) INFO 06-09 09:18:11 llm_server.py:129] # GPU blocks: 4150, # CPU blocks: 655 Traceback (most recent call last): File "/home/gcpuser/workspace/cacheflow/benchmarks/benchmark_latency.py", line 80, in <module> main(args) File "/home/gcpuser/workspace/cacheflow/benchmarks/benchmark_latency.py", line 17, in main llm = LLM( File "/home/gcpuser/workspace/cacheflow/cacheflow/entrypoints/llm.py", line 55, in __init__ self.llm_server = LLMServer.from_server_args(server_args) File "/home/gcpuser/workspace/cacheflow/cacheflow/server/llm_server.py", line 146, in from_server_args server = cls(*server_configs, distributed_init_method, devices, File "/home/gcpuser/workspace/cacheflow/cacheflow/server/llm_server.py", line 103, in __init__ self._init_cache() File "/home/gcpuser/workspace/cacheflow/cacheflow/server/llm_server.py", line 135, in _init_cache self._run_workers("init_cache_engine", cache_config=self.cache_config) File "/home/gcpuser/workspace/cacheflow/cacheflow/server/llm_server.py", line 312, in _run_workers all_outputs = ray.get(all_outputs) File "/opt/conda/envs/dev/lib/python3.9/site-packages/ray/_private/client_mode_hook.py", line 105, in wrapper return func(*args, **kwargs) File "/opt/conda/envs/dev/lib/python3.9/site-packages/ray/_private/worker.py", line 2521, in get raise value.as_instanceof_cause() ray.exceptions.RayTaskError(OutOfMemoryError): ray::Worker.init_cache_engine() (pid=11306, ip=10.128.0.79, repr=<cacheflow.worker.worker.Worker object at 0x7f4aa97596d0>) File "/home/gcpuser/workspace/cacheflow/cacheflow/worker/worker.py", line 127, in init_cache_engine self.cache_engine = CacheEngine( File "/home/gcpuser/workspace/cacheflow/cacheflow/worker/cache_engine.py", line 40, in __init__ self.gpu_cache = self.allocate_gpu_cache() File "/home/gcpuser/workspace/cacheflow/cacheflow/worker/cache_engine.py", line 76, in allocate_gpu_cache value_blocks = torch.empty( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 326.00 MiB (GPU 0; 39.41 GiB total capacity; 37.08 GiB already allocated; 188.56 MiB free; 37.25 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. 
See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF ``` </issue> <code> [start of vllm/engine/arg_utils.py] 1 import argparse 2 import dataclasses 3 from dataclasses import dataclass 4 from typing import Optional, Tuple 5 6 from vllm.config import (CacheConfig, ModelConfig, ParallelConfig, 7 SchedulerConfig) 8 9 10 @dataclass 11 class EngineArgs: 12 """Arguments for vLLM engine.""" 13 model: str 14 download_dir: Optional[str] = None 15 use_np_weights: bool = False 16 use_dummy_weights: bool = False 17 dtype: str = "auto" 18 seed: int = 0 19 worker_use_ray: bool = False 20 pipeline_parallel_size: int = 1 21 tensor_parallel_size: int = 1 22 block_size: int = 16 23 swap_space: int = 4 # GiB 24 gpu_memory_utilization: float = 0.95 25 max_num_batched_tokens: int = 2560 26 max_num_seqs: int = 256 27 disable_log_stats: bool = False 28 29 def __post_init__(self): 30 self.max_num_seqs = min(self.max_num_seqs, self.max_num_batched_tokens) 31 32 @staticmethod 33 def add_cli_args( 34 parser: argparse.ArgumentParser, 35 ) -> argparse.ArgumentParser: 36 """Shared CLI arguments for vLLM engine.""" 37 # Model arguments 38 parser.add_argument('--model', type=str, default='facebook/opt-125m', 39 help='name or path of the huggingface model to use') 40 parser.add_argument('--download-dir', type=str, 41 default=EngineArgs.download_dir, 42 help='directory to download and load the weights, ' 43 'default to the default cache dir of ' 44 'huggingface') 45 parser.add_argument('--use-np-weights', action='store_true', 46 help='save a numpy copy of model weights for ' 47 'faster loading. This can increase the disk ' 48 'usage by up to 2x.') 49 parser.add_argument('--use-dummy-weights', action='store_true', 50 help='use dummy values for model weights') 51 # TODO(woosuk): Support FP32. 52 parser.add_argument('--dtype', type=str, default=EngineArgs.dtype, 53 choices=['auto', 'half', 'bfloat16', 'float'], 54 help='data type for model weights and activations. ' 55 'The "auto" option will use FP16 precision ' 56 'for FP32 and FP16 models, and BF16 precision ' 57 'for BF16 models.') 58 # Parallel arguments 59 parser.add_argument('--worker-use-ray', action='store_true', 60 help='use Ray for distributed serving, will be ' 61 'automatically set when using more than 1 GPU') 62 parser.add_argument('--pipeline-parallel-size', '-pp', type=int, 63 default=EngineArgs.pipeline_parallel_size, 64 help='number of pipeline stages') 65 parser.add_argument('--tensor-parallel-size', '-tp', type=int, 66 default=EngineArgs.tensor_parallel_size, 67 help='number of tensor parallel replicas') 68 # KV cache arguments 69 parser.add_argument('--block-size', type=int, 70 default=EngineArgs.block_size, 71 choices=[8, 16, 32], 72 help='token block size') 73 # TODO(woosuk): Support fine-grained seeds (e.g., seed per request). 
74 parser.add_argument('--seed', type=int, default=EngineArgs.seed, 75 help='random seed') 76 parser.add_argument('--swap-space', type=int, 77 default=EngineArgs.swap_space, 78 help='CPU swap space size (GiB) per GPU') 79 parser.add_argument('--gpu-memory-utilization', type=float, 80 default=EngineArgs.gpu_memory_utilization, 81 help='the percentage of GPU memory to be used for' 82 'the model executor') 83 parser.add_argument('--max-num-batched-tokens', type=int, 84 default=EngineArgs.max_num_batched_tokens, 85 help='maximum number of batched tokens per ' 86 'iteration') 87 parser.add_argument('--max-num-seqs', type=int, 88 default=EngineArgs.max_num_seqs, 89 help='maximum number of sequences per iteration') 90 parser.add_argument('--disable-log-stats', action='store_true', 91 help='disable logging statistics') 92 return parser 93 94 @classmethod 95 def from_cli_args(cls, args: argparse.Namespace) -> "EngineArgs": 96 # Get the list of attributes of this dataclass. 97 attrs = [attr.name for attr in dataclasses.fields(cls)] 98 # Set the attributes from the parsed arguments. 99 engine_args = cls(**{attr: getattr(args, attr) for attr in attrs}) 100 return engine_args 101 102 def create_engine_configs( 103 self, 104 ) -> Tuple[ModelConfig, CacheConfig, ParallelConfig, SchedulerConfig]: 105 # Initialize the configs. 106 model_config = ModelConfig( 107 self.model, self.download_dir, self.use_np_weights, 108 self.use_dummy_weights, self.dtype, self.seed) 109 cache_config = CacheConfig(self.block_size, self.gpu_memory_utilization, 110 self.swap_space) 111 parallel_config = ParallelConfig(self.pipeline_parallel_size, 112 self.tensor_parallel_size, 113 self.worker_use_ray) 114 scheduler_config = SchedulerConfig(self.max_num_batched_tokens, 115 self.max_num_seqs) 116 return model_config, cache_config, parallel_config, scheduler_config 117 118 119 @dataclass 120 class AsyncEngineArgs(EngineArgs): 121 """Arguments for asynchronous vLLM engine.""" 122 engine_use_ray: bool = False 123 disable_log_requests: bool = False 124 125 @staticmethod 126 def add_cli_args( 127 parser: argparse.ArgumentParser, 128 ) -> argparse.ArgumentParser: 129 parser = EngineArgs.add_cli_args(parser) 130 parser.add_argument('--engine-use-ray', action='store_true', 131 help='use Ray to start the LLM engine in a ' 132 'separate process as the server process.') 133 parser.add_argument('--disable-log-requests', action='store_true', 134 help='disable logging requests') 135 return parser 136 [end of vllm/engine/arg_utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -21,7 +21,7 @@ tensor_parallel_size: int = 1 block_size: int = 16 swap_space: int = 4 # GiB - gpu_memory_utilization: float = 0.95 + gpu_memory_utilization: float = 0.90 max_num_batched_tokens: int = 2560 max_num_seqs: int = 256 disable_log_stats: bool = False
{"golden_diff": "diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py\n--- a/vllm/engine/arg_utils.py\n+++ b/vllm/engine/arg_utils.py\n@@ -21,7 +21,7 @@\n tensor_parallel_size: int = 1\n block_size: int = 16\n swap_space: int = 4 # GiB\n- gpu_memory_utilization: float = 0.95\n+ gpu_memory_utilization: float = 0.90\n max_num_batched_tokens: int = 2560\n max_num_seqs: int = 256\n disable_log_stats: bool = False\n", "issue": "Got OOM when using TP\nI got this when running OPT-13B on two A100s, with FP16. The error didn't occur when I decreased `gpu_memory_utilization` to 0.9. It seems our memory profiling is somehow inaccurate when using TP.\r\n\r\nFind the command and error msg below:\r\n```\r\n$ python benchmarks/benchmark_latency.py --model facebook/opt-13b -tp 2\r\nNamespace(model='facebook/opt-13b', tensor_parallel_size=2, input_len=32, output_len=128, batch_size=8, n=1, use_beam_search=False, num_iters=3, profile=False)\r\n2023-06-09 09:17:47,945 INFO worker.py:1625 -- Started a local Ray instance.\r\nINFO 06-09 09:17:48 llm_server.py:60] Initializing an LLM server with config: model='facebook/opt-13b', dtype=torch.float16, use_dummy_weights=False, download_dir=None, use_np_weights=False, tensor_parallel_size=2, seed=0)\r\nINFO 06-09 09:18:11 llm_server.py:129] # GPU blocks: 4150, # CPU blocks: 655\r\nTraceback (most recent call last):\r\n File \"/home/gcpuser/workspace/cacheflow/benchmarks/benchmark_latency.py\", line 80, in <module>\r\n main(args)\r\n File \"/home/gcpuser/workspace/cacheflow/benchmarks/benchmark_latency.py\", line 17, in main\r\n llm = LLM(\r\n File \"/home/gcpuser/workspace/cacheflow/cacheflow/entrypoints/llm.py\", line 55, in __init__\r\n self.llm_server = LLMServer.from_server_args(server_args)\r\n File \"/home/gcpuser/workspace/cacheflow/cacheflow/server/llm_server.py\", line 146, in from_server_args\r\n server = cls(*server_configs, distributed_init_method, devices,\r\n File \"/home/gcpuser/workspace/cacheflow/cacheflow/server/llm_server.py\", line 103, in __init__\r\n self._init_cache()\r\n File \"/home/gcpuser/workspace/cacheflow/cacheflow/server/llm_server.py\", line 135, in _init_cache\r\n self._run_workers(\"init_cache_engine\", cache_config=self.cache_config)\r\n File \"/home/gcpuser/workspace/cacheflow/cacheflow/server/llm_server.py\", line 312, in _run_workers\r\n all_outputs = ray.get(all_outputs)\r\n File \"/opt/conda/envs/dev/lib/python3.9/site-packages/ray/_private/client_mode_hook.py\", line 105, in wrapper\r\n return func(*args, **kwargs)\r\n File \"/opt/conda/envs/dev/lib/python3.9/site-packages/ray/_private/worker.py\", line 2521, in get\r\n raise value.as_instanceof_cause()\r\nray.exceptions.RayTaskError(OutOfMemoryError): ray::Worker.init_cache_engine() (pid=11306, ip=10.128.0.79, repr=<cacheflow.worker.worker.Worker object at 0x7f4aa97596d0>)\r\n File \"/home/gcpuser/workspace/cacheflow/cacheflow/worker/worker.py\", line 127, in init_cache_engine\r\n self.cache_engine = CacheEngine(\r\n File \"/home/gcpuser/workspace/cacheflow/cacheflow/worker/cache_engine.py\", line 40, in __init__\r\n self.gpu_cache = self.allocate_gpu_cache()\r\n File \"/home/gcpuser/workspace/cacheflow/cacheflow/worker/cache_engine.py\", line 76, in allocate_gpu_cache\r\n value_blocks = torch.empty(\r\ntorch.cuda.OutOfMemoryError: CUDA out of memory. 
Tried to allocate 326.00 MiB (GPU 0; 39.41 GiB total capacity; 37.08 GiB already allocated; 188.56 MiB free; 37.25 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\r\n```\r\n\r\n\n", "before_files": [{"content": "import argparse\nimport dataclasses\nfrom dataclasses import dataclass\nfrom typing import Optional, Tuple\n\nfrom vllm.config import (CacheConfig, ModelConfig, ParallelConfig,\n SchedulerConfig)\n\n\n@dataclass\nclass EngineArgs:\n \"\"\"Arguments for vLLM engine.\"\"\"\n model: str\n download_dir: Optional[str] = None\n use_np_weights: bool = False\n use_dummy_weights: bool = False\n dtype: str = \"auto\"\n seed: int = 0\n worker_use_ray: bool = False\n pipeline_parallel_size: int = 1\n tensor_parallel_size: int = 1\n block_size: int = 16\n swap_space: int = 4 # GiB\n gpu_memory_utilization: float = 0.95\n max_num_batched_tokens: int = 2560\n max_num_seqs: int = 256\n disable_log_stats: bool = False\n\n def __post_init__(self):\n self.max_num_seqs = min(self.max_num_seqs, self.max_num_batched_tokens)\n\n @staticmethod\n def add_cli_args(\n parser: argparse.ArgumentParser,\n ) -> argparse.ArgumentParser:\n \"\"\"Shared CLI arguments for vLLM engine.\"\"\"\n # Model arguments\n parser.add_argument('--model', type=str, default='facebook/opt-125m',\n help='name or path of the huggingface model to use')\n parser.add_argument('--download-dir', type=str,\n default=EngineArgs.download_dir,\n help='directory to download and load the weights, '\n 'default to the default cache dir of '\n 'huggingface')\n parser.add_argument('--use-np-weights', action='store_true',\n help='save a numpy copy of model weights for '\n 'faster loading. This can increase the disk '\n 'usage by up to 2x.')\n parser.add_argument('--use-dummy-weights', action='store_true',\n help='use dummy values for model weights')\n # TODO(woosuk): Support FP32.\n parser.add_argument('--dtype', type=str, default=EngineArgs.dtype,\n choices=['auto', 'half', 'bfloat16', 'float'],\n help='data type for model weights and activations. 
'\n 'The \"auto\" option will use FP16 precision '\n 'for FP32 and FP16 models, and BF16 precision '\n 'for BF16 models.')\n # Parallel arguments\n parser.add_argument('--worker-use-ray', action='store_true',\n help='use Ray for distributed serving, will be '\n 'automatically set when using more than 1 GPU')\n parser.add_argument('--pipeline-parallel-size', '-pp', type=int,\n default=EngineArgs.pipeline_parallel_size,\n help='number of pipeline stages')\n parser.add_argument('--tensor-parallel-size', '-tp', type=int,\n default=EngineArgs.tensor_parallel_size,\n help='number of tensor parallel replicas')\n # KV cache arguments\n parser.add_argument('--block-size', type=int,\n default=EngineArgs.block_size,\n choices=[8, 16, 32],\n help='token block size')\n # TODO(woosuk): Support fine-grained seeds (e.g., seed per request).\n parser.add_argument('--seed', type=int, default=EngineArgs.seed,\n help='random seed')\n parser.add_argument('--swap-space', type=int,\n default=EngineArgs.swap_space,\n help='CPU swap space size (GiB) per GPU')\n parser.add_argument('--gpu-memory-utilization', type=float,\n default=EngineArgs.gpu_memory_utilization,\n help='the percentage of GPU memory to be used for'\n 'the model executor')\n parser.add_argument('--max-num-batched-tokens', type=int,\n default=EngineArgs.max_num_batched_tokens,\n help='maximum number of batched tokens per '\n 'iteration')\n parser.add_argument('--max-num-seqs', type=int,\n default=EngineArgs.max_num_seqs,\n help='maximum number of sequences per iteration')\n parser.add_argument('--disable-log-stats', action='store_true',\n help='disable logging statistics')\n return parser\n\n @classmethod\n def from_cli_args(cls, args: argparse.Namespace) -> \"EngineArgs\":\n # Get the list of attributes of this dataclass.\n attrs = [attr.name for attr in dataclasses.fields(cls)]\n # Set the attributes from the parsed arguments.\n engine_args = cls(**{attr: getattr(args, attr) for attr in attrs})\n return engine_args\n\n def create_engine_configs(\n self,\n ) -> Tuple[ModelConfig, CacheConfig, ParallelConfig, SchedulerConfig]:\n # Initialize the configs.\n model_config = ModelConfig(\n self.model, self.download_dir, self.use_np_weights,\n self.use_dummy_weights, self.dtype, self.seed)\n cache_config = CacheConfig(self.block_size, self.gpu_memory_utilization,\n self.swap_space)\n parallel_config = ParallelConfig(self.pipeline_parallel_size,\n self.tensor_parallel_size,\n self.worker_use_ray)\n scheduler_config = SchedulerConfig(self.max_num_batched_tokens,\n self.max_num_seqs)\n return model_config, cache_config, parallel_config, scheduler_config\n\n\n@dataclass\nclass AsyncEngineArgs(EngineArgs):\n \"\"\"Arguments for asynchronous vLLM engine.\"\"\"\n engine_use_ray: bool = False\n disable_log_requests: bool = False\n\n @staticmethod\n def add_cli_args(\n parser: argparse.ArgumentParser,\n ) -> argparse.ArgumentParser:\n parser = EngineArgs.add_cli_args(parser)\n parser.add_argument('--engine-use-ray', action='store_true',\n help='use Ray to start the LLM engine in a '\n 'separate process as the server process.')\n parser.add_argument('--disable-log-requests', action='store_true',\n help='disable logging requests')\n return parser\n", "path": "vllm/engine/arg_utils.py"}]}
3,100
155
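The vLLM row above resolves the tensor-parallel OOM by lowering the default `gpu_memory_utilization` from 0.95 to 0.90; as a rough illustration only (the capacity figure is taken from the traceback in the record, and the reading that the freed slice absorbs allocations the profiler misses under TP is our assumption, not something the diff states):

```python
# Back-of-envelope headroom comparison for a 39.41 GiB A100 (value from the
# traceback above).  Lowering the utilization target frees roughly 2 GiB/GPU.
total_gib = 39.41
for util in (0.95, 0.90):
    budget = total_gib * util
    print(f"utilization={util:.2f} -> budget {budget:.2f} GiB, "
          f"headroom {total_gib - budget:.2f} GiB")
```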
gh_patches_debug_16106
rasdani/github-patches
git_diff
python-pillow__Pillow-3279
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> PySide2 The PySide2 (Qt for Python)[1] project developed by qt is now available even in pypi[2], I have always changed ImageQt.py manually, but I think it's time to add such changing to the official source [1] https://wiki.qt.io/Qt_for_Python [2] https://pypi.org/project/PySide2 </issue> <code> [start of src/PIL/ImageQt.py] 1 # 2 # The Python Imaging Library. 3 # $Id$ 4 # 5 # a simple Qt image interface. 6 # 7 # history: 8 # 2006-06-03 fl: created 9 # 2006-06-04 fl: inherit from QImage instead of wrapping it 10 # 2006-06-05 fl: removed toimage helper; move string support to ImageQt 11 # 2013-11-13 fl: add support for Qt5 ([email protected]) 12 # 13 # Copyright (c) 2006 by Secret Labs AB 14 # Copyright (c) 2006 by Fredrik Lundh 15 # 16 # See the README file for information on usage and redistribution. 17 # 18 19 from . import Image 20 from ._util import isPath, py3 21 from io import BytesIO 22 import sys 23 24 qt_versions = [ 25 ['5', 'PyQt5'], 26 ['4', 'PyQt4'], 27 ['side', 'PySide'] 28 ] 29 # If a version has already been imported, attempt it first 30 qt_versions.sort(key=lambda qt_version: qt_version[1] in sys.modules, reverse=True) 31 for qt_version, qt_module in qt_versions: 32 try: 33 if qt_module == 'PyQt5': 34 from PyQt5.QtGui import QImage, qRgba, QPixmap 35 from PyQt5.QtCore import QBuffer, QIODevice 36 elif qt_module == 'PyQt4': 37 from PyQt4.QtGui import QImage, qRgba, QPixmap 38 from PyQt4.QtCore import QBuffer, QIODevice 39 elif qt_module == 'PySide': 40 from PySide.QtGui import QImage, qRgba, QPixmap 41 from PySide.QtCore import QBuffer, QIODevice 42 except (ImportError, RuntimeError): 43 continue 44 qt_is_installed = True 45 break 46 else: 47 qt_is_installed = False 48 qt_version = None 49 50 51 def rgb(r, g, b, a=255): 52 """(Internal) Turns an RGB color into a Qt compatible color integer.""" 53 # use qRgb to pack the colors, and then turn the resulting long 54 # into a negative integer with the same bitpattern. 55 return (qRgba(r, g, b, a) & 0xffffffff) 56 57 58 def fromqimage(im): 59 """ 60 :param im: A PIL Image object, or a file name 61 (given either as Python string or a PyQt string object) 62 """ 63 buffer = QBuffer() 64 buffer.open(QIODevice.ReadWrite) 65 # preserve alha channel with png 66 # otherwise ppm is more friendly with Image.open 67 if im.hasAlphaChannel(): 68 im.save(buffer, 'png') 69 else: 70 im.save(buffer, 'ppm') 71 72 b = BytesIO() 73 try: 74 b.write(buffer.data()) 75 except TypeError: 76 # workaround for Python 2 77 b.write(str(buffer.data())) 78 buffer.close() 79 b.seek(0) 80 81 return Image.open(b) 82 83 84 def fromqpixmap(im): 85 return fromqimage(im) 86 # buffer = QBuffer() 87 # buffer.open(QIODevice.ReadWrite) 88 # # im.save(buffer) 89 # # What if png doesn't support some image features like animation? 
90 # im.save(buffer, 'ppm') 91 # bytes_io = BytesIO() 92 # bytes_io.write(buffer.data()) 93 # buffer.close() 94 # bytes_io.seek(0) 95 # return Image.open(bytes_io) 96 97 98 def align8to32(bytes, width, mode): 99 """ 100 converts each scanline of data from 8 bit to 32 bit aligned 101 """ 102 103 bits_per_pixel = { 104 '1': 1, 105 'L': 8, 106 'P': 8, 107 }[mode] 108 109 # calculate bytes per line and the extra padding if needed 110 bits_per_line = bits_per_pixel * width 111 full_bytes_per_line, remaining_bits_per_line = divmod(bits_per_line, 8) 112 bytes_per_line = full_bytes_per_line + (1 if remaining_bits_per_line else 0) 113 114 extra_padding = -bytes_per_line % 4 115 116 # already 32 bit aligned by luck 117 if not extra_padding: 118 return bytes 119 120 new_data = [] 121 for i in range(len(bytes) // bytes_per_line): 122 new_data.append(bytes[i*bytes_per_line:(i+1)*bytes_per_line] 123 + b'\x00' * extra_padding) 124 125 return b''.join(new_data) 126 127 128 def _toqclass_helper(im): 129 data = None 130 colortable = None 131 132 # handle filename, if given instead of image name 133 if hasattr(im, "toUtf8"): 134 # FIXME - is this really the best way to do this? 135 if py3: 136 im = str(im.toUtf8(), "utf-8") 137 else: 138 im = unicode(im.toUtf8(), "utf-8") 139 if isPath(im): 140 im = Image.open(im) 141 142 if im.mode == "1": 143 format = QImage.Format_Mono 144 elif im.mode == "L": 145 format = QImage.Format_Indexed8 146 colortable = [] 147 for i in range(256): 148 colortable.append(rgb(i, i, i)) 149 elif im.mode == "P": 150 format = QImage.Format_Indexed8 151 colortable = [] 152 palette = im.getpalette() 153 for i in range(0, len(palette), 3): 154 colortable.append(rgb(*palette[i:i+3])) 155 elif im.mode == "RGB": 156 data = im.tobytes("raw", "BGRX") 157 format = QImage.Format_RGB32 158 elif im.mode == "RGBA": 159 try: 160 data = im.tobytes("raw", "BGRA") 161 except SystemError: 162 # workaround for earlier versions 163 r, g, b, a = im.split() 164 im = Image.merge("RGBA", (b, g, r, a)) 165 format = QImage.Format_ARGB32 166 else: 167 raise ValueError("unsupported image mode %r" % im.mode) 168 169 __data = data or align8to32(im.tobytes(), im.size[0], im.mode) 170 return { 171 'data': __data, 'im': im, 'format': format, 'colortable': colortable 172 } 173 174 175 if qt_is_installed: 176 class ImageQt(QImage): 177 178 def __init__(self, im): 179 """ 180 An PIL image wrapper for Qt. This is a subclass of PyQt's QImage 181 class. 182 183 :param im: A PIL Image object, or a file name (given either as Python 184 string or a PyQt string object). 185 """ 186 im_data = _toqclass_helper(im) 187 # must keep a reference, or Qt will crash! 188 # All QImage constructors that take data operate on an existing 189 # buffer, so this buffer has to hang on for the life of the image. 190 # Fixes https://github.com/python-pillow/Pillow/issues/1370 191 self.__data = im_data['data'] 192 QImage.__init__(self, 193 self.__data, im_data['im'].size[0], 194 im_data['im'].size[1], im_data['format']) 195 if im_data['colortable']: 196 self.setColorTable(im_data['colortable']) 197 198 199 def toqimage(im): 200 return ImageQt(im) 201 202 203 def toqpixmap(im): 204 # # This doesn't work. For now using a dumb approach. 
205 # im_data = _toqclass_helper(im) 206 # result = QPixmap(im_data['im'].size[0], im_data['im'].size[1]) 207 # result.loadFromData(im_data['data']) 208 # Fix some strange bug that causes 209 if im.mode == 'RGB': 210 im = im.convert('RGBA') 211 212 qimage = toqimage(im) 213 return QPixmap.fromImage(qimage) 214 [end of src/PIL/ImageQt.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/PIL/ImageQt.py b/src/PIL/ImageQt.py --- a/src/PIL/ImageQt.py +++ b/src/PIL/ImageQt.py @@ -23,6 +23,7 @@ qt_versions = [ ['5', 'PyQt5'], + ['side2', 'PySide2'], ['4', 'PyQt4'], ['side', 'PySide'] ] @@ -33,6 +34,9 @@ if qt_module == 'PyQt5': from PyQt5.QtGui import QImage, qRgba, QPixmap from PyQt5.QtCore import QBuffer, QIODevice + elif qt_module == 'PySide2': + from PySide2.QtGui import QImage, qRgba, QPixmap + from PySide2.QtCore import QBuffer, QIODevice elif qt_module == 'PyQt4': from PyQt4.QtGui import QImage, qRgba, QPixmap from PyQt4.QtCore import QBuffer, QIODevice
{"golden_diff": "diff --git a/src/PIL/ImageQt.py b/src/PIL/ImageQt.py\n--- a/src/PIL/ImageQt.py\n+++ b/src/PIL/ImageQt.py\n@@ -23,6 +23,7 @@\n \n qt_versions = [\n ['5', 'PyQt5'],\n+ ['side2', 'PySide2'],\n ['4', 'PyQt4'],\n ['side', 'PySide']\n ]\n@@ -33,6 +34,9 @@\n if qt_module == 'PyQt5':\n from PyQt5.QtGui import QImage, qRgba, QPixmap\n from PyQt5.QtCore import QBuffer, QIODevice\n+ elif qt_module == 'PySide2':\n+ from PySide2.QtGui import QImage, qRgba, QPixmap\n+ from PySide2.QtCore import QBuffer, QIODevice\n elif qt_module == 'PyQt4':\n from PyQt4.QtGui import QImage, qRgba, QPixmap\n from PyQt4.QtCore import QBuffer, QIODevice\n", "issue": "PySide2\nThe PySide2 (Qt for Python)[1] project developed by qt is now available even in pypi[2], I have always changed ImageQt.py manually, but I think it's time to add such changing to the official source\r\n\r\n[1] https://wiki.qt.io/Qt_for_Python\r\n[2] https://pypi.org/project/PySide2\n", "before_files": [{"content": "#\n# The Python Imaging Library.\n# $Id$\n#\n# a simple Qt image interface.\n#\n# history:\n# 2006-06-03 fl: created\n# 2006-06-04 fl: inherit from QImage instead of wrapping it\n# 2006-06-05 fl: removed toimage helper; move string support to ImageQt\n# 2013-11-13 fl: add support for Qt5 ([email protected])\n#\n# Copyright (c) 2006 by Secret Labs AB\n# Copyright (c) 2006 by Fredrik Lundh\n#\n# See the README file for information on usage and redistribution.\n#\n\nfrom . import Image\nfrom ._util import isPath, py3\nfrom io import BytesIO\nimport sys\n\nqt_versions = [\n ['5', 'PyQt5'],\n ['4', 'PyQt4'],\n ['side', 'PySide']\n]\n# If a version has already been imported, attempt it first\nqt_versions.sort(key=lambda qt_version: qt_version[1] in sys.modules, reverse=True)\nfor qt_version, qt_module in qt_versions:\n try:\n if qt_module == 'PyQt5':\n from PyQt5.QtGui import QImage, qRgba, QPixmap\n from PyQt5.QtCore import QBuffer, QIODevice\n elif qt_module == 'PyQt4':\n from PyQt4.QtGui import QImage, qRgba, QPixmap\n from PyQt4.QtCore import QBuffer, QIODevice\n elif qt_module == 'PySide':\n from PySide.QtGui import QImage, qRgba, QPixmap\n from PySide.QtCore import QBuffer, QIODevice\n except (ImportError, RuntimeError):\n continue\n qt_is_installed = True\n break\nelse:\n qt_is_installed = False\n qt_version = None\n\n\ndef rgb(r, g, b, a=255):\n \"\"\"(Internal) Turns an RGB color into a Qt compatible color integer.\"\"\"\n # use qRgb to pack the colors, and then turn the resulting long\n # into a negative integer with the same bitpattern.\n return (qRgba(r, g, b, a) & 0xffffffff)\n\n\ndef fromqimage(im):\n \"\"\"\n :param im: A PIL Image object, or a file name\n (given either as Python string or a PyQt string object)\n \"\"\"\n buffer = QBuffer()\n buffer.open(QIODevice.ReadWrite)\n # preserve alha channel with png\n # otherwise ppm is more friendly with Image.open\n if im.hasAlphaChannel():\n im.save(buffer, 'png')\n else:\n im.save(buffer, 'ppm')\n\n b = BytesIO()\n try:\n b.write(buffer.data())\n except TypeError:\n # workaround for Python 2\n b.write(str(buffer.data()))\n buffer.close()\n b.seek(0)\n\n return Image.open(b)\n\n\ndef fromqpixmap(im):\n return fromqimage(im)\n # buffer = QBuffer()\n # buffer.open(QIODevice.ReadWrite)\n # # im.save(buffer)\n # # What if png doesn't support some image features like animation?\n # im.save(buffer, 'ppm')\n # bytes_io = BytesIO()\n # bytes_io.write(buffer.data())\n # buffer.close()\n # bytes_io.seek(0)\n # return Image.open(bytes_io)\n\n\ndef align8to32(bytes, width, 
mode):\n \"\"\"\n converts each scanline of data from 8 bit to 32 bit aligned\n \"\"\"\n\n bits_per_pixel = {\n '1': 1,\n 'L': 8,\n 'P': 8,\n }[mode]\n\n # calculate bytes per line and the extra padding if needed\n bits_per_line = bits_per_pixel * width\n full_bytes_per_line, remaining_bits_per_line = divmod(bits_per_line, 8)\n bytes_per_line = full_bytes_per_line + (1 if remaining_bits_per_line else 0)\n\n extra_padding = -bytes_per_line % 4\n\n # already 32 bit aligned by luck\n if not extra_padding:\n return bytes\n\n new_data = []\n for i in range(len(bytes) // bytes_per_line):\n new_data.append(bytes[i*bytes_per_line:(i+1)*bytes_per_line]\n + b'\\x00' * extra_padding)\n\n return b''.join(new_data)\n\n\ndef _toqclass_helper(im):\n data = None\n colortable = None\n\n # handle filename, if given instead of image name\n if hasattr(im, \"toUtf8\"):\n # FIXME - is this really the best way to do this?\n if py3:\n im = str(im.toUtf8(), \"utf-8\")\n else:\n im = unicode(im.toUtf8(), \"utf-8\")\n if isPath(im):\n im = Image.open(im)\n\n if im.mode == \"1\":\n format = QImage.Format_Mono\n elif im.mode == \"L\":\n format = QImage.Format_Indexed8\n colortable = []\n for i in range(256):\n colortable.append(rgb(i, i, i))\n elif im.mode == \"P\":\n format = QImage.Format_Indexed8\n colortable = []\n palette = im.getpalette()\n for i in range(0, len(palette), 3):\n colortable.append(rgb(*palette[i:i+3]))\n elif im.mode == \"RGB\":\n data = im.tobytes(\"raw\", \"BGRX\")\n format = QImage.Format_RGB32\n elif im.mode == \"RGBA\":\n try:\n data = im.tobytes(\"raw\", \"BGRA\")\n except SystemError:\n # workaround for earlier versions\n r, g, b, a = im.split()\n im = Image.merge(\"RGBA\", (b, g, r, a))\n format = QImage.Format_ARGB32\n else:\n raise ValueError(\"unsupported image mode %r\" % im.mode)\n\n __data = data or align8to32(im.tobytes(), im.size[0], im.mode)\n return {\n 'data': __data, 'im': im, 'format': format, 'colortable': colortable\n }\n\n\nif qt_is_installed:\n class ImageQt(QImage):\n\n def __init__(self, im):\n \"\"\"\n An PIL image wrapper for Qt. This is a subclass of PyQt's QImage\n class.\n\n :param im: A PIL Image object, or a file name (given either as Python\n string or a PyQt string object).\n \"\"\"\n im_data = _toqclass_helper(im)\n # must keep a reference, or Qt will crash!\n # All QImage constructors that take data operate on an existing\n # buffer, so this buffer has to hang on for the life of the image.\n # Fixes https://github.com/python-pillow/Pillow/issues/1370\n self.__data = im_data['data']\n QImage.__init__(self,\n self.__data, im_data['im'].size[0],\n im_data['im'].size[1], im_data['format'])\n if im_data['colortable']:\n self.setColorTable(im_data['colortable'])\n\n\ndef toqimage(im):\n return ImageQt(im)\n\n\ndef toqpixmap(im):\n # # This doesn't work. For now using a dumb approach.\n # im_data = _toqclass_helper(im)\n # result = QPixmap(im_data['im'].size[0], im_data['im'].size[1])\n # result.loadFromData(im_data['data'])\n # Fix some strange bug that causes\n if im.mode == 'RGB':\n im = im.convert('RGBA')\n\n qimage = toqimage(im)\n return QPixmap.fromImage(qimage)\n", "path": "src/PIL/ImageQt.py"}]}
2,882
222
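The Pillow row above extends `ImageQt`'s binding probe with PySide2; the probing pattern itself, pulled out into a standalone hedged sketch (the function name and final print are illustrative, while the candidate list, the already-imported preference, and the caught exceptions mirror the patched module):

```python
import sys

# Candidate Qt bindings in preference order, as in the patched qt_versions list.
QT_VERSIONS = [
    ['5', 'PyQt5'],
    ['side2', 'PySide2'],
    ['4', 'PyQt4'],
    ['side', 'PySide'],
]


def probe_qt_binding():
    """Return (version_tag, module_name) for the first importable binding,
    preferring one the application has already imported."""
    candidates = sorted(QT_VERSIONS,
                        key=lambda qt: qt[1] in sys.modules, reverse=True)
    for tag, module in candidates:
        try:
            __import__(module)
        except (ImportError, RuntimeError):
            continue
        return tag, module
    return None, None


print(probe_qt_binding())
```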
gh_patches_debug_1801
rasdani/github-patches
git_diff
mit-ll-responsible-ai__hydra-zen-355
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `validates_with_beartype` considers `Partial` as `NoneType` Hi @rsokl. I was having a blast using this fascinating library. But It seems when used with `hydra_zen.third_party.validates_with_beartype`, it casts `hydra_zen.typing.Partial` as `NoneType`. ```python from hydra_zen.typing import Partial from hydra_zen.third_party.beartype import validates_with_beartype def f(x: Partial[list]): return x val_f = validates_with_beartype(f) val_f(3) ``` It raises the following error. Can you take a look? ```bash beartype.roar.BeartypeCallHintParamViolation: @beartyped __main__.f() parameter x=3 violates type hint None, as int 3 not instance of <class "builtins.NoneType">. ``` </issue> <code> [start of src/hydra_zen/typing/_implementations.py] 1 # Copyright (c) 2022 Massachusetts Institute of Technology 2 # SPDX-License-Identifier: MIT 3 4 # pyright: strict 5 6 import sys 7 import types 8 from enum import Enum 9 from pathlib import Path, PosixPath, WindowsPath 10 from typing import ( 11 TYPE_CHECKING, 12 Any, 13 ByteString, 14 Callable, 15 ClassVar, 16 Dict, 17 FrozenSet, 18 Generic, 19 List, 20 Mapping, 21 NewType, 22 Optional, 23 Sequence, 24 Set, 25 Tuple, 26 Type, 27 TypeVar, 28 Union, 29 ) 30 31 from omegaconf import DictConfig, ListConfig 32 from typing_extensions import ( 33 Final, 34 Literal, 35 ParamSpec, 36 Protocol, 37 Self, 38 TypeAlias, 39 TypedDict, 40 runtime_checkable, 41 ) 42 43 __all__ = [ 44 "Just", 45 "Builds", 46 "PartialBuilds", 47 "Partial", 48 "Importable", 49 "SupportedPrimitive", 50 "ZenWrappers", 51 "ZenPartialBuilds", 52 "HydraPartialBuilds", 53 "ZenConvert", 54 ] 55 56 P = ParamSpec("P") 57 R = TypeVar("R") 58 59 60 class EmptyDict(TypedDict): 61 pass 62 63 64 T = TypeVar("T", covariant=True) 65 T2 = TypeVar("T2") 66 T3 = TypeVar("T3") 67 68 T4 = TypeVar("T4", bound=Callable[..., Any]) 69 70 71 InstOrType: TypeAlias = Union[T, Type[T]] 72 73 74 if TYPE_CHECKING: 75 from dataclasses import Field # provided by typestub but not generic at runtime 76 else: 77 78 class Field(Protocol[T2]): 79 name: str 80 type: Type[T2] 81 default: T2 82 default_factory: Callable[[], T2] 83 repr: bool 84 hash: Optional[bool] 85 init: bool 86 compare: bool 87 metadata: Mapping[str, Any] 88 89 90 @runtime_checkable 91 class Partial(Protocol[T2]): 92 __call__: Callable[..., T2] 93 94 @property 95 def func(self) -> Callable[..., T2]: 96 ... 97 98 @property 99 def args(self) -> Tuple[Any, ...]: 100 ... 101 102 @property 103 def keywords(self) -> Dict[str, Any]: 104 ... 105 106 def __new__( 107 cls: Type[Self], __func: Callable[..., T2], *args: Any, **kwargs: Any 108 ) -> Self: 109 ... 110 111 if sys.version_info >= (3, 9): # pragma: no cover 112 113 def __class_getitem__(cls, item: Any) -> types.GenericAlias: 114 ... 115 116 117 InterpStr = NewType("InterpStr", str) 118 119 120 class DataClass_(Protocol): 121 # doesn't provide __init__, __getattribute__, etc. 122 __dataclass_fields__: ClassVar[Dict[str, Field[Any]]] 123 124 125 class DataClass(DataClass_, Protocol): 126 def __init__(self, *args: Any, **kwargs: Any) -> None: 127 ... 128 129 def __getattribute__(self, __name: str) -> Any: 130 ... 131 132 def __setattr__(self, __name: str, __value: Any) -> None: 133 ... 134 135 136 @runtime_checkable 137 class Builds(DataClass, Protocol[T]): 138 _target_: ClassVar[str] 139 140 141 class BuildsWithSig(Builds[T], Protocol[T, P]): 142 def __init__(self, *args: P.args, **kwds: P.kwargs): 143 ... 
144 145 146 @runtime_checkable 147 class Just(Builds[T], Protocol[T]): 148 path: ClassVar[str] # interpolated string for importing obj 149 _target_: ClassVar[Literal["hydra_zen.funcs.get_obj"]] = "hydra_zen.funcs.get_obj" 150 151 152 class ZenPartialMixin(Protocol[T]): 153 _zen_target: ClassVar[str] 154 _zen_partial: ClassVar[Literal[True]] = True 155 156 157 class HydraPartialMixin(Protocol[T]): 158 _partial_: ClassVar[Literal[True]] = True 159 160 161 @runtime_checkable 162 class ZenPartialBuilds(Builds[T], ZenPartialMixin[T], Protocol[T]): 163 _target_: ClassVar[ 164 Literal["hydra_zen.funcs.zen_processing"] 165 ] = "hydra_zen.funcs.zen_processing" 166 167 168 @runtime_checkable 169 class HydraPartialBuilds(Builds[T], HydraPartialMixin[T], Protocol[T]): 170 ... 171 172 173 # Necessary, but not sufficient, check for PartialBuilds; useful for creating 174 # non-overlapping overloads 175 IsPartial: TypeAlias = Union[ZenPartialMixin[T], HydraPartialMixin[T]] 176 177 PartialBuilds: TypeAlias = Union[ZenPartialBuilds[T], HydraPartialBuilds[T]] 178 179 180 @runtime_checkable 181 class HasTarget(Protocol): 182 _target_: str 183 184 185 Importable = TypeVar("Importable", bound=Callable[..., Any]) 186 187 _HydraPrimitive: TypeAlias = Union[ 188 bool, None, int, float, str, ByteString, Path, WindowsPath, PosixPath 189 ] 190 191 _SupportedViaBuilds = Union[ 192 Partial[Any], 193 range, 194 Set[Any], 195 ] 196 197 _SupportedPrimitive: TypeAlias = Union[ 198 _HydraPrimitive, 199 ListConfig, 200 DictConfig, 201 Callable[..., Any], 202 Enum, 203 DataClass_, 204 complex, 205 _SupportedViaBuilds, 206 EmptyDict, # not covered by Mapping[..., ...]] 207 ] 208 209 if TYPE_CHECKING: 210 SupportedPrimitive: TypeAlias = Union[ 211 _SupportedPrimitive, 212 FrozenSet["SupportedPrimitive"], 213 # Even thought this is redundant with Sequence, it seems to 214 # be needed for pyright to do proper checking of tuple contents 215 Tuple["SupportedPrimitive", ...], 216 # Mutable generic containers need to be invariant, so 217 # we have to settle for Sequence/Mapping. While this 218 # is overly permissive in terms of sequence-type, it 219 # at least affords quality checking of sequence content 220 Sequence["SupportedPrimitive"], 221 # Mapping is covariant only in value 222 Mapping[Any, "SupportedPrimitive"], 223 ] 224 else: 225 # cleans up annotations for REPLs 226 SupportedPrimitive = TypeVar("SupportedPrimitive") 227 228 229 ZenWrapper: TypeAlias = Union[ 230 None, 231 Builds[Callable[[T4], T4]], 232 PartialBuilds[Callable[[T4], T4]], 233 Just[Callable[[T4], T4]], 234 Type[Builds[Callable[[T4], T4]]], 235 Type[PartialBuilds[Callable[[T4], T4]]], 236 Type[Just[Callable[[T4], T4]]], 237 Callable[[T4], T4], 238 str, 239 ] 240 if TYPE_CHECKING: 241 ZenWrappers: TypeAlias = Union[ZenWrapper[T4], Sequence[ZenWrapper[T4]]] 242 else: 243 # cleans up annotations for REPLs 244 class ZenWrappers(Generic[T2]): # pragma: no cover 245 pass 246 247 248 DefaultsList = List[ 249 Union[str, DataClass_, Mapping[str, Union[None, str, Sequence[str]]]] 250 ] 251 252 253 # Lists all zen-convert settings and their types. 
Not part of public API 254 class AllConvert(TypedDict, total=True): 255 dataclass: bool 256 257 258 # used for runtime type-checking 259 convert_types: Final = {"dataclass": bool} 260 261 GroupName: TypeAlias = Optional[str] 262 NodeName: TypeAlias = str 263 Node: TypeAlias = Any 264 265 266 # TODO: make immutable 267 class StoreEntry(TypedDict): 268 name: NodeName 269 group: GroupName 270 package: Optional[str] 271 provider: Optional[str] 272 node: Node 273 274 275 class ZenConvert(TypedDict, total=False): 276 """A TypedDict that provides a type-checked interface for specifying zen-convert 277 options that configure the hydra-zen config-creation functions (e.g., `builds`, 278 `just`, and `make_config`). 279 280 Note that, at runtime, `ZenConvert` is simply a dictionary with type-annotations. There is no enforced runtime validation of its keys and values. 281 282 Parameters 283 ---------- 284 dataclass : bool 285 If `True` any dataclass type/instance without a `_target_` field is 286 automatically converted to a targeted config that will instantiate to that type/ 287 instance. Otherwise the dataclass type/instance will be passed through as-is. 288 289 Note that this only works with statically-defined dataclass types, whereas 290 :func:`~hydra_zen.make_config` and :py:func:`dataclasses.make_dataclass` 291 dynamically generate dataclass types. Additionally, this feature is not 292 compatible with a dataclass instance whose type possesses an `InitVar` field. 293 294 Examples 295 -------- 296 >>> from hydra_zen.typing import ZenConvert as zc 297 >>> zc() 298 {} 299 >>> zc(dataclass=True) 300 {"dataclass": True} 301 >>> # static type-checker will raise, but runtime will not 302 >>> zc(apple=1) # type: ignore 303 {"apple": 1} 304 305 **Configuring dataclass auto-config behaviors** 306 307 >>> from hydra_zen import instantiate as I 308 >>> from hydra_zen import builds, just 309 >>> from dataclasses import dataclass 310 >>> @dataclass 311 ... class B: 312 ... x: int 313 >>> b = B(x=1) 314 315 >>> I(just(b)) 316 B(x=1) 317 >>> I(just(b, zen_convert=zc(dataclass=False))) # returns omegaconf.DictConfig 318 {"x": 1} 319 320 >>> I(builds(dict, y=b)) 321 {'y': B(x=1)} 322 >>> I(builds(dict, y=b, zen_convert=zc(dataclass=False))) # returns omegaconf.DictConfig 323 {'y': {'x': 1}} 324 325 >>> I(make_config(y=b)) # returns omegaconf.DictConfig 326 {'y': {'x': 1}} 327 >>> I(make_config(y=b, zen_convert=zc(dataclass=True), hydra_convert="all")) 328 {'y': B(x=1)} 329 330 Auto-config support does not work with dynamically-generated dataclass types 331 332 >>> just(make_config(z=1)) 333 HydraZenUnsupportedPrimitiveError: ... 334 >>> I(just(make_config(z=1), zen_convert=zc(dataclass=False))) 335 {'z': 1} 336 337 A dataclass with a `_target_` field will not be converted: 338 339 >>> @dataclass 340 ... class BuildsStr: 341 ... _target_: str = 'builtins.str' 342 ... 343 >>> BuildsStr is just(BuildsStr) 344 True 345 >>> (builds_str := BuildsStr()) is just(builds_str) 346 True 347 """ 348 349 dataclass: bool 350 [end of src/hydra_zen/typing/_implementations.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/hydra_zen/typing/_implementations.py b/src/hydra_zen/typing/_implementations.py --- a/src/hydra_zen/typing/_implementations.py +++ b/src/hydra_zen/typing/_implementations.py @@ -108,7 +108,7 @@ ) -> Self: ... - if sys.version_info >= (3, 9): # pragma: no cover + if TYPE_CHECKING and sys.version_info >= (3, 9): # pragma: no cover def __class_getitem__(cls, item: Any) -> types.GenericAlias: ...
{"golden_diff": "diff --git a/src/hydra_zen/typing/_implementations.py b/src/hydra_zen/typing/_implementations.py\n--- a/src/hydra_zen/typing/_implementations.py\n+++ b/src/hydra_zen/typing/_implementations.py\n@@ -108,7 +108,7 @@\n ) -> Self:\n ...\n \n- if sys.version_info >= (3, 9): # pragma: no cover\n+ if TYPE_CHECKING and sys.version_info >= (3, 9): # pragma: no cover\n \n def __class_getitem__(cls, item: Any) -> types.GenericAlias:\n ...\n", "issue": "`validates_with_beartype` considers `Partial` as `NoneType`\nHi @rsokl. I was having a blast using this fascinating library. But It seems when used with `hydra_zen.third_party.validates_with_beartype`, it casts `hydra_zen.typing.Partial` as `NoneType`. \r\n\r\n\r\n```python\r\nfrom hydra_zen.typing import Partial\r\nfrom hydra_zen.third_party.beartype import validates_with_beartype\r\n\r\n\r\ndef f(x: Partial[list]):\r\n return x\r\n\r\n\r\nval_f = validates_with_beartype(f)\r\n\r\nval_f(3)\r\n```\r\n\r\nIt raises the following error. Can you take a look?\r\n\r\n```bash\r\nbeartype.roar.BeartypeCallHintParamViolation: @beartyped __main__.f() \r\nparameter x=3 violates type hint None, as int 3 not instance of <class \"builtins.NoneType\">.\r\n```\n", "before_files": [{"content": "# Copyright (c) 2022 Massachusetts Institute of Technology\n# SPDX-License-Identifier: MIT\n\n# pyright: strict\n\nimport sys\nimport types\nfrom enum import Enum\nfrom pathlib import Path, PosixPath, WindowsPath\nfrom typing import (\n TYPE_CHECKING,\n Any,\n ByteString,\n Callable,\n ClassVar,\n Dict,\n FrozenSet,\n Generic,\n List,\n Mapping,\n NewType,\n Optional,\n Sequence,\n Set,\n Tuple,\n Type,\n TypeVar,\n Union,\n)\n\nfrom omegaconf import DictConfig, ListConfig\nfrom typing_extensions import (\n Final,\n Literal,\n ParamSpec,\n Protocol,\n Self,\n TypeAlias,\n TypedDict,\n runtime_checkable,\n)\n\n__all__ = [\n \"Just\",\n \"Builds\",\n \"PartialBuilds\",\n \"Partial\",\n \"Importable\",\n \"SupportedPrimitive\",\n \"ZenWrappers\",\n \"ZenPartialBuilds\",\n \"HydraPartialBuilds\",\n \"ZenConvert\",\n]\n\nP = ParamSpec(\"P\")\nR = TypeVar(\"R\")\n\n\nclass EmptyDict(TypedDict):\n pass\n\n\nT = TypeVar(\"T\", covariant=True)\nT2 = TypeVar(\"T2\")\nT3 = TypeVar(\"T3\")\n\nT4 = TypeVar(\"T4\", bound=Callable[..., Any])\n\n\nInstOrType: TypeAlias = Union[T, Type[T]]\n\n\nif TYPE_CHECKING:\n from dataclasses import Field # provided by typestub but not generic at runtime\nelse:\n\n class Field(Protocol[T2]):\n name: str\n type: Type[T2]\n default: T2\n default_factory: Callable[[], T2]\n repr: bool\n hash: Optional[bool]\n init: bool\n compare: bool\n metadata: Mapping[str, Any]\n\n\n@runtime_checkable\nclass Partial(Protocol[T2]):\n __call__: Callable[..., T2]\n\n @property\n def func(self) -> Callable[..., T2]:\n ...\n\n @property\n def args(self) -> Tuple[Any, ...]:\n ...\n\n @property\n def keywords(self) -> Dict[str, Any]:\n ...\n\n def __new__(\n cls: Type[Self], __func: Callable[..., T2], *args: Any, **kwargs: Any\n ) -> Self:\n ...\n\n if sys.version_info >= (3, 9): # pragma: no cover\n\n def __class_getitem__(cls, item: Any) -> types.GenericAlias:\n ...\n\n\nInterpStr = NewType(\"InterpStr\", str)\n\n\nclass DataClass_(Protocol):\n # doesn't provide __init__, __getattribute__, etc.\n __dataclass_fields__: ClassVar[Dict[str, Field[Any]]]\n\n\nclass DataClass(DataClass_, Protocol):\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n ...\n\n def __getattribute__(self, __name: str) -> Any:\n ...\n\n def __setattr__(self, __name: str, __value: 
Any) -> None:\n ...\n\n\n@runtime_checkable\nclass Builds(DataClass, Protocol[T]):\n _target_: ClassVar[str]\n\n\nclass BuildsWithSig(Builds[T], Protocol[T, P]):\n def __init__(self, *args: P.args, **kwds: P.kwargs):\n ...\n\n\n@runtime_checkable\nclass Just(Builds[T], Protocol[T]):\n path: ClassVar[str] # interpolated string for importing obj\n _target_: ClassVar[Literal[\"hydra_zen.funcs.get_obj\"]] = \"hydra_zen.funcs.get_obj\"\n\n\nclass ZenPartialMixin(Protocol[T]):\n _zen_target: ClassVar[str]\n _zen_partial: ClassVar[Literal[True]] = True\n\n\nclass HydraPartialMixin(Protocol[T]):\n _partial_: ClassVar[Literal[True]] = True\n\n\n@runtime_checkable\nclass ZenPartialBuilds(Builds[T], ZenPartialMixin[T], Protocol[T]):\n _target_: ClassVar[\n Literal[\"hydra_zen.funcs.zen_processing\"]\n ] = \"hydra_zen.funcs.zen_processing\"\n\n\n@runtime_checkable\nclass HydraPartialBuilds(Builds[T], HydraPartialMixin[T], Protocol[T]):\n ...\n\n\n# Necessary, but not sufficient, check for PartialBuilds; useful for creating\n# non-overlapping overloads\nIsPartial: TypeAlias = Union[ZenPartialMixin[T], HydraPartialMixin[T]]\n\nPartialBuilds: TypeAlias = Union[ZenPartialBuilds[T], HydraPartialBuilds[T]]\n\n\n@runtime_checkable\nclass HasTarget(Protocol):\n _target_: str\n\n\nImportable = TypeVar(\"Importable\", bound=Callable[..., Any])\n\n_HydraPrimitive: TypeAlias = Union[\n bool, None, int, float, str, ByteString, Path, WindowsPath, PosixPath\n]\n\n_SupportedViaBuilds = Union[\n Partial[Any],\n range,\n Set[Any],\n]\n\n_SupportedPrimitive: TypeAlias = Union[\n _HydraPrimitive,\n ListConfig,\n DictConfig,\n Callable[..., Any],\n Enum,\n DataClass_,\n complex,\n _SupportedViaBuilds,\n EmptyDict, # not covered by Mapping[..., ...]]\n]\n\nif TYPE_CHECKING:\n SupportedPrimitive: TypeAlias = Union[\n _SupportedPrimitive,\n FrozenSet[\"SupportedPrimitive\"],\n # Even thought this is redundant with Sequence, it seems to\n # be needed for pyright to do proper checking of tuple contents\n Tuple[\"SupportedPrimitive\", ...],\n # Mutable generic containers need to be invariant, so\n # we have to settle for Sequence/Mapping. While this\n # is overly permissive in terms of sequence-type, it\n # at least affords quality checking of sequence content\n Sequence[\"SupportedPrimitive\"],\n # Mapping is covariant only in value\n Mapping[Any, \"SupportedPrimitive\"],\n ]\nelse:\n # cleans up annotations for REPLs\n SupportedPrimitive = TypeVar(\"SupportedPrimitive\")\n\n\nZenWrapper: TypeAlias = Union[\n None,\n Builds[Callable[[T4], T4]],\n PartialBuilds[Callable[[T4], T4]],\n Just[Callable[[T4], T4]],\n Type[Builds[Callable[[T4], T4]]],\n Type[PartialBuilds[Callable[[T4], T4]]],\n Type[Just[Callable[[T4], T4]]],\n Callable[[T4], T4],\n str,\n]\nif TYPE_CHECKING:\n ZenWrappers: TypeAlias = Union[ZenWrapper[T4], Sequence[ZenWrapper[T4]]]\nelse:\n # cleans up annotations for REPLs\n class ZenWrappers(Generic[T2]): # pragma: no cover\n pass\n\n\nDefaultsList = List[\n Union[str, DataClass_, Mapping[str, Union[None, str, Sequence[str]]]]\n]\n\n\n# Lists all zen-convert settings and their types. 
Not part of public API\nclass AllConvert(TypedDict, total=True):\n dataclass: bool\n\n\n# used for runtime type-checking\nconvert_types: Final = {\"dataclass\": bool}\n\nGroupName: TypeAlias = Optional[str]\nNodeName: TypeAlias = str\nNode: TypeAlias = Any\n\n\n# TODO: make immutable\nclass StoreEntry(TypedDict):\n name: NodeName\n group: GroupName\n package: Optional[str]\n provider: Optional[str]\n node: Node\n\n\nclass ZenConvert(TypedDict, total=False):\n \"\"\"A TypedDict that provides a type-checked interface for specifying zen-convert\n options that configure the hydra-zen config-creation functions (e.g., `builds`,\n `just`, and `make_config`).\n\n Note that, at runtime, `ZenConvert` is simply a dictionary with type-annotations. There is no enforced runtime validation of its keys and values.\n\n Parameters\n ----------\n dataclass : bool\n If `True` any dataclass type/instance without a `_target_` field is\n automatically converted to a targeted config that will instantiate to that type/\n instance. Otherwise the dataclass type/instance will be passed through as-is.\n\n Note that this only works with statically-defined dataclass types, whereas\n :func:`~hydra_zen.make_config` and :py:func:`dataclasses.make_dataclass`\n dynamically generate dataclass types. Additionally, this feature is not\n compatible with a dataclass instance whose type possesses an `InitVar` field.\n\n Examples\n --------\n >>> from hydra_zen.typing import ZenConvert as zc\n >>> zc()\n {}\n >>> zc(dataclass=True)\n {\"dataclass\": True}\n >>> # static type-checker will raise, but runtime will not\n >>> zc(apple=1) # type: ignore\n {\"apple\": 1}\n\n **Configuring dataclass auto-config behaviors**\n\n >>> from hydra_zen import instantiate as I\n >>> from hydra_zen import builds, just\n >>> from dataclasses import dataclass\n >>> @dataclass\n ... class B:\n ... x: int\n >>> b = B(x=1)\n\n >>> I(just(b))\n B(x=1)\n >>> I(just(b, zen_convert=zc(dataclass=False))) # returns omegaconf.DictConfig\n {\"x\": 1}\n\n >>> I(builds(dict, y=b))\n {'y': B(x=1)}\n >>> I(builds(dict, y=b, zen_convert=zc(dataclass=False))) # returns omegaconf.DictConfig\n {'y': {'x': 1}}\n\n >>> I(make_config(y=b)) # returns omegaconf.DictConfig\n {'y': {'x': 1}}\n >>> I(make_config(y=b, zen_convert=zc(dataclass=True), hydra_convert=\"all\"))\n {'y': B(x=1)}\n\n Auto-config support does not work with dynamically-generated dataclass types\n\n >>> just(make_config(z=1))\n HydraZenUnsupportedPrimitiveError: ...\n >>> I(just(make_config(z=1), zen_convert=zc(dataclass=False)))\n {'z': 1}\n\n A dataclass with a `_target_` field will not be converted:\n\n >>> @dataclass\n ... class BuildsStr:\n ... _target_: str = 'builtins.str'\n ...\n >>> BuildsStr is just(BuildsStr)\n True\n >>> (builds_str := BuildsStr()) is just(builds_str)\n True\n \"\"\"\n\n dataclass: bool\n", "path": "src/hydra_zen/typing/_implementations.py"}]}
4,023
146
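Why the hydra-zen golden_diff above works: a method whose body is only `...` returns `None` when it is actually called, so with the unguarded `__class_getitem__` the subscription `Partial[list]` evaluated to `None` at runtime and beartype validated the parameter against `NoneType`. Guarding the stub behind `TYPE_CHECKING` keeps it visible to static checkers while leaving `Generic`'s real subscription machinery in place at runtime. A minimal sketch of that mechanism, using hypothetical `Broken`/`Fixed` classes rather than hydra-zen's actual `Partial` protocol:

```python
from typing import TYPE_CHECKING, Generic, TypeVar

T = TypeVar("T")


class Broken(Generic[T]):
    # The stub shadows Generic.__class_getitem__; its `...` body returns None,
    # so Broken[list] is None and a runtime checker sees the hint as NoneType.
    def __class_getitem__(cls, item):
        ...


class Fixed(Generic[T]):
    # TYPE_CHECKING is False at runtime, so the stub never becomes part of the
    # class and Generic's normal subscription machinery is used instead.
    if TYPE_CHECKING:
        def __class_getitem__(cls, item):
            ...


print(Broken[list])  # None
print(Fixed[list])   # __main__.Fixed[list]
```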
gh_patches_debug_14548
rasdani/github-patches
git_diff
zestedesavoir__zds-site-5261
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Ajouter les unités temporelles sur les graphs des stats Cete amélioration est demandée par un auteur : > Ce serait bien, aussi, de mettre les unités à côté du temps. Parce que là, j’ai 03:51, mais je ne sais pas si c’est min:sec ou sec:mil </issue> <code> [start of zds/utils/templatetags/seconds_to_duration.py] 1 from django import template 2 import datetime 3 4 register = template.Library() 5 6 7 # TODO add unit test 8 @register.filter('seconds_to_duration') 9 def seconds_to_duration(value): 10 """ 11 Display a human-readable reading-time (or any other duration) 12 from a duration in seconds. 13 """ 14 if value <= 0: 15 return '' 16 17 duration = datetime.timedelta(seconds=value) 18 return str(duration) 19 [end of zds/utils/templatetags/seconds_to_duration.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/zds/utils/templatetags/seconds_to_duration.py b/zds/utils/templatetags/seconds_to_duration.py --- a/zds/utils/templatetags/seconds_to_duration.py +++ b/zds/utils/templatetags/seconds_to_duration.py @@ -4,6 +4,14 @@ register = template.Library() +# https://stackoverflow.com/a/8907269/2226755 +def strfdelta(tdelta, fmt): + d = {'days': tdelta.days} + d['hours'], rem = divmod(tdelta.seconds, 3600) + d['minutes'], d['seconds'] = divmod(rem, 60) + return fmt.format(**d) + + # TODO add unit test @register.filter('seconds_to_duration') def seconds_to_duration(value): @@ -15,4 +23,7 @@ return '' duration = datetime.timedelta(seconds=value) - return str(duration) + if duration < 3600 + return strfdelta(duration, '{minutes}m{seconds}s') + else + return strfdelta(duration, '{hours}h{minutes}m{seconds}s')
{"golden_diff": "diff --git a/zds/utils/templatetags/seconds_to_duration.py b/zds/utils/templatetags/seconds_to_duration.py\n--- a/zds/utils/templatetags/seconds_to_duration.py\n+++ b/zds/utils/templatetags/seconds_to_duration.py\n@@ -4,6 +4,14 @@\n register = template.Library()\n \n \n+# https://stackoverflow.com/a/8907269/2226755\n+def strfdelta(tdelta, fmt):\n+ d = {'days': tdelta.days}\n+ d['hours'], rem = divmod(tdelta.seconds, 3600)\n+ d['minutes'], d['seconds'] = divmod(rem, 60)\n+ return fmt.format(**d)\n+\n+\n # TODO add unit test\n @register.filter('seconds_to_duration')\n def seconds_to_duration(value):\n@@ -15,4 +23,7 @@\n return ''\n \n duration = datetime.timedelta(seconds=value)\n- return str(duration)\n+ if duration < 3600\n+ return strfdelta(duration, '{minutes}m{seconds}s')\n+ else\n+ return strfdelta(duration, '{hours}h{minutes}m{seconds}s')\n", "issue": "Ajouter les unit\u00e9s temporelles sur les graphs des stats\nCete am\u00e9lioration est demand\u00e9e par un auteur : \r\n\r\n> Ce serait bien, aussi, de mettre les unit\u00e9s \u00e0 c\u00f4t\u00e9 du temps. Parce que l\u00e0, j\u2019ai 03:51, mais je ne sais pas si c\u2019est min:sec ou sec:mil\n", "before_files": [{"content": "from django import template\nimport datetime\n\nregister = template.Library()\n\n\n# TODO add unit test\[email protected]('seconds_to_duration')\ndef seconds_to_duration(value):\n \"\"\"\n Display a human-readable reading-time (or any other duration)\n from a duration in seconds.\n \"\"\"\n if value <= 0:\n return ''\n\n duration = datetime.timedelta(seconds=value)\n return str(duration)\n", "path": "zds/utils/templatetags/seconds_to_duration.py"}]}
741
276
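The zds-site golden_diff above adds a `strfdelta` helper so reading times carry explicit unit labels, but as recorded it compares a `datetime.timedelta` to the integer `3600` and omits the colons after `if`/`else`, so it would not run verbatim. A hedged, runnable variant of the same idea (not the repository's actual code) might look like this:

```python
import datetime


def strfdelta(tdelta, fmt):
    # Break a timedelta into hour/minute/second components for formatting.
    d = {"days": tdelta.days}
    d["hours"], rem = divmod(tdelta.seconds, 3600)
    d["minutes"], d["seconds"] = divmod(rem, 60)
    return fmt.format(**d)


def seconds_to_duration(value):
    # Human-readable duration with unit labels, e.g. "3m51s" instead of "0:03:51".
    if value <= 0:
        return ""
    duration = datetime.timedelta(seconds=value)
    if value < 3600:
        return strfdelta(duration, "{minutes}m{seconds}s")
    return strfdelta(duration, "{hours}h{minutes}m{seconds}s")


print(seconds_to_duration(231))   # 3m51s
print(seconds_to_duration(4000))  # 1h6m40s
```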
gh_patches_debug_20717
rasdani/github-patches
git_diff
digitalfabrik__integreat-cms-865
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Crash when trying to delete a region ### Describe the Bug Deleting a region via this page: ![grafik](https://user-images.githubusercontent.com/78504586/122641895-558a4280-d108-11eb-93db-2bd36f8b3ce2.png) fails with a `FieldError`: ![grafik](https://user-images.githubusercontent.com/78504586/122641888-42777280-d108-11eb-9344-86ecfb40b78a.png) ### Steps to Reproduce 1. Go to the regions tab 2. Edit any region 3. Scroll down and click on delete. Click ok on the warning. 4. See the error ### Expected Behavior The server does not fail and deletes the region ### Actual Behavior The server crashes and does not delete the region ### Additional Information / </issue> <code> [start of src/cms/views/regions/region_actions.py] 1 """ 2 This module contains view actions for region objects. 3 """ 4 import logging 5 6 from django.contrib import messages 7 from django.contrib.auth import get_user_model 8 from django.contrib.auth.decorators import login_required, permission_required 9 from django.shortcuts import redirect 10 from django.utils.translation import ugettext as _ 11 from django.views.decorators.http import require_POST 12 13 from ...decorators import staff_required 14 from ...models import Region 15 16 logger = logging.getLogger(__name__) 17 18 19 @require_POST 20 @login_required 21 @staff_required 22 @permission_required("cms.manage_regions", raise_exception=True) 23 # pylint: disable=unused-argument 24 def delete_region(request, *args, **kwargs): 25 """ 26 This view deletes a region. All content is cascade deleted. Region users, who are not assigned to any other region, 27 are manually removed. 28 29 :param request: The current request 30 :type request: ~django.http.HttpResponse 31 32 :param args: The supplied arguments 33 :type args: list 34 35 :param kwargs: The supplied keyword arguments 36 :type kwargs: dict 37 38 :raises ~django.core.exceptions.PermissionDenied: If user does not have the permission to manage regions 39 40 :return: A redirection to the media library 41 :rtype: ~django.http.HttpResponseRedirect 42 """ 43 44 region = Region.get_current_region(request) 45 # Remove hierarchy to prevent ProtectedError when children get deleted before their parents 46 region.pages.update(parent=None) 47 region.language_tree_nodes.update(parent=None) 48 # Delete region and cascade delete all contents 49 deleted_objects = region.delete() 50 logger.info( 51 "%r deleted %r, cascade deleted objects: %r", 52 request.user.profile, 53 region, 54 deleted_objects, 55 ) 56 # Get orphan users who aren't superuser or staff and don't have a region assigned 57 # (Creating users with these combination is impossible, so they were region users of the deleted region before) 58 orphan_users = get_user_model().objects.filter( 59 is_superuser=False, is_staff=False, profile__regions=None 60 ) 61 if orphan_users.exists(): 62 logger.info( 63 "Deleted orphan users: %r", 64 orphan_users, 65 ) 66 orphan_users.delete() 67 68 messages.success(request, _("Region was successfully deleted")) 69 70 return redirect("regions") 71 [end of src/cms/views/regions/region_actions.py] [start of src/cms/models/languages/language_tree_node.py] 1 from mptt.fields import TreeForeignKey 2 from mptt.models import MPTTModel, raise_if_unsaved 3 4 from django.db import models 5 from django.utils import timezone 6 from django.utils.translation import ugettext_lazy as _ 7 8 from .language import Language 9 from ..regions.region import 
Region 10 11 12 class LanguageTreeNode(MPTTModel): 13 """ 14 Data model representing a region's language tree. Each tree node is a single object instance and the whole tree is 15 identified by the root node. The base functionality inherits from the package `django-mptt 16 <https://django-mptt.readthedocs.io/en/latest/index.html>`_ (Modified Preorder Tree Traversal). 17 """ 18 19 language = models.ForeignKey( 20 Language, 21 on_delete=models.PROTECT, 22 related_name="language_tree_nodes", 23 verbose_name=_("language"), 24 ) 25 parent = TreeForeignKey( 26 "self", 27 blank=True, 28 null=True, 29 on_delete=models.PROTECT, 30 related_name="children", 31 verbose_name=_("source language"), 32 ) 33 region = models.ForeignKey( 34 Region, 35 on_delete=models.CASCADE, 36 related_name="language_tree_nodes", 37 verbose_name=_("region"), 38 ) 39 visible = models.BooleanField( 40 default=True, 41 verbose_name=_("visible"), 42 help_text=_("Defined if this language should be delivered via the API"), 43 ) 44 active = models.BooleanField( 45 default=True, 46 verbose_name=_("active"), 47 help_text=_("Defined if content in this language can be created or edited"), 48 ) 49 created_date = models.DateTimeField( 50 default=timezone.now, 51 verbose_name=_("creation date"), 52 ) 53 last_updated = models.DateTimeField( 54 auto_now=True, 55 verbose_name=_("modification date"), 56 ) 57 58 @property 59 def slug(self): 60 """ 61 Returns the slug of this node's language 62 63 :return: The language slug of this language node 64 :rtype: str 65 """ 66 return self.language.slug 67 68 @property 69 def native_name(self): 70 """ 71 Returns the native name of this node's language 72 73 :return: The native name of this language node 74 :rtype: str 75 """ 76 return self.language.native_name 77 78 @property 79 def english_name(self): 80 """ 81 Returns the name of this node's language in English 82 83 :return: The English name of this language node 84 :rtype: str 85 """ 86 return self.language.english_name 87 88 @property 89 def translated_name(self): 90 """ 91 Returns the name of this node's language in the current backend language 92 93 :return: The translated name of this language node 94 :rtype: str 95 """ 96 return self.language.translated_name 97 98 @property 99 def text_direction(self): 100 """ 101 Returns the text direction (e.g. left-to-right) of this node's language 102 103 :return: The text direction name of this language node 104 :rtype: str 105 """ 106 return self.language.text_direction 107 108 @property 109 def depth(self): 110 """ 111 Counts how many ancestors the node has. If the node is the root node, its depth is `0`. 
112 113 :return: The depth of this language node 114 :rtype: str 115 """ 116 return len(self.get_ancestors()) 117 118 # Explicitly define functions to show documentation of base model 119 @raise_if_unsaved 120 def get_ancestors(self, ascending=False, include_self=False): 121 return super().get_ancestors(ascending, include_self) 122 123 # pylint: disable=useless-super-delegation 124 @raise_if_unsaved 125 def get_family(self): 126 return super().get_family() 127 128 @raise_if_unsaved 129 def get_children(self): 130 return super().get_children() 131 132 @raise_if_unsaved 133 def get_descendants(self, include_self=False): 134 return super().get_descendants(include_self) 135 136 def get_descendant_count(self): 137 return super().get_descendant_count() 138 139 @raise_if_unsaved 140 def get_root(self): 141 return super().get_root() 142 143 # pylint: disable=useless-super-delegation 144 def insert_at( 145 self, 146 target, 147 position="first-child", 148 save=False, 149 allow_existing_pk=False, 150 refresh_target=True, 151 ): 152 return super().insert_at( 153 target, position, save, allow_existing_pk, refresh_target 154 ) 155 156 # pylint: disable=useless-super-delegation 157 def move_to(self, target, position="first-child"): 158 return super().move_to(target, position) 159 160 def __str__(self): 161 """ 162 This overwrites the default Django :meth:`~django.db.models.Model.__str__` method which would return ``LanguageTreeNode object (id)``. 163 It is used in the Django admin backend and as label for ModelChoiceFields. 164 165 :return: A readable string representation of the language node 166 :rtype: str 167 """ 168 return self.translated_name 169 170 def __repr__(self): 171 """ 172 This overwrites the default Django ``__repr__()`` method which would return ``<LanguageTreeNode: LanguageTreeNode object (id)>``. 173 It is used for logging. 174 175 :return: The canonical string representation of the language node 176 :rtype: str 177 """ 178 return f"<LanguageTreeNode (id: {self.id}, language: {self.language.slug}, region: {self.region})>" 179 180 class Meta: 181 #: The verbose name of the model 182 verbose_name = _("language tree node") 183 #: The plural verbose name of the model 184 verbose_name_plural = _("language tree nodes") 185 #: There cannot be two language tree nodes with the same region and language 186 unique_together = ( 187 ( 188 "language", 189 "region", 190 ), 191 ) 192 #: The default permissions for this model 193 default_permissions = () 194 #: The custom permissions for this model 195 permissions = (("manage_language_tree", "Can manage language tree"),) 196 #: The fields which are used to sort the returned objects of a QuerySet 197 ordering = ["region", "level", "parent"] 198 [end of src/cms/models/languages/language_tree_node.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/cms/models/languages/language_tree_node.py b/src/cms/models/languages/language_tree_node.py --- a/src/cms/models/languages/language_tree_node.py +++ b/src/cms/models/languages/language_tree_node.py @@ -194,4 +194,4 @@ #: The custom permissions for this model permissions = (("manage_language_tree", "Can manage language tree"),) #: The fields which are used to sort the returned objects of a QuerySet - ordering = ["region", "level", "parent"] + ordering = ["region", "level", "parent__pk"] diff --git a/src/cms/views/regions/region_actions.py b/src/cms/views/regions/region_actions.py --- a/src/cms/views/regions/region_actions.py +++ b/src/cms/views/regions/region_actions.py @@ -45,6 +45,8 @@ # Remove hierarchy to prevent ProtectedError when children get deleted before their parents region.pages.update(parent=None) region.language_tree_nodes.update(parent=None) + # Prevent ProtectedError when location gets deleted before their events + region.events.update(location=None) # Delete region and cascade delete all contents deleted_objects = region.delete() logger.info(
{"golden_diff": "diff --git a/src/cms/models/languages/language_tree_node.py b/src/cms/models/languages/language_tree_node.py\n--- a/src/cms/models/languages/language_tree_node.py\n+++ b/src/cms/models/languages/language_tree_node.py\n@@ -194,4 +194,4 @@\n #: The custom permissions for this model\n permissions = ((\"manage_language_tree\", \"Can manage language tree\"),)\n #: The fields which are used to sort the returned objects of a QuerySet\n- ordering = [\"region\", \"level\", \"parent\"]\n+ ordering = [\"region\", \"level\", \"parent__pk\"]\ndiff --git a/src/cms/views/regions/region_actions.py b/src/cms/views/regions/region_actions.py\n--- a/src/cms/views/regions/region_actions.py\n+++ b/src/cms/views/regions/region_actions.py\n@@ -45,6 +45,8 @@\n # Remove hierarchy to prevent ProtectedError when children get deleted before their parents\n region.pages.update(parent=None)\n region.language_tree_nodes.update(parent=None)\n+ # Prevent ProtectedError when location gets deleted before their events\n+ region.events.update(location=None)\n # Delete region and cascade delete all contents\n deleted_objects = region.delete()\n logger.info(\n", "issue": "Crash when trying to delete a region\n### Describe the Bug\r\nDeleting a region via this page: \r\n![grafik](https://user-images.githubusercontent.com/78504586/122641895-558a4280-d108-11eb-93db-2bd36f8b3ce2.png)\r\nfails with a `FieldError`:\r\n![grafik](https://user-images.githubusercontent.com/78504586/122641888-42777280-d108-11eb-9344-86ecfb40b78a.png)\r\n\r\n\r\n### Steps to Reproduce\r\n\r\n1. Go to the regions tab\r\n2. Edit any region\r\n3. Scroll down and click on delete. Click ok on the warning.\r\n4. See the error\r\n\r\n### Expected Behavior\r\nThe server does not fail and deletes the region\r\n\r\n\r\n### Actual Behavior\r\nThe server crashes and does not delete the region\r\n\r\n\r\n### Additional Information\r\n/\n", "before_files": [{"content": "\"\"\"\nThis module contains view actions for region objects.\n\"\"\"\nimport logging\n\nfrom django.contrib import messages\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.shortcuts import redirect\nfrom django.utils.translation import ugettext as _\nfrom django.views.decorators.http import require_POST\n\nfrom ...decorators import staff_required\nfrom ...models import Region\n\nlogger = logging.getLogger(__name__)\n\n\n@require_POST\n@login_required\n@staff_required\n@permission_required(\"cms.manage_regions\", raise_exception=True)\n# pylint: disable=unused-argument\ndef delete_region(request, *args, **kwargs):\n \"\"\"\n This view deletes a region. All content is cascade deleted. 
Region users, who are not assigned to any other region,\n are manually removed.\n\n :param request: The current request\n :type request: ~django.http.HttpResponse\n\n :param args: The supplied arguments\n :type args: list\n\n :param kwargs: The supplied keyword arguments\n :type kwargs: dict\n\n :raises ~django.core.exceptions.PermissionDenied: If user does not have the permission to manage regions\n\n :return: A redirection to the media library\n :rtype: ~django.http.HttpResponseRedirect\n \"\"\"\n\n region = Region.get_current_region(request)\n # Remove hierarchy to prevent ProtectedError when children get deleted before their parents\n region.pages.update(parent=None)\n region.language_tree_nodes.update(parent=None)\n # Delete region and cascade delete all contents\n deleted_objects = region.delete()\n logger.info(\n \"%r deleted %r, cascade deleted objects: %r\",\n request.user.profile,\n region,\n deleted_objects,\n )\n # Get orphan users who aren't superuser or staff and don't have a region assigned\n # (Creating users with these combination is impossible, so they were region users of the deleted region before)\n orphan_users = get_user_model().objects.filter(\n is_superuser=False, is_staff=False, profile__regions=None\n )\n if orphan_users.exists():\n logger.info(\n \"Deleted orphan users: %r\",\n orphan_users,\n )\n orphan_users.delete()\n\n messages.success(request, _(\"Region was successfully deleted\"))\n\n return redirect(\"regions\")\n", "path": "src/cms/views/regions/region_actions.py"}, {"content": "from mptt.fields import TreeForeignKey\nfrom mptt.models import MPTTModel, raise_if_unsaved\n\nfrom django.db import models\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom .language import Language\nfrom ..regions.region import Region\n\n\nclass LanguageTreeNode(MPTTModel):\n \"\"\"\n Data model representing a region's language tree. Each tree node is a single object instance and the whole tree is\n identified by the root node. 
The base functionality inherits from the package `django-mptt\n <https://django-mptt.readthedocs.io/en/latest/index.html>`_ (Modified Preorder Tree Traversal).\n \"\"\"\n\n language = models.ForeignKey(\n Language,\n on_delete=models.PROTECT,\n related_name=\"language_tree_nodes\",\n verbose_name=_(\"language\"),\n )\n parent = TreeForeignKey(\n \"self\",\n blank=True,\n null=True,\n on_delete=models.PROTECT,\n related_name=\"children\",\n verbose_name=_(\"source language\"),\n )\n region = models.ForeignKey(\n Region,\n on_delete=models.CASCADE,\n related_name=\"language_tree_nodes\",\n verbose_name=_(\"region\"),\n )\n visible = models.BooleanField(\n default=True,\n verbose_name=_(\"visible\"),\n help_text=_(\"Defined if this language should be delivered via the API\"),\n )\n active = models.BooleanField(\n default=True,\n verbose_name=_(\"active\"),\n help_text=_(\"Defined if content in this language can be created or edited\"),\n )\n created_date = models.DateTimeField(\n default=timezone.now,\n verbose_name=_(\"creation date\"),\n )\n last_updated = models.DateTimeField(\n auto_now=True,\n verbose_name=_(\"modification date\"),\n )\n\n @property\n def slug(self):\n \"\"\"\n Returns the slug of this node's language\n\n :return: The language slug of this language node\n :rtype: str\n \"\"\"\n return self.language.slug\n\n @property\n def native_name(self):\n \"\"\"\n Returns the native name of this node's language\n\n :return: The native name of this language node\n :rtype: str\n \"\"\"\n return self.language.native_name\n\n @property\n def english_name(self):\n \"\"\"\n Returns the name of this node's language in English\n\n :return: The English name of this language node\n :rtype: str\n \"\"\"\n return self.language.english_name\n\n @property\n def translated_name(self):\n \"\"\"\n Returns the name of this node's language in the current backend language\n\n :return: The translated name of this language node\n :rtype: str\n \"\"\"\n return self.language.translated_name\n\n @property\n def text_direction(self):\n \"\"\"\n Returns the text direction (e.g. left-to-right) of this node's language\n\n :return: The text direction name of this language node\n :rtype: str\n \"\"\"\n return self.language.text_direction\n\n @property\n def depth(self):\n \"\"\"\n Counts how many ancestors the node has. 
If the node is the root node, its depth is `0`.\n\n :return: The depth of this language node\n :rtype: str\n \"\"\"\n return len(self.get_ancestors())\n\n # Explicitly define functions to show documentation of base model\n @raise_if_unsaved\n def get_ancestors(self, ascending=False, include_self=False):\n return super().get_ancestors(ascending, include_self)\n\n # pylint: disable=useless-super-delegation\n @raise_if_unsaved\n def get_family(self):\n return super().get_family()\n\n @raise_if_unsaved\n def get_children(self):\n return super().get_children()\n\n @raise_if_unsaved\n def get_descendants(self, include_self=False):\n return super().get_descendants(include_self)\n\n def get_descendant_count(self):\n return super().get_descendant_count()\n\n @raise_if_unsaved\n def get_root(self):\n return super().get_root()\n\n # pylint: disable=useless-super-delegation\n def insert_at(\n self,\n target,\n position=\"first-child\",\n save=False,\n allow_existing_pk=False,\n refresh_target=True,\n ):\n return super().insert_at(\n target, position, save, allow_existing_pk, refresh_target\n )\n\n # pylint: disable=useless-super-delegation\n def move_to(self, target, position=\"first-child\"):\n return super().move_to(target, position)\n\n def __str__(self):\n \"\"\"\n This overwrites the default Django :meth:`~django.db.models.Model.__str__` method which would return ``LanguageTreeNode object (id)``.\n It is used in the Django admin backend and as label for ModelChoiceFields.\n\n :return: A readable string representation of the language node\n :rtype: str\n \"\"\"\n return self.translated_name\n\n def __repr__(self):\n \"\"\"\n This overwrites the default Django ``__repr__()`` method which would return ``<LanguageTreeNode: LanguageTreeNode object (id)>``.\n It is used for logging.\n\n :return: The canonical string representation of the language node\n :rtype: str\n \"\"\"\n return f\"<LanguageTreeNode (id: {self.id}, language: {self.language.slug}, region: {self.region})>\"\n\n class Meta:\n #: The verbose name of the model\n verbose_name = _(\"language tree node\")\n #: The plural verbose name of the model\n verbose_name_plural = _(\"language tree nodes\")\n #: There cannot be two language tree nodes with the same region and language\n unique_together = (\n (\n \"language\",\n \"region\",\n ),\n )\n #: The default permissions for this model\n default_permissions = ()\n #: The custom permissions for this model\n permissions = ((\"manage_language_tree\", \"Can manage language tree\"),)\n #: The fields which are used to sort the returned objects of a QuerySet\n ordering = [\"region\", \"level\", \"parent\"]\n", "path": "src/cms/models/languages/language_tree_node.py"}]}
3,205
266
gh_patches_debug_7043
rasdani/github-patches
git_diff
TabbycatDebate__tabbycat-1620
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Summernote boxes don't show I've had this happen a few times today, not sure if reliably reproducible yet. Only affects develop branch. Master branch works fine. Probably a dependency issue. Making a note so we remember to do something about it before Nebelung. ![image](https://user-images.githubusercontent.com/1725499/85212714-5d9f7e80-b30a-11ea-8f0f-362b7f7b8706.png) ![image](https://user-images.githubusercontent.com/1725499/85212739-8b84c300-b30a-11ea-9fdd-bbcab720f3f4.png) Browser shows error: > Refused to display 'https://hidden-caverns-06472.herokuapp.com/summernote/editor/id_tournament_staff/' in a frame because it set 'X-Frame-Options' to 'deny'. </issue> <code> [start of tabbycat/settings/core.py] 1 import os 2 3 from django.contrib.messages import constants as messages 4 from django.utils.translation import gettext_lazy as _ 5 6 7 BASE_DIR = os.path.dirname(os.path.abspath(os.path.join(__file__, os.pardir))) 8 MEDIA_ROOT = os.path.join(BASE_DIR, 'media') 9 10 # ============================================================================== 11 # Overwritten in local.py or heroku.py 12 # ============================================================================== 13 14 ADMINS = ('Philip and Chuan-Zheng', '[email protected]'), 15 MANAGERS = ADMINS 16 DEBUG = bool(int(os.environ['DEBUG'])) if 'DEBUG' in os.environ else False 17 ENABLE_DEBUG_TOOLBAR = False # Must default to false; overriden in Dev config 18 DISABLE_SENTRY = True # Overriden in Heroku config 19 SECRET_KEY = r'#2q43u&tp4((4&m3i8v%w-6z6pp7m(v0-6@w@i!j5n)n15epwc' 20 21 # ============================================================================== 22 # Version 23 # ============================================================================== 24 25 TABBYCAT_VERSION = '2.5.0-dev' 26 TABBYCAT_CODENAME = 'Nebelung' 27 READTHEDOCS_VERSION = 'v2.5.0-dev' 28 29 # ============================================================================== 30 # Internationalization and Localization 31 # ============================================================================== 32 33 USE_I18N = True 34 USE_TZ = True 35 USE_L10N = True 36 LANGUAGE_CODE = 'en' 37 TIME_ZONE = os.environ.get('TIME_ZONE', 'Australia/Melbourne') 38 39 LOCALE_PATHS = [ 40 os.path.join(BASE_DIR, 'locale'), 41 ] 42 43 # Languages that should be available in the switcher 44 EXTRA_LANG_INFO = { 45 'ms': { 46 'bidi': False, 47 'code': 'ms', 48 'name': 'Malay', 49 'name_local': 'Bahasa Melayu', #unicode codepoints here 50 }, 51 } 52 53 # Add custom languages not provided by Django 54 import django.conf.locale 55 LANG_INFO = dict(django.conf.locale.LANG_INFO, **EXTRA_LANG_INFO) 56 django.conf.locale.LANG_INFO = LANG_INFO 57 58 LANGUAGES = [ 59 ('ar', _('Arabic')), 60 ('bn', _('Bengali')), 61 ('en', _('English')), 62 ('es', _('Spanish')), 63 ('fr', _('French')), 64 ('ja', _('Japanese')), 65 ('ms', _('Malay')), 66 ('pt', _('Portuguese')), 67 ('ru', _('Russian')), 68 ('zh-hans', _('Simplified Chinese')), 69 ] 70 71 STATICI18N_ROOT = os.path.join(BASE_DIR, "locale") 72 73 FORMAT_MODULE_PATH = [ 74 'utils.formats', 75 ] 76 77 # ============================================================================== 78 # Django-specific Modules 79 # ============================================================================== 80 81 MIDDLEWARE = [ 82 'django.middleware.gzip.GZipMiddleware', 83 'django.middleware.security.SecurityMiddleware', 84 
'django.contrib.sessions.middleware.SessionMiddleware', 85 # User language preferences; must be after Session 86 'django.middleware.locale.LocaleMiddleware', 87 # Set Etags; i.e. cached requests not on network; must precede Common 88 'django.middleware.http.ConditionalGetMiddleware', 89 'django.middleware.common.CommonMiddleware', 90 # Must be after SessionMiddleware 91 'django.contrib.auth.middleware.AuthenticationMiddleware', 92 'django.middleware.common.CommonMiddleware', 93 # Must be after SessionMiddleware 94 'django.contrib.messages.middleware.MessageMiddleware', 95 'django.middleware.clickjacking.XFrameOptionsMiddleware', 96 'utils.middleware.DebateMiddleware', 97 ] 98 99 TABBYCAT_APPS = ( 100 'actionlog', 101 'adjallocation', 102 'adjfeedback', 103 'api', 104 'availability', 105 'breakqual', 106 'checkins', 107 'divisions', # obsolete 108 'draw', 109 'motions', 110 'options', 111 'participants', 112 'printing', 113 'privateurls', 114 'results', 115 'tournaments', 116 'venues', 117 'utils', 118 'users', 119 'standings', 120 'notifications', 121 'importer', 122 ) 123 124 INSTALLED_APPS = ( 125 'jet', 126 'django.contrib.admin', 127 'django.contrib.auth', 128 'django.contrib.contenttypes', 129 'django.contrib.sessions', 130 'channels', # For Websockets / real-time connections (above whitenoise) 131 'django.contrib.staticfiles', 132 'django.contrib.humanize', 133 'django_summernote', # Keep above our apps; as we unregister an admin model 134 'django.contrib.messages') \ 135 + TABBYCAT_APPS + ( 136 'dynamic_preferences', 137 'django_extensions', # For Secret Generation Command 138 'gfklookupwidget', 139 'formtools', 140 'statici18n', # Compile js translations as static file; saving requests 141 'polymorphic', 142 'rest_framework', 143 'rest_framework.authtoken', 144 ) 145 146 ROOT_URLCONF = 'urls' 147 LOGIN_REDIRECT_URL = '/' 148 FIXTURE_DIRS = (os.path.join(os.path.dirname(BASE_DIR), 'data', 'fixtures'), ) 149 SILENCED_SYSTEM_CHECKS = ('urls.W002',) 150 151 # ============================================================================== 152 # Templates 153 # ============================================================================== 154 155 TEMPLATES = [ 156 { 157 'BACKEND': 'django.template.backends.django.DjangoTemplates', 158 'DIRS': [os.path.join(BASE_DIR, 'templates')], 159 'OPTIONS': { 160 'context_processors': [ 161 'django.contrib.auth.context_processors.auth', 162 'django.contrib.messages.context_processors.messages', 163 'django.template.context_processors.debug', 164 'django.template.context_processors.i18n', 165 'django.template.context_processors.media', 166 'django.template.context_processors.static', 167 'django.template.context_processors.tz', 168 'django.template.context_processors.request', # for Jet 169 'utils.context_processors.debate_context', # for tournament config vars 170 'django.template.context_processors.i18n' # for serving static language translations, 171 ], 172 'loaders': [ 173 ('django.template.loaders.cached.Loader', [ 174 'django.template.loaders.filesystem.Loader', 175 'django.template.loaders.app_directories.Loader', 176 ]), 177 ], 178 } 179 }, 180 ] 181 182 # ============================================================================== 183 # Caching 184 # ============================================================================== 185 186 PUBLIC_FAST_CACHE_TIMEOUT = int(os.environ.get('PUBLIC_FAST_CACHE_TIMEOUT', 60 * 1)) 187 PUBLIC_SLOW_CACHE_TIMEOUT = int(os.environ.get('PUBLIC_SLOW_CACHE_TIMEOUT', 60 * 3.5)) 188 TAB_PAGES_CACHE_TIMEOUT = 
int(os.environ.get('TAB_PAGES_CACHE_TIMEOUT', 60 * 120)) 189 190 # Default non-heroku cache is to use local memory 191 CACHES = { 192 'default': { 193 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 194 }, 195 } 196 197 SESSION_ENGINE = 'django.contrib.sessions.backends.cache' 198 199 # ============================================================================== 200 # Static Files and Compilation 201 # ============================================================================== 202 203 STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles') 204 STATIC_URL = '/static/' 205 206 STATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'), ) 207 208 STATICFILES_FINDERS = ( 209 'django.contrib.staticfiles.finders.FileSystemFinder', 210 'django.contrib.staticfiles.finders.AppDirectoriesFinder', 211 ) 212 213 STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage' 214 215 # ============================================================================== 216 # Logging 217 # ============================================================================== 218 219 MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage' 220 221 LOGGING = { 222 'version': 1, 223 'disable_existing_loggers': False, 224 'handlers': { 225 'console': { 226 'class': 'logging.StreamHandler', 227 'formatter': 'standard', 228 }, 229 }, 230 'loggers': { 231 'django': { 232 'handlers': ['console'], 233 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'), 234 }, 235 'sentry.errors': { 236 'level': 'INFO', 237 'handlers': ['console'], 238 'propagate': False, 239 }, 240 }, 241 'formatters': { 242 'standard': { 243 'format': '[%(asctime)s] %(levelname)s %(name)s: %(message)s', 244 }, 245 }, 246 } 247 248 for app in TABBYCAT_APPS: 249 LOGGING['loggers'][app] = { 250 'handlers': ['console'], 251 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'), 252 } 253 254 # ============================================================================== 255 # Messages 256 # ============================================================================== 257 258 MESSAGE_TAGS = {messages.ERROR: 'danger', } 259 260 # ============================================================================== 261 # Summernote (WYSWIG) 262 # ============================================================================== 263 264 SUMMERNOTE_THEME = 'bs4' # Bootstrap 4 265 266 SUMMERNOTE_CONFIG = { 267 'width': '100%', 268 'height': '480', 269 'toolbar': [ 270 ['style', ['bold', 'italic', 'underline', 'fontsize', 'color', 'clear']], 271 ['para', ['ul', 'ol']], 272 ['insert', ['link', 'picture']], 273 ['misc', ['undo', 'redo', 'codeview']], 274 ], 275 'disable_upload': True, 276 'iframe': True, # Necessary; if just to compartmentalise jQuery dependency, 277 } 278 279 # ============================================================================== 280 # Database 281 # ============================================================================== 282 283 DATABASES = { 284 'default': { 285 'ENGINE': 'django.db.backends.postgresql', 286 }, 287 } 288 289 # ============================================================================== 290 # Channels 291 # ============================================================================== 292 293 ASGI_APPLICATION = "routing.application" 294 295 CHANNEL_LAYERS = { 296 "default": { 297 "BACKEND": "channels.layers.InMemoryChannelLayer", 298 }, 299 } 300 301 # ============================================================================== 302 # Dynamic preferences 303 # 
============================================================================== 304 305 DYNAMIC_PREFERENCES = { 306 'REGISTRY_MODULE': 'preferences', 307 } 308 309 # ============================================================================== 310 # REST Framework 311 # ============================================================================== 312 313 REST_FRAMEWORK = { 314 'DEFAULT_RENDERER_CLASSES': [ 315 'rest_framework.renderers.JSONRenderer', 316 ], 317 'DEFAULT_PARSER_CLASSES': [ 318 'rest_framework.parsers.JSONParser', 319 ], 320 'DEFAULT_AUTHENTICATION_CLASSES': [ 321 'rest_framework.authentication.TokenAuthentication', 322 ], 323 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination', 324 } 325 [end of tabbycat/settings/core.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/tabbycat/settings/core.py b/tabbycat/settings/core.py --- a/tabbycat/settings/core.py +++ b/tabbycat/settings/core.py @@ -276,6 +276,8 @@ 'iframe': True, # Necessary; if just to compartmentalise jQuery dependency, } +X_FRAME_OPTIONS = 'SAMEORIGIN' # Necessary to get Django-Summernote working because of Django 3 changes + # ============================================================================== # Database # ==============================================================================
{"golden_diff": "diff --git a/tabbycat/settings/core.py b/tabbycat/settings/core.py\n--- a/tabbycat/settings/core.py\n+++ b/tabbycat/settings/core.py\n@@ -276,6 +276,8 @@\n 'iframe': True, # Necessary; if just to compartmentalise jQuery dependency,\n }\n \n+X_FRAME_OPTIONS = 'SAMEORIGIN' # Necessary to get Django-Summernote working because of Django 3 changes\n+\n # ==============================================================================\n # Database\n # ==============================================================================\n", "issue": "Summernote boxes don't show\nI've had this happen a few times today, not sure if reliably reproducible yet. Only affects develop branch. Master branch works fine. Probably a dependency issue. Making a note so we remember to do something about it before Nebelung.\r\n\r\n![image](https://user-images.githubusercontent.com/1725499/85212714-5d9f7e80-b30a-11ea-8f0f-362b7f7b8706.png)\r\n\r\n![image](https://user-images.githubusercontent.com/1725499/85212739-8b84c300-b30a-11ea-9fdd-bbcab720f3f4.png)\r\n\r\nBrowser shows error:\r\n\r\n> Refused to display 'https://hidden-caverns-06472.herokuapp.com/summernote/editor/id_tournament_staff/' in a frame because it set 'X-Frame-Options' to 'deny'.\r\n\n", "before_files": [{"content": "import os\n\nfrom django.contrib.messages import constants as messages\nfrom django.utils.translation import gettext_lazy as _\n\n\nBASE_DIR = os.path.dirname(os.path.abspath(os.path.join(__file__, os.pardir)))\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\n\n# ==============================================================================\n# Overwritten in local.py or heroku.py\n# ==============================================================================\n\nADMINS = ('Philip and Chuan-Zheng', '[email protected]'),\nMANAGERS = ADMINS\nDEBUG = bool(int(os.environ['DEBUG'])) if 'DEBUG' in os.environ else False\nENABLE_DEBUG_TOOLBAR = False # Must default to false; overriden in Dev config\nDISABLE_SENTRY = True # Overriden in Heroku config\nSECRET_KEY = r'#2q43u&tp4((4&m3i8v%w-6z6pp7m(v0-6@w@i!j5n)n15epwc'\n\n# ==============================================================================\n# Version\n# ==============================================================================\n\nTABBYCAT_VERSION = '2.5.0-dev'\nTABBYCAT_CODENAME = 'Nebelung'\nREADTHEDOCS_VERSION = 'v2.5.0-dev'\n\n# ==============================================================================\n# Internationalization and Localization\n# ==============================================================================\n\nUSE_I18N = True\nUSE_TZ = True\nUSE_L10N = True\nLANGUAGE_CODE = 'en'\nTIME_ZONE = os.environ.get('TIME_ZONE', 'Australia/Melbourne')\n\nLOCALE_PATHS = [\n os.path.join(BASE_DIR, 'locale'),\n]\n\n# Languages that should be available in the switcher\nEXTRA_LANG_INFO = {\n 'ms': {\n 'bidi': False,\n 'code': 'ms',\n 'name': 'Malay',\n 'name_local': 'Bahasa Melayu', #unicode codepoints here\n },\n}\n\n# Add custom languages not provided by Django\nimport django.conf.locale\nLANG_INFO = dict(django.conf.locale.LANG_INFO, **EXTRA_LANG_INFO)\ndjango.conf.locale.LANG_INFO = LANG_INFO\n\nLANGUAGES = [\n ('ar', _('Arabic')),\n ('bn', _('Bengali')),\n ('en', _('English')),\n ('es', _('Spanish')),\n ('fr', _('French')),\n ('ja', _('Japanese')),\n ('ms', _('Malay')),\n ('pt', _('Portuguese')),\n ('ru', _('Russian')),\n ('zh-hans', _('Simplified Chinese')),\n]\n\nSTATICI18N_ROOT = os.path.join(BASE_DIR, \"locale\")\n\nFORMAT_MODULE_PATH = [\n 
'utils.formats',\n]\n\n# ==============================================================================\n# Django-specific Modules\n# ==============================================================================\n\nMIDDLEWARE = [\n 'django.middleware.gzip.GZipMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n # User language preferences; must be after Session\n 'django.middleware.locale.LocaleMiddleware',\n # Set Etags; i.e. cached requests not on network; must precede Common\n 'django.middleware.http.ConditionalGetMiddleware',\n 'django.middleware.common.CommonMiddleware',\n # Must be after SessionMiddleware\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.middleware.common.CommonMiddleware',\n # Must be after SessionMiddleware\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'utils.middleware.DebateMiddleware',\n]\n\nTABBYCAT_APPS = (\n 'actionlog',\n 'adjallocation',\n 'adjfeedback',\n 'api',\n 'availability',\n 'breakqual',\n 'checkins',\n 'divisions', # obsolete\n 'draw',\n 'motions',\n 'options',\n 'participants',\n 'printing',\n 'privateurls',\n 'results',\n 'tournaments',\n 'venues',\n 'utils',\n 'users',\n 'standings',\n 'notifications',\n 'importer',\n)\n\nINSTALLED_APPS = (\n 'jet',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'channels', # For Websockets / real-time connections (above whitenoise)\n 'django.contrib.staticfiles',\n 'django.contrib.humanize',\n 'django_summernote', # Keep above our apps; as we unregister an admin model\n 'django.contrib.messages') \\\n + TABBYCAT_APPS + (\n 'dynamic_preferences',\n 'django_extensions', # For Secret Generation Command\n 'gfklookupwidget',\n 'formtools',\n 'statici18n', # Compile js translations as static file; saving requests\n 'polymorphic',\n 'rest_framework',\n 'rest_framework.authtoken',\n)\n\nROOT_URLCONF = 'urls'\nLOGIN_REDIRECT_URL = '/'\nFIXTURE_DIRS = (os.path.join(os.path.dirname(BASE_DIR), 'data', 'fixtures'), )\nSILENCED_SYSTEM_CHECKS = ('urls.W002',)\n\n# ==============================================================================\n# Templates\n# ==============================================================================\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(BASE_DIR, 'templates')],\n 'OPTIONS': {\n 'context_processors': [\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'django.template.context_processors.debug',\n 'django.template.context_processors.i18n',\n 'django.template.context_processors.media',\n 'django.template.context_processors.static',\n 'django.template.context_processors.tz',\n 'django.template.context_processors.request', # for Jet\n 'utils.context_processors.debate_context', # for tournament config vars\n 'django.template.context_processors.i18n' # for serving static language translations,\n ],\n 'loaders': [\n ('django.template.loaders.cached.Loader', [\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n ]),\n ],\n }\n },\n]\n\n# ==============================================================================\n# Caching\n# ==============================================================================\n\nPUBLIC_FAST_CACHE_TIMEOUT = int(os.environ.get('PUBLIC_FAST_CACHE_TIMEOUT', 60 * 
1))\nPUBLIC_SLOW_CACHE_TIMEOUT = int(os.environ.get('PUBLIC_SLOW_CACHE_TIMEOUT', 60 * 3.5))\nTAB_PAGES_CACHE_TIMEOUT = int(os.environ.get('TAB_PAGES_CACHE_TIMEOUT', 60 * 120))\n\n# Default non-heroku cache is to use local memory\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n },\n}\n\nSESSION_ENGINE = 'django.contrib.sessions.backends.cache'\n\n# ==============================================================================\n# Static Files and Compilation\n# ==============================================================================\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')\nSTATIC_URL = '/static/'\n\nSTATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'), )\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n)\n\nSTATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'\n\n# ==============================================================================\n# Logging\n# ==============================================================================\n\nMESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n 'formatter': 'standard',\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'sentry.errors': {\n 'level': 'INFO',\n 'handlers': ['console'],\n 'propagate': False,\n },\n },\n 'formatters': {\n 'standard': {\n 'format': '[%(asctime)s] %(levelname)s %(name)s: %(message)s',\n },\n },\n}\n\nfor app in TABBYCAT_APPS:\n LOGGING['loggers'][app] = {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n }\n\n# ==============================================================================\n# Messages\n# ==============================================================================\n\nMESSAGE_TAGS = {messages.ERROR: 'danger', }\n\n# ==============================================================================\n# Summernote (WYSWIG)\n# ==============================================================================\n\nSUMMERNOTE_THEME = 'bs4' # Bootstrap 4\n\nSUMMERNOTE_CONFIG = {\n 'width': '100%',\n 'height': '480',\n 'toolbar': [\n ['style', ['bold', 'italic', 'underline', 'fontsize', 'color', 'clear']],\n ['para', ['ul', 'ol']],\n ['insert', ['link', 'picture']],\n ['misc', ['undo', 'redo', 'codeview']],\n ],\n 'disable_upload': True,\n 'iframe': True, # Necessary; if just to compartmentalise jQuery dependency,\n}\n\n# ==============================================================================\n# Database\n# ==============================================================================\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n },\n}\n\n# ==============================================================================\n# Channels\n# ==============================================================================\n\nASGI_APPLICATION = \"routing.application\"\n\nCHANNEL_LAYERS = {\n \"default\": {\n \"BACKEND\": \"channels.layers.InMemoryChannelLayer\",\n },\n}\n\n# ==============================================================================\n# Dynamic preferences\n# ==============================================================================\n\nDYNAMIC_PREFERENCES = {\n 'REGISTRY_MODULE': 'preferences',\n}\n\n# 
==============================================================================\n# REST Framework\n# ==============================================================================\n\nREST_FRAMEWORK = {\n 'DEFAULT_RENDERER_CLASSES': [\n 'rest_framework.renderers.JSONRenderer',\n ],\n 'DEFAULT_PARSER_CLASSES': [\n 'rest_framework.parsers.JSONParser',\n ],\n 'DEFAULT_AUTHENTICATION_CLASSES': [\n 'rest_framework.authentication.TokenAuthentication',\n ],\n 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',\n}\n", "path": "tabbycat/settings/core.py"}]}
3731
106
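The record above ends with a one-line settings fix: Django 3.0 changed the default `X_FRAME_OPTIONS` from `SAMEORIGIN` to `DENY`, so Summernote's iframe-based editor is refused by the browser. A minimal sketch of the relevant settings, mirroring the golden diff (only the Summernote-related keys are shown; the full settings module appears in the record's `before_files`):

```python
# Summernote renders its editor inside an <iframe>; with Django 3's new
# default of X_FRAME_OPTIONS = 'DENY', the browser refuses to display it.
SUMMERNOTE_THEME = 'bs4'  # Bootstrap 4

SUMMERNOTE_CONFIG = {
    'disable_upload': True,
    'iframe': True,  # compartmentalises the jQuery dependency
}

X_FRAME_OPTIONS = 'SAMEORIGIN'  # allow same-origin framing so the editor loads
```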
gh_patches_debug_1553
rasdani/github-patches
git_diff
feast-dev__feast-3756
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Postgres engine default keepalives_idle value causes setsockopt(TCP_KEEPIDLE) invalid value Get `OperationalError: connection to server at "localhost" (127.0.0.1), port 5432 failed: setsockopt(TCP_KEEPIDLE) failed: Invalid argument` when run `feast apply`. Because of `keepalives_idle=config.keepalives_idle` field in function '_get_conn' in `infra/utils/postgres/connection_utils.py` file. For example, to avoid this error I need to pass 'keepalives_idle=1', but that argument isn't parsed for the registry in feature_store.yaml and pass 'keepalives_idle=0' by default setting in `infra/utils/postgres/postgres_config.py`. - Version: 0.33.1 - Platform: linux ubuntu 20.04 - Subsystem: ## Possible Solution Check this issue with the same problem https://github.com/TobikoData/sqlmesh/issues/750. I think you shouldn't pass 'keepalives_idle=0' by default. </issue> <code> [start of sdk/python/feast/infra/utils/postgres/postgres_config.py] 1 from enum import Enum 2 from typing import Optional 3 4 from pydantic import StrictStr 5 6 from feast.repo_config import FeastConfigBaseModel 7 8 9 class ConnectionType(Enum): 10 singleton = "singleton" 11 pool = "pool" 12 13 14 class PostgreSQLConfig(FeastConfigBaseModel): 15 min_conn: int = 1 16 max_conn: int = 10 17 conn_type: ConnectionType = ConnectionType.singleton 18 host: StrictStr 19 port: int = 5432 20 database: StrictStr 21 db_schema: StrictStr = "public" 22 user: StrictStr 23 password: StrictStr 24 sslmode: Optional[StrictStr] = None 25 sslkey_path: Optional[StrictStr] = None 26 sslcert_path: Optional[StrictStr] = None 27 sslrootcert_path: Optional[StrictStr] = None 28 keepalives_idle: int = 0 29 [end of sdk/python/feast/infra/utils/postgres/postgres_config.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sdk/python/feast/infra/utils/postgres/postgres_config.py b/sdk/python/feast/infra/utils/postgres/postgres_config.py --- a/sdk/python/feast/infra/utils/postgres/postgres_config.py +++ b/sdk/python/feast/infra/utils/postgres/postgres_config.py @@ -25,4 +25,4 @@ sslkey_path: Optional[StrictStr] = None sslcert_path: Optional[StrictStr] = None sslrootcert_path: Optional[StrictStr] = None - keepalives_idle: int = 0 + keepalives_idle: Optional[int] = None
{"golden_diff": "diff --git a/sdk/python/feast/infra/utils/postgres/postgres_config.py b/sdk/python/feast/infra/utils/postgres/postgres_config.py\n--- a/sdk/python/feast/infra/utils/postgres/postgres_config.py\n+++ b/sdk/python/feast/infra/utils/postgres/postgres_config.py\n@@ -25,4 +25,4 @@\n sslkey_path: Optional[StrictStr] = None\n sslcert_path: Optional[StrictStr] = None\n sslrootcert_path: Optional[StrictStr] = None\n- keepalives_idle: int = 0\n+ keepalives_idle: Optional[int] = None\n", "issue": "Postgres engine default keepalives_idle value causes setsockopt(TCP_KEEPIDLE) invalid value\nGet `OperationalError: connection to server at \"localhost\" (127.0.0.1), port 5432 failed: setsockopt(TCP_KEEPIDLE) failed: Invalid argument` when run `feast apply`.\r\nBecause of `keepalives_idle=config.keepalives_idle` field in function '_get_conn' in `infra/utils/postgres/connection_utils.py` file. For example, to avoid this error I need to pass 'keepalives_idle=1', but that argument isn't parsed for the registry in feature_store.yaml and pass 'keepalives_idle=0' by default setting in `infra/utils/postgres/postgres_config.py`. \r\n\r\n- Version: 0.33.1\r\n- Platform: linux ubuntu 20.04\r\n- Subsystem:\r\n\r\n## Possible Solution\r\nCheck this issue with the same problem https://github.com/TobikoData/sqlmesh/issues/750. I think you shouldn't pass 'keepalives_idle=0' by default.\n", "before_files": [{"content": "from enum import Enum\nfrom typing import Optional\n\nfrom pydantic import StrictStr\n\nfrom feast.repo_config import FeastConfigBaseModel\n\n\nclass ConnectionType(Enum):\n singleton = \"singleton\"\n pool = \"pool\"\n\n\nclass PostgreSQLConfig(FeastConfigBaseModel):\n min_conn: int = 1\n max_conn: int = 10\n conn_type: ConnectionType = ConnectionType.singleton\n host: StrictStr\n port: int = 5432\n database: StrictStr\n db_schema: StrictStr = \"public\"\n user: StrictStr\n password: StrictStr\n sslmode: Optional[StrictStr] = None\n sslkey_path: Optional[StrictStr] = None\n sslcert_path: Optional[StrictStr] = None\n sslrootcert_path: Optional[StrictStr] = None\n keepalives_idle: int = 0\n", "path": "sdk/python/feast/infra/utils/postgres/postgres_config.py"}]}
1035
145
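The fix above turns `keepalives_idle` into an optional field so a keepalive of 0 is never forced onto the connection. A minimal sketch of the patched model (trimmed to the relevant fields) plus a hypothetical kwargs builder — the record does not show `connection_utils.py`, so the filtering helper below is an assumption about how the optional value would be consumed, not Feast's actual code:

```python
from typing import Optional

from pydantic import StrictStr

from feast.repo_config import FeastConfigBaseModel


class PostgreSQLConfig(FeastConfigBaseModel):
    host: StrictStr
    port: int = 5432
    database: StrictStr
    user: StrictStr
    password: StrictStr
    keepalives_idle: Optional[int] = None  # patched: unset by default


def connection_kwargs(config: PostgreSQLConfig) -> dict:
    # Hypothetical helper: only forward keepalives_idle when it was configured,
    # so psycopg2 never calls setsockopt(TCP_KEEPIDLE) with an invalid value.
    kwargs = {
        "host": config.host,
        "port": config.port,
        "dbname": config.database,
        "user": config.user,
        "password": config.password,
    }
    if config.keepalives_idle is not None:
        kwargs["keepalives_idle"] = config.keepalives_idle
    return kwargs
```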
gh_patches_debug_18869
rasdani/github-patches
git_diff
mathesar-foundation__mathesar-2407
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> We should be able to create new admin users, upgrade existing users to admin ## Description * `is_superuser` is currently a readonly property in Users APIs. * We should be able to set them while an admin is editing a user. * Users (including super-users) should not be able to modify it's value for themselves. </issue> <code> [start of mathesar/api/ui/serializers/users.py] 1 from django.contrib.auth.password_validation import validate_password 2 from rest_access_policy import FieldAccessMixin, PermittedPkRelatedField 3 from rest_framework import serializers 4 5 from mathesar.api.db.permissions.database import DatabaseAccessPolicy 6 from mathesar.api.db.permissions.schema import SchemaAccessPolicy 7 from mathesar.api.exceptions.mixins import MathesarErrorMessageMixin 8 from mathesar.api.exceptions.validation_exceptions.exceptions import IncorrectOldPassword 9 from mathesar.api.ui.permissions.users import UserAccessPolicy 10 from mathesar.models.base import Database, Schema 11 from mathesar.models.users import User, DatabaseRole, SchemaRole 12 13 14 class NestedDatabaseRoleSerializer(MathesarErrorMessageMixin, serializers.ModelSerializer): 15 class Meta: 16 model = DatabaseRole 17 fields = ['id', 'database', 'role'] 18 19 20 class NestedSchemaRoleSerializer(MathesarErrorMessageMixin, serializers.ModelSerializer): 21 class Meta: 22 model = SchemaRole 23 fields = ['id', 'schema', 'role'] 24 25 26 class UserSerializer(MathesarErrorMessageMixin, FieldAccessMixin, serializers.ModelSerializer): 27 database_roles = NestedDatabaseRoleSerializer(many=True, required=False) 28 schema_roles = NestedSchemaRoleSerializer(many=True, required=False) 29 access_policy = UserAccessPolicy 30 31 class Meta: 32 model = User 33 fields = [ 34 'id', 35 'full_name', 36 'short_name', 37 'username', 38 'password', 39 'email', 40 'is_superuser', 41 'database_roles', 42 'schema_roles', 43 ] 44 extra_kwargs = { 45 'password': {'write_only': True}, 46 'is_superuser': {'read_only': True}, 47 'database_roles': {'read_only': True}, 48 'schema_roles': {'read_only': True} 49 } 50 51 def create(self, validated_data): 52 password = validated_data.pop('password') 53 user = User(**validated_data) 54 user.password_change_needed = True 55 user.set_password(password) 56 user.save() 57 return user 58 59 60 class ChangePasswordSerializer(serializers.Serializer): 61 password = serializers.CharField(write_only=True, required=True, validators=[validate_password]) 62 old_password = serializers.CharField(write_only=True, required=True) 63 64 def validate_old_password(self, value): 65 user = self.context['request'].user 66 if user.check_password(value) is True: 67 return value 68 raise IncorrectOldPassword(field='old_password') 69 70 def update(self, instance, validated_data): 71 instance.set_password(validated_data['password']) 72 instance.save() 73 return instance 74 75 76 class PasswordResetSerializer(MathesarErrorMessageMixin, serializers.Serializer): 77 password = serializers.CharField(write_only=True, required=True, validators=[validate_password]) 78 79 80 class DatabaseRoleSerializer(MathesarErrorMessageMixin, serializers.ModelSerializer): 81 class Meta: 82 model = DatabaseRole 83 fields = ['id', 'user', 'database', 'role'] 84 85 # Restrict the list of databases to which the user has access to create a database role 86 # Refer https://rsinger86.github.io/drf-access-policy/policy_reuse/ for the usage of `PermittedPkRelatedField` 
87 database = PermittedPkRelatedField( 88 access_policy=DatabaseAccessPolicy, 89 queryset=Database.current_objects.all() 90 ) 91 92 93 class SchemaRoleSerializer(MathesarErrorMessageMixin, serializers.ModelSerializer): 94 class Meta: 95 model = SchemaRole 96 fields = ['id', 'user', 'schema', 'role'] 97 98 schema = PermittedPkRelatedField( 99 access_policy=SchemaAccessPolicy, 100 queryset=Schema.current_objects.all() 101 ) 102 [end of mathesar/api/ui/serializers/users.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mathesar/api/ui/serializers/users.py b/mathesar/api/ui/serializers/users.py --- a/mathesar/api/ui/serializers/users.py +++ b/mathesar/api/ui/serializers/users.py @@ -43,11 +43,23 @@ ] extra_kwargs = { 'password': {'write_only': True}, - 'is_superuser': {'read_only': True}, 'database_roles': {'read_only': True}, 'schema_roles': {'read_only': True} } + def get_fields(self): + fields = super().get_fields() + request = self.context.get("request", None) + if not hasattr(request, 'parser_context'): + return fields + kwargs = request.parser_context.get('kwargs') + if kwargs: + user_pk = kwargs.get('pk') + if user_pk: + if request.user.id == int(user_pk) or not request.user.is_superuser: + fields["is_superuser"].read_only = True + return fields + def create(self, validated_data): password = validated_data.pop('password') user = User(**validated_data)
{"golden_diff": "diff --git a/mathesar/api/ui/serializers/users.py b/mathesar/api/ui/serializers/users.py\n--- a/mathesar/api/ui/serializers/users.py\n+++ b/mathesar/api/ui/serializers/users.py\n@@ -43,11 +43,23 @@\n ]\n extra_kwargs = {\n 'password': {'write_only': True},\n- 'is_superuser': {'read_only': True},\n 'database_roles': {'read_only': True},\n 'schema_roles': {'read_only': True}\n }\n \n+ def get_fields(self):\n+ fields = super().get_fields()\n+ request = self.context.get(\"request\", None)\n+ if not hasattr(request, 'parser_context'):\n+ return fields\n+ kwargs = request.parser_context.get('kwargs')\n+ if kwargs:\n+ user_pk = kwargs.get('pk')\n+ if user_pk:\n+ if request.user.id == int(user_pk) or not request.user.is_superuser:\n+ fields[\"is_superuser\"].read_only = True\n+ return fields\n+\n def create(self, validated_data):\n password = validated_data.pop('password')\n user = User(**validated_data)\n", "issue": "We should be able to create new admin users, upgrade existing users to admin\n## Description\r\n* `is_superuser` is currently a readonly property in Users APIs.\r\n* We should be able to set them while an admin is editing a user.\r\n* Users (including super-users) should not be able to modify it's value for themselves.\r\n\n", "before_files": [{"content": "from django.contrib.auth.password_validation import validate_password\nfrom rest_access_policy import FieldAccessMixin, PermittedPkRelatedField\nfrom rest_framework import serializers\n\nfrom mathesar.api.db.permissions.database import DatabaseAccessPolicy\nfrom mathesar.api.db.permissions.schema import SchemaAccessPolicy\nfrom mathesar.api.exceptions.mixins import MathesarErrorMessageMixin\nfrom mathesar.api.exceptions.validation_exceptions.exceptions import IncorrectOldPassword\nfrom mathesar.api.ui.permissions.users import UserAccessPolicy\nfrom mathesar.models.base import Database, Schema\nfrom mathesar.models.users import User, DatabaseRole, SchemaRole\n\n\nclass NestedDatabaseRoleSerializer(MathesarErrorMessageMixin, serializers.ModelSerializer):\n class Meta:\n model = DatabaseRole\n fields = ['id', 'database', 'role']\n\n\nclass NestedSchemaRoleSerializer(MathesarErrorMessageMixin, serializers.ModelSerializer):\n class Meta:\n model = SchemaRole\n fields = ['id', 'schema', 'role']\n\n\nclass UserSerializer(MathesarErrorMessageMixin, FieldAccessMixin, serializers.ModelSerializer):\n database_roles = NestedDatabaseRoleSerializer(many=True, required=False)\n schema_roles = NestedSchemaRoleSerializer(many=True, required=False)\n access_policy = UserAccessPolicy\n\n class Meta:\n model = User\n fields = [\n 'id',\n 'full_name',\n 'short_name',\n 'username',\n 'password',\n 'email',\n 'is_superuser',\n 'database_roles',\n 'schema_roles',\n ]\n extra_kwargs = {\n 'password': {'write_only': True},\n 'is_superuser': {'read_only': True},\n 'database_roles': {'read_only': True},\n 'schema_roles': {'read_only': True}\n }\n\n def create(self, validated_data):\n password = validated_data.pop('password')\n user = User(**validated_data)\n user.password_change_needed = True\n user.set_password(password)\n user.save()\n return user\n\n\nclass ChangePasswordSerializer(serializers.Serializer):\n password = serializers.CharField(write_only=True, required=True, validators=[validate_password])\n old_password = serializers.CharField(write_only=True, required=True)\n\n def validate_old_password(self, value):\n user = self.context['request'].user\n if user.check_password(value) is True:\n return value\n raise 
IncorrectOldPassword(field='old_password')\n\n def update(self, instance, validated_data):\n instance.set_password(validated_data['password'])\n instance.save()\n return instance\n\n\nclass PasswordResetSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n password = serializers.CharField(write_only=True, required=True, validators=[validate_password])\n\n\nclass DatabaseRoleSerializer(MathesarErrorMessageMixin, serializers.ModelSerializer):\n class Meta:\n model = DatabaseRole\n fields = ['id', 'user', 'database', 'role']\n\n # Restrict the list of databases to which the user has access to create a database role\n # Refer https://rsinger86.github.io/drf-access-policy/policy_reuse/ for the usage of `PermittedPkRelatedField`\n database = PermittedPkRelatedField(\n access_policy=DatabaseAccessPolicy,\n queryset=Database.current_objects.all()\n )\n\n\nclass SchemaRoleSerializer(MathesarErrorMessageMixin, serializers.ModelSerializer):\n class Meta:\n model = SchemaRole\n fields = ['id', 'user', 'schema', 'role']\n\n schema = PermittedPkRelatedField(\n access_policy=SchemaAccessPolicy,\n queryset=Schema.current_objects.all()\n )\n", "path": "mathesar/api/ui/serializers/users.py"}]}
1541
253
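The patch above drops the blanket `read_only` flag on `is_superuser` and instead decides per request: a user editing their own account, or any non-superuser, still sees the field as read-only. A minimal sketch of that `get_fields` override as a method body of `UserSerializer`, as it appears in the golden diff (DRF places the URL kwargs in `request.parser_context`):

```python
def get_fields(self):
    fields = super().get_fields()
    request = self.context.get("request", None)
    if not hasattr(request, "parser_context"):
        return fields
    kwargs = request.parser_context.get("kwargs")
    if kwargs:
        user_pk = kwargs.get("pk")
        if user_pk:
            # Self-edits and edits by non-admins may not change is_superuser.
            if request.user.id == int(user_pk) or not request.user.is_superuser:
                fields["is_superuser"].read_only = True
    return fields
```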
gh_patches_debug_22200
rasdani/github-patches
git_diff
mozilla__bugbug-1722
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Submit logged warnings to Sentry Currently, we only report exceptions to Sentry. It'd be nice to report warnings too, so we can get a sense of how often they happen. IIRC in the code-review bot (https://github.com/mozilla/code-review), we are doing that kind of automatically. </issue> <code> [start of http_service/bugbug_http/worker.py] 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 # This Source Code Form is subject to the terms of the Mozilla Public 4 # License, v. 2.0. If a copy of the MPL was not distributed with this file, 5 # You can obtain one at http://mozilla.org/MPL/2.0/. 6 7 import os 8 import sys 9 10 import sentry_sdk 11 from redis import Redis 12 from rq import Connection, Worker 13 from sentry_sdk.integrations.rq import RqIntegration 14 15 import bugbug_http.boot 16 from bugbug import get_bugbug_version 17 18 if os.environ.get("SENTRY_DSN"): 19 sentry_sdk.init( 20 os.environ.get("SENTRY_DSN"), 21 integrations=[RqIntegration()], 22 release=get_bugbug_version(), 23 ) 24 25 26 def main(): 27 # Bootstrap the worker assets 28 bugbug_http.boot.boot_worker() 29 30 # Provide queue names to listen to as arguments to this script, 31 # similar to rq worker 32 redis_url = os.environ.get("REDIS_URL", "redis://localhost/0") 33 redis_conn = Redis.from_url(redis_url) 34 with Connection(connection=redis_conn): 35 qs = sys.argv[1:] or ["default"] 36 37 w = Worker(qs) 38 w.work() 39 40 41 if __name__ == "__main__": 42 main() 43 [end of http_service/bugbug_http/worker.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/http_service/bugbug_http/worker.py b/http_service/bugbug_http/worker.py --- a/http_service/bugbug_http/worker.py +++ b/http_service/bugbug_http/worker.py @@ -4,21 +4,29 @@ # License, v. 2.0. If a copy of the MPL was not distributed with this file, # You can obtain one at http://mozilla.org/MPL/2.0/. +import logging import os import sys import sentry_sdk from redis import Redis from rq import Connection, Worker +from sentry_sdk.integrations.logging import LoggingIntegration from sentry_sdk.integrations.rq import RqIntegration import bugbug_http.boot from bugbug import get_bugbug_version if os.environ.get("SENTRY_DSN"): + logging_integration = LoggingIntegration( + # Default behaviour: INFO messages will be included as breadcrumbs + level=logging.INFO, + # Change default behaviour (ERROR messages events) + event_level=logging.WARNING, + ) sentry_sdk.init( - os.environ.get("SENTRY_DSN"), - integrations=[RqIntegration()], + dsn=os.environ.get("SENTRY_DSN"), + integrations=[RqIntegration(), logging_integration], release=get_bugbug_version(), )
{"golden_diff": "diff --git a/http_service/bugbug_http/worker.py b/http_service/bugbug_http/worker.py\n--- a/http_service/bugbug_http/worker.py\n+++ b/http_service/bugbug_http/worker.py\n@@ -4,21 +4,29 @@\n # License, v. 2.0. If a copy of the MPL was not distributed with this file,\n # You can obtain one at http://mozilla.org/MPL/2.0/.\n \n+import logging\n import os\n import sys\n \n import sentry_sdk\n from redis import Redis\n from rq import Connection, Worker\n+from sentry_sdk.integrations.logging import LoggingIntegration\n from sentry_sdk.integrations.rq import RqIntegration\n \n import bugbug_http.boot\n from bugbug import get_bugbug_version\n \n if os.environ.get(\"SENTRY_DSN\"):\n+ logging_integration = LoggingIntegration(\n+ # Default behaviour: INFO messages will be included as breadcrumbs\n+ level=logging.INFO,\n+ # Change default behaviour (ERROR messages events)\n+ event_level=logging.WARNING,\n+ )\n sentry_sdk.init(\n- os.environ.get(\"SENTRY_DSN\"),\n- integrations=[RqIntegration()],\n+ dsn=os.environ.get(\"SENTRY_DSN\"),\n+ integrations=[RqIntegration(), logging_integration],\n release=get_bugbug_version(),\n )\n", "issue": "Submit logged warnings to Sentry\nCurrently, we only report exceptions to Sentry.\r\nIt'd be nice to report warnings too, so we can get a sense of how often they happen.\r\nIIRC in the code-review bot (https://github.com/mozilla/code-review), we are doing that kind of automatically.\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport os\nimport sys\n\nimport sentry_sdk\nfrom redis import Redis\nfrom rq import Connection, Worker\nfrom sentry_sdk.integrations.rq import RqIntegration\n\nimport bugbug_http.boot\nfrom bugbug import get_bugbug_version\n\nif os.environ.get(\"SENTRY_DSN\"):\n sentry_sdk.init(\n os.environ.get(\"SENTRY_DSN\"),\n integrations=[RqIntegration()],\n release=get_bugbug_version(),\n )\n\n\ndef main():\n # Bootstrap the worker assets\n bugbug_http.boot.boot_worker()\n\n # Provide queue names to listen to as arguments to this script,\n # similar to rq worker\n redis_url = os.environ.get(\"REDIS_URL\", \"redis://localhost/0\")\n redis_conn = Redis.from_url(redis_url)\n with Connection(connection=redis_conn):\n qs = sys.argv[1:] or [\"default\"]\n\n w = Worker(qs)\n w.work()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "http_service/bugbug_http/worker.py"}]}
964
288
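The change above routes Python logging into Sentry so that warnings are reported as events rather than only unhandled exceptions. A minimal sketch of that initialization, following the golden diff's use of sentry-sdk's `LoggingIntegration`:

```python
import logging
import os

import sentry_sdk
from sentry_sdk.integrations.logging import LoggingIntegration
from sentry_sdk.integrations.rq import RqIntegration

if os.environ.get("SENTRY_DSN"):
    logging_integration = LoggingIntegration(
        level=logging.INFO,           # INFO and above are kept as breadcrumbs
        event_level=logging.WARNING,  # WARNING and above become Sentry events
    )
    sentry_sdk.init(
        dsn=os.environ["SENTRY_DSN"],
        integrations=[RqIntegration(), logging_integration],
    )
```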
gh_patches_debug_10388
rasdani/github-patches
git_diff
DistrictDataLabs__yellowbrick-766
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Critical Vulnerability in np.load with NumPy v1.16 and earlier There is a critical vulnerability with NumPy v1.16 and earlier that affects the new YB datasets module: https://www.bleepingcomputer.com/news/security/numpy-is-awaiting-fix-for-critical-remote-code-execution-bug/ This does not affect any Yellowbrick user for version 0.9.1 or earlier and we will not release version 1.0 without a patch for this bug. When NumPy 1.17 is released (if it contains the fix), we will mark our minimum NumPy requirement to that version. Currently, in the `develop` branch, we do use `np.load` when [loading a numpy dataset](https://github.com/DistrictDataLabs/yellowbrick/blob/develop/yellowbrick/datasets/base.py#L195), e.g. if Pandas is not available. We should update this to `np.load(allow_pickle=False)` as per the recommendation of the post above. Note that we do [ensure data downloaded from our repository matches an expected signature](https://github.com/DistrictDataLabs/yellowbrick/blob/708274289d66d9265f7ded03e3445bc2bd70f46e/yellowbrick/datasets/download.py#L106), which minimizes but does not eliminate the risk to Yellowbrick users. Thanks @theagent for bringing this to our attention! </issue> <code> [start of yellowbrick/datasets/base.py] 1 # yellowbrick.datasets.base 2 # Loading utilities for the yellowbrick datasets. 3 # 4 # Author: Rebecca Bilbro <[email protected]> 5 # Author: Benjamin Bengfort <[email protected]> 6 # Author: Raul Peralta <[email protected]> 7 # Created: Thu Jul 26 13:53:01 2018 -0400 8 # 9 # ID: base.py [] [email protected] $ 10 11 """ 12 Loading utilities for the yellowbrick datasets. 13 """ 14 15 ########################################################################## 16 ## Imports 17 ########################################################################## 18 19 import os 20 import json 21 import numpy as np 22 23 from .download import download_data 24 from .path import find_dataset_path, dataset_exists 25 26 from yellowbrick.exceptions import DatasetsError 27 from yellowbrick.utils.decorators import memoized 28 29 try: 30 import pandas as pd 31 except ImportError: 32 pd = None 33 34 35 ########################################################################## 36 ## Dataset Object 37 ########################################################################## 38 39 class BaseDataset(object): 40 """ 41 Base functionality for Dataset and Corpus objects. 42 """ 43 44 def __init__(self, name, url=None, signature=None, data_home=None): 45 self.url = url 46 self.name = name 47 self.data_home = data_home 48 self.signature = signature 49 50 # Check if the dataset exists, and if not - download it! 51 if not dataset_exists(self.name, data_home=data_home): 52 self.download() 53 54 def download(self, replace=False): 55 """ 56 Download the dataset from the hosted Yellowbrick data store and save 57 it to the location specified by ``get_data_home``. The downloader 58 verifies the download completed successfully and safely by comparing 59 the expected signature with the SHA 256 signature of the downloaded 60 archive file. 61 62 Parameters 63 ---------- 64 replace : bool, default: False 65 If the data archive already exists, replace the dataset. If this is 66 False and the dataset exists, an exception is raised. 
67 """ 68 download_data( 69 self.url, self.signature, data_home=self.data_home, 70 replace=replace, extract=True 71 ) 72 73 def contents(self): 74 """ 75 Contents returns a list of the files in the data directory. 76 """ 77 data = find_dataset_path( 78 self.name, data_home=self.data_home, ext=None 79 ) 80 return os.listdir(data) 81 82 @memoized 83 def README(self): 84 """ 85 Returns the contents of the README.md file that describes the dataset 86 in detail and contains attribution information. 87 """ 88 path = find_dataset_path( 89 self.name, data_home=self.data_home, fname="README.md" 90 ) 91 with open(path, 'r') as f: 92 return f.read() 93 94 @memoized 95 def meta(self): 96 """ 97 Returns the contents of the meta.json file that describes important 98 attributes about the dataset and modifies the behavior of the loader. 99 """ 100 path = find_dataset_path( 101 self.name, data_home=self.data_home, fname="meta.json", raises=False 102 ) 103 if path is None: 104 return None 105 106 with open(path, 'r') as f: 107 return json.load(f) 108 109 @memoized 110 def citation(self): 111 """ 112 Returns the contents of the citation.bib file that describes the source 113 and provenance of the dataset or to cite for academic work. 114 """ 115 path = find_dataset_path( 116 self.name, data_home=self.data_home, fname="meta.json", raises=False 117 ) 118 if path is None: 119 return None 120 121 with open(path, 'r') as f: 122 return f.read() 123 124 125 class Dataset(BaseDataset): 126 """ 127 Datasets contain a reference to data on disk and provide utilities for 128 quickly loading files and objects into a variety of formats. The most 129 common use of the Dataset object is to load example datasets provided by 130 Yellowbrick to run the examples in the documentation. 131 132 The dataset by default will return the data as a numpy array, however if 133 Pandas is installed, it is possible to access the data as a DataFrame and 134 Series object. In either case, the data is represented by a features table, 135 X and a target vector, y. 136 137 Parameters 138 ---------- 139 name : str 140 The name of the dataset; should either be a folder in data home or 141 specified in the yellowbrick.datasets.DATASETS variable. This name is 142 used to perform all lookups and identify the dataset externally. 143 144 data_home : str, optional 145 The path on disk where data is stored. If not passed in, it is looked 146 up from YELLOWBRICK_DATA or the default returned by ``get_data_home``. 147 148 url : str, optional 149 The web location where the archive file of the dataset can be 150 downloaded from. 151 152 signature : str, optional 153 The signature of the data archive file, used to verify that the latest 154 version of the data has been downloaded and that the download hasn't 155 been corrupted or modified in anyway. 156 """ 157 158 def to_data(self): 159 """ 160 Returns the data contained in the dataset as X and y where X is the 161 features matrix and y is the target vector. If pandas is installed, 162 the data will be returned as DataFrame and Series objects. Otherwise, 163 the data will be returned as two numpy arrays. 164 165 Returns 166 ------- 167 X : array-like with shape (n_instances, n_features) 168 A pandas DataFrame or numpy array describing the instance features. 169 170 y : array-like with shape (n_instances,) 171 A pandas Series or numpy array describing the target vector. 
172 """ 173 if pd is not None: 174 return self.to_pandas() 175 return self.to_numpy() 176 177 def to_numpy(self): 178 """ 179 Returns the dataset as two numpy arrays: X and y. 180 181 Returns 182 ------- 183 X : array-like with shape (n_instances, n_features) 184 A numpy array describing the instance features. 185 186 y : array-like with shape (n_instances,) 187 A numpy array describing the target vector. 188 """ 189 path = find_dataset_path(self.name, ext=".npz", data_home=self.data_home) 190 with np.load(path) as npf: 191 if "X" not in npf or "y" not in npf: 192 raise DatasetsError(( 193 "the downloaded dataset was improperly packaged without numpy arrays " 194 "- please report this bug to the Yellowbrick maintainers!" 195 )) 196 197 # TODO: How to handle the case where y is None? 198 return npf["X"], npf["y"] 199 200 def to_pandas(self): 201 """ 202 Returns the dataset as two pandas objects: X and y. 203 204 Returns 205 ------- 206 X : DataFrame with shape (n_instances, n_features) 207 A pandas DataFrame containing feature data and named columns. 208 209 y : Series with shape (n_instances,) 210 A pandas Series containing target data and an index that matches 211 the feature DataFrame index. 212 """ 213 # Ensure the metadata is valid before continuing 214 if self.meta is None: 215 raise DatasetsError(( 216 "the downloaded dataset was improperly packaged without meta.json " 217 "- please report this bug to the Yellowbrick maintainers!" 218 )) 219 220 if "features" not in self.meta or "target" not in self.meta: 221 raise DatasetsError(( 222 "the downloaded dataset was improperly packaged without features " 223 "or target - please report this bug to the Yellowbrick maintainers!" 224 )) 225 226 # Load data frame and return features and target 227 # TODO: Return y as None if there is no self.meta["target"] 228 df = self.to_dataframe() 229 return df[self.meta["features"]], df[self.meta["target"]] 230 231 232 def to_dataframe(self): 233 """ 234 Returns the entire dataset as a single pandas DataFrame. 235 236 Returns 237 ------- 238 df : DataFrame with shape (n_instances, n_columns) 239 A pandas DataFrame containing the complete original data table 240 including all targets (specified by the meta data) and all 241 features (including those that might have been filtered out). 242 """ 243 if pd is None: 244 raise DatasetsError( 245 "pandas is required to load DataFrame, it can be installed with pip" 246 ) 247 248 path = find_dataset_path(self.name, ext=".csv.gz", data_home=self.data_home) 249 return pd.read_csv(path, compression="gzip") 250 251 252 class Corpus(BaseDataset): 253 """ 254 Corpus datasets contain a reference to documents on disk and provide 255 utilities for quickly loading text data for use in machine learning 256 workflows. The most common use of the corpus is to load the text analysis 257 examples from the Yellowbrick documentation. 258 259 Parameters 260 ---------- 261 name : str 262 The name of the corpus; should either be a folder in data home or 263 specified in the yellowbrick.datasets.DATASETS variable. This name is 264 used to perform all lookups and identify the corpus externally. 265 266 data_home : str, optional 267 The path on disk where data is stored. If not passed in, it is looked 268 up from YELLOWBRICK_DATA or the default returned by ``get_data_home``. 269 270 url : str, optional 271 The web location where the archive file of the corpus can be 272 downloaded from. 
273 274 signature : str, optional 275 The signature of the data archive file, used to verify that the latest 276 version of the data has been downloaded and that the download hasn't 277 been corrupted or modified in anyway. 278 """ 279 280 @memoized 281 def root(self): 282 """ 283 Discovers and caches the root directory of the corpus. 284 """ 285 return find_dataset_path(self.name, data_home=self.data_home, ext=None) 286 287 @memoized 288 def labels(self): 289 """ 290 Return the unique labels assigned to the documents. 291 """ 292 return [ 293 name for name in os.listdir(self.root) 294 if os.path.isdir(os.path.join(self.root, name)) 295 ] 296 297 @property 298 def files(self): 299 """ 300 Returns the list of file names for all documents. 301 """ 302 return [ 303 os.path.join(self.root, label, name) 304 for label in self.labels 305 for name in os.listdir(os.path.join(self.root, label)) 306 ] 307 308 @property 309 def data(self): 310 """ 311 Read all of the documents from disk into an in-memory list. 312 """ 313 def read(path): 314 with open(path, 'r', encoding='UTF-8') as f: 315 return f.read() 316 317 return [ 318 read(f) for f in self.files 319 ] 320 321 @property 322 def target(self): 323 """ 324 Returns the label associated with each item in data. 325 """ 326 return [ 327 os.path.basename(os.path.dirname(f)) for f in self.files 328 ] 329 [end of yellowbrick/datasets/base.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/yellowbrick/datasets/base.py b/yellowbrick/datasets/base.py --- a/yellowbrick/datasets/base.py +++ b/yellowbrick/datasets/base.py @@ -187,7 +187,7 @@ A numpy array describing the target vector. """ path = find_dataset_path(self.name, ext=".npz", data_home=self.data_home) - with np.load(path) as npf: + with np.load(path, allow_pickle=False) as npf: if "X" not in npf or "y" not in npf: raise DatasetsError(( "the downloaded dataset was improperly packaged without numpy arrays "
{"golden_diff": "diff --git a/yellowbrick/datasets/base.py b/yellowbrick/datasets/base.py\n--- a/yellowbrick/datasets/base.py\n+++ b/yellowbrick/datasets/base.py\n@@ -187,7 +187,7 @@\n A numpy array describing the target vector.\n \"\"\"\n path = find_dataset_path(self.name, ext=\".npz\", data_home=self.data_home)\n- with np.load(path) as npf:\n+ with np.load(path, allow_pickle=False) as npf:\n if \"X\" not in npf or \"y\" not in npf:\n raise DatasetsError((\n \"the downloaded dataset was improperly packaged without numpy arrays \"\n", "issue": "Critical Vulnerability in np.load with NumPy v1.16 and earlier\nThere is a critical vulnerability with NumPy v1.16 and earlier that affects the new YB datasets module:\r\n\r\nhttps://www.bleepingcomputer.com/news/security/numpy-is-awaiting-fix-for-critical-remote-code-execution-bug/\r\n\r\nThis does not affect any Yellowbrick user for version 0.9.1 or earlier and we will not release version 1.0 without a patch for this bug. When NumPy 1.17 is released (if it contains the fix), we will mark our minimum NumPy requirement to that version. \r\n\r\nCurrently, in the `develop` branch, we do use `np.load` when [loading a numpy dataset](https://github.com/DistrictDataLabs/yellowbrick/blob/develop/yellowbrick/datasets/base.py#L195), e.g. if Pandas is not available. We should update this to `np.load(allow_pickle=False)` as per the recommendation of the post above. Note that we do [ensure data downloaded from our repository matches an expected signature](https://github.com/DistrictDataLabs/yellowbrick/blob/708274289d66d9265f7ded03e3445bc2bd70f46e/yellowbrick/datasets/download.py#L106), which minimizes but does not eliminate the risk to Yellowbrick users. \r\n\r\nThanks @theagent for bringing this to our attention!\n", "before_files": [{"content": "# yellowbrick.datasets.base\n# Loading utilities for the yellowbrick datasets.\n#\n# Author: Rebecca Bilbro <[email protected]>\n# Author: Benjamin Bengfort <[email protected]>\n# Author: Raul Peralta <[email protected]>\n# Created: Thu Jul 26 13:53:01 2018 -0400\n#\n# ID: base.py [] [email protected] $\n\n\"\"\"\nLoading utilities for the yellowbrick datasets.\n\"\"\"\n\n##########################################################################\n## Imports\n##########################################################################\n\nimport os\nimport json\nimport numpy as np\n\nfrom .download import download_data\nfrom .path import find_dataset_path, dataset_exists\n\nfrom yellowbrick.exceptions import DatasetsError\nfrom yellowbrick.utils.decorators import memoized\n\ntry:\n import pandas as pd\nexcept ImportError:\n pd = None\n\n\n##########################################################################\n## Dataset Object\n##########################################################################\n\nclass BaseDataset(object):\n \"\"\"\n Base functionality for Dataset and Corpus objects.\n \"\"\"\n\n def __init__(self, name, url=None, signature=None, data_home=None):\n self.url = url\n self.name = name\n self.data_home = data_home\n self.signature = signature\n\n # Check if the dataset exists, and if not - download it!\n if not dataset_exists(self.name, data_home=data_home):\n self.download()\n\n def download(self, replace=False):\n \"\"\"\n Download the dataset from the hosted Yellowbrick data store and save\n it to the location specified by ``get_data_home``. 
The downloader\n verifies the download completed successfully and safely by comparing\n the expected signature with the SHA 256 signature of the downloaded\n archive file.\n\n Parameters\n ----------\n replace : bool, default: False\n If the data archive already exists, replace the dataset. If this is\n False and the dataset exists, an exception is raised.\n \"\"\"\n download_data(\n self.url, self.signature, data_home=self.data_home,\n replace=replace, extract=True\n )\n\n def contents(self):\n \"\"\"\n Contents returns a list of the files in the data directory.\n \"\"\"\n data = find_dataset_path(\n self.name, data_home=self.data_home, ext=None\n )\n return os.listdir(data)\n\n @memoized\n def README(self):\n \"\"\"\n Returns the contents of the README.md file that describes the dataset\n in detail and contains attribution information.\n \"\"\"\n path = find_dataset_path(\n self.name, data_home=self.data_home, fname=\"README.md\"\n )\n with open(path, 'r') as f:\n return f.read()\n\n @memoized\n def meta(self):\n \"\"\"\n Returns the contents of the meta.json file that describes important\n attributes about the dataset and modifies the behavior of the loader.\n \"\"\"\n path = find_dataset_path(\n self.name, data_home=self.data_home, fname=\"meta.json\", raises=False\n )\n if path is None:\n return None\n\n with open(path, 'r') as f:\n return json.load(f)\n\n @memoized\n def citation(self):\n \"\"\"\n Returns the contents of the citation.bib file that describes the source\n and provenance of the dataset or to cite for academic work.\n \"\"\"\n path = find_dataset_path(\n self.name, data_home=self.data_home, fname=\"meta.json\", raises=False\n )\n if path is None:\n return None\n\n with open(path, 'r') as f:\n return f.read()\n\n\nclass Dataset(BaseDataset):\n \"\"\"\n Datasets contain a reference to data on disk and provide utilities for\n quickly loading files and objects into a variety of formats. The most\n common use of the Dataset object is to load example datasets provided by\n Yellowbrick to run the examples in the documentation.\n\n The dataset by default will return the data as a numpy array, however if\n Pandas is installed, it is possible to access the data as a DataFrame and\n Series object. In either case, the data is represented by a features table,\n X and a target vector, y.\n\n Parameters\n ----------\n name : str\n The name of the dataset; should either be a folder in data home or\n specified in the yellowbrick.datasets.DATASETS variable. This name is\n used to perform all lookups and identify the dataset externally.\n\n data_home : str, optional\n The path on disk where data is stored. If not passed in, it is looked\n up from YELLOWBRICK_DATA or the default returned by ``get_data_home``.\n\n url : str, optional\n The web location where the archive file of the dataset can be\n downloaded from.\n\n signature : str, optional\n The signature of the data archive file, used to verify that the latest\n version of the data has been downloaded and that the download hasn't\n been corrupted or modified in anyway.\n \"\"\"\n\n def to_data(self):\n \"\"\"\n Returns the data contained in the dataset as X and y where X is the\n features matrix and y is the target vector. If pandas is installed,\n the data will be returned as DataFrame and Series objects. 
Otherwise,\n the data will be returned as two numpy arrays.\n\n Returns\n -------\n X : array-like with shape (n_instances, n_features)\n A pandas DataFrame or numpy array describing the instance features.\n\n y : array-like with shape (n_instances,)\n A pandas Series or numpy array describing the target vector.\n \"\"\"\n if pd is not None:\n return self.to_pandas()\n return self.to_numpy()\n\n def to_numpy(self):\n \"\"\"\n Returns the dataset as two numpy arrays: X and y.\n\n Returns\n -------\n X : array-like with shape (n_instances, n_features)\n A numpy array describing the instance features.\n\n y : array-like with shape (n_instances,)\n A numpy array describing the target vector.\n \"\"\"\n path = find_dataset_path(self.name, ext=\".npz\", data_home=self.data_home)\n with np.load(path) as npf:\n if \"X\" not in npf or \"y\" not in npf:\n raise DatasetsError((\n \"the downloaded dataset was improperly packaged without numpy arrays \"\n \"- please report this bug to the Yellowbrick maintainers!\"\n ))\n\n # TODO: How to handle the case where y is None?\n return npf[\"X\"], npf[\"y\"]\n\n def to_pandas(self):\n \"\"\"\n Returns the dataset as two pandas objects: X and y.\n\n Returns\n -------\n X : DataFrame with shape (n_instances, n_features)\n A pandas DataFrame containing feature data and named columns.\n\n y : Series with shape (n_instances,)\n A pandas Series containing target data and an index that matches\n the feature DataFrame index.\n \"\"\"\n # Ensure the metadata is valid before continuing\n if self.meta is None:\n raise DatasetsError((\n \"the downloaded dataset was improperly packaged without meta.json \"\n \"- please report this bug to the Yellowbrick maintainers!\"\n ))\n\n if \"features\" not in self.meta or \"target\" not in self.meta:\n raise DatasetsError((\n \"the downloaded dataset was improperly packaged without features \"\n \"or target - please report this bug to the Yellowbrick maintainers!\"\n ))\n\n # Load data frame and return features and target\n # TODO: Return y as None if there is no self.meta[\"target\"]\n df = self.to_dataframe()\n return df[self.meta[\"features\"]], df[self.meta[\"target\"]]\n\n\n def to_dataframe(self):\n \"\"\"\n Returns the entire dataset as a single pandas DataFrame.\n\n Returns\n -------\n df : DataFrame with shape (n_instances, n_columns)\n A pandas DataFrame containing the complete original data table\n including all targets (specified by the meta data) and all\n features (including those that might have been filtered out).\n \"\"\"\n if pd is None:\n raise DatasetsError(\n \"pandas is required to load DataFrame, it can be installed with pip\"\n )\n\n path = find_dataset_path(self.name, ext=\".csv.gz\", data_home=self.data_home)\n return pd.read_csv(path, compression=\"gzip\")\n\n\nclass Corpus(BaseDataset):\n \"\"\"\n Corpus datasets contain a reference to documents on disk and provide\n utilities for quickly loading text data for use in machine learning\n workflows. The most common use of the corpus is to load the text analysis\n examples from the Yellowbrick documentation.\n\n Parameters\n ----------\n name : str\n The name of the corpus; should either be a folder in data home or\n specified in the yellowbrick.datasets.DATASETS variable. This name is\n used to perform all lookups and identify the corpus externally.\n\n data_home : str, optional\n The path on disk where data is stored. 
If not passed in, it is looked\n up from YELLOWBRICK_DATA or the default returned by ``get_data_home``.\n\n url : str, optional\n The web location where the archive file of the corpus can be\n downloaded from.\n\n signature : str, optional\n The signature of the data archive file, used to verify that the latest\n version of the data has been downloaded and that the download hasn't\n been corrupted or modified in anyway.\n \"\"\"\n\n @memoized\n def root(self):\n \"\"\"\n Discovers and caches the root directory of the corpus.\n \"\"\"\n return find_dataset_path(self.name, data_home=self.data_home, ext=None)\n\n @memoized\n def labels(self):\n \"\"\"\n Return the unique labels assigned to the documents.\n \"\"\"\n return [\n name for name in os.listdir(self.root)\n if os.path.isdir(os.path.join(self.root, name))\n ]\n\n @property\n def files(self):\n \"\"\"\n Returns the list of file names for all documents.\n \"\"\"\n return [\n os.path.join(self.root, label, name)\n for label in self.labels\n for name in os.listdir(os.path.join(self.root, label))\n ]\n\n @property\n def data(self):\n \"\"\"\n Read all of the documents from disk into an in-memory list.\n \"\"\"\n def read(path):\n with open(path, 'r', encoding='UTF-8') as f:\n return f.read()\n\n return [\n read(f) for f in self.files\n ]\n\n @property\n def target(self):\n \"\"\"\n Returns the label associated with each item in data.\n \"\"\"\n return [\n os.path.basename(os.path.dirname(f)) for f in self.files\n ]\n", "path": "yellowbrick/datasets/base.py"}]}
4036
147
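The one-line fix above opts out of pickle deserialization when the cached `.npz` archive is read, closing the remote-code-execution vector described in the issue. A minimal sketch of the safer loader, following the patched `to_numpy`; the standalone function name and error type here are illustrative, not Yellowbrick's API:

```python
import numpy as np

def load_xy(path):
    # allow_pickle=False refuses pickled object arrays in the archive,
    # so a tampered download cannot execute code on load.
    with np.load(path, allow_pickle=False) as npf:
        if "X" not in npf or "y" not in npf:
            raise ValueError("dataset archive is missing the X/y arrays")
        return npf["X"], npf["y"]
```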
gh_patches_debug_31438
rasdani/github-patches
git_diff
pyodide__pyodide-2507
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> pyodide_build buildpkg does not install Cython as a build dependency when it is spell with a lower case c ## 🐛 Bug When trying to build [cftime](https://github.com/Unidata/cftime) the isolated env does not install cython. ### To Reproduce `python -m pyodide_build buildpkg packages/cftime/meta.yaml` on [this meta.yaml](https://gist.github.com/ocefpaf/8b9a90bfa40d7dc27c63e3bf22ef335a) ### Expected behavior Successful build :smile: ### Environment - Pyodide Version<!-- (e.g. 1.8.1) -->: - Browser version<!-- (e.g. Chrome 95.0.4638.54) -->: - Any other relevant information: ### Additional context A patch to rename `cython` to `Cython` in the cftime pyproject.toml fixed it but we should not be case sensitive with PyPI names. xref.: https://github.com/pyodide/pyodide/pull/2504 </issue> <code> [start of pyodide-build/pyodide_build/pypabuild.py] 1 import contextlib 2 import os 3 import sys 4 import traceback 5 from itertools import chain 6 from pathlib import Path 7 from typing import Mapping 8 9 from build import BuildBackendException, ProjectBuilder # type: ignore[import] 10 from build.__main__ import ( # type: ignore[import] 11 _STYLES, 12 _error, 13 _handle_build_error, 14 _IsolatedEnvBuilder, 15 _ProjectBuilder, 16 ) 17 from build.env import IsolatedEnv # type: ignore[import] 18 from packaging.requirements import Requirement 19 20 from .common import get_hostsitepackages, get_pyversion 21 22 UNISOLATED_PACKAGES = ["numpy", "scipy", "cffi", "pycparser", "pythran", "cython"] 23 24 25 def symlink_unisolated_packages(env: IsolatedEnv): 26 pyversion = get_pyversion() 27 site_packages_path = f"lib/{pyversion}/site-packages" 28 env_site_packages = Path(env._path) / site_packages_path 29 host_site_packages = Path(get_hostsitepackages()) 30 for name in UNISOLATED_PACKAGES: 31 for path in chain( 32 host_site_packages.glob(f"{name}*"), host_site_packages.glob(f"_{name}*") 33 ): 34 (env_site_packages / path.name).unlink(missing_ok=True) 35 (env_site_packages / path.name).symlink_to(path) 36 37 38 def remove_unisolated_requirements(requires: set[str]) -> set[str]: 39 for reqstr in list(requires): 40 req = Requirement(reqstr) 41 for avoid_name in UNISOLATED_PACKAGES: 42 if avoid_name in req.name: 43 requires.remove(reqstr) 44 return requires 45 46 47 @contextlib.contextmanager 48 def replace_env(build_env: Mapping[str, str]): 49 old_environ = dict(os.environ) 50 os.environ.clear() 51 os.environ.update(build_env) 52 try: 53 yield 54 finally: 55 os.environ.clear() 56 os.environ.update(old_environ) 57 58 59 def install_reqs(env: IsolatedEnv, reqs: set[str]): 60 env.install(remove_unisolated_requirements(reqs)) 61 62 63 def _build_in_isolated_env( 64 build_env: Mapping[str, str], 65 builder: ProjectBuilder, 66 outdir: str, 67 distribution: str, 68 ) -> str: 69 with _IsolatedEnvBuilder() as env: 70 builder.python_executable = env.executable 71 builder.scripts_dir = env.scripts_dir 72 # first install the build dependencies 73 symlink_unisolated_packages(env) 74 install_reqs(env, builder.build_system_requires) 75 installed_requires_for_build = False 76 try: 77 build_reqs = builder.get_requires_for_build(distribution) 78 except BuildBackendException: 79 pass 80 else: 81 install_reqs(env, build_reqs) 82 installed_requires_for_build = True 83 84 with replace_env(build_env): 85 if not installed_requires_for_build: 86 install_reqs(env, builder.get_requires_for_build(distribution)) 87 return 
builder.build(distribution, outdir, {}) 88 89 90 def build(build_env: Mapping[str, str]): 91 srcdir = Path.cwd() 92 outdir = srcdir / "dist" 93 builder = _ProjectBuilder(srcdir) 94 distribution = "wheel" 95 try: 96 with _handle_build_error(): 97 built = _build_in_isolated_env( 98 build_env, builder, str(outdir), distribution 99 ) 100 print("{bold}{green}Successfully built {}{reset}".format(built, **_STYLES)) 101 except Exception as e: # pragma: no cover 102 tb = traceback.format_exc().strip("\n") 103 print("\n{dim}{}{reset}\n".format(tb, **_STYLES)) 104 _error(str(e)) 105 sys.exit(1) 106 [end of pyodide-build/pyodide_build/pypabuild.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pyodide-build/pyodide_build/pypabuild.py b/pyodide-build/pyodide_build/pypabuild.py --- a/pyodide-build/pyodide_build/pypabuild.py +++ b/pyodide-build/pyodide_build/pypabuild.py @@ -19,7 +19,7 @@ from .common import get_hostsitepackages, get_pyversion -UNISOLATED_PACKAGES = ["numpy", "scipy", "cffi", "pycparser", "pythran", "cython"] +UNISOLATED_PACKAGES = ["numpy", "scipy", "cffi", "pycparser", "pythran"] def symlink_unisolated_packages(env: IsolatedEnv): @@ -39,7 +39,7 @@ for reqstr in list(requires): req = Requirement(reqstr) for avoid_name in UNISOLATED_PACKAGES: - if avoid_name in req.name: + if avoid_name in req.name.lower(): requires.remove(reqstr) return requires @@ -58,6 +58,11 @@ def install_reqs(env: IsolatedEnv, reqs: set[str]): env.install(remove_unisolated_requirements(reqs)) + # Some packages (numcodecs) don't declare cython as a build dependency and + # only recythonize if it is present. We need them to always recythonize so + # we always install cython. If the reqs included some cython version already + # then this won't do anything. + env.install(["cython"]) def _build_in_isolated_env( @@ -66,6 +71,10 @@ outdir: str, distribution: str, ) -> str: + # For debugging: The following line disables removal of the isolated venv. + # It will be left in the /tmp folder and can be inspected or entered as + # needed. + # _IsolatedEnvBuilder.__exit__ = lambda *args: None with _IsolatedEnvBuilder() as env: builder.python_executable = env.executable builder.scripts_dir = env.scripts_dir
{"golden_diff": "diff --git a/pyodide-build/pyodide_build/pypabuild.py b/pyodide-build/pyodide_build/pypabuild.py\n--- a/pyodide-build/pyodide_build/pypabuild.py\n+++ b/pyodide-build/pyodide_build/pypabuild.py\n@@ -19,7 +19,7 @@\n \n from .common import get_hostsitepackages, get_pyversion\n \n-UNISOLATED_PACKAGES = [\"numpy\", \"scipy\", \"cffi\", \"pycparser\", \"pythran\", \"cython\"]\n+UNISOLATED_PACKAGES = [\"numpy\", \"scipy\", \"cffi\", \"pycparser\", \"pythran\"]\n \n \n def symlink_unisolated_packages(env: IsolatedEnv):\n@@ -39,7 +39,7 @@\n for reqstr in list(requires):\n req = Requirement(reqstr)\n for avoid_name in UNISOLATED_PACKAGES:\n- if avoid_name in req.name:\n+ if avoid_name in req.name.lower():\n requires.remove(reqstr)\n return requires\n \n@@ -58,6 +58,11 @@\n \n def install_reqs(env: IsolatedEnv, reqs: set[str]):\n env.install(remove_unisolated_requirements(reqs))\n+ # Some packages (numcodecs) don't declare cython as a build dependency and\n+ # only recythonize if it is present. We need them to always recythonize so\n+ # we always install cython. If the reqs included some cython version already\n+ # then this won't do anything.\n+ env.install([\"cython\"])\n \n \n def _build_in_isolated_env(\n@@ -66,6 +71,10 @@\n outdir: str,\n distribution: str,\n ) -> str:\n+ # For debugging: The following line disables removal of the isolated venv.\n+ # It will be left in the /tmp folder and can be inspected or entered as\n+ # needed.\n+ # _IsolatedEnvBuilder.__exit__ = lambda *args: None\n with _IsolatedEnvBuilder() as env:\n builder.python_executable = env.executable\n builder.scripts_dir = env.scripts_dir\n", "issue": "pyodide_build buildpkg does not install Cython as a build dependency when it is spell with a lower case c\n## \ud83d\udc1b Bug\r\n\r\nWhen trying to build [cftime](https://github.com/Unidata/cftime) the isolated env does not install cython.\r\n\r\n### To Reproduce\r\n\r\n`python -m pyodide_build buildpkg packages/cftime/meta.yaml` on [this meta.yaml](https://gist.github.com/ocefpaf/8b9a90bfa40d7dc27c63e3bf22ef335a)\r\n\r\n### Expected behavior\r\n\r\nSuccessful build :smile: \r\n\r\n### Environment\r\n\r\n- Pyodide Version<!-- (e.g. 1.8.1) -->:\r\n- Browser version<!-- (e.g. 
Chrome 95.0.4638.54) -->:\r\n- Any other relevant information:\r\n\r\n\r\n### Additional context\r\n\r\nA patch to rename `cython` to `Cython` in the cftime pyproject.toml fixed it but we should not be case sensitive with PyPI names.\r\n\r\nxref.: https://github.com/pyodide/pyodide/pull/2504\n", "before_files": [{"content": "import contextlib\nimport os\nimport sys\nimport traceback\nfrom itertools import chain\nfrom pathlib import Path\nfrom typing import Mapping\n\nfrom build import BuildBackendException, ProjectBuilder # type: ignore[import]\nfrom build.__main__ import ( # type: ignore[import]\n _STYLES,\n _error,\n _handle_build_error,\n _IsolatedEnvBuilder,\n _ProjectBuilder,\n)\nfrom build.env import IsolatedEnv # type: ignore[import]\nfrom packaging.requirements import Requirement\n\nfrom .common import get_hostsitepackages, get_pyversion\n\nUNISOLATED_PACKAGES = [\"numpy\", \"scipy\", \"cffi\", \"pycparser\", \"pythran\", \"cython\"]\n\n\ndef symlink_unisolated_packages(env: IsolatedEnv):\n pyversion = get_pyversion()\n site_packages_path = f\"lib/{pyversion}/site-packages\"\n env_site_packages = Path(env._path) / site_packages_path\n host_site_packages = Path(get_hostsitepackages())\n for name in UNISOLATED_PACKAGES:\n for path in chain(\n host_site_packages.glob(f\"{name}*\"), host_site_packages.glob(f\"_{name}*\")\n ):\n (env_site_packages / path.name).unlink(missing_ok=True)\n (env_site_packages / path.name).symlink_to(path)\n\n\ndef remove_unisolated_requirements(requires: set[str]) -> set[str]:\n for reqstr in list(requires):\n req = Requirement(reqstr)\n for avoid_name in UNISOLATED_PACKAGES:\n if avoid_name in req.name:\n requires.remove(reqstr)\n return requires\n\n\[email protected]\ndef replace_env(build_env: Mapping[str, str]):\n old_environ = dict(os.environ)\n os.environ.clear()\n os.environ.update(build_env)\n try:\n yield\n finally:\n os.environ.clear()\n os.environ.update(old_environ)\n\n\ndef install_reqs(env: IsolatedEnv, reqs: set[str]):\n env.install(remove_unisolated_requirements(reqs))\n\n\ndef _build_in_isolated_env(\n build_env: Mapping[str, str],\n builder: ProjectBuilder,\n outdir: str,\n distribution: str,\n) -> str:\n with _IsolatedEnvBuilder() as env:\n builder.python_executable = env.executable\n builder.scripts_dir = env.scripts_dir\n # first install the build dependencies\n symlink_unisolated_packages(env)\n install_reqs(env, builder.build_system_requires)\n installed_requires_for_build = False\n try:\n build_reqs = builder.get_requires_for_build(distribution)\n except BuildBackendException:\n pass\n else:\n install_reqs(env, build_reqs)\n installed_requires_for_build = True\n\n with replace_env(build_env):\n if not installed_requires_for_build:\n install_reqs(env, builder.get_requires_for_build(distribution))\n return builder.build(distribution, outdir, {})\n\n\ndef build(build_env: Mapping[str, str]):\n srcdir = Path.cwd()\n outdir = srcdir / \"dist\"\n builder = _ProjectBuilder(srcdir)\n distribution = \"wheel\"\n try:\n with _handle_build_error():\n built = _build_in_isolated_env(\n build_env, builder, str(outdir), distribution\n )\n print(\"{bold}{green}Successfully built {}{reset}\".format(built, **_STYLES))\n except Exception as e: # pragma: no cover\n tb = traceback.format_exc().strip(\"\\n\")\n print(\"\\n{dim}{}{reset}\\n\".format(tb, **_STYLES))\n _error(str(e))\n sys.exit(1)\n", "path": "pyodide-build/pyodide_build/pypabuild.py"}]}
1,805
485
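A note on the golden diff above: the core of the fix is that PyPI project names compare case-insensitively, so the unisolated-package filter must normalise the requirement name before matching, while Cython is dropped from the unisolated list and always installed into the isolated build environment instead. The sketch below distils just that comparison; the function name mirrors the one in the diff, the package list is shortened for illustration, and only the `packaging` library is assumed to be available.

```python
# Sketch of the case-insensitive filtering introduced in the diff above.
# PyPI project names compare case-insensitively, so "Cython" must be treated
# the same as "cython" when deciding what to keep out of the isolated env.
from packaging.requirements import Requirement

UNISOLATED_PACKAGES = ["numpy", "scipy", "cffi", "pycparser", "pythran"]

def remove_unisolated_requirements(requires: set) -> set:
    for reqstr in list(requires):
        req = Requirement(reqstr)
        if any(name in req.name.lower() for name in UNISOLATED_PACKAGES):
            requires.remove(reqstr)
    return requires

# "NumPy" still matches "numpy" and is dropped (it gets symlinked in from the
# host site-packages instead); "Cython" is no longer dropped, so the isolated
# build environment installs it and builds like cftime stop failing.
print(remove_unisolated_requirements({"Cython>=0.29", "NumPy>=1.20", "wheel"}))
```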
gh_patches_debug_43929
rasdani/github-patches
git_diff
ocadotechnology__aimmo-71
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Change the way the UI receives game updates The UI currently polls the frontend server constantly for updates. Implement a solution where the _game simulation_ server pushes updates instead - e.g. using web sockets (or some other better solution that you can think of!) Possible server-side solutions: 1. client ==> proxy (on Kubernetes) ==> game simulation (on Kubernetes) Here we only need one public IP address for the proxy, no matter how many game simulation servers we spawn. 1. client ==> game simulation (on Kubernetes) We need a public IP address per game simulation. We need to investigate whether this would be a problem (e.g. can't be done dynamically, or is expensive). </issue> <code> [start of aimmo-game/simulation/turn_manager.py] 1 import logging 2 import requests 3 import threading 4 import time 5 from threading import Lock 6 from simulation import world_map 7 from simulation.action import ACTIONS 8 9 LOGGER = logging.getLogger(__name__) 10 11 12 class WorldStateProvider: 13 """ 14 Thread-safe container for the world state. 15 16 TODO: think about changing to snapshot rather than lock? 17 """ 18 19 def __init__(self): 20 self._world_state = None 21 self._lock = Lock() 22 23 def lock_and_get_world(self): 24 self._lock.acquire() 25 return self._world_state 26 27 def release_lock(self): 28 self._lock.release() 29 30 def set_world(self, new_world_state): 31 self._lock.acquire() 32 self._world_state = new_world_state 33 self._lock.release() 34 35 world_state_provider = WorldStateProvider() 36 37 38 class TurnManager(threading.Thread): 39 """ 40 Game loop 41 """ 42 daemon = True 43 44 def __init__(self, game_state): 45 world_state_provider.set_world(game_state) 46 super(TurnManager, self).__init__() 47 48 def _update_environment(self, game_state): 49 num_avatars = len(game_state.avatar_manager.active_avatars) 50 game_state.world_map.reconstruct_interactive_state(num_avatars) 51 52 def run_turn(self): 53 try: 54 game_state = world_state_provider.lock_and_get_world() 55 56 for avatar in game_state.avatar_manager.active_avatars: 57 turn_state = game_state.get_state_for(avatar) 58 try: 59 data = requests.post(avatar.worker_url, json=turn_state).json() 60 except ValueError as err: 61 LOGGER.info("Failed to get turn result: %s", err) 62 else: 63 try: 64 action_data = data['action'] 65 action = ACTIONS[action_data['action_type']](**action_data.get('options', {})) 66 except (KeyError, ValueError) as err: 67 LOGGER.info("Bad action data supplied: %s", err) 68 else: 69 action.apply(game_state, avatar) 70 71 self._update_environment(game_state) 72 73 finally: 74 world_state_provider.release_lock() 75 76 def run(self): 77 while True: 78 self.run_turn() 79 time.sleep(0.5) 80 [end of aimmo-game/simulation/turn_manager.py] [start of setup.py] 1 # -*- coding: utf-8 -*- 2 from setuptools import find_packages, setup 3 4 setup(name='aimmo', 5 packages=find_packages(), 6 include_package_data=True, 7 install_requires = [ 8 'django >= 1.8.3, < 1.9.0', 9 'django-autoconfig >= 0.3.6, < 1.0.0', 10 'django-js-reverse', 11 'flask', 12 'flask-cors', 13 'requests', 14 ], 15 tests_require=[ 16 'django-setuptest', 17 ], 18 test_suite='setuptest.setuptest.SetupTestSuite', 19 version='0.0.0', 20 zip_safe=False, 21 ) 22 [end of setup.py] [start of aimmo-game/service.py] 1 #!/usr/bin/env python 2 import logging 3 4 import flask 5 from flask.ext.cors import CORS 6 7 from simulation.turn_manager import 
world_state_provider 8 from simulation import map_generator 9 from simulation.avatar.avatar_manager import AvatarManager 10 from simulation.game_state import GameState 11 from simulation.turn_manager import TurnManager 12 from simulation.worker_manager import LocalWorkerManager 13 14 app = flask.Flask(__name__) 15 CORS(app) 16 17 18 def to_cell_type(cell): 19 if not cell.habitable: 20 return 1 21 if cell.generates_score: 22 return 2 23 return 0 24 25 26 def player_dict(avatar): 27 # TODO: implement better colour functionality: will eventually fall off end of numbers 28 colour = "#%06x" % (avatar.player_id * 4999) 29 return { 30 'id': avatar.player_id, 31 'x': avatar.location.x, 32 'y': avatar.location.y, 33 'health': avatar.health, 34 'score': avatar.score, 35 'rotation': 0, 36 "colours": { 37 "bodyStroke": "#0ff", 38 "bodyFill": colour, 39 "eyeStroke": "#aff", 40 "eyeFill": "#eff", 41 } 42 } 43 44 45 @app.route('/') 46 def get_world_state(): 47 try: 48 world = world_state_provider.lock_and_get_world() 49 num_cols = len(world.world_map.grid) 50 num_rows = len(world.world_map.grid[0]) 51 grid = [[None for x in xrange(num_cols)] for y in xrange(num_rows)] 52 for cell in world.world_map.all_cells(): 53 grid[cell.location.x][cell.location.y] = to_cell_type(cell) 54 player_data = {p.player_id: player_dict(p) for p in world.avatar_manager.avatars} 55 return flask.jsonify( 56 players=player_data, 57 score_locations=[(cell.location.x, cell.location.y) for cell in world.world_map.score_cells()], 58 pickup_locations=[(cell.location.x, cell.location.y) for cell in world.world_map.pickup_cells()], 59 map_changed=True, # TODO: experiment with only sending deltas (not if not required) 60 width=num_cols, 61 height=num_rows, 62 layout=grid, 63 ) 64 finally: 65 world_state_provider.release_lock() 66 67 68 def run_game(): 69 print("Running game...") 70 my_map = map_generator.generate_map(15, 15, 0.1) 71 player_manager = AvatarManager() 72 game_state = GameState(my_map, player_manager) 73 turn_manager = TurnManager(game_state=game_state) 74 worker_manager = LocalWorkerManager(game_state=game_state, users_url='http://localhost:8000/players/api/games/') 75 worker_manager.start() 76 turn_manager.start() 77 78 79 if __name__ == '__main__': 80 logging.basicConfig(level=logging.DEBUG) 81 82 run_game() 83 app.run(debug=True, use_reloader=False) 84 [end of aimmo-game/service.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/aimmo-game/service.py b/aimmo-game/service.py --- a/aimmo-game/service.py +++ b/aimmo-game/service.py @@ -1,8 +1,11 @@ #!/usr/bin/env python import logging +import eventlet +eventlet.monkey_patch() + import flask -from flask.ext.cors import CORS +from flask.ext.socketio import SocketIO, emit from simulation.turn_manager import world_state_provider from simulation import map_generator @@ -12,7 +15,7 @@ from simulation.worker_manager import LocalWorkerManager app = flask.Flask(__name__) -CORS(app) +socketio = SocketIO(app) def to_cell_type(cell): @@ -42,7 +45,6 @@ } [email protected]('/') def get_world_state(): try: world = world_state_provider.lock_and_get_world() @@ -52,25 +54,41 @@ for cell in world.world_map.all_cells(): grid[cell.location.x][cell.location.y] = to_cell_type(cell) player_data = {p.player_id: player_dict(p) for p in world.avatar_manager.avatars} - return flask.jsonify( - players=player_data, - score_locations=[(cell.location.x, cell.location.y) for cell in world.world_map.score_cells()], - pickup_locations=[(cell.location.x, cell.location.y) for cell in world.world_map.pickup_cells()], - map_changed=True, # TODO: experiment with only sending deltas (not if not required) - width=num_cols, - height=num_rows, - layout=grid, - ) + return { + 'players': player_data, + 'score_locations': [(cell.location.x, cell.location.y) for cell in world.world_map.score_cells()], + 'pickup_locations': [(cell.location.x, cell.location.y) for cell in world.world_map.pickup_cells()], + 'map_changed': True, # TODO: experiment with only sending deltas (not if not required) + 'width': num_cols, + 'height': num_rows, + 'layout': grid, + } finally: world_state_provider.release_lock() [email protected]('connect') +def world_update_on_connect(): + emit( + 'world-update', + get_world_state(), + ) + + +def send_world_update(): + socketio.emit( + 'world-update', + get_world_state(), + broadcast=True, + ) + + def run_game(): print("Running game...") my_map = map_generator.generate_map(15, 15, 0.1) player_manager = AvatarManager() game_state = GameState(my_map, player_manager) - turn_manager = TurnManager(game_state=game_state) + turn_manager = TurnManager(game_state=game_state, end_turn_callback=send_world_update) worker_manager = LocalWorkerManager(game_state=game_state, users_url='http://localhost:8000/players/api/games/') worker_manager.start() turn_manager.start() @@ -80,4 +98,4 @@ logging.basicConfig(level=logging.DEBUG) run_game() - app.run(debug=True, use_reloader=False) + socketio.run(app, debug=True, use_reloader=False) diff --git a/aimmo-game/simulation/turn_manager.py b/aimmo-game/simulation/turn_manager.py --- a/aimmo-game/simulation/turn_manager.py +++ b/aimmo-game/simulation/turn_manager.py @@ -41,8 +41,9 @@ """ daemon = True - def __init__(self, game_state): + def __init__(self, game_state, end_turn_callback): world_state_provider.set_world(game_state) + self.end_turn_callback = end_turn_callback super(TurnManager, self).__init__() def _update_environment(self, game_state): @@ -76,4 +77,5 @@ def run(self): while True: self.run_turn() + self.end_turn_callback() time.sleep(0.5) diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -8,8 +8,9 @@ 'django >= 1.8.3, < 1.9.0', 'django-autoconfig >= 0.3.6, < 1.0.0', 'django-js-reverse', + 'eventlet', 'flask', - 'flask-cors', + 'flask-socketio', 'requests', ], tests_require=[
{"golden_diff": "diff --git a/aimmo-game/service.py b/aimmo-game/service.py\n--- a/aimmo-game/service.py\n+++ b/aimmo-game/service.py\n@@ -1,8 +1,11 @@\n #!/usr/bin/env python\n import logging\n \n+import eventlet\n+eventlet.monkey_patch()\n+\n import flask\n-from flask.ext.cors import CORS\n+from flask.ext.socketio import SocketIO, emit\n \n from simulation.turn_manager import world_state_provider\n from simulation import map_generator\n@@ -12,7 +15,7 @@\n from simulation.worker_manager import LocalWorkerManager\n \n app = flask.Flask(__name__)\n-CORS(app)\n+socketio = SocketIO(app)\n \n \n def to_cell_type(cell):\n@@ -42,7 +45,6 @@\n }\n \n \[email protected]('/')\n def get_world_state():\n try:\n world = world_state_provider.lock_and_get_world()\n@@ -52,25 +54,41 @@\n for cell in world.world_map.all_cells():\n grid[cell.location.x][cell.location.y] = to_cell_type(cell)\n player_data = {p.player_id: player_dict(p) for p in world.avatar_manager.avatars}\n- return flask.jsonify(\n- players=player_data,\n- score_locations=[(cell.location.x, cell.location.y) for cell in world.world_map.score_cells()],\n- pickup_locations=[(cell.location.x, cell.location.y) for cell in world.world_map.pickup_cells()],\n- map_changed=True, # TODO: experiment with only sending deltas (not if not required)\n- width=num_cols,\n- height=num_rows,\n- layout=grid,\n- )\n+ return {\n+ 'players': player_data,\n+ 'score_locations': [(cell.location.x, cell.location.y) for cell in world.world_map.score_cells()],\n+ 'pickup_locations': [(cell.location.x, cell.location.y) for cell in world.world_map.pickup_cells()],\n+ 'map_changed': True, # TODO: experiment with only sending deltas (not if not required)\n+ 'width': num_cols,\n+ 'height': num_rows,\n+ 'layout': grid,\n+ }\n finally:\n world_state_provider.release_lock()\n \n \[email protected]('connect')\n+def world_update_on_connect():\n+ emit(\n+ 'world-update',\n+ get_world_state(),\n+ )\n+\n+\n+def send_world_update():\n+ socketio.emit(\n+ 'world-update',\n+ get_world_state(),\n+ broadcast=True,\n+ )\n+\n+\n def run_game():\n print(\"Running game...\")\n my_map = map_generator.generate_map(15, 15, 0.1)\n player_manager = AvatarManager()\n game_state = GameState(my_map, player_manager)\n- turn_manager = TurnManager(game_state=game_state)\n+ turn_manager = TurnManager(game_state=game_state, end_turn_callback=send_world_update)\n worker_manager = LocalWorkerManager(game_state=game_state, users_url='http://localhost:8000/players/api/games/')\n worker_manager.start()\n turn_manager.start()\n@@ -80,4 +98,4 @@\n logging.basicConfig(level=logging.DEBUG)\n \n run_game()\n- app.run(debug=True, use_reloader=False)\n+ socketio.run(app, debug=True, use_reloader=False)\ndiff --git a/aimmo-game/simulation/turn_manager.py b/aimmo-game/simulation/turn_manager.py\n--- a/aimmo-game/simulation/turn_manager.py\n+++ b/aimmo-game/simulation/turn_manager.py\n@@ -41,8 +41,9 @@\n \"\"\"\n daemon = True\n \n- def __init__(self, game_state):\n+ def __init__(self, game_state, end_turn_callback):\n world_state_provider.set_world(game_state)\n+ self.end_turn_callback = end_turn_callback\n super(TurnManager, self).__init__()\n \n def _update_environment(self, game_state):\n@@ -76,4 +77,5 @@\n def run(self):\n while True:\n self.run_turn()\n+ self.end_turn_callback()\n time.sleep(0.5)\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -8,8 +8,9 @@\n 'django >= 1.8.3, < 1.9.0',\n 'django-autoconfig >= 0.3.6, < 1.0.0',\n 'django-js-reverse',\n+ 'eventlet',\n 'flask',\n- 
'flask-cors',\n+ 'flask-socketio',\n 'requests',\n ],\n tests_require=[\n", "issue": "Change the way the UI receives game updates\nThe UI currently polls the frontend server constantly for updates.\n\nImplement a solution where the _game simulation_ server pushes updates instead - e.g. using web sockets (or some other better solution that you can think of!)\n\nPossible server-side solutions:\n1. client ==> proxy (on Kubernetes) ==> game simulation (on Kubernetes)\nHere we only need one public IP address for the proxy, no matter how many game simulation servers we spawn.\n1. client ==> game simulation (on Kubernetes)\n We need a public IP address per game simulation. We need to investigate whether this would be a problem (e.g. can't be done dynamically, or is expensive).\n\n", "before_files": [{"content": "import logging\nimport requests\nimport threading\nimport time\nfrom threading import Lock\nfrom simulation import world_map\nfrom simulation.action import ACTIONS\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass WorldStateProvider:\n \"\"\"\n Thread-safe container for the world state.\n\n TODO: think about changing to snapshot rather than lock?\n \"\"\"\n\n def __init__(self):\n self._world_state = None\n self._lock = Lock()\n\n def lock_and_get_world(self):\n self._lock.acquire()\n return self._world_state\n\n def release_lock(self):\n self._lock.release()\n\n def set_world(self, new_world_state):\n self._lock.acquire()\n self._world_state = new_world_state\n self._lock.release()\n\nworld_state_provider = WorldStateProvider()\n\n\nclass TurnManager(threading.Thread):\n \"\"\"\n Game loop\n \"\"\"\n daemon = True\n\n def __init__(self, game_state):\n world_state_provider.set_world(game_state)\n super(TurnManager, self).__init__()\n\n def _update_environment(self, game_state):\n num_avatars = len(game_state.avatar_manager.active_avatars)\n game_state.world_map.reconstruct_interactive_state(num_avatars)\n\n def run_turn(self):\n try:\n game_state = world_state_provider.lock_and_get_world()\n\n for avatar in game_state.avatar_manager.active_avatars:\n turn_state = game_state.get_state_for(avatar)\n try:\n data = requests.post(avatar.worker_url, json=turn_state).json()\n except ValueError as err:\n LOGGER.info(\"Failed to get turn result: %s\", err)\n else:\n try:\n action_data = data['action']\n action = ACTIONS[action_data['action_type']](**action_data.get('options', {}))\n except (KeyError, ValueError) as err:\n LOGGER.info(\"Bad action data supplied: %s\", err)\n else:\n action.apply(game_state, avatar)\n\n self._update_environment(game_state)\n\n finally:\n world_state_provider.release_lock()\n\n def run(self):\n while True:\n self.run_turn()\n time.sleep(0.5)\n", "path": "aimmo-game/simulation/turn_manager.py"}, {"content": "# -*- coding: utf-8 -*-\nfrom setuptools import find_packages, setup\n\nsetup(name='aimmo',\n packages=find_packages(),\n include_package_data=True,\n install_requires = [\n 'django >= 1.8.3, < 1.9.0',\n 'django-autoconfig >= 0.3.6, < 1.0.0',\n 'django-js-reverse',\n 'flask',\n 'flask-cors',\n 'requests',\n ],\n tests_require=[\n 'django-setuptest',\n ],\n test_suite='setuptest.setuptest.SetupTestSuite',\n version='0.0.0',\n zip_safe=False,\n)\n", "path": "setup.py"}, {"content": "#!/usr/bin/env python\nimport logging\n\nimport flask\nfrom flask.ext.cors import CORS\n\nfrom simulation.turn_manager import world_state_provider\nfrom simulation import map_generator\nfrom simulation.avatar.avatar_manager import AvatarManager\nfrom simulation.game_state import 
GameState\nfrom simulation.turn_manager import TurnManager\nfrom simulation.worker_manager import LocalWorkerManager\n\napp = flask.Flask(__name__)\nCORS(app)\n\n\ndef to_cell_type(cell):\n if not cell.habitable:\n return 1\n if cell.generates_score:\n return 2\n return 0\n\n\ndef player_dict(avatar):\n # TODO: implement better colour functionality: will eventually fall off end of numbers\n colour = \"#%06x\" % (avatar.player_id * 4999)\n return {\n 'id': avatar.player_id,\n 'x': avatar.location.x,\n 'y': avatar.location.y,\n 'health': avatar.health,\n 'score': avatar.score,\n 'rotation': 0,\n \"colours\": {\n \"bodyStroke\": \"#0ff\",\n \"bodyFill\": colour,\n \"eyeStroke\": \"#aff\",\n \"eyeFill\": \"#eff\",\n }\n }\n\n\[email protected]('/')\ndef get_world_state():\n try:\n world = world_state_provider.lock_and_get_world()\n num_cols = len(world.world_map.grid)\n num_rows = len(world.world_map.grid[0])\n grid = [[None for x in xrange(num_cols)] for y in xrange(num_rows)]\n for cell in world.world_map.all_cells():\n grid[cell.location.x][cell.location.y] = to_cell_type(cell)\n player_data = {p.player_id: player_dict(p) for p in world.avatar_manager.avatars}\n return flask.jsonify(\n players=player_data,\n score_locations=[(cell.location.x, cell.location.y) for cell in world.world_map.score_cells()],\n pickup_locations=[(cell.location.x, cell.location.y) for cell in world.world_map.pickup_cells()],\n map_changed=True, # TODO: experiment with only sending deltas (not if not required)\n width=num_cols,\n height=num_rows,\n layout=grid,\n )\n finally:\n world_state_provider.release_lock()\n\n\ndef run_game():\n print(\"Running game...\")\n my_map = map_generator.generate_map(15, 15, 0.1)\n player_manager = AvatarManager()\n game_state = GameState(my_map, player_manager)\n turn_manager = TurnManager(game_state=game_state)\n worker_manager = LocalWorkerManager(game_state=game_state, users_url='http://localhost:8000/players/api/games/')\n worker_manager.start()\n turn_manager.start()\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.DEBUG)\n\n run_game()\n app.run(debug=True, use_reloader=False)\n", "path": "aimmo-game/service.py"}]}
2,274
1,018
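A note on the golden diff above: the polling route is replaced with a websocket push — the turn loop is handed an `end_turn_callback`, and that callback broadcasts the serialised world state with Flask-SocketIO. The sketch below keeps only that wiring; it uses the current `flask_socketio` import path instead of the diff's old `flask.ext.socketio`, and `get_world_state()` is reduced to a stub.

```python
# Minimal push-based variant of the old polling endpoint: the game loop
# invokes a callback after every turn, and the callback emits over a socket.
import flask
from flask_socketio import SocketIO, emit

app = flask.Flask(__name__)
socketio = SocketIO(app)

def get_world_state():
    # Stub; the real version serialises the grid, avatars and pickups.
    return {"players": {}, "map_changed": True}

@socketio.on("connect")
def world_update_on_connect():
    # Reply to the client that just connected so it can render immediately.
    emit("world-update", get_world_state())

def send_world_update():
    # Called by the turn manager once per turn; emitting from the SocketIO
    # object outside a request context reaches every connected client.
    socketio.emit("world-update", get_world_state())

# turn_manager = TurnManager(game_state=..., end_turn_callback=send_world_update)
# socketio.run(app)
```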
gh_patches_debug_3703
rasdani/github-patches
git_diff
wright-group__WrightTools-359
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> coverage consider using [coverage](https://coverage.readthedocs.io/en/coverage-4.4.1/) </issue> <code> [start of setup.py] 1 #! /usr/bin/env python3 2 3 import os 4 from setuptools import setup, find_packages 5 6 7 def package_files(directory): 8 paths = [] 9 for (path, directories, filenames) in os.walk(directory): 10 for filename in filenames: 11 paths.append(os.path.join('..', path, filename)) 12 return paths 13 14 15 here = os.path.abspath(os.path.dirname(__file__)) 16 17 extra_files = package_files(os.path.join(here, 'WrightTools', 'datasets')) 18 extra_files.append(os.path.join(here, 'CONTRIBUTORS')) 19 extra_files.append(os.path.join(here, 'LICENSE')) 20 extra_files.append(os.path.join(here, 'README.rst')) 21 extra_files.append(os.path.join(here, 'requirements.txt')) 22 extra_files.append(os.path.join(here, 'VERSION')) 23 extra_files.append(os.path.join(here, 'WrightTools', 'client_secrets.json')) 24 25 with open(os.path.join(here, 'requirements.txt')) as f: 26 required = f.read().splitlines() 27 28 with open(os.path.join(here, 'VERSION')) as version_file: 29 version = version_file.read().strip() 30 31 setup( 32 name='WrightTools', 33 packages=find_packages(), 34 package_data={'': extra_files}, 35 setup_requires=['pytest-runner'], 36 tests_require=['pytest'], 37 install_requires=required, 38 extras_require={'docs': ['sphinx-gallery>=0.1.9']}, 39 version=version, 40 description='Tools for loading, processing, and plotting multidimensional spectroscopy data.', 41 author='Blaise Thompson', 42 author_email='[email protected]', 43 license='MIT', 44 url='http://wright.tools', 45 keywords='spectroscopy science multidimensional visualization', 46 classifiers=['Development Status :: 5 - Production/Stable', 47 'Intended Audience :: Science/Research', 48 'License :: OSI Approved :: MIT License', 49 'Natural Language :: English', 50 'Programming Language :: Python :: 2', 51 'Programming Language :: Python :: 2.7', 52 'Programming Language :: Python :: 3', 53 'Programming Language :: Python :: 3.3', 54 'Programming Language :: Python :: 3.4', 55 'Programming Language :: Python :: 3.5', 56 'Topic :: Scientific/Engineering'] 57 ) 58 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -33,7 +33,7 @@ packages=find_packages(), package_data={'': extra_files}, setup_requires=['pytest-runner'], - tests_require=['pytest'], + tests_require=['pytest', 'pytest-cov'], install_requires=required, extras_require={'docs': ['sphinx-gallery>=0.1.9']}, version=version,
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -33,7 +33,7 @@\n packages=find_packages(),\n package_data={'': extra_files},\n setup_requires=['pytest-runner'],\n- tests_require=['pytest'],\n+ tests_require=['pytest', 'pytest-cov'],\n install_requires=required,\n extras_require={'docs': ['sphinx-gallery>=0.1.9']},\n version=version,\n", "issue": "coverage\nconsider using [coverage](https://coverage.readthedocs.io/en/coverage-4.4.1/)\n", "before_files": [{"content": "#! /usr/bin/env python3\n\nimport os\nfrom setuptools import setup, find_packages\n\n\ndef package_files(directory):\n paths = []\n for (path, directories, filenames) in os.walk(directory):\n for filename in filenames:\n paths.append(os.path.join('..', path, filename))\n return paths\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nextra_files = package_files(os.path.join(here, 'WrightTools', 'datasets'))\nextra_files.append(os.path.join(here, 'CONTRIBUTORS'))\nextra_files.append(os.path.join(here, 'LICENSE'))\nextra_files.append(os.path.join(here, 'README.rst'))\nextra_files.append(os.path.join(here, 'requirements.txt'))\nextra_files.append(os.path.join(here, 'VERSION'))\nextra_files.append(os.path.join(here, 'WrightTools', 'client_secrets.json'))\n\nwith open(os.path.join(here, 'requirements.txt')) as f:\n required = f.read().splitlines()\n\nwith open(os.path.join(here, 'VERSION')) as version_file:\n version = version_file.read().strip()\n\nsetup(\n name='WrightTools',\n packages=find_packages(),\n package_data={'': extra_files},\n setup_requires=['pytest-runner'],\n tests_require=['pytest'],\n install_requires=required,\n extras_require={'docs': ['sphinx-gallery>=0.1.9']},\n version=version,\n description='Tools for loading, processing, and plotting multidimensional spectroscopy data.',\n author='Blaise Thompson',\n author_email='[email protected]',\n license='MIT',\n url='http://wright.tools',\n keywords='spectroscopy science multidimensional visualization',\n classifiers=['Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Scientific/Engineering']\n)\n", "path": "setup.py"}]}
1,139
99
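A note on the golden diff above: the change is purely declarative — `pytest-cov` becomes a test requirement — and coverage is then collected through pytest-cov's `--cov` option rather than through any code change. A trimmed sketch of the resulting `setup()` fragment (other arguments from the original `setup.py` are elided):

```python
# Fragment of setup.py after the fix; coverage can then be collected with
# e.g. `pytest --cov=WrightTools` (the option is provided by pytest-cov).
from setuptools import setup, find_packages

setup(
    name="WrightTools",
    packages=find_packages(),
    setup_requires=["pytest-runner"],
    tests_require=["pytest", "pytest-cov"],
    # remaining metadata unchanged from the original setup.py
)
```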
gh_patches_debug_7544
rasdani/github-patches
git_diff
liqd__a4-product-375
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [partner landing page] unpublished and archived projects are shown On the partner landing page, we show unpublished and archived projects. Unpublished projects should never be shown and archived projects should be hidden per default. See: https://product-dev.liqd.net/teststadt/ ![bildschirmfoto 2018-05-28 um 11 33 51](https://user-images.githubusercontent.com/15341015/40608238-2254983e-626b-11e8-8429-588c014f7a82.png) </issue> <code> [start of liqd_product/apps/partners/views.py] 1 from django.contrib.messages.views import SuccessMessageMixin 2 from django.utils.translation import ugettext_lazy as _ 3 from django.views import generic 4 from django.views.generic import DetailView 5 6 from adhocracy4.actions.models import Action 7 from adhocracy4.projects.models import Project 8 from adhocracy4.rules import mixins as rules_mixins 9 from liqd_product.apps.partners.models import Partner 10 11 from . import forms 12 13 14 class PartnerView(DetailView): 15 template_name = 'partner_landing_page.html' 16 model = Partner 17 slug_url_kwarg = 'partner_slug' 18 19 def get_context_data(self, **kwargs): 20 context = super().get_context_data(**kwargs) 21 22 context['project_list'] = Project.objects\ 23 .filter(organisation__partner=self.object) 24 25 context['action_list'] = Action.objects\ 26 .filter(project__organisation__partner=self.object)\ 27 .filter_public()\ 28 .exclude_updates()[:4] 29 30 context['stats'] = { 31 'users': 1204, 32 'items': 3425, 33 'comments': 23234, 34 'ratings': 134234, 35 } 36 37 return context 38 39 40 class InformationView(DetailView): 41 template_name = 'partner_information.html' 42 model = Partner 43 slug_url_kwarg = 'partner_slug' 44 45 46 class ImprintView(DetailView): 47 template_name = 'partner_imprint.html' 48 model = Partner 49 slug_url_kwarg = 'partner_slug' 50 51 52 class PartnerUpdateView(rules_mixins.PermissionRequiredMixin, 53 SuccessMessageMixin, 54 generic.UpdateView): 55 model = Partner 56 form_class = forms.PartnerForm 57 slug_url_kwarg = 'partner_slug' 58 template_name = 'partner_form.html' 59 success_message = _('Municipality successfully updated.') 60 permission_required = 'liqd_product_partners.change_partner' 61 menu_item = 'partner' 62 63 def get_success_url(self): 64 return self.request.path 65 [end of liqd_product/apps/partners/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/liqd_product/apps/partners/views.py b/liqd_product/apps/partners/views.py --- a/liqd_product/apps/partners/views.py +++ b/liqd_product/apps/partners/views.py @@ -20,7 +20,9 @@ context = super().get_context_data(**kwargs) context['project_list'] = Project.objects\ - .filter(organisation__partner=self.object) + .filter(organisation__partner=self.object, + is_archived=False, + is_draft=False) context['action_list'] = Action.objects\ .filter(project__organisation__partner=self.object)\
{"golden_diff": "diff --git a/liqd_product/apps/partners/views.py b/liqd_product/apps/partners/views.py\n--- a/liqd_product/apps/partners/views.py\n+++ b/liqd_product/apps/partners/views.py\n@@ -20,7 +20,9 @@\n context = super().get_context_data(**kwargs)\n \n context['project_list'] = Project.objects\\\n- .filter(organisation__partner=self.object)\n+ .filter(organisation__partner=self.object,\n+ is_archived=False,\n+ is_draft=False)\n \n context['action_list'] = Action.objects\\\n .filter(project__organisation__partner=self.object)\\\n", "issue": "[partner landing page] unpublished and archived projects are shown\nOn the partner landing page, we show unpublished and archived projects. Unpublished projects should never be shown and archived projects should be hidden per default.\r\n\r\nSee: https://product-dev.liqd.net/teststadt/\r\n\r\n![bildschirmfoto 2018-05-28 um 11 33 51](https://user-images.githubusercontent.com/15341015/40608238-2254983e-626b-11e8-8429-588c014f7a82.png)\r\n\r\n\r\n\n", "before_files": [{"content": "from django.contrib.messages.views import SuccessMessageMixin\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views import generic\nfrom django.views.generic import DetailView\n\nfrom adhocracy4.actions.models import Action\nfrom adhocracy4.projects.models import Project\nfrom adhocracy4.rules import mixins as rules_mixins\nfrom liqd_product.apps.partners.models import Partner\n\nfrom . import forms\n\n\nclass PartnerView(DetailView):\n template_name = 'partner_landing_page.html'\n model = Partner\n slug_url_kwarg = 'partner_slug'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n context['project_list'] = Project.objects\\\n .filter(organisation__partner=self.object)\n\n context['action_list'] = Action.objects\\\n .filter(project__organisation__partner=self.object)\\\n .filter_public()\\\n .exclude_updates()[:4]\n\n context['stats'] = {\n 'users': 1204,\n 'items': 3425,\n 'comments': 23234,\n 'ratings': 134234,\n }\n\n return context\n\n\nclass InformationView(DetailView):\n template_name = 'partner_information.html'\n model = Partner\n slug_url_kwarg = 'partner_slug'\n\n\nclass ImprintView(DetailView):\n template_name = 'partner_imprint.html'\n model = Partner\n slug_url_kwarg = 'partner_slug'\n\n\nclass PartnerUpdateView(rules_mixins.PermissionRequiredMixin,\n SuccessMessageMixin,\n generic.UpdateView):\n model = Partner\n form_class = forms.PartnerForm\n slug_url_kwarg = 'partner_slug'\n template_name = 'partner_form.html'\n success_message = _('Municipality successfully updated.')\n permission_required = 'liqd_product_partners.change_partner'\n menu_item = 'partner'\n\n def get_success_url(self):\n return self.request.path\n", "path": "liqd_product/apps/partners/views.py"}]}
1,232
135
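A note on the golden diff above: the landing page only needed a stricter queryset — drafts must never appear and archived projects are hidden by default. The sketch below shows the same lookup as a standalone helper; the helper name is made up here, but the model, lookup path and field names are taken directly from the diff.

```python
# Only published, non-archived projects should reach the partner landing page.
from adhocracy4.projects.models import Project

def visible_projects_for(partner):
    return Project.objects.filter(
        organisation__partner=partner,
        is_archived=False,
        is_draft=False,
    )
```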
gh_patches_debug_15038
rasdani/github-patches
git_diff
OpenEnergyPlatform__oeplatform-1454
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> toep: wrong FAQ link ## Description of the issue On toep there is now a drop down menu including a link to "FAQs". The link however goes to the main page of the OEA instead of going to the questions section: https://openenergyplatform.github.io/academy/questions/ ## Steps to Reproduce 1. Visit toep and click FAQ in "About" drop-down ## Ideas of solution Link directly to https://openenergyplatform.github.io/academy/questions/ FYI @wingechr </issue> <code> [start of oeplatform/settings.py] 1 """ 2 Django settings for oeplatform project. 3 4 Generated by 'django-admin startproject' using Django 1.8.5. 5 6 For more information on this file, see 7 https://docs.djangoproject.com/en/1.8/topics/settings/ 8 9 For the full list of settings and their values, see 10 https://docs.djangoproject.com/en/1.8/ref/settings/ 11 """ 12 13 # Build paths inside the project like this: os.path.join(BASE_DIR, ...) 14 15 try: 16 from .securitysettings import * # noqa 17 except ImportError: 18 import logging 19 import os 20 21 logging.error("No securitysettings found. Triggerd in oeplatform/settings.py") 22 SECRET_KEY = os.environ.get("SECRET_KEY", "0") 23 DEFAULT_FROM_EMAIL = os.environ.get("DEFAULT_FROM_EMAIL") 24 URL = os.environ.get("URL") 25 26 # Quick-start development settings - unsuitable for production 27 # See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/ 28 29 # Application definition 30 31 INSTALLED_APPS = ( 32 "django.contrib.sites", 33 "django.contrib.admin", 34 "django.contrib.auth", 35 "django.contrib.contenttypes", 36 "django.contrib.sessions", 37 "django.contrib.messages", 38 "django.contrib.staticfiles", 39 "django.contrib.sessions.backends.signed_cookies", 40 "django_bootstrap5", 41 "rest_framework", 42 "rest_framework.authtoken", 43 "modelview", 44 "modelview.templatetags.modelview_extras", 45 "login", 46 "base", 47 "base.templatetags.base_tags", 48 "widget_tweaks", 49 "dataedit", 50 "colorfield", 51 "api", 52 "ontology", 53 "axes", 54 "captcha", 55 "django.contrib.postgres", 56 "fontawesome_5", 57 "django_better_admin_arrayfield", 58 "oeo_viewer", 59 "factsheet", 60 "corsheaders", 61 "owlready2", 62 "compressor", 63 ) 64 65 MIDDLEWARE = ( 66 "django.contrib.sites.middleware.CurrentSiteMiddleware", 67 "django.contrib.sessions.middleware.SessionMiddleware", 68 "django.middleware.common.CommonMiddleware", 69 "django.middleware.csrf.CsrfViewMiddleware", 70 "django.contrib.auth.middleware.AuthenticationMiddleware", 71 "django.contrib.messages.middleware.MessageMiddleware", 72 "django.middleware.clickjacking.XFrameOptionsMiddleware", 73 "django.middleware.security.SecurityMiddleware", 74 "login.middleware.DetachMiddleware", 75 "axes.middleware.AxesMiddleware", 76 "corsheaders.middleware.CorsMiddleware", 77 "django.middleware.common.CommonMiddleware", 78 ) 79 80 ROOT_URLCONF = "oeplatform.urls" 81 82 EXTERNAL_URLS = { 83 "tutorials_index": "https://openenergyplatform.github.io/academy/", 84 "tutorials_faq": "https://openenergyplatform.github.io/academy/", 85 "tutorials_api1": "https://openenergyplatform.github.io/academy/tutorials/01_api/01_api_download/", # noqa E501 86 "tutorials_licenses": "https://openenergyplatform.github.io/academy/tutorials/metadata/tutorial_open-data-licenses/", # noqa E501 87 "readthedocs": "https://oeplatform.readthedocs.io/en/latest/?badge=latest", 88 "mkdocs": "https://openenergyplatform.github.io/oeplatform/", 89 "compendium": 
"https://openenergyplatform.github.io/organisation/", 90 } 91 92 93 def external_urls_context_processor(request): 94 """Define hard coded external urls here. 95 Use in templates like this: {{ EXTERNAL_URLS.<name_of_url> }} 96 Also, you may want to add an icon indicating external links, e.g. 97 """ 98 return {"EXTERNAL_URLS": EXTERNAL_URLS} 99 100 101 SITE_ID = 1 102 103 TEMPLATES = [ 104 { 105 "BACKEND": "django.template.backends.django.DjangoTemplates", 106 "DIRS": [], 107 "APP_DIRS": True, 108 "OPTIONS": { 109 "context_processors": [ 110 "django.template.context_processors.debug", 111 "django.template.context_processors.request", 112 "django.contrib.auth.context_processors.auth", 113 "django.contrib.messages.context_processors.messages", 114 "oeplatform.settings.external_urls_context_processor", 115 ] 116 }, 117 } 118 ] 119 120 CORS_ORIGIN_WHITELIST = ["http://localhost:3000", "http://127.0.0.1:3000"] 121 122 GRAPHENE = {"SCHEMA": "factsheet.schema.schema"} 123 124 WSGI_APPLICATION = "oeplatform.wsgi.application" 125 126 try: 127 ONTOLOGY_FOLDER # noqa 128 except NameError: 129 ONTOLOGY_FOLDER = "/tmp" 130 131 132 # Internationalization 133 # https://docs.djangoproject.com/en/1.8/topics/i18n/ 134 135 LANGUAGE_CODE = "en-us" 136 137 TIME_ZONE = "Europe/Berlin" 138 139 USE_I18N = True 140 141 USE_L10N = True 142 143 USE_TZ = True 144 145 # Static files (CSS, JavaScript, Images) 146 # https://docs.djangoproject.com/en/1.8/howto/static-files/ 147 148 AUTH_USER_MODEL = "login.myuser" 149 LOGIN_URL = "/user/login" 150 LOGIN_REDIRECT_URL = "/" 151 152 REST_FRAMEWORK = { 153 "DEFAULT_AUTHENTICATION_CLASSES": ( 154 "rest_framework.authentication.BasicAuthentication", 155 "rest_framework.authentication.SessionAuthentication", 156 "rest_framework.authentication.TokenAuthentication", 157 ) 158 } 159 160 AUTHENTICATION_BACKENDS = [ 161 # AxesBackend should be the first backend in the AUTHENTICATION_BACKENDS list. 162 "axes.backends.AxesBackend", 163 # custom class extenging Django ModelBackend for login with username OR email 164 "login.backends.ModelBackendWithEmail", 165 ] 166 167 DEFAULT_AUTO_FIELD = "django.db.models.AutoField" 168 169 STATICFILES_FINDERS = { 170 "django.contrib.staticfiles.finders.FileSystemFinder", 171 "django.contrib.staticfiles.finders.AppDirectoriesFinder", 172 "compressor.finders.CompressorFinder", 173 } 174 175 176 # https://django-compressor.readthedocs.io/en/stable/settings.html 177 COMPRESS_ENABLED = True 178 COMPRESS_OFFLINE = True 179 COMPRESS_REBUILD_TIMEOUT = 0 180 COMPRESS_MTIME_DELAY = 0 181 [end of oeplatform/settings.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/oeplatform/settings.py b/oeplatform/settings.py --- a/oeplatform/settings.py +++ b/oeplatform/settings.py @@ -81,7 +81,7 @@ EXTERNAL_URLS = { "tutorials_index": "https://openenergyplatform.github.io/academy/", - "tutorials_faq": "https://openenergyplatform.github.io/academy/", + "tutorials_faq": "https://openenergyplatform.github.io/academy/questions/", "tutorials_api1": "https://openenergyplatform.github.io/academy/tutorials/01_api/01_api_download/", # noqa E501 "tutorials_licenses": "https://openenergyplatform.github.io/academy/tutorials/metadata/tutorial_open-data-licenses/", # noqa E501 "readthedocs": "https://oeplatform.readthedocs.io/en/latest/?badge=latest",
{"golden_diff": "diff --git a/oeplatform/settings.py b/oeplatform/settings.py\n--- a/oeplatform/settings.py\n+++ b/oeplatform/settings.py\n@@ -81,7 +81,7 @@\n \n EXTERNAL_URLS = {\n \"tutorials_index\": \"https://openenergyplatform.github.io/academy/\",\n- \"tutorials_faq\": \"https://openenergyplatform.github.io/academy/\",\n+ \"tutorials_faq\": \"https://openenergyplatform.github.io/academy/questions/\",\n \"tutorials_api1\": \"https://openenergyplatform.github.io/academy/tutorials/01_api/01_api_download/\", # noqa E501\n \"tutorials_licenses\": \"https://openenergyplatform.github.io/academy/tutorials/metadata/tutorial_open-data-licenses/\", # noqa E501\n \"readthedocs\": \"https://oeplatform.readthedocs.io/en/latest/?badge=latest\",\n", "issue": "toep: wrong FAQ link \n## Description of the issue\r\n\r\nOn toep there is now a drop down menu including a link to \"FAQs\". \r\nThe link however goes to the main page of the OEA instead of going to the questions section: https://openenergyplatform.github.io/academy/questions/\r\n\r\n## Steps to Reproduce\r\n1. Visit toep and click FAQ in \"About\" drop-down\r\n\r\n## Ideas of solution\r\n\r\nLink directly to https://openenergyplatform.github.io/academy/questions/\r\n\r\nFYI @wingechr \n", "before_files": [{"content": "\"\"\"\nDjango settings for oeplatform project.\n\nGenerated by 'django-admin startproject' using Django 1.8.5.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.8/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.8/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\n\ntry:\n from .securitysettings import * # noqa\nexcept ImportError:\n import logging\n import os\n\n logging.error(\"No securitysettings found. 
Triggerd in oeplatform/settings.py\")\n SECRET_KEY = os.environ.get(\"SECRET_KEY\", \"0\")\n DEFAULT_FROM_EMAIL = os.environ.get(\"DEFAULT_FROM_EMAIL\")\n URL = os.environ.get(\"URL\")\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/\n\n# Application definition\n\nINSTALLED_APPS = (\n \"django.contrib.sites\",\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django.contrib.sessions.backends.signed_cookies\",\n \"django_bootstrap5\",\n \"rest_framework\",\n \"rest_framework.authtoken\",\n \"modelview\",\n \"modelview.templatetags.modelview_extras\",\n \"login\",\n \"base\",\n \"base.templatetags.base_tags\",\n \"widget_tweaks\",\n \"dataedit\",\n \"colorfield\",\n \"api\",\n \"ontology\",\n \"axes\",\n \"captcha\",\n \"django.contrib.postgres\",\n \"fontawesome_5\",\n \"django_better_admin_arrayfield\",\n \"oeo_viewer\",\n \"factsheet\",\n \"corsheaders\",\n \"owlready2\",\n \"compressor\",\n)\n\nMIDDLEWARE = (\n \"django.contrib.sites.middleware.CurrentSiteMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"login.middleware.DetachMiddleware\",\n \"axes.middleware.AxesMiddleware\",\n \"corsheaders.middleware.CorsMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n)\n\nROOT_URLCONF = \"oeplatform.urls\"\n\nEXTERNAL_URLS = {\n \"tutorials_index\": \"https://openenergyplatform.github.io/academy/\",\n \"tutorials_faq\": \"https://openenergyplatform.github.io/academy/\",\n \"tutorials_api1\": \"https://openenergyplatform.github.io/academy/tutorials/01_api/01_api_download/\", # noqa E501\n \"tutorials_licenses\": \"https://openenergyplatform.github.io/academy/tutorials/metadata/tutorial_open-data-licenses/\", # noqa E501\n \"readthedocs\": \"https://oeplatform.readthedocs.io/en/latest/?badge=latest\",\n \"mkdocs\": \"https://openenergyplatform.github.io/oeplatform/\",\n \"compendium\": \"https://openenergyplatform.github.io/organisation/\",\n}\n\n\ndef external_urls_context_processor(request):\n \"\"\"Define hard coded external urls here.\n Use in templates like this: {{ EXTERNAL_URLS.<name_of_url> }}\n Also, you may want to add an icon indicating external links, e.g.\n \"\"\"\n return {\"EXTERNAL_URLS\": EXTERNAL_URLS}\n\n\nSITE_ID = 1\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"oeplatform.settings.external_urls_context_processor\",\n ]\n },\n }\n]\n\nCORS_ORIGIN_WHITELIST = [\"http://localhost:3000\", \"http://127.0.0.1:3000\"]\n\nGRAPHENE = {\"SCHEMA\": \"factsheet.schema.schema\"}\n\nWSGI_APPLICATION = \"oeplatform.wsgi.application\"\n\ntry:\n ONTOLOGY_FOLDER # noqa\nexcept NameError:\n ONTOLOGY_FOLDER = \"/tmp\"\n\n\n# Internationalization\n# 
https://docs.djangoproject.com/en/1.8/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"Europe/Berlin\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.8/howto/static-files/\n\nAUTH_USER_MODEL = \"login.myuser\"\nLOGIN_URL = \"/user/login\"\nLOGIN_REDIRECT_URL = \"/\"\n\nREST_FRAMEWORK = {\n \"DEFAULT_AUTHENTICATION_CLASSES\": (\n \"rest_framework.authentication.BasicAuthentication\",\n \"rest_framework.authentication.SessionAuthentication\",\n \"rest_framework.authentication.TokenAuthentication\",\n )\n}\n\nAUTHENTICATION_BACKENDS = [\n # AxesBackend should be the first backend in the AUTHENTICATION_BACKENDS list.\n \"axes.backends.AxesBackend\",\n # custom class extenging Django ModelBackend for login with username OR email\n \"login.backends.ModelBackendWithEmail\",\n]\n\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\nSTATICFILES_FINDERS = {\n \"django.contrib.staticfiles.finders.FileSystemFinder\",\n \"django.contrib.staticfiles.finders.AppDirectoriesFinder\",\n \"compressor.finders.CompressorFinder\",\n}\n\n\n# https://django-compressor.readthedocs.io/en/stable/settings.html\nCOMPRESS_ENABLED = True\nCOMPRESS_OFFLINE = True\nCOMPRESS_REBUILD_TIMEOUT = 0\nCOMPRESS_MTIME_DELAY = 0\n", "path": "oeplatform/settings.py"}]}
2,369
206
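A note on the golden diff above: the fix is a one-line edit to the hard-coded `EXTERNAL_URLS` map in `settings.py`; the context processor already registered there is what exposes the corrected link to templates. A trimmed sketch keeping only the two relevant entries:

```python
# Trimmed EXTERNAL_URLS after the fix; templates reach it through the context
# processor below as {{ EXTERNAL_URLS.tutorials_faq }}.
EXTERNAL_URLS = {
    "tutorials_index": "https://openenergyplatform.github.io/academy/",
    "tutorials_faq": "https://openenergyplatform.github.io/academy/questions/",
}

def external_urls_context_processor(request):
    # Already listed in TEMPLATES[0]["OPTIONS"]["context_processors"].
    return {"EXTERNAL_URLS": EXTERNAL_URLS}
```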
gh_patches_debug_25960
rasdani/github-patches
git_diff
secdev__scapy-1126
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> "Generating sets of packets" (as explained in the documentation) does not work for mixed sequence and range (e.g. IP(ttl=[1,2,(5,9)])) Hi, as explained in the [tutorial](http://scapy.readthedocs.io/en/latest/usage.html#generating-sets-of-packets) these two lines `b=IP(ttl=[1,2,(5,9)])` `[p for p in b]` should create a set of 7 packets, like `[<IP ttl=1 |>, <IP ttl=2 |>, <IP ttl=5 |>, <IP ttl=6 |>, <IP ttl=7 |>, <IP ttl=8 |>, <IP ttl=9 |>]`. But the result is just: `[<IP ttl=1 |>, <IP ttl=2 |>, <IP ttl=(5, 9) |>]` If I just use a "sequence" (`IP(ttl=[1,2,5,7,9])`) or "range" (`IP(ttl=(1,9)`), the result is that I expected. So, there are recent changesin scpay and the documentation is outdated or Is this a bug or I'm just stupid ;) ? Thanks! </issue> <code> [start of scapy/base_classes.py] 1 ## This file is part of Scapy 2 ## See http://www.secdev.org/projects/scapy for more informations 3 ## Copyright (C) Philippe Biondi <[email protected]> 4 ## This program is published under a GPLv2 license 5 6 """ 7 Generators and packet meta classes. 8 """ 9 10 ############### 11 ## Generators ## 12 ################ 13 14 from __future__ import absolute_import 15 import re,random,socket 16 import types 17 from scapy.modules.six.moves import range 18 19 class Gen(object): 20 __slots__ = [] 21 def __iter__(self): 22 return iter([]) 23 24 class SetGen(Gen): 25 def __init__(self, values, _iterpacket=1): 26 self._iterpacket=_iterpacket 27 if isinstance(values, (list, BasePacketList)): 28 self.values = list(values) 29 elif (isinstance(values, tuple) and (2 <= len(values) <= 3) and \ 30 all(hasattr(i, "__int__") for i in values)): 31 # We use values[1] + 1 as stop value for (x)range to maintain 32 # the behavior of using tuples as field `values` 33 self.values = [range(*((int(values[0]), int(values[1]) + 1) 34 + tuple(int(v) for v in values[2:])))] 35 else: 36 self.values = [values] 37 def transf(self, element): 38 return element 39 def __iter__(self): 40 for i in self.values: 41 if (isinstance(i, Gen) and 42 (self._iterpacket or not isinstance(i,BasePacket))) or ( 43 isinstance(i, (range, types.GeneratorType))): 44 for j in i: 45 yield j 46 else: 47 yield i 48 def __repr__(self): 49 return "<SetGen %r>" % self.values 50 51 class Net(Gen): 52 """Generate a list of IPs from a network address or a name""" 53 name = "ip" 54 ip_regex = re.compile(r"^(\*|[0-2]?[0-9]?[0-9](-[0-2]?[0-9]?[0-9])?)\.(\*|[0-2]?[0-9]?[0-9](-[0-2]?[0-9]?[0-9])?)\.(\*|[0-2]?[0-9]?[0-9](-[0-2]?[0-9]?[0-9])?)\.(\*|[0-2]?[0-9]?[0-9](-[0-2]?[0-9]?[0-9])?)(/[0-3]?[0-9])?$") 55 56 @staticmethod 57 def _parse_digit(a,netmask): 58 netmask = min(8,max(netmask,0)) 59 if a == "*": 60 a = (0,256) 61 elif a.find("-") >= 0: 62 x, y = [int(d) for d in a.split('-')] 63 if x > y: 64 y = x 65 a = (x & (0xff<<netmask) , max(y, (x | (0xff>>(8-netmask))))+1) 66 else: 67 a = (int(a) & (0xff<<netmask),(int(a) | (0xff>>(8-netmask)))+1) 68 return a 69 70 @classmethod 71 def _parse_net(cls, net): 72 tmp=net.split('/')+["32"] 73 if not cls.ip_regex.match(net): 74 tmp[0]=socket.gethostbyname(tmp[0]) 75 netmask = int(tmp[1]) 76 ret_list = [cls._parse_digit(x, y-netmask) for (x, y) in zip(tmp[0].split('.'), [8, 16, 24, 32])] 77 return ret_list, netmask 78 79 def __init__(self, net): 80 self.repr=net 81 self.parsed,self.netmask = self._parse_net(net) 82 83 def __str__(self): 84 try: 85 return next(self.__iter__()) 86 except StopIteration: 87 return None 88 89 def 
__iter__(self): 90 for d in range(*self.parsed[3]): 91 for c in range(*self.parsed[2]): 92 for b in range(*self.parsed[1]): 93 for a in range(*self.parsed[0]): 94 yield "%i.%i.%i.%i" % (a,b,c,d) 95 def choice(self): 96 ip = [] 97 for v in self.parsed: 98 ip.append(str(random.randint(v[0],v[1]-1))) 99 return ".".join(ip) 100 101 def __repr__(self): 102 return "Net(%r)" % self.repr 103 def __eq__(self, other): 104 if hasattr(other, "parsed"): 105 p2 = other.parsed 106 else: 107 p2,nm2 = self._parse_net(other) 108 return self.parsed == p2 109 def __contains__(self, other): 110 if hasattr(other, "parsed"): 111 p2 = other.parsed 112 else: 113 p2,nm2 = self._parse_net(other) 114 for (a1,b1),(a2,b2) in zip(self.parsed,p2): 115 if a1 > a2 or b1 < b2: 116 return False 117 return True 118 def __rcontains__(self, other): 119 return self in self.__class__(other) 120 121 122 class OID(Gen): 123 name = "OID" 124 def __init__(self, oid): 125 self.oid = oid 126 self.cmpt = [] 127 fmt = [] 128 for i in oid.split("."): 129 if "-" in i: 130 fmt.append("%i") 131 self.cmpt.append(tuple(map(int, i.split("-")))) 132 else: 133 fmt.append(i) 134 self.fmt = ".".join(fmt) 135 def __repr__(self): 136 return "OID(%r)" % self.oid 137 def __iter__(self): 138 ii = [k[0] for k in self.cmpt] 139 while True: 140 yield self.fmt % tuple(ii) 141 i = 0 142 while True: 143 if i >= len(ii): 144 raise StopIteration 145 if ii[i] < self.cmpt[i][1]: 146 ii[i]+=1 147 break 148 else: 149 ii[i] = self.cmpt[i][0] 150 i += 1 151 152 153 154 ###################################### 155 ## Packet abstract and base classes ## 156 ###################################### 157 158 class Packet_metaclass(type): 159 def __new__(cls, name, bases, dct): 160 if "fields_desc" in dct: # perform resolution of references to other packets 161 current_fld = dct["fields_desc"] 162 resolved_fld = [] 163 for f in current_fld: 164 if isinstance(f, Packet_metaclass): # reference to another fields_desc 165 for f2 in f.fields_desc: 166 resolved_fld.append(f2) 167 else: 168 resolved_fld.append(f) 169 else: # look for a fields_desc in parent classes 170 resolved_fld = None 171 for b in bases: 172 if hasattr(b,"fields_desc"): 173 resolved_fld = b.fields_desc 174 break 175 176 if resolved_fld: # perform default value replacements 177 final_fld = [] 178 for f in resolved_fld: 179 if f.name in dct: 180 f = f.copy() 181 f.default = dct[f.name] 182 del(dct[f.name]) 183 final_fld.append(f) 184 185 dct["fields_desc"] = final_fld 186 187 if "__slots__" not in dct: 188 dct["__slots__"] = [] 189 for attr in ["name", "overload_fields"]: 190 try: 191 dct["_%s" % attr] = dct.pop(attr) 192 except KeyError: 193 pass 194 newcls = super(Packet_metaclass, cls).__new__(cls, name, bases, dct) 195 newcls.__all_slots__ = set( 196 attr 197 for cls in newcls.__mro__ if hasattr(cls, "__slots__") 198 for attr in cls.__slots__ 199 ) 200 201 if hasattr(newcls, "aliastypes"): 202 newcls.aliastypes = [newcls] + newcls.aliastypes 203 else: 204 newcls.aliastypes = [newcls] 205 206 if hasattr(newcls,"register_variant"): 207 newcls.register_variant() 208 for f in newcls.fields_desc: 209 if hasattr(f, "register_owner"): 210 f.register_owner(newcls) 211 from scapy import config 212 config.conf.layers.register(newcls) 213 return newcls 214 215 def __getattr__(self, attr): 216 for k in self.fields_desc: 217 if k.name == attr: 218 return k 219 raise AttributeError(attr) 220 221 def __call__(cls, *args, **kargs): 222 if "dispatch_hook" in cls.__dict__: 223 try: 224 cls = cls.dispatch_hook(*args, **kargs) 225 
except: 226 from scapy import config 227 if config.conf.debug_dissector: 228 raise 229 cls = config.conf.raw_layer 230 i = cls.__new__(cls, cls.__name__, cls.__bases__, cls.__dict__) 231 i.__init__(*args, **kargs) 232 return i 233 234 class Field_metaclass(type): 235 def __new__(cls, name, bases, dct): 236 if "__slots__" not in dct: 237 dct["__slots__"] = [] 238 newcls = super(Field_metaclass, cls).__new__(cls, name, bases, dct) 239 return newcls 240 241 class NewDefaultValues(Packet_metaclass): 242 """NewDefaultValues is deprecated (not needed anymore) 243 244 remove this: 245 __metaclass__ = NewDefaultValues 246 and it should still work. 247 """ 248 def __new__(cls, name, bases, dct): 249 from scapy.error import log_loading 250 import traceback 251 try: 252 for tb in traceback.extract_stack()+[("??",-1,None,"")]: 253 f,l,_,line = tb 254 if line.startswith("class"): 255 break 256 except: 257 f,l="??",-1 258 raise 259 log_loading.warning("Deprecated (no more needed) use of NewDefaultValues (%s l. %i).", f, l) 260 261 return super(NewDefaultValues, cls).__new__(cls, name, bases, dct) 262 263 class BasePacket(Gen): 264 __slots__ = [] 265 266 267 ############################# 268 ## Packet list base class ## 269 ############################# 270 271 class BasePacketList(object): 272 __slots__ = [] 273 [end of scapy/base_classes.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scapy/base_classes.py b/scapy/base_classes.py --- a/scapy/base_classes.py +++ b/scapy/base_classes.py @@ -20,20 +20,29 @@ __slots__ = [] def __iter__(self): return iter([]) - + + +def _get_values(value): + """Generate a range object from (start, stop[, step]) tuples, or +return value. + + """ + if (isinstance(value, tuple) and (2 <= len(value) <= 3) and \ + all(hasattr(i, "__int__") for i in value)): + # We use values[1] + 1 as stop value for (x)range to maintain + # the behavior of using tuples as field `values` + return range(*((int(value[0]), int(value[1]) + 1) + + tuple(int(v) for v in value[2:]))) + return value + + class SetGen(Gen): def __init__(self, values, _iterpacket=1): self._iterpacket=_iterpacket if isinstance(values, (list, BasePacketList)): - self.values = list(values) - elif (isinstance(values, tuple) and (2 <= len(values) <= 3) and \ - all(hasattr(i, "__int__") for i in values)): - # We use values[1] + 1 as stop value for (x)range to maintain - # the behavior of using tuples as field `values` - self.values = [range(*((int(values[0]), int(values[1]) + 1) - + tuple(int(v) for v in values[2:])))] + self.values = [_get_values(val) for val in values] else: - self.values = [values] + self.values = [_get_values(values)] def transf(self, element): return element def __iter__(self):
{"golden_diff": "diff --git a/scapy/base_classes.py b/scapy/base_classes.py\n--- a/scapy/base_classes.py\n+++ b/scapy/base_classes.py\n@@ -20,20 +20,29 @@\n __slots__ = []\n def __iter__(self):\n return iter([])\n- \n+\n+\n+def _get_values(value):\n+ \"\"\"Generate a range object from (start, stop[, step]) tuples, or\n+return value.\n+\n+ \"\"\"\n+ if (isinstance(value, tuple) and (2 <= len(value) <= 3) and \\\n+ all(hasattr(i, \"__int__\") for i in value)):\n+ # We use values[1] + 1 as stop value for (x)range to maintain\n+ # the behavior of using tuples as field `values`\n+ return range(*((int(value[0]), int(value[1]) + 1)\n+ + tuple(int(v) for v in value[2:])))\n+ return value\n+\n+\n class SetGen(Gen):\n def __init__(self, values, _iterpacket=1):\n self._iterpacket=_iterpacket\n if isinstance(values, (list, BasePacketList)):\n- self.values = list(values)\n- elif (isinstance(values, tuple) and (2 <= len(values) <= 3) and \\\n- all(hasattr(i, \"__int__\") for i in values)):\n- # We use values[1] + 1 as stop value for (x)range to maintain\n- # the behavior of using tuples as field `values`\n- self.values = [range(*((int(values[0]), int(values[1]) + 1)\n- + tuple(int(v) for v in values[2:])))]\n+ self.values = [_get_values(val) for val in values]\n else:\n- self.values = [values]\n+ self.values = [_get_values(values)]\n def transf(self, element):\n return element\n def __iter__(self):\n", "issue": "\"Generating sets of packets\" (as explained in the documentation) does not work for mixed sequence and range (e.g. IP(ttl=[1,2,(5,9)]))\nHi,\r\nas explained in the [tutorial](http://scapy.readthedocs.io/en/latest/usage.html#generating-sets-of-packets) \r\nthese two lines \r\n`b=IP(ttl=[1,2,(5,9)])`\r\n`[p for p in b]` \r\nshould create a set of 7 packets, like\r\n`[<IP ttl=1 |>, <IP ttl=2 |>, <IP ttl=5 |>, <IP ttl=6 |>, <IP ttl=7 |>, <IP ttl=8 |>, <IP ttl=9 |>]`.\r\n\r\nBut the result is just:\r\n`[<IP ttl=1 |>, <IP ttl=2 |>, <IP ttl=(5, 9) |>]`\r\n\r\nIf I just use a \"sequence\" (`IP(ttl=[1,2,5,7,9])`) or \"range\" (`IP(ttl=(1,9)`), the result is that I expected. 
\r\n\r\nSo, there are recent changesin scpay and the documentation is outdated or Is this a bug or I'm just stupid ;) ?\r\n\r\nThanks!\n", "before_files": [{"content": "## This file is part of Scapy\n## See http://www.secdev.org/projects/scapy for more informations\n## Copyright (C) Philippe Biondi <[email protected]>\n## This program is published under a GPLv2 license\n\n\"\"\"\nGenerators and packet meta classes.\n\"\"\"\n\n###############\n## Generators ##\n################\n\nfrom __future__ import absolute_import\nimport re,random,socket\nimport types\nfrom scapy.modules.six.moves import range\n\nclass Gen(object):\n __slots__ = []\n def __iter__(self):\n return iter([])\n \nclass SetGen(Gen):\n def __init__(self, values, _iterpacket=1):\n self._iterpacket=_iterpacket\n if isinstance(values, (list, BasePacketList)):\n self.values = list(values)\n elif (isinstance(values, tuple) and (2 <= len(values) <= 3) and \\\n all(hasattr(i, \"__int__\") for i in values)):\n # We use values[1] + 1 as stop value for (x)range to maintain\n # the behavior of using tuples as field `values`\n self.values = [range(*((int(values[0]), int(values[1]) + 1)\n + tuple(int(v) for v in values[2:])))]\n else:\n self.values = [values]\n def transf(self, element):\n return element\n def __iter__(self):\n for i in self.values:\n if (isinstance(i, Gen) and\n (self._iterpacket or not isinstance(i,BasePacket))) or (\n isinstance(i, (range, types.GeneratorType))):\n for j in i:\n yield j\n else:\n yield i\n def __repr__(self):\n return \"<SetGen %r>\" % self.values\n\nclass Net(Gen):\n \"\"\"Generate a list of IPs from a network address or a name\"\"\"\n name = \"ip\"\n ip_regex = re.compile(r\"^(\\*|[0-2]?[0-9]?[0-9](-[0-2]?[0-9]?[0-9])?)\\.(\\*|[0-2]?[0-9]?[0-9](-[0-2]?[0-9]?[0-9])?)\\.(\\*|[0-2]?[0-9]?[0-9](-[0-2]?[0-9]?[0-9])?)\\.(\\*|[0-2]?[0-9]?[0-9](-[0-2]?[0-9]?[0-9])?)(/[0-3]?[0-9])?$\")\n\n @staticmethod\n def _parse_digit(a,netmask):\n netmask = min(8,max(netmask,0))\n if a == \"*\":\n a = (0,256)\n elif a.find(\"-\") >= 0:\n x, y = [int(d) for d in a.split('-')]\n if x > y:\n y = x\n a = (x & (0xff<<netmask) , max(y, (x | (0xff>>(8-netmask))))+1)\n else:\n a = (int(a) & (0xff<<netmask),(int(a) | (0xff>>(8-netmask)))+1)\n return a\n\n @classmethod\n def _parse_net(cls, net):\n tmp=net.split('/')+[\"32\"]\n if not cls.ip_regex.match(net):\n tmp[0]=socket.gethostbyname(tmp[0])\n netmask = int(tmp[1])\n ret_list = [cls._parse_digit(x, y-netmask) for (x, y) in zip(tmp[0].split('.'), [8, 16, 24, 32])]\n return ret_list, netmask\n\n def __init__(self, net):\n self.repr=net\n self.parsed,self.netmask = self._parse_net(net)\n\n def __str__(self):\n try:\n return next(self.__iter__())\n except StopIteration:\n return None\n \n def __iter__(self):\n for d in range(*self.parsed[3]):\n for c in range(*self.parsed[2]):\n for b in range(*self.parsed[1]):\n for a in range(*self.parsed[0]):\n yield \"%i.%i.%i.%i\" % (a,b,c,d)\n def choice(self):\n ip = []\n for v in self.parsed:\n ip.append(str(random.randint(v[0],v[1]-1)))\n return \".\".join(ip) \n \n def __repr__(self):\n return \"Net(%r)\" % self.repr\n def __eq__(self, other):\n if hasattr(other, \"parsed\"):\n p2 = other.parsed\n else:\n p2,nm2 = self._parse_net(other)\n return self.parsed == p2\n def __contains__(self, other):\n if hasattr(other, \"parsed\"):\n p2 = other.parsed\n else:\n p2,nm2 = self._parse_net(other)\n for (a1,b1),(a2,b2) in zip(self.parsed,p2):\n if a1 > a2 or b1 < b2:\n return False\n return True\n def __rcontains__(self, other): \n return self 
in self.__class__(other)\n \n\nclass OID(Gen):\n name = \"OID\"\n def __init__(self, oid):\n self.oid = oid \n self.cmpt = []\n fmt = [] \n for i in oid.split(\".\"):\n if \"-\" in i:\n fmt.append(\"%i\")\n self.cmpt.append(tuple(map(int, i.split(\"-\"))))\n else:\n fmt.append(i)\n self.fmt = \".\".join(fmt)\n def __repr__(self):\n return \"OID(%r)\" % self.oid\n def __iter__(self): \n ii = [k[0] for k in self.cmpt]\n while True:\n yield self.fmt % tuple(ii)\n i = 0\n while True:\n if i >= len(ii):\n raise StopIteration\n if ii[i] < self.cmpt[i][1]:\n ii[i]+=1\n break\n else:\n ii[i] = self.cmpt[i][0]\n i += 1\n\n\n \n######################################\n## Packet abstract and base classes ##\n######################################\n\nclass Packet_metaclass(type):\n def __new__(cls, name, bases, dct):\n if \"fields_desc\" in dct: # perform resolution of references to other packets\n current_fld = dct[\"fields_desc\"]\n resolved_fld = []\n for f in current_fld:\n if isinstance(f, Packet_metaclass): # reference to another fields_desc\n for f2 in f.fields_desc:\n resolved_fld.append(f2)\n else:\n resolved_fld.append(f)\n else: # look for a fields_desc in parent classes\n resolved_fld = None\n for b in bases:\n if hasattr(b,\"fields_desc\"):\n resolved_fld = b.fields_desc\n break\n\n if resolved_fld: # perform default value replacements\n final_fld = []\n for f in resolved_fld:\n if f.name in dct:\n f = f.copy()\n f.default = dct[f.name]\n del(dct[f.name])\n final_fld.append(f)\n\n dct[\"fields_desc\"] = final_fld\n\n if \"__slots__\" not in dct:\n dct[\"__slots__\"] = []\n for attr in [\"name\", \"overload_fields\"]:\n try:\n dct[\"_%s\" % attr] = dct.pop(attr)\n except KeyError:\n pass\n newcls = super(Packet_metaclass, cls).__new__(cls, name, bases, dct)\n newcls.__all_slots__ = set(\n attr\n for cls in newcls.__mro__ if hasattr(cls, \"__slots__\")\n for attr in cls.__slots__\n )\n\n if hasattr(newcls, \"aliastypes\"):\n newcls.aliastypes = [newcls] + newcls.aliastypes\n else:\n newcls.aliastypes = [newcls]\n\n if hasattr(newcls,\"register_variant\"):\n newcls.register_variant()\n for f in newcls.fields_desc:\n if hasattr(f, \"register_owner\"):\n f.register_owner(newcls)\n from scapy import config\n config.conf.layers.register(newcls)\n return newcls\n\n def __getattr__(self, attr):\n for k in self.fields_desc:\n if k.name == attr:\n return k\n raise AttributeError(attr)\n\n def __call__(cls, *args, **kargs):\n if \"dispatch_hook\" in cls.__dict__:\n try:\n cls = cls.dispatch_hook(*args, **kargs)\n except:\n from scapy import config\n if config.conf.debug_dissector:\n raise\n cls = config.conf.raw_layer\n i = cls.__new__(cls, cls.__name__, cls.__bases__, cls.__dict__)\n i.__init__(*args, **kargs)\n return i\n\nclass Field_metaclass(type):\n def __new__(cls, name, bases, dct):\n if \"__slots__\" not in dct:\n dct[\"__slots__\"] = []\n newcls = super(Field_metaclass, cls).__new__(cls, name, bases, dct)\n return newcls\n\nclass NewDefaultValues(Packet_metaclass):\n \"\"\"NewDefaultValues is deprecated (not needed anymore)\n \n remove this:\n __metaclass__ = NewDefaultValues\n and it should still work.\n \"\"\" \n def __new__(cls, name, bases, dct):\n from scapy.error import log_loading\n import traceback\n try:\n for tb in traceback.extract_stack()+[(\"??\",-1,None,\"\")]:\n f,l,_,line = tb\n if line.startswith(\"class\"):\n break\n except:\n f,l=\"??\",-1\n raise\n log_loading.warning(\"Deprecated (no more needed) use of NewDefaultValues (%s l. 
%i).\", f, l)\n \n return super(NewDefaultValues, cls).__new__(cls, name, bases, dct)\n\nclass BasePacket(Gen):\n __slots__ = []\n\n\n#############################\n## Packet list base class ##\n#############################\n\nclass BasePacketList(object):\n __slots__ = []\n", "path": "scapy/base_classes.py"}]}
num_tokens_prompt: 3,803
num_tokens_diff: 436

problem_id: gh_patches_debug_13696
source: rasdani/github-patches
task_type: git_diff
in_source_id: enthought__chaco-634
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Dont use traitsui.api as tui https://github.com/enthought/chaco/blob/3de7780561fa29e79c887432d3ce408ea82d1614/chaco/plugin/plot_editor.py makes use of the odd `import traitsui.api as tui` alias which needs to be updated and removed. </issue> <code> [start of chaco/plugin/plot_editor.py] 1 from chaco.shell.scaly_plot import ScalyPlot 2 from enable.component_editor import ComponentEditor 3 from pyface.workbench.api import TraitsUIEditor 4 from traits.api import Any, Enum, HasTraits, Property, Str 5 from traitsui import api as tui 6 7 8 class PlotUI(HasTraits): 9 """Simple Traits UI proxy for a Chaco plot.""" 10 11 # The plot. 12 component = Any() 13 14 traits_view = tui.View( 15 tui.Item("component", editor=ComponentEditor(), show_label=False), 16 resizable=True, 17 ) 18 19 20 class PlotEditor(TraitsUIEditor): 21 """A Workbench Editor showing a Chaco plot for the shell interface.""" 22 23 bgcolor = Str("white") 24 image_default_origin = Enum( 25 "bottom left", "top left", "bottom right", "top right" 26 ) 27 28 # The plot. 29 component = Property(Any) 30 container = Property(Any) 31 32 # The PlotData. 33 data = Any() 34 35 # The PlotSession of which we are a part. We need to know this in order 36 # to notify it of our being closed, etc. 37 session = Any() 38 39 def __init__( 40 self, 41 is_image=False, 42 bgcolor="white", 43 image_default_origin="top left", 44 *args, 45 **kw 46 ): 47 48 super(TraitsUIEditor, self).__init__(**kw) 49 50 # Some defaults which should be overridden by preferences. 51 self.bgcolor = bgcolor 52 self.image_default_origin = image_default_origin 53 54 # Create an empty top-level container 55 if is_image: 56 top_container = self._create_top_img_container() 57 else: 58 top_container = self._create_top_container() 59 60 self.obj = PlotUI(component=top_container) 61 62 #### PlotWindow interface ################################################## 63 64 def get_container(self): 65 return self.obj.component 66 67 def set_container(self, container): 68 self.obj.component = container 69 70 def iconize(self, iconize): 71 """Iconizes the window if *iconize* is True. 72 73 Do nothing in this implementation. 74 """ 75 76 def maximize(self, maximize): 77 """If *maximize* is True, maximizes the window size; restores if False. 78 79 Do nothing in this implementation. 
80 """ 81 82 def set_size(self, width, height): 83 pass 84 85 def set_title(self, title): 86 self.name = title 87 88 def raise_window(self): 89 self.window.activate_editor(self) 90 91 #### Editor interface ###################################################### 92 93 def destroy_control(self): 94 """Destroy the toolkit-specific control that represents the part.""" 95 self._on_window_close() 96 super(TraitsUIEditor, self).destroy_control() 97 98 #### Private interface ##################################################### 99 100 def _get_container(self): 101 return self.obj.component 102 103 def _set_container(self, value): 104 self.obj.component = value 105 106 def _get_component(self): 107 return self.obj.component 108 109 def _set_component(self, value): 110 self.obj.component = value 111 112 def _create_top_container(self): 113 plot = ScalyPlot( 114 padding=50, 115 fill_padding=True, 116 bgcolor=self.bgcolor, 117 use_backbuffer=True, 118 ) 119 return plot 120 121 def _create_top_img_container(self): 122 plot = ScalyPlot( 123 padding=50, 124 fill_padding=True, 125 bgcolor=self.bgcolor, 126 use_backbuffer=True, 127 default_origin=self.image_default_origin, 128 ) 129 return plot 130 131 def _on_window_close(self): 132 if self.session: 133 try: 134 ndx = self.session.windows.index(self) 135 self.session.del_window(ndx) 136 except ValueError: 137 pass 138 [end of chaco/plugin/plot_editor.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/chaco/plugin/plot_editor.py b/chaco/plugin/plot_editor.py --- a/chaco/plugin/plot_editor.py +++ b/chaco/plugin/plot_editor.py @@ -2,7 +2,7 @@ from enable.component_editor import ComponentEditor from pyface.workbench.api import TraitsUIEditor from traits.api import Any, Enum, HasTraits, Property, Str -from traitsui import api as tui +from traitsui.api import Item, View class PlotUI(HasTraits): @@ -11,8 +11,8 @@ # The plot. component = Any() - traits_view = tui.View( - tui.Item("component", editor=ComponentEditor(), show_label=False), + traits_view = View( + Item("component", editor=ComponentEditor(), show_label=False), resizable=True, )
{"golden_diff": "diff --git a/chaco/plugin/plot_editor.py b/chaco/plugin/plot_editor.py\n--- a/chaco/plugin/plot_editor.py\n+++ b/chaco/plugin/plot_editor.py\n@@ -2,7 +2,7 @@\n from enable.component_editor import ComponentEditor\n from pyface.workbench.api import TraitsUIEditor\n from traits.api import Any, Enum, HasTraits, Property, Str\n-from traitsui import api as tui\n+from traitsui.api import Item, View\n \n \n class PlotUI(HasTraits):\n@@ -11,8 +11,8 @@\n # The plot.\n component = Any()\n \n- traits_view = tui.View(\n- tui.Item(\"component\", editor=ComponentEditor(), show_label=False),\n+ traits_view = View(\n+ Item(\"component\", editor=ComponentEditor(), show_label=False),\n resizable=True,\n )\n", "issue": "Dont use traitsui.api as tui\nhttps://github.com/enthought/chaco/blob/3de7780561fa29e79c887432d3ce408ea82d1614/chaco/plugin/plot_editor.py makes use of the odd `import traitsui.api as tui` alias which needs to be updated and removed.\n", "before_files": [{"content": "from chaco.shell.scaly_plot import ScalyPlot\nfrom enable.component_editor import ComponentEditor\nfrom pyface.workbench.api import TraitsUIEditor\nfrom traits.api import Any, Enum, HasTraits, Property, Str\nfrom traitsui import api as tui\n\n\nclass PlotUI(HasTraits):\n \"\"\"Simple Traits UI proxy for a Chaco plot.\"\"\"\n\n # The plot.\n component = Any()\n\n traits_view = tui.View(\n tui.Item(\"component\", editor=ComponentEditor(), show_label=False),\n resizable=True,\n )\n\n\nclass PlotEditor(TraitsUIEditor):\n \"\"\"A Workbench Editor showing a Chaco plot for the shell interface.\"\"\"\n\n bgcolor = Str(\"white\")\n image_default_origin = Enum(\n \"bottom left\", \"top left\", \"bottom right\", \"top right\"\n )\n\n # The plot.\n component = Property(Any)\n container = Property(Any)\n\n # The PlotData.\n data = Any()\n\n # The PlotSession of which we are a part. 
We need to know this in order\n # to notify it of our being closed, etc.\n session = Any()\n\n def __init__(\n self,\n is_image=False,\n bgcolor=\"white\",\n image_default_origin=\"top left\",\n *args,\n **kw\n ):\n\n super(TraitsUIEditor, self).__init__(**kw)\n\n # Some defaults which should be overridden by preferences.\n self.bgcolor = bgcolor\n self.image_default_origin = image_default_origin\n\n # Create an empty top-level container\n if is_image:\n top_container = self._create_top_img_container()\n else:\n top_container = self._create_top_container()\n\n self.obj = PlotUI(component=top_container)\n\n #### PlotWindow interface ##################################################\n\n def get_container(self):\n return self.obj.component\n\n def set_container(self, container):\n self.obj.component = container\n\n def iconize(self, iconize):\n \"\"\"Iconizes the window if *iconize* is True.\n\n Do nothing in this implementation.\n \"\"\"\n\n def maximize(self, maximize):\n \"\"\"If *maximize* is True, maximizes the window size; restores if False.\n\n Do nothing in this implementation.\n \"\"\"\n\n def set_size(self, width, height):\n pass\n\n def set_title(self, title):\n self.name = title\n\n def raise_window(self):\n self.window.activate_editor(self)\n\n #### Editor interface ######################################################\n\n def destroy_control(self):\n \"\"\"Destroy the toolkit-specific control that represents the part.\"\"\"\n self._on_window_close()\n super(TraitsUIEditor, self).destroy_control()\n\n #### Private interface #####################################################\n\n def _get_container(self):\n return self.obj.component\n\n def _set_container(self, value):\n self.obj.component = value\n\n def _get_component(self):\n return self.obj.component\n\n def _set_component(self, value):\n self.obj.component = value\n\n def _create_top_container(self):\n plot = ScalyPlot(\n padding=50,\n fill_padding=True,\n bgcolor=self.bgcolor,\n use_backbuffer=True,\n )\n return plot\n\n def _create_top_img_container(self):\n plot = ScalyPlot(\n padding=50,\n fill_padding=True,\n bgcolor=self.bgcolor,\n use_backbuffer=True,\n default_origin=self.image_default_origin,\n )\n return plot\n\n def _on_window_close(self):\n if self.session:\n try:\n ndx = self.session.windows.index(self)\n self.session.del_window(ndx)\n except ValueError:\n pass\n", "path": "chaco/plugin/plot_editor.py"}]}
num_tokens_prompt: 1,719
num_tokens_diff: 188

problem_id: gh_patches_debug_35309
source: rasdani/github-patches
task_type: git_diff
in_source_id: nvaccess__nvda-9119
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Windows Store apps: use app title as product name instead of wwahost **Reported by nvdakor on 2014-07-07 13:10** Hi, Currently, when invoking appModule.productName for a Windows Store app, NVDA says "Windows operating system". Although this is fine for built-in apps such as Bing Weather and Windows Store, this may confuse users and developers when they are testing accessibility of Store apps (both existing ones and apps to be created in the future). As a way of providing actual product name for debugging purposes and for people to tell us which app they are having issues with, I propose using app title for productName. Before: 1. Go to Windows Store app such as Weather. 2. Once the app opens, press NVDA+F1 to show developer info. Look under appModule.productName. After: 1. Open any Windows Store app. 2. Press NVDA+F1 to open developer info. appModule.productName tells you the title of the app. Implementation: 1. In appModules.wwahost.py, override _get_productName to return the title (api.getForegroundObject().name). 2. wwahost.AppModule.productName will contain the actual title of the app. Thanks. **Blocked by #4569** Load specific app modules for apps hosted by wwahost.exe **Reported by jteh on 2014-10-23 07:35** wwahost.exe hosts many Windows Store apps (though it seems some do have their own executables). To allow for application specific customisation, we should identify the specific application in the app module name. I think we can use GetApplicationUserModelId (in kernel32) for this purpose. This includes a "!" character, so we'll need to check whether Python will cope with this and replace if not. **Blocked by #4360** **Blocking #4259** </issue> <code> [start of source/appModules/wwahost.py] 1 #A part of NonVisual Desktop Access (NVDA) 2 #This file is covered by the GNU General Public License. 3 #See the file COPYING for more details. 4 #Copyright (C) 2012 NV Access Limited 5 6 from comtypes import COMError 7 import IAccessibleHandler 8 from NVDAObjects.IAccessible.MSHTML import Body 9 import appModuleHandler 10 import controlTypes 11 import winUser 12 13 class AppModule(appModuleHandler.AppModule): 14 15 def event_NVDAObject_init(self,obj): 16 #The root document of HTML Metro Apps must be treeted as an application. 17 if isinstance(obj,Body) and obj.windowClassName=="Internet Explorer_Server": 18 try: 19 paccParent=obj.IAccessibleObject.accParent.accParent 20 identity=IAccessibleHandler.getIAccIdentity(paccParent,0) 21 except (COMError,AttributeError): 22 identity=None 23 if identity: 24 windowHandle=identity.get('windowHandle') 25 if windowHandle and winUser.getClassName(windowHandle)=="Web Platform Embedding": 26 obj.role=controlTypes.ROLE_APPLICATION 27 [end of source/appModules/wwahost.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/source/appModules/wwahost.py b/source/appModules/wwahost.py --- a/source/appModules/wwahost.py +++ b/source/appModules/wwahost.py @@ -1,26 +1,40 @@ -#A part of NonVisual Desktop Access (NVDA) -#This file is covered by the GNU General Public License. -#See the file COPYING for more details. -#Copyright (C) 2012 NV Access Limited +# A part of NonVisual Desktop Access (NVDA) +# This file is covered by the GNU General Public License. +# See the file COPYING for more details. +# Copyright (C) 2012-2020 NV Access Limited, Joseph Lee -from comtypes import COMError -import IAccessibleHandler -from NVDAObjects.IAccessible.MSHTML import Body +"""App module host for Windows 8.x and 10 apps hosted by wwahost.exe. +In Windows 8, apps written in Javascript are executed inside WWAHost, including some WinRT apps. +In Windows 10, progressive web apps (PWA) and friends are hosted inside this process. +App modules wishing to support apps hosted inside this process must subclass the AppModule class. +""" + +import ctypes import appModuleHandler -import controlTypes -import winUser +import winKernel + + +def getAppNameFromHost(processId): + # Some apps that come with Windows 8 and 8.1 are hosted by wwahost.exe. + # App modules for these are named after the hosted app name. + processHandle = winKernel.openProcess( + winKernel.SYNCHRONIZE | winKernel.PROCESS_QUERY_INFORMATION, False, processId + ) + length = ctypes.c_uint() + winKernel.kernel32.GetApplicationUserModelId(processHandle, ctypes.byref(length), None) + appModel = ctypes.create_unicode_buffer(length.value) + winKernel.kernel32.GetApplicationUserModelId(processHandle, ctypes.byref(length), appModel) + winKernel.closeHandle(processHandle) + # Sometimes app model might be empty, so raise errors and fall back to wwahost. + if not appModel.value: + raise LookupError + # App model is shown as familyName!appName, + # and importing files with the exclamation point in the middle of the name isn't supported. + # Therefore return only the app name portion. + # Convert this into lowercase to make the file name consistent with other NVDA app modules. + return appModel.value.split("!")[-1].lower() class AppModule(appModuleHandler.AppModule): - def event_NVDAObject_init(self,obj): - #The root document of HTML Metro Apps must be treeted as an application. - if isinstance(obj,Body) and obj.windowClassName=="Internet Explorer_Server": - try: - paccParent=obj.IAccessibleObject.accParent.accParent - identity=IAccessibleHandler.getIAccIdentity(paccParent,0) - except (COMError,AttributeError): - identity=None - if identity: - windowHandle=identity.get('windowHandle') - if windowHandle and winUser.getClassName(windowHandle)=="Web Platform Embedding": - obj.role=controlTypes.ROLE_APPLICATION + # WWAHost app content is treated as part of an app, not a browse mode document. + disableBrowseModeByDefault = True
{"golden_diff": "diff --git a/source/appModules/wwahost.py b/source/appModules/wwahost.py\n--- a/source/appModules/wwahost.py\n+++ b/source/appModules/wwahost.py\n@@ -1,26 +1,40 @@\n-#A part of NonVisual Desktop Access (NVDA)\r\n-#This file is covered by the GNU General Public License.\r\n-#See the file COPYING for more details.\r\n-#Copyright (C) 2012 NV Access Limited\r\n+# A part of NonVisual Desktop Access (NVDA)\r\n+# This file is covered by the GNU General Public License.\r\n+# See the file COPYING for more details.\r\n+# Copyright (C) 2012-2020 NV Access Limited, Joseph Lee\r\n \r\n-from comtypes import COMError\r\n-import IAccessibleHandler\r\n-from NVDAObjects.IAccessible.MSHTML import Body\r\n+\"\"\"App module host for Windows 8.x and 10 apps hosted by wwahost.exe.\r\n+In Windows 8, apps written in Javascript are executed inside WWAHost, including some WinRT apps.\r\n+In Windows 10, progressive web apps (PWA) and friends are hosted inside this process.\r\n+App modules wishing to support apps hosted inside this process must subclass the AppModule class.\r\n+\"\"\"\r\n+\r\n+import ctypes\r\n import appModuleHandler\r\n-import controlTypes\r\n-import winUser\r\n+import winKernel\r\n+\r\n+\r\n+def getAppNameFromHost(processId):\r\n+\t# Some apps that come with Windows 8 and 8.1 are hosted by wwahost.exe.\r\n+\t# App modules for these are named after the hosted app name.\r\n+\tprocessHandle = winKernel.openProcess(\r\n+\t\twinKernel.SYNCHRONIZE | winKernel.PROCESS_QUERY_INFORMATION, False, processId\r\n+\t)\r\n+\tlength = ctypes.c_uint()\r\n+\twinKernel.kernel32.GetApplicationUserModelId(processHandle, ctypes.byref(length), None)\r\n+\tappModel = ctypes.create_unicode_buffer(length.value)\r\n+\twinKernel.kernel32.GetApplicationUserModelId(processHandle, ctypes.byref(length), appModel)\r\n+\twinKernel.closeHandle(processHandle)\r\n+\t# Sometimes app model might be empty, so raise errors and fall back to wwahost.\r\n+\tif not appModel.value:\r\n+\t\traise LookupError\r\n+\t# App model is shown as familyName!appName,\r\n+\t# and importing files with the exclamation point in the middle of the name isn't supported.\r\n+\t# Therefore return only the app name portion.\r\n+\t# Convert this into lowercase to make the file name consistent with other NVDA app modules.\r\n+\treturn appModel.value.split(\"!\")[-1].lower()\r\n \r\n class AppModule(appModuleHandler.AppModule):\r\n \r\n-\tdef event_NVDAObject_init(self,obj):\r\n-\t\t#The root document of HTML Metro Apps must be treeted as an application. \r\n-\t\tif isinstance(obj,Body) and obj.windowClassName==\"Internet Explorer_Server\":\r\n-\t\t\ttry:\r\n-\t\t\t\tpaccParent=obj.IAccessibleObject.accParent.accParent\r\n-\t\t\t\tidentity=IAccessibleHandler.getIAccIdentity(paccParent,0)\r\n-\t\t\texcept (COMError,AttributeError):\r\n-\t\t\t\tidentity=None\r\n-\t\t\tif identity:\r\n-\t\t\t\twindowHandle=identity.get('windowHandle')\r\n-\t\t\t\tif windowHandle and winUser.getClassName(windowHandle)==\"Web Platform Embedding\":\r\n-\t\t\t\t\tobj.role=controlTypes.ROLE_APPLICATION\r\n+\t# WWAHost app content is treated as part of an app, not a browse mode document.\r\n+\tdisableBrowseModeByDefault = True\n", "issue": "Windows Store apps: use app title as product name instead of wwahost\n**Reported by nvdakor on 2014-07-07 13:10**\nHi,\nCurrently, when invoking appModule.productName for a Windows Store app, NVDA says \"Windows operating system\". 
Although this is fine for built-in apps such as Bing Weather and Windows Store, this may confuse users and developers when they are testing accessibility of Store apps (both existing ones and apps to be created in the future). As a way of providing actual product name for debugging purposes and for people to tell us which app they are having issues with, I propose using app title for productName.\nBefore:\n1. Go to Windows Store app such as Weather.\n2. Once the app opens, press NVDA+F1 to show developer info. Look under appModule.productName.\nAfter:\n1. Open any Windows Store app.\n2. Press NVDA+F1 to open developer info. appModule.productName tells you the title of the app.\nImplementation:\n1. In appModules.wwahost.py, override _get_productName to return the title (api.getForegroundObject().name).\n2. wwahost.AppModule.productName will contain the actual title of the app.\nThanks.\n\n**Blocked by #4569**\n\nLoad specific app modules for apps hosted by wwahost.exe\n**Reported by jteh on 2014-10-23 07:35**\nwwahost.exe hosts many Windows Store apps (though it seems some do have their own executables). To allow for application specific customisation, we should identify the specific application in the app module name.\n\nI think we can use GetApplicationUserModelId (in kernel32) for this purpose. This includes a \"!\" character, so we'll need to check whether Python will cope with this and replace if not.\n**Blocked by #4360**\n**Blocking #4259**\n\n", "before_files": [{"content": "#A part of NonVisual Desktop Access (NVDA)\r\n#This file is covered by the GNU General Public License.\r\n#See the file COPYING for more details.\r\n#Copyright (C) 2012 NV Access Limited\r\n\r\nfrom comtypes import COMError\r\nimport IAccessibleHandler\r\nfrom NVDAObjects.IAccessible.MSHTML import Body\r\nimport appModuleHandler\r\nimport controlTypes\r\nimport winUser\r\n\r\nclass AppModule(appModuleHandler.AppModule):\r\n\r\n\tdef event_NVDAObject_init(self,obj):\r\n\t\t#The root document of HTML Metro Apps must be treeted as an application. \r\n\t\tif isinstance(obj,Body) and obj.windowClassName==\"Internet Explorer_Server\":\r\n\t\t\ttry:\r\n\t\t\t\tpaccParent=obj.IAccessibleObject.accParent.accParent\r\n\t\t\t\tidentity=IAccessibleHandler.getIAccIdentity(paccParent,0)\r\n\t\t\texcept (COMError,AttributeError):\r\n\t\t\t\tidentity=None\r\n\t\t\tif identity:\r\n\t\t\t\twindowHandle=identity.get('windowHandle')\r\n\t\t\t\tif windowHandle and winUser.getClassName(windowHandle)==\"Web Platform Embedding\":\r\n\t\t\t\t\tobj.role=controlTypes.ROLE_APPLICATION\r\n", "path": "source/appModules/wwahost.py"}]}
num_tokens_prompt: 1,231
num_tokens_diff: 752

problem_id: gh_patches_debug_34722
source: rasdani/github-patches
task_type: git_diff
in_source_id: sql-machine-learning__elasticdl-355
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Transform BytesCodec into a general codec. now the BytesCodec is for mnist dataset specified, and it should be modified to encode/decode any dataset using bytes format. </issue> <code> [start of record_codec/__init__.py] 1 from record_codec.tf_example_codec import TFExampleCodec 2 3 __all__=[TFExampleCodec] 4 [end of record_codec/__init__.py] [start of record_codec/bytes_codec.py] 1 import numpy as np 2 3 N = 28 4 5 6 # TODO: maybe use TF variant tensor to do more flexible encoding. 7 class BytesCodec(object): 8 def encode(self, data): 9 values = [] 10 for _, f_value in data: 11 values.append(f_value) 12 return np.concatenate(values, axis=None).tobytes() 13 14 def decode(self, record): 15 parsed = np.frombuffer(record, dtype="uint8") 16 assert len(parsed) == N * N + 1 17 label = parsed[-1] 18 parsed = np.resize(parsed[:-1], new_shape=(N, N)) 19 return {'image': parsed, 'label': label} 20 21 def show(self, data, label): 22 """Print the image and label on terminal for debugging""" 23 assert data.shape == (N, N) and data.dtype == "uint8" 24 assert label >= 0 and label <= 9 25 26 def grey(x): 27 return "\033[48;2;%d;%d;%dm" % (x, x, x) + " \033[0m" 28 29 for line in data: 30 s = "".join(grey(x) for x in line) 31 print(s) 32 print("label =", label) 33 [end of record_codec/bytes_codec.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/record_codec/__init__.py b/record_codec/__init__.py --- a/record_codec/__init__.py +++ b/record_codec/__init__.py @@ -1,3 +1,4 @@ +from record_codec.bytes_codec import BytesCodec from record_codec.tf_example_codec import TFExampleCodec -__all__=[TFExampleCodec] +__all__=[BytesCodec, TFExampleCodec] diff --git a/record_codec/bytes_codec.py b/record_codec/bytes_codec.py --- a/record_codec/bytes_codec.py +++ b/record_codec/bytes_codec.py @@ -1,32 +1,42 @@ import numpy as np -N = 28 - -# TODO: maybe use TF variant tensor to do more flexible encoding. class BytesCodec(object): + def __init__(self, feature_columns): + self._feature_columns = feature_columns + self._col_id = { + c.name: order for order, c in enumerate(feature_columns) + } + def encode(self, data): - values = [] - for _, f_value in data: - values.append(f_value) - return np.concatenate(values, axis=None).tobytes() + # Rearrange the data in order of the columns. + values = [None] * len(self._feature_columns) + for f_name, f_value in data: + col_id = self._col_id[f_name] + column = self._feature_columns[col_id] + if column.dtype != f_value.dtype or column.shape != f_value.shape: + raise ValueError( + "Input data doesn't match column %s definition: column: (%s, %s) data: (%s, %s)" % ( + f_name, column.dtype, column.shape, f_value.dtype, f_value.shape) + ) + values[col_id] = f_value.tobytes() + for id, value in enumerate(values): + if value is None: + raise ValueError( + "Missing value for column: %s", + self._col_id[id].name + ) + return b"".join(values) def decode(self, record): - parsed = np.frombuffer(record, dtype="uint8") - assert len(parsed) == N * N + 1 - label = parsed[-1] - parsed = np.resize(parsed[:-1], new_shape=(N, N)) - return {'image': parsed, 'label': label} - - def show(self, data, label): - """Print the image and label on terminal for debugging""" - assert data.shape == (N, N) and data.dtype == "uint8" - assert label >= 0 and label <= 9 - - def grey(x): - return "\033[48;2;%d;%d;%dm" % (x, x, x) + " \033[0m" - - for line in data: - s = "".join(grey(x) for x in line) - print(s) - print("label =", label) + offset = 0 + res = {} + for c in self._feature_columns: + count = np.prod(c.shape) + res[c.name] = np.frombuffer( + record, + dtype=c.dtype.as_numpy_dtype, + count=count, + offset=offset).reshape(c.shape) + offset += count * c.dtype.size + return res
{"golden_diff": "diff --git a/record_codec/__init__.py b/record_codec/__init__.py\n--- a/record_codec/__init__.py\n+++ b/record_codec/__init__.py\n@@ -1,3 +1,4 @@\n+from record_codec.bytes_codec import BytesCodec\n from record_codec.tf_example_codec import TFExampleCodec\n \n-__all__=[TFExampleCodec]\n+__all__=[BytesCodec, TFExampleCodec]\ndiff --git a/record_codec/bytes_codec.py b/record_codec/bytes_codec.py\n--- a/record_codec/bytes_codec.py\n+++ b/record_codec/bytes_codec.py\n@@ -1,32 +1,42 @@\n import numpy as np\n \n-N = 28\n \n-\n-# TODO: maybe use TF variant tensor to do more flexible encoding.\n class BytesCodec(object):\n+ def __init__(self, feature_columns):\n+ self._feature_columns = feature_columns\n+ self._col_id = {\n+ c.name: order for order, c in enumerate(feature_columns)\n+ }\n+\n def encode(self, data):\n- values = [] \n- for _, f_value in data:\n- values.append(f_value)\n- return np.concatenate(values, axis=None).tobytes()\n+ # Rearrange the data in order of the columns.\n+ values = [None] * len(self._feature_columns)\n+ for f_name, f_value in data:\n+ col_id = self._col_id[f_name]\n+ column = self._feature_columns[col_id]\n+ if column.dtype != f_value.dtype or column.shape != f_value.shape:\n+ raise ValueError(\n+ \"Input data doesn't match column %s definition: column: (%s, %s) data: (%s, %s)\" % (\n+ f_name, column.dtype, column.shape, f_value.dtype, f_value.shape)\n+ )\n+ values[col_id] = f_value.tobytes()\n+ for id, value in enumerate(values):\n+ if value is None:\n+ raise ValueError(\n+ \"Missing value for column: %s\",\n+ self._col_id[id].name\n+ )\n+ return b\"\".join(values)\n \n def decode(self, record):\n- parsed = np.frombuffer(record, dtype=\"uint8\")\n- assert len(parsed) == N * N + 1\n- label = parsed[-1]\n- parsed = np.resize(parsed[:-1], new_shape=(N, N))\n- return {'image': parsed, 'label': label}\n-\n- def show(self, data, label):\n- \"\"\"Print the image and label on terminal for debugging\"\"\"\n- assert data.shape == (N, N) and data.dtype == \"uint8\"\n- assert label >= 0 and label <= 9\n-\n- def grey(x):\n- return \"\\033[48;2;%d;%d;%dm\" % (x, x, x) + \" \\033[0m\"\n-\n- for line in data:\n- s = \"\".join(grey(x) for x in line)\n- print(s)\n- print(\"label =\", label)\n+ offset = 0\n+ res = {}\n+ for c in self._feature_columns:\n+ count = np.prod(c.shape)\n+ res[c.name] = np.frombuffer(\n+ record,\n+ dtype=c.dtype.as_numpy_dtype,\n+ count=count,\n+ offset=offset).reshape(c.shape)\n+ offset += count * c.dtype.size\n+ return res\n", "issue": "Transform BytesCodec into a general codec.\nnow the BytesCodec is for mnist dataset specified, and it should be modified to encode/decode any dataset using bytes format.\n", "before_files": [{"content": "from record_codec.tf_example_codec import TFExampleCodec\n\n__all__=[TFExampleCodec]\n", "path": "record_codec/__init__.py"}, {"content": "import numpy as np\n\nN = 28\n\n\n# TODO: maybe use TF variant tensor to do more flexible encoding.\nclass BytesCodec(object):\n def encode(self, data):\n values = [] \n for _, f_value in data:\n values.append(f_value)\n return np.concatenate(values, axis=None).tobytes()\n\n def decode(self, record):\n parsed = np.frombuffer(record, dtype=\"uint8\")\n assert len(parsed) == N * N + 1\n label = parsed[-1]\n parsed = np.resize(parsed[:-1], new_shape=(N, N))\n return {'image': parsed, 'label': label}\n\n def show(self, data, label):\n \"\"\"Print the image and label on terminal for debugging\"\"\"\n assert data.shape == (N, N) and data.dtype == \"uint8\"\n assert label >= 0 and 
label <= 9\n\n def grey(x):\n return \"\\033[48;2;%d;%d;%dm\" % (x, x, x) + \" \\033[0m\"\n\n for line in data:\n s = \"\".join(grey(x) for x in line)\n print(s)\n print(\"label =\", label)\n", "path": "record_codec/bytes_codec.py"}]}
num_tokens_prompt: 937
num_tokens_diff: 765

problem_id: gh_patches_debug_13513
source: rasdani/github-patches
task_type: git_diff
in_source_id: easybuilders__easybuild-easyblocks-2109
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> EB_PYTHON startup behaviour The documentation at https://easybuild.readthedocs.io/en/latest/Python-2-3-compatibility.html?#controlling-which-python-command-easybuild-will-use-via-eb-python suggests that one can control which version of Python EasyBuild will use by setting EB_PYTHON. Since https://github.com/easybuilders/easybuild-easyblocks/commit/40b76bef5a1f4f149e0c13cce913c051b54da5a3 running `module load EasyBuild` will now overwrite any existing EB_PYTHON environment variable. While it is still possible to configure EB_PYTHON after running `module load EasyBuild`, it is no longer possible to configure EB_PYTHON first (e.g. in one's shell startup), since it is overwritten when EasyBuild loads. If this is the desired behaviour then it should be documented. If this is not the desired behaviour, then it is a bug. </issue> <code> [start of easybuild/easyblocks/e/easybuildmeta.py] 1 # # 2 # Copyright 2013-2020 Ghent University 3 # 4 # This file is part of EasyBuild, 5 # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en), 6 # with support of Ghent University (http://ugent.be/hpc), 7 # the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be), 8 # Flemish Research Foundation (FWO) (http://www.fwo.be/en) 9 # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en). 10 # 11 # https://github.com/easybuilders/easybuild 12 # 13 # EasyBuild is free software: you can redistribute it and/or modify 14 # it under the terms of the GNU General Public License as published by 15 # the Free Software Foundation v2. 16 # 17 # EasyBuild is distributed in the hope that it will be useful, 18 # but WITHOUT ANY WARRANTY; without even the implied warranty of 19 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 # GNU General Public License for more details. 21 # 22 # You should have received a copy of the GNU General Public License 23 # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>. 24 # # 25 """ 26 EasyBuild support for installing EasyBuild, implemented as an easyblock 27 28 @author: Kenneth Hoste (UGent) 29 """ 30 import copy 31 import os 32 import re 33 from distutils.version import LooseVersion 34 35 from easybuild.easyblocks.generic.pythonpackage import PythonPackage 36 from easybuild.tools.build_log import EasyBuildError 37 from easybuild.tools.filetools import read_file 38 from easybuild.tools.modules import get_software_root_env_var_name 39 from easybuild.tools.py2vs3 import OrderedDict 40 from easybuild.tools.utilities import flatten 41 42 43 # note: we can't use EB_EasyBuild as easyblock name, as that would require an easyblock named 'easybuild.py', 44 # which would screw up namespacing and create all kinds of problems (e.g. 
easyblocks not being found anymore) 45 class EB_EasyBuildMeta(PythonPackage): 46 """Support for install EasyBuild.""" 47 48 def __init__(self, *args, **kwargs): 49 """Initialize custom class variables.""" 50 super(EB_EasyBuildMeta, self).__init__(*args, **kwargs) 51 self.real_initial_environ = None 52 53 self.easybuild_pkgs = ['easybuild-framework', 'easybuild-easyblocks', 'easybuild-easyconfigs'] 54 if LooseVersion(self.version) >= LooseVersion('2.0') and LooseVersion(self.version) <= LooseVersion('3.999'): 55 # deliberately include vsc-install & vsc-base twice; 56 # first time to ensure the specified vsc-install/vsc-base package is available when framework gets installed 57 self.easybuild_pkgs.insert(0, 'vsc-base') 58 self.easybuild_pkgs.insert(0, 'vsc-install') 59 # second time as last package to be installed, to ensure that the vsc-base version listed 60 # in easy-install.pth is the one specified; 61 # when installing the easybuild-* packages, the vsc-base version in easy-install.pth may be 'bumped' 62 # if a newer vsc-base version is found somewhere (e.g. provided by the OS) 63 self.easybuild_pkgs.extend(['vsc-base', 'vsc-install']) 64 # consider setuptools first, in case it is listed as a sources 65 self.easybuild_pkgs.insert(0, 'setuptools') 66 67 def check_readiness_step(self): 68 """Make sure EasyBuild can be installed with a loaded EasyBuild module.""" 69 env_var_name = get_software_root_env_var_name(self.name) 70 if env_var_name in os.environ: 71 os.environ.pop(env_var_name) 72 self.log.debug("$%s is unset so EasyBuild can be installed with a loaded EasyBuild module" % env_var_name) 73 else: 74 self.log.debug("Not unsetting $%s since it's not set" % env_var_name) 75 76 super(EB_EasyBuildMeta, self).check_readiness_step() 77 78 def build_step(self): 79 """No building for EasyBuild packages.""" 80 pass 81 82 def install_step(self): 83 """Install EasyBuild packages one by one.""" 84 try: 85 subdirs = os.listdir(self.builddir) 86 for pkg in self.easybuild_pkgs: 87 seldirs = [x for x in subdirs if x.startswith(pkg)] 88 if len(seldirs) != 1: 89 # setuptools is optional since it may be available in the OS; 90 # vsc-install and vsc-base sources are optional, 91 # they can be pulled in from PyPi when installing easybuild-framework too 92 if pkg not in ['setuptools', 'vsc-base', 'vsc-install']: 93 raise EasyBuildError("Failed to find required EasyBuild package %s (subdirs: %s, seldirs: %s)", 94 pkg, subdirs, seldirs) 95 96 else: 97 self.log.info("Installing package %s", pkg) 98 os.chdir(os.path.join(self.builddir, seldirs[0])) 99 super(EB_EasyBuildMeta, self).install_step() 100 101 except OSError as err: 102 raise EasyBuildError("Failed to install EasyBuild packages: %s", err) 103 104 def post_install_step(self): 105 """Remove setuptools.pth file that hard includes a system-wide (site-packages) path, if it is there.""" 106 107 setuptools_pth = os.path.join(self.installdir, self.pylibdir, 'setuptools.pth') 108 if os.path.exists(setuptools_pth): 109 setuptools_pth_txt = read_file(setuptools_pth) 110 # any line that starts with '/' is a sign of trouble 111 sys_path_regex = re.compile('^/', re.M) 112 if sys_path_regex.search(setuptools_pth_txt): 113 self.log.warning("Found %s, and includes one or more absolute system paths. 
Removing it.", 114 setuptools_pth) 115 try: 116 os.remove(setuptools_pth) 117 except OSError as err: 118 raise EasyBuildError("Failed to remove %s: %s", setuptools_pth, err) 119 120 def sanity_check_step(self): 121 """Custom sanity check for EasyBuild.""" 122 123 # check whether easy-install.pth contains correct entries 124 easy_install_pth = os.path.join(self.installdir, self.pylibdir, 'easy-install.pth') 125 if os.path.exists(easy_install_pth): 126 easy_install_pth_txt = read_file(easy_install_pth) 127 128 ignore_pkgs = ['setuptools', 'vsc-install'] 129 if LooseVersion(self.version) > LooseVersion('3.999'): 130 ignore_pkgs.append('vsc-base') 131 132 for pkg in [p for p in self.easybuild_pkgs if p not in ignore_pkgs]: 133 if pkg == 'vsc-base': 134 # don't include strict version check for vsc-base 135 pkg_regex = re.compile(r"^\./%s" % pkg.replace('-', '_'), re.M) 136 else: 137 major_minor_version = '.'.join(self.version.split('.')[:2]) 138 pkg_regex = re.compile(r"^\./%s-%s" % (pkg.replace('-', '_'), major_minor_version), re.M) 139 140 if not pkg_regex.search(easy_install_pth_txt): 141 raise EasyBuildError("Failed to find pattern '%s' in %s: %s", 142 pkg_regex.pattern, easy_install_pth, easy_install_pth_txt) 143 144 # list of dirs to check, by package 145 # boolean indicates whether dir is expected to reside in Python lib/pythonX/site-packages dir 146 subdirs_by_pkg = { 147 'easybuild-framework': [('easybuild/framework', True), ('easybuild/tools', True)], 148 'easybuild-easyblocks': [('easybuild/easyblocks', True)], 149 'easybuild-easyconfigs': [('easybuild/easyconfigs', False)], 150 } 151 if LooseVersion(self.version) >= LooseVersion('2.0') and LooseVersion(self.version) < LooseVersion('3.999'): 152 subdirs_by_pkg.update({ 153 'vsc-base': [('vsc/utils', True)], 154 }) 155 156 # final list of directories to check, by setup tool 157 # order matters, e.g. setuptools before distutils 158 eb_dirs = OrderedDict() 159 eb_dirs['setuptools'] = [] 160 eb_dirs['distutils.core'] = flatten([x for x in subdirs_by_pkg.values()]) 161 162 # determine setup tool (setuptools or distutils) 163 setup_tool = None 164 for tool in eb_dirs.keys(): 165 self.log.debug("Trying %s.." 
% tool) 166 try: 167 exec("from %s import setup" % tool) 168 setup_tool = tool 169 break 170 except ImportError: 171 pass 172 self.log.debug('setup_tool: %s' % setup_tool) 173 174 # for a setuptools installation, we need to figure out the egg dirs, 175 # since we don't know the individual package versions 176 if setup_tool == 'setuptools': 177 try: 178 installed_dirs = os.listdir(os.path.join(self.installdir, self.pylibdir)) 179 for (pkg, subdirs) in subdirs_by_pkg.items(): 180 sel_dirs = [x for x in installed_dirs if x.startswith(pkg.replace('-', '_'))] 181 if not len(sel_dirs) == 1: 182 raise EasyBuildError("Failed to isolate installed egg dir for %s", pkg) 183 184 for (subdir, _) in subdirs: 185 # eggs always go in Python lib/pythonX/site-packages dir with setuptools 186 eb_dirs['setuptools'].append((os.path.join(sel_dirs[0], subdir), True)) 187 except OSError as err: 188 raise EasyBuildError("Failed to determine sanity check dir paths: %s", err) 189 190 # set of sanity check paths to check for EasyBuild 191 custom_paths = { 192 'files': ['bin/eb'], 193 'dirs': [self.pylibdir] + [[x, os.path.join(self.pylibdir, x)][y] for (x, y) in eb_dirs[setup_tool]], 194 } 195 196 # make sure we don't trip over deprecated behavior in old EasyBuild versions 197 eb_cmd = 'eb' 198 if LooseVersion(self.version) <= LooseVersion('1.16.0'): 199 eb_cmd = 'EASYBUILD_DEPRECATED=1.0 eb' 200 201 # set of sanity check commands to run for EasyBuild 202 custom_commands = [ 203 # this may spit out a wrong version, but that should be safe to ignore 204 # occurs when the EasyBuild being used is newer than the EasyBuild being installed 205 (eb_cmd, '--version'), 206 (eb_cmd, '-a'), 207 (eb_cmd, '-e ConfigureMake -a'), 208 ] 209 210 # (temporary) cleanse copy of initial environment to avoid conflict with (potentially) loaded EasyBuild module 211 self.real_initial_environ = copy.deepcopy(self.initial_environ) 212 for env_var in ['_LMFILES_', 'LOADEDMODULES']: 213 if env_var in self.initial_environ: 214 self.initial_environ.pop(env_var) 215 os.environ.pop(env_var) 216 self.log.debug("Unset $%s in current env and copy of original env to make sanity check work" % env_var) 217 218 super(EB_EasyBuildMeta, self).sanity_check_step(custom_paths=custom_paths, custom_commands=custom_commands) 219 220 def make_module_extra(self): 221 """ 222 Set $EB_PYTHON to ensure that this EasyBuild installation uses the same Python executable it was installed with. 223 """ 224 txt = super(EB_EasyBuildMeta, self).make_module_extra() 225 txt += self.module_generator.set_environment('EB_PYTHON', self.python_cmd) 226 return txt 227 228 def make_module_step(self, fake=False): 229 """Create module file, before copy of original environment that was tampered with is restored.""" 230 modpath = super(EB_EasyBuildMeta, self).make_module_step(fake=fake) 231 232 if not fake: 233 # restore copy of original environment 234 self.initial_environ = copy.deepcopy(self.real_initial_environ) 235 self.log.debug("Restored copy of original environment") 236 237 return modpath 238 [end of easybuild/easyblocks/e/easybuildmeta.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/easybuild/easyblocks/e/easybuildmeta.py b/easybuild/easyblocks/e/easybuildmeta.py --- a/easybuild/easyblocks/e/easybuildmeta.py +++ b/easybuild/easyblocks/e/easybuildmeta.py @@ -219,10 +219,11 @@ def make_module_extra(self): """ - Set $EB_PYTHON to ensure that this EasyBuild installation uses the same Python executable it was installed with. + Set $EB_INSTALLPYTHON to ensure that this EasyBuild installation uses the same Python executable it was + installed with (which can still be overridden by the user with $EB_PYTHON). """ txt = super(EB_EasyBuildMeta, self).make_module_extra() - txt += self.module_generator.set_environment('EB_PYTHON', self.python_cmd) + txt += self.module_generator.set_environment('EB_INSTALLPYTHON', self.python_cmd) return txt def make_module_step(self, fake=False):
{"golden_diff": "diff --git a/easybuild/easyblocks/e/easybuildmeta.py b/easybuild/easyblocks/e/easybuildmeta.py\n--- a/easybuild/easyblocks/e/easybuildmeta.py\n+++ b/easybuild/easyblocks/e/easybuildmeta.py\n@@ -219,10 +219,11 @@\n \n def make_module_extra(self):\n \"\"\"\n- Set $EB_PYTHON to ensure that this EasyBuild installation uses the same Python executable it was installed with.\n+ Set $EB_INSTALLPYTHON to ensure that this EasyBuild installation uses the same Python executable it was\n+ installed with (which can still be overridden by the user with $EB_PYTHON).\n \"\"\"\n txt = super(EB_EasyBuildMeta, self).make_module_extra()\n- txt += self.module_generator.set_environment('EB_PYTHON', self.python_cmd)\n+ txt += self.module_generator.set_environment('EB_INSTALLPYTHON', self.python_cmd)\n return txt\n \n def make_module_step(self, fake=False):\n", "issue": "EB_PYTHON startup behaviour\nThe documentation at https://easybuild.readthedocs.io/en/latest/Python-2-3-compatibility.html?#controlling-which-python-command-easybuild-will-use-via-eb-python suggests that one can control which version of Python EasyBuild will use by setting EB_PYTHON.\r\n\r\nSince https://github.com/easybuilders/easybuild-easyblocks/commit/40b76bef5a1f4f149e0c13cce913c051b54da5a3 running `module load EasyBuild` will now overwrite any existing EB_PYTHON environment variable.\r\n\r\nWhile it is still possible to configure EB_PYTHON after running `module load EasyBuild`, it is no longer possible to configure EB_PYTHON first (e.g. in one's shell startup), since it is overwritten when EasyBuild loads.\r\n\r\nIf this is the desired behaviour then it should be documented. If this is not the desired behaviour, then it is a bug.\n", "before_files": [{"content": "# #\n# Copyright 2013-2020 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),\n# Flemish Research Foundation (FWO) (http://www.fwo.be/en)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. 
If not, see <http://www.gnu.org/licenses/>.\n# #\n\"\"\"\nEasyBuild support for installing EasyBuild, implemented as an easyblock\n\n@author: Kenneth Hoste (UGent)\n\"\"\"\nimport copy\nimport os\nimport re\nfrom distutils.version import LooseVersion\n\nfrom easybuild.easyblocks.generic.pythonpackage import PythonPackage\nfrom easybuild.tools.build_log import EasyBuildError\nfrom easybuild.tools.filetools import read_file\nfrom easybuild.tools.modules import get_software_root_env_var_name\nfrom easybuild.tools.py2vs3 import OrderedDict\nfrom easybuild.tools.utilities import flatten\n\n\n# note: we can't use EB_EasyBuild as easyblock name, as that would require an easyblock named 'easybuild.py',\n# which would screw up namespacing and create all kinds of problems (e.g. easyblocks not being found anymore)\nclass EB_EasyBuildMeta(PythonPackage):\n \"\"\"Support for install EasyBuild.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize custom class variables.\"\"\"\n super(EB_EasyBuildMeta, self).__init__(*args, **kwargs)\n self.real_initial_environ = None\n\n self.easybuild_pkgs = ['easybuild-framework', 'easybuild-easyblocks', 'easybuild-easyconfigs']\n if LooseVersion(self.version) >= LooseVersion('2.0') and LooseVersion(self.version) <= LooseVersion('3.999'):\n # deliberately include vsc-install & vsc-base twice;\n # first time to ensure the specified vsc-install/vsc-base package is available when framework gets installed\n self.easybuild_pkgs.insert(0, 'vsc-base')\n self.easybuild_pkgs.insert(0, 'vsc-install')\n # second time as last package to be installed, to ensure that the vsc-base version listed\n # in easy-install.pth is the one specified;\n # when installing the easybuild-* packages, the vsc-base version in easy-install.pth may be 'bumped'\n # if a newer vsc-base version is found somewhere (e.g. 
provided by the OS)\n self.easybuild_pkgs.extend(['vsc-base', 'vsc-install'])\n # consider setuptools first, in case it is listed as a sources\n self.easybuild_pkgs.insert(0, 'setuptools')\n\n def check_readiness_step(self):\n \"\"\"Make sure EasyBuild can be installed with a loaded EasyBuild module.\"\"\"\n env_var_name = get_software_root_env_var_name(self.name)\n if env_var_name in os.environ:\n os.environ.pop(env_var_name)\n self.log.debug(\"$%s is unset so EasyBuild can be installed with a loaded EasyBuild module\" % env_var_name)\n else:\n self.log.debug(\"Not unsetting $%s since it's not set\" % env_var_name)\n\n super(EB_EasyBuildMeta, self).check_readiness_step()\n\n def build_step(self):\n \"\"\"No building for EasyBuild packages.\"\"\"\n pass\n\n def install_step(self):\n \"\"\"Install EasyBuild packages one by one.\"\"\"\n try:\n subdirs = os.listdir(self.builddir)\n for pkg in self.easybuild_pkgs:\n seldirs = [x for x in subdirs if x.startswith(pkg)]\n if len(seldirs) != 1:\n # setuptools is optional since it may be available in the OS;\n # vsc-install and vsc-base sources are optional,\n # they can be pulled in from PyPi when installing easybuild-framework too\n if pkg not in ['setuptools', 'vsc-base', 'vsc-install']:\n raise EasyBuildError(\"Failed to find required EasyBuild package %s (subdirs: %s, seldirs: %s)\",\n pkg, subdirs, seldirs)\n\n else:\n self.log.info(\"Installing package %s\", pkg)\n os.chdir(os.path.join(self.builddir, seldirs[0]))\n super(EB_EasyBuildMeta, self).install_step()\n\n except OSError as err:\n raise EasyBuildError(\"Failed to install EasyBuild packages: %s\", err)\n\n def post_install_step(self):\n \"\"\"Remove setuptools.pth file that hard includes a system-wide (site-packages) path, if it is there.\"\"\"\n\n setuptools_pth = os.path.join(self.installdir, self.pylibdir, 'setuptools.pth')\n if os.path.exists(setuptools_pth):\n setuptools_pth_txt = read_file(setuptools_pth)\n # any line that starts with '/' is a sign of trouble\n sys_path_regex = re.compile('^/', re.M)\n if sys_path_regex.search(setuptools_pth_txt):\n self.log.warning(\"Found %s, and includes one or more absolute system paths. 
Removing it.\",\n setuptools_pth)\n try:\n os.remove(setuptools_pth)\n except OSError as err:\n raise EasyBuildError(\"Failed to remove %s: %s\", setuptools_pth, err)\n\n def sanity_check_step(self):\n \"\"\"Custom sanity check for EasyBuild.\"\"\"\n\n # check whether easy-install.pth contains correct entries\n easy_install_pth = os.path.join(self.installdir, self.pylibdir, 'easy-install.pth')\n if os.path.exists(easy_install_pth):\n easy_install_pth_txt = read_file(easy_install_pth)\n\n ignore_pkgs = ['setuptools', 'vsc-install']\n if LooseVersion(self.version) > LooseVersion('3.999'):\n ignore_pkgs.append('vsc-base')\n\n for pkg in [p for p in self.easybuild_pkgs if p not in ignore_pkgs]:\n if pkg == 'vsc-base':\n # don't include strict version check for vsc-base\n pkg_regex = re.compile(r\"^\\./%s\" % pkg.replace('-', '_'), re.M)\n else:\n major_minor_version = '.'.join(self.version.split('.')[:2])\n pkg_regex = re.compile(r\"^\\./%s-%s\" % (pkg.replace('-', '_'), major_minor_version), re.M)\n\n if not pkg_regex.search(easy_install_pth_txt):\n raise EasyBuildError(\"Failed to find pattern '%s' in %s: %s\",\n pkg_regex.pattern, easy_install_pth, easy_install_pth_txt)\n\n # list of dirs to check, by package\n # boolean indicates whether dir is expected to reside in Python lib/pythonX/site-packages dir\n subdirs_by_pkg = {\n 'easybuild-framework': [('easybuild/framework', True), ('easybuild/tools', True)],\n 'easybuild-easyblocks': [('easybuild/easyblocks', True)],\n 'easybuild-easyconfigs': [('easybuild/easyconfigs', False)],\n }\n if LooseVersion(self.version) >= LooseVersion('2.0') and LooseVersion(self.version) < LooseVersion('3.999'):\n subdirs_by_pkg.update({\n 'vsc-base': [('vsc/utils', True)],\n })\n\n # final list of directories to check, by setup tool\n # order matters, e.g. 
setuptools before distutils\n eb_dirs = OrderedDict()\n eb_dirs['setuptools'] = []\n eb_dirs['distutils.core'] = flatten([x for x in subdirs_by_pkg.values()])\n\n # determine setup tool (setuptools or distutils)\n setup_tool = None\n for tool in eb_dirs.keys():\n self.log.debug(\"Trying %s..\" % tool)\n try:\n exec(\"from %s import setup\" % tool)\n setup_tool = tool\n break\n except ImportError:\n pass\n self.log.debug('setup_tool: %s' % setup_tool)\n\n # for a setuptools installation, we need to figure out the egg dirs,\n # since we don't know the individual package versions\n if setup_tool == 'setuptools':\n try:\n installed_dirs = os.listdir(os.path.join(self.installdir, self.pylibdir))\n for (pkg, subdirs) in subdirs_by_pkg.items():\n sel_dirs = [x for x in installed_dirs if x.startswith(pkg.replace('-', '_'))]\n if not len(sel_dirs) == 1:\n raise EasyBuildError(\"Failed to isolate installed egg dir for %s\", pkg)\n\n for (subdir, _) in subdirs:\n # eggs always go in Python lib/pythonX/site-packages dir with setuptools\n eb_dirs['setuptools'].append((os.path.join(sel_dirs[0], subdir), True))\n except OSError as err:\n raise EasyBuildError(\"Failed to determine sanity check dir paths: %s\", err)\n\n # set of sanity check paths to check for EasyBuild\n custom_paths = {\n 'files': ['bin/eb'],\n 'dirs': [self.pylibdir] + [[x, os.path.join(self.pylibdir, x)][y] for (x, y) in eb_dirs[setup_tool]],\n }\n\n # make sure we don't trip over deprecated behavior in old EasyBuild versions\n eb_cmd = 'eb'\n if LooseVersion(self.version) <= LooseVersion('1.16.0'):\n eb_cmd = 'EASYBUILD_DEPRECATED=1.0 eb'\n\n # set of sanity check commands to run for EasyBuild\n custom_commands = [\n # this may spit out a wrong version, but that should be safe to ignore\n # occurs when the EasyBuild being used is newer than the EasyBuild being installed\n (eb_cmd, '--version'),\n (eb_cmd, '-a'),\n (eb_cmd, '-e ConfigureMake -a'),\n ]\n\n # (temporary) cleanse copy of initial environment to avoid conflict with (potentially) loaded EasyBuild module\n self.real_initial_environ = copy.deepcopy(self.initial_environ)\n for env_var in ['_LMFILES_', 'LOADEDMODULES']:\n if env_var in self.initial_environ:\n self.initial_environ.pop(env_var)\n os.environ.pop(env_var)\n self.log.debug(\"Unset $%s in current env and copy of original env to make sanity check work\" % env_var)\n\n super(EB_EasyBuildMeta, self).sanity_check_step(custom_paths=custom_paths, custom_commands=custom_commands)\n\n def make_module_extra(self):\n \"\"\"\n Set $EB_PYTHON to ensure that this EasyBuild installation uses the same Python executable it was installed with.\n \"\"\"\n txt = super(EB_EasyBuildMeta, self).make_module_extra()\n txt += self.module_generator.set_environment('EB_PYTHON', self.python_cmd)\n return txt\n\n def make_module_step(self, fake=False):\n \"\"\"Create module file, before copy of original environment that was tampered with is restored.\"\"\"\n modpath = super(EB_EasyBuildMeta, self).make_module_step(fake=fake)\n\n if not fake:\n # restore copy of original environment\n self.initial_environ = copy.deepcopy(self.real_initial_environ)\n self.log.debug(\"Restored copy of original environment\")\n\n return modpath\n", "path": "easybuild/easyblocks/e/easybuildmeta.py"}]}
3,984
220
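The golden diff above renames the variable that the generated module file sets from `$EB_PYTHON` to `$EB_INSTALLPYTHON`, so a user's own `$EB_PYTHON` (for example set in shell startup) is no longer overwritten by `module load EasyBuild`. A minimal sketch of the resulting precedence is below; the helper name and the final fallback are illustrative assumptions, not EasyBuild's actual implementation.

```python
import os

def pick_python_cmd():
    # Assumed precedence after the fix: a user-set $EB_PYTHON wins over the
    # $EB_INSTALLPYTHON recorded in the EasyBuild module file; the plain
    # "python" fallback is illustrative only.
    return os.environ.get('EB_PYTHON') or os.environ.get('EB_INSTALLPYTHON') or 'python'
```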
gh_patches_debug_38766
rasdani/github-patches
git_diff
PrefectHQ__prefect-1582
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add Parameter Specification to run CLI command ## Current behavior Currently the CLI command doesn't allow for the specification of parameters for a flow run ## Proposed behavior Couple options we could take here: Multiple --parameter NAME=VALUE options (e.g. each --parameter similar to how docker build does --build-arg) Single --parameters {name: value} that takes a dictionary Also we should have a file option where you can provide a json file </issue> <code> [start of src/prefect/cli/run.py] 1 import time 2 3 import click 4 from tabulate import tabulate 5 6 from prefect.client import Client 7 from prefect.utilities.graphql import EnumValue, with_args 8 9 10 @click.group(hidden=True) 11 def run(): 12 """ 13 Run Prefect flows. 14 15 \b 16 Usage: 17 $ prefect run [STORAGE/PLATFORM] 18 19 \b 20 Arguments: 21 cloud Run flows in Prefect Cloud 22 23 \b 24 Examples: 25 $ prefect run cloud --name Test-Flow --project My-Project 26 Flow Run ID: 2ba3rrfd-411c-4d99-bb2a-f64a6dea78f9 27 28 \b 29 $ prefect run cloud --name Test-Flow --project My-Project --watch 30 Flow Run ID: 2ba3rrfd-411c-4d99-bb2a-f64a6dea78f9 31 Scheduled -> Submitted -> Running -> Success 32 """ 33 pass 34 35 36 @run.command(hidden=True) 37 @click.option( 38 "--name", "-n", required=True, help="The name of a flow to run.", hidden=True 39 ) 40 @click.option( 41 "--project", 42 "-p", 43 required=True, 44 help="The project that contains the flow.", 45 hidden=True, 46 ) 47 @click.option("--version", "-v", type=int, help="A flow version to run.", hidden=True) 48 @click.option( 49 "--watch", 50 "-w", 51 is_flag=True, 52 help="Watch current state of the flow run.", 53 hidden=True, 54 ) 55 @click.option( 56 "--logs", "-l", is_flag=True, help="Live logs of the flow run.", hidden=True 57 ) 58 def cloud(name, project, version, watch, logs): 59 """ 60 Run a deployed flow in Prefect Cloud. 
61 62 \b 63 Options: 64 --name, -n TEXT The name of a flow to run [required] 65 --project, -p TEXT The name of a project that contains the flow [required] 66 --version, -v INTEGER A flow version to run 67 --watch, -w Watch current state of the flow run, stream output to stdout 68 --logs, -l Get logs of the flow run, stream output to stdout 69 """ 70 71 if watch and logs: 72 click.secho( 73 "Streaming state and logs not currently supported together.", fg="red" 74 ) 75 return 76 77 query = { 78 "query": { 79 with_args( 80 "flow", 81 { 82 "where": { 83 "_and": { 84 "name": {"_eq": name}, 85 "version": {"_eq": version}, 86 "project": {"name": {"_eq": project}}, 87 } 88 }, 89 "order_by": { 90 "name": EnumValue("asc"), 91 "version": EnumValue("desc"), 92 }, 93 "distinct_on": EnumValue("name"), 94 }, 95 ): {"id": True} 96 } 97 } 98 99 client = Client() 100 result = client.graphql(query) 101 102 flow_data = result.data.flow 103 104 if flow_data: 105 flow_id = flow_data[0].id 106 else: 107 click.secho("{} not found".format(name), fg="red") 108 return 109 110 flow_run_id = client.create_flow_run(flow_id=flow_id) 111 click.echo("Flow Run ID: {}".format(flow_run_id)) 112 113 if watch: 114 current_states = [] 115 while True: 116 query = { 117 "query": { 118 with_args("flow_run_by_pk", {"id": flow_run_id}): { 119 with_args( 120 "states", 121 {"order_by": {EnumValue("timestamp"): EnumValue("asc")}}, 122 ): {"state": True, "timestamp": True} 123 } 124 } 125 } 126 127 result = client.graphql(query) 128 129 # Filter through retrieved states and output in order 130 for state_index in result.data.flow_run_by_pk.states: 131 state = state_index.state 132 if state not in current_states: 133 if state != "Success" and state != "Failed": 134 click.echo("{} -> ".format(state), nl=False) 135 else: 136 click.echo(state) 137 return 138 139 current_states.append(state) 140 141 time.sleep(3) 142 143 if logs: 144 all_logs = [] 145 146 log_query = { 147 with_args( 148 "logs", {"order_by": {EnumValue("timestamp"): EnumValue("asc")}} 149 ): {"timestamp": True, "message": True, "level": True}, 150 "start_time": True, 151 } 152 153 query = { 154 "query": { 155 with_args( 156 "flow_run", 157 { 158 "where": {"id": {"_eq": flow_run_id}}, 159 "order_by": {EnumValue("start_time"): EnumValue("desc")}, 160 }, 161 ): log_query 162 } 163 } 164 165 while True: 166 result = Client().graphql(query) 167 168 flow_run = result.data.flow_run 169 if not flow_run: 170 click.secho("{} not found".format(flow_run_id), fg="red") 171 return 172 173 new_run = flow_run[0] 174 logs = new_run.logs 175 output = [] 176 177 for i in logs: 178 if [i.timestamp, i.level, i.message] not in all_logs: 179 180 if not len(all_logs): 181 click.echo( 182 tabulate( 183 [[i.timestamp, i.level, i.message]], 184 headers=["TIMESTAMP", "LEVEL", "MESSAGE"], 185 tablefmt="plain", 186 numalign="left", 187 stralign="left", 188 ) 189 ) 190 all_logs.append([i.timestamp, i.level, i.message]) 191 continue 192 193 output.append([i.timestamp, i.level, i.message]) 194 all_logs.append([i.timestamp, i.level, i.message]) 195 196 if output: 197 click.echo( 198 tabulate(output, tablefmt="plain", numalign="left", stralign="left") 199 ) 200 201 # Check if state is either Success or Failed, exit if it is 202 pk_query = { 203 "query": { 204 with_args("flow_run_by_pk", {"id": flow_run_id}): {"state": True} 205 } 206 } 207 result = client.graphql(pk_query) 208 209 if ( 210 result.data.flow_run_by_pk.state == "Success" 211 or result.data.flow_run_by_pk.state == "Failed" 212 ): 213 return 214 
215 time.sleep(3) 216 [end of src/prefect/cli/run.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/prefect/cli/run.py b/src/prefect/cli/run.py --- a/src/prefect/cli/run.py +++ b/src/prefect/cli/run.py @@ -1,3 +1,4 @@ +import json import time import click @@ -45,6 +46,16 @@ hidden=True, ) @click.option("--version", "-v", type=int, help="A flow version to run.", hidden=True) [email protected]( + "--parameters-file", + "-pf", + help="A parameters JSON file.", + hidden=True, + type=click.Path(exists=True), +) [email protected]( + "--parameters-string", "--ps", help="A parameters JSON string.", hidden=True +) @click.option( "--watch", "-w", @@ -55,17 +66,29 @@ @click.option( "--logs", "-l", is_flag=True, help="Live logs of the flow run.", hidden=True ) -def cloud(name, project, version, watch, logs): +def cloud(name, project, version, parameters_file, parameters_string, watch, logs): """ Run a deployed flow in Prefect Cloud. \b Options: - --name, -n TEXT The name of a flow to run [required] - --project, -p TEXT The name of a project that contains the flow [required] - --version, -v INTEGER A flow version to run - --watch, -w Watch current state of the flow run, stream output to stdout - --logs, -l Get logs of the flow run, stream output to stdout + --name, -n TEXT The name of a flow to run [required] + --project, -p TEXT The name of a project that contains the flow [required] + --version, -v INTEGER A flow version to run + --parameters-file, -pf FILE PATH A filepath of a JSON file containing parameters + --parameters-string, -ps TEXT A string of JSON parameters + --watch, -w Watch current state of the flow run, stream output to stdout + --logs, -l Get logs of the flow run, stream output to stdout + + \b + If both `--parameters-file` and `--parameters-string` are provided then the values passed + in through the string will override the values provided from the file. + + \b + e.g. + File contains: {"a": 1, "b": 2} + String: '{"a": 3}' + Parameters passed to the flow run: {"a": 3, "b": 2} """ if watch and logs: @@ -107,7 +130,20 @@ click.secho("{} not found".format(name), fg="red") return - flow_run_id = client.create_flow_run(flow_id=flow_id) + # Load parameters from file if provided + file_params = {} + if parameters_file: + with open(parameters_file) as params_file: + file_params = json.load(params_file) + + # Load parameters from string if provided + string_params = {} + if parameters_string: + string_params = json.loads(parameters_string) + + flow_run_id = client.create_flow_run( + flow_id=flow_id, parameters={**file_params, **string_params} + ) click.echo("Flow Run ID: {}".format(flow_run_id)) if watch:
{"golden_diff": "diff --git a/src/prefect/cli/run.py b/src/prefect/cli/run.py\n--- a/src/prefect/cli/run.py\n+++ b/src/prefect/cli/run.py\n@@ -1,3 +1,4 @@\n+import json\n import time\n \n import click\n@@ -45,6 +46,16 @@\n hidden=True,\n )\n @click.option(\"--version\", \"-v\", type=int, help=\"A flow version to run.\", hidden=True)\[email protected](\n+ \"--parameters-file\",\n+ \"-pf\",\n+ help=\"A parameters JSON file.\",\n+ hidden=True,\n+ type=click.Path(exists=True),\n+)\[email protected](\n+ \"--parameters-string\", \"--ps\", help=\"A parameters JSON string.\", hidden=True\n+)\n @click.option(\n \"--watch\",\n \"-w\",\n@@ -55,17 +66,29 @@\n @click.option(\n \"--logs\", \"-l\", is_flag=True, help=\"Live logs of the flow run.\", hidden=True\n )\n-def cloud(name, project, version, watch, logs):\n+def cloud(name, project, version, parameters_file, parameters_string, watch, logs):\n \"\"\"\n Run a deployed flow in Prefect Cloud.\n \n \\b\n Options:\n- --name, -n TEXT The name of a flow to run [required]\n- --project, -p TEXT The name of a project that contains the flow [required]\n- --version, -v INTEGER A flow version to run\n- --watch, -w Watch current state of the flow run, stream output to stdout\n- --logs, -l Get logs of the flow run, stream output to stdout\n+ --name, -n TEXT The name of a flow to run [required]\n+ --project, -p TEXT The name of a project that contains the flow [required]\n+ --version, -v INTEGER A flow version to run\n+ --parameters-file, -pf FILE PATH A filepath of a JSON file containing parameters\n+ --parameters-string, -ps TEXT A string of JSON parameters\n+ --watch, -w Watch current state of the flow run, stream output to stdout\n+ --logs, -l Get logs of the flow run, stream output to stdout\n+\n+ \\b\n+ If both `--parameters-file` and `--parameters-string` are provided then the values passed\n+ in through the string will override the values provided from the file.\n+\n+ \\b\n+ e.g.\n+ File contains: {\"a\": 1, \"b\": 2}\n+ String: '{\"a\": 3}'\n+ Parameters passed to the flow run: {\"a\": 3, \"b\": 2}\n \"\"\"\n \n if watch and logs:\n@@ -107,7 +130,20 @@\n click.secho(\"{} not found\".format(name), fg=\"red\")\n return\n \n- flow_run_id = client.create_flow_run(flow_id=flow_id)\n+ # Load parameters from file if provided\n+ file_params = {}\n+ if parameters_file:\n+ with open(parameters_file) as params_file:\n+ file_params = json.load(params_file)\n+\n+ # Load parameters from string if provided\n+ string_params = {}\n+ if parameters_string:\n+ string_params = json.loads(parameters_string)\n+\n+ flow_run_id = client.create_flow_run(\n+ flow_id=flow_id, parameters={**file_params, **string_params}\n+ )\n click.echo(\"Flow Run ID: {}\".format(flow_run_id))\n \n if watch:\n", "issue": "Add Parameter Specification to run CLI command\n## Current behavior\r\nCurrently the CLI command doesn't allow for the specification of parameters for a flow run\r\n\r\n\r\n\r\n## Proposed behavior\r\nCouple options we could take here:\r\n\r\nMultiple --parameter NAME=VALUE options (e.g. 
each --parameter similar to how docker build does --build-arg)\r\n\r\nSingle --parameters {name: value} that takes a dictionary\r\n\r\nAlso we should have a file option where you can provide a json file\n", "before_files": [{"content": "import time\n\nimport click\nfrom tabulate import tabulate\n\nfrom prefect.client import Client\nfrom prefect.utilities.graphql import EnumValue, with_args\n\n\[email protected](hidden=True)\ndef run():\n \"\"\"\n Run Prefect flows.\n\n \\b\n Usage:\n $ prefect run [STORAGE/PLATFORM]\n\n \\b\n Arguments:\n cloud Run flows in Prefect Cloud\n\n \\b\n Examples:\n $ prefect run cloud --name Test-Flow --project My-Project\n Flow Run ID: 2ba3rrfd-411c-4d99-bb2a-f64a6dea78f9\n\n \\b\n $ prefect run cloud --name Test-Flow --project My-Project --watch\n Flow Run ID: 2ba3rrfd-411c-4d99-bb2a-f64a6dea78f9\n Scheduled -> Submitted -> Running -> Success\n \"\"\"\n pass\n\n\[email protected](hidden=True)\[email protected](\n \"--name\", \"-n\", required=True, help=\"The name of a flow to run.\", hidden=True\n)\[email protected](\n \"--project\",\n \"-p\",\n required=True,\n help=\"The project that contains the flow.\",\n hidden=True,\n)\[email protected](\"--version\", \"-v\", type=int, help=\"A flow version to run.\", hidden=True)\[email protected](\n \"--watch\",\n \"-w\",\n is_flag=True,\n help=\"Watch current state of the flow run.\",\n hidden=True,\n)\[email protected](\n \"--logs\", \"-l\", is_flag=True, help=\"Live logs of the flow run.\", hidden=True\n)\ndef cloud(name, project, version, watch, logs):\n \"\"\"\n Run a deployed flow in Prefect Cloud.\n\n \\b\n Options:\n --name, -n TEXT The name of a flow to run [required]\n --project, -p TEXT The name of a project that contains the flow [required]\n --version, -v INTEGER A flow version to run\n --watch, -w Watch current state of the flow run, stream output to stdout\n --logs, -l Get logs of the flow run, stream output to stdout\n \"\"\"\n\n if watch and logs:\n click.secho(\n \"Streaming state and logs not currently supported together.\", fg=\"red\"\n )\n return\n\n query = {\n \"query\": {\n with_args(\n \"flow\",\n {\n \"where\": {\n \"_and\": {\n \"name\": {\"_eq\": name},\n \"version\": {\"_eq\": version},\n \"project\": {\"name\": {\"_eq\": project}},\n }\n },\n \"order_by\": {\n \"name\": EnumValue(\"asc\"),\n \"version\": EnumValue(\"desc\"),\n },\n \"distinct_on\": EnumValue(\"name\"),\n },\n ): {\"id\": True}\n }\n }\n\n client = Client()\n result = client.graphql(query)\n\n flow_data = result.data.flow\n\n if flow_data:\n flow_id = flow_data[0].id\n else:\n click.secho(\"{} not found\".format(name), fg=\"red\")\n return\n\n flow_run_id = client.create_flow_run(flow_id=flow_id)\n click.echo(\"Flow Run ID: {}\".format(flow_run_id))\n\n if watch:\n current_states = []\n while True:\n query = {\n \"query\": {\n with_args(\"flow_run_by_pk\", {\"id\": flow_run_id}): {\n with_args(\n \"states\",\n {\"order_by\": {EnumValue(\"timestamp\"): EnumValue(\"asc\")}},\n ): {\"state\": True, \"timestamp\": True}\n }\n }\n }\n\n result = client.graphql(query)\n\n # Filter through retrieved states and output in order\n for state_index in result.data.flow_run_by_pk.states:\n state = state_index.state\n if state not in current_states:\n if state != \"Success\" and state != \"Failed\":\n click.echo(\"{} -> \".format(state), nl=False)\n else:\n click.echo(state)\n return\n\n current_states.append(state)\n\n time.sleep(3)\n\n if logs:\n all_logs = []\n\n log_query = {\n with_args(\n \"logs\", {\"order_by\": 
{EnumValue(\"timestamp\"): EnumValue(\"asc\")}}\n ): {\"timestamp\": True, \"message\": True, \"level\": True},\n \"start_time\": True,\n }\n\n query = {\n \"query\": {\n with_args(\n \"flow_run\",\n {\n \"where\": {\"id\": {\"_eq\": flow_run_id}},\n \"order_by\": {EnumValue(\"start_time\"): EnumValue(\"desc\")},\n },\n ): log_query\n }\n }\n\n while True:\n result = Client().graphql(query)\n\n flow_run = result.data.flow_run\n if not flow_run:\n click.secho(\"{} not found\".format(flow_run_id), fg=\"red\")\n return\n\n new_run = flow_run[0]\n logs = new_run.logs\n output = []\n\n for i in logs:\n if [i.timestamp, i.level, i.message] not in all_logs:\n\n if not len(all_logs):\n click.echo(\n tabulate(\n [[i.timestamp, i.level, i.message]],\n headers=[\"TIMESTAMP\", \"LEVEL\", \"MESSAGE\"],\n tablefmt=\"plain\",\n numalign=\"left\",\n stralign=\"left\",\n )\n )\n all_logs.append([i.timestamp, i.level, i.message])\n continue\n\n output.append([i.timestamp, i.level, i.message])\n all_logs.append([i.timestamp, i.level, i.message])\n\n if output:\n click.echo(\n tabulate(output, tablefmt=\"plain\", numalign=\"left\", stralign=\"left\")\n )\n\n # Check if state is either Success or Failed, exit if it is\n pk_query = {\n \"query\": {\n with_args(\"flow_run_by_pk\", {\"id\": flow_run_id}): {\"state\": True}\n }\n }\n result = client.graphql(pk_query)\n\n if (\n result.data.flow_run_by_pk.state == \"Success\"\n or result.data.flow_run_by_pk.state == \"Failed\"\n ):\n return\n\n time.sleep(3)\n", "path": "src/prefect/cli/run.py"}]}
2,558
803
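The patch above adds `--parameters-file` and `--parameters-string` options to `prefect run cloud` and merges them so that values from the JSON string override values from the JSON file. A small sketch of that merge logic, with a hypothetical helper name:

```python
import json

def merge_run_parameters(parameters_file=None, parameters_string=None):
    # File first, then string: keys in the JSON string override keys loaded
    # from the JSON file, matching the docstring added by the patch.
    file_params = {}
    if parameters_file:
        with open(parameters_file) as params_file:
            file_params = json.load(params_file)
    string_params = json.loads(parameters_string) if parameters_string else {}
    return {**file_params, **string_params}

# File contents {"a": 1, "b": 2} combined with string '{"a": 3}'
# yield {"a": 3, "b": 2}.
```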
gh_patches_debug_1859
rasdani/github-patches
git_diff
NVIDIA__NVFlare-191
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> The "show_stats" command got broken The "show_stats server" and "show_stats client" command got the following error. This is caused by this PR change (https://github.com/NVIDIA/NVFlare/pull/162): > show_stats server Error: Failed to communicate with Admin Server localhost on 8003: '_DefaultReplyProcessor' object has no attribute 'process_dict' Done [7269 usecs] 2022-02-08 17:26:12.865006 > </issue> <code> [start of nvflare/fuel/hci/client/api_spec.py] 1 # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from __future__ import annotations 16 17 from abc import ABC, abstractmethod 18 from typing import Optional 19 20 from nvflare.fuel.hci.table import Table 21 22 23 class ReplyProcessor: 24 """A base class for parsing server's response.""" 25 26 def reply_start(self, api: AdminAPISpec, reply_json): 27 pass 28 29 def process_string(self, api: AdminAPISpec, item: str): 30 pass 31 32 def process_success(self, api: AdminAPISpec, item: str): 33 pass 34 35 def process_error(self, api: AdminAPISpec, err: str): 36 pass 37 38 def process_table(self, api: AdminAPISpec, table: Table): 39 pass 40 41 def process_shutdown(self, api: AdminAPISpec, msg: str): 42 pass 43 44 def process_token(self, api: AdminAPISpec, token: str): 45 pass 46 47 def protocol_error(self, api: AdminAPISpec, err: str): 48 pass 49 50 def reply_done(self, api: AdminAPISpec): 51 pass 52 53 54 class AdminAPISpec(ABC): 55 def __init__(self): 56 self.reply_processor = None 57 self.command_result = None 58 59 @abstractmethod 60 def server_execute(self, command: str, reply_processor: Optional[ReplyProcessor] = None): 61 """Executes a command on server side. 62 63 Args: 64 command: The command to be executed. 65 reply_processor: Reply callback to use. 66 """ 67 pass 68 69 def set_command_result(self, result): 70 """Sets the result returning from executing the command.""" 71 self.command_result = result 72 73 def get_command_result(self): 74 """Gets the result returning from executing the command.""" 75 return self.command_result 76 [end of nvflare/fuel/hci/client/api_spec.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/nvflare/fuel/hci/client/api_spec.py b/nvflare/fuel/hci/client/api_spec.py --- a/nvflare/fuel/hci/client/api_spec.py +++ b/nvflare/fuel/hci/client/api_spec.py @@ -38,6 +38,9 @@ def process_table(self, api: AdminAPISpec, table: Table): pass + def process_dict(self, api: AdminAPISpec, data: dict): + pass + def process_shutdown(self, api: AdminAPISpec, msg: str): pass
{"golden_diff": "diff --git a/nvflare/fuel/hci/client/api_spec.py b/nvflare/fuel/hci/client/api_spec.py\n--- a/nvflare/fuel/hci/client/api_spec.py\n+++ b/nvflare/fuel/hci/client/api_spec.py\n@@ -38,6 +38,9 @@\n def process_table(self, api: AdminAPISpec, table: Table):\n pass\n \n+ def process_dict(self, api: AdminAPISpec, data: dict):\n+ pass\n+\n def process_shutdown(self, api: AdminAPISpec, msg: str):\n pass\n", "issue": "The \"show_stats\" command got broken\nThe \"show_stats server\" and \"show_stats client\" command got the following error. This is caused by this PR change (https://github.com/NVIDIA/NVFlare/pull/162):\r\n\r\n> show_stats server\r\nError: Failed to communicate with Admin Server localhost on 8003: '_DefaultReplyProcessor' object has no attribute 'process_dict'\r\nDone [7269 usecs] 2022-02-08 17:26:12.865006\r\n> \r\n\n", "before_files": [{"content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import annotations\n\nfrom abc import ABC, abstractmethod\nfrom typing import Optional\n\nfrom nvflare.fuel.hci.table import Table\n\n\nclass ReplyProcessor:\n \"\"\"A base class for parsing server's response.\"\"\"\n\n def reply_start(self, api: AdminAPISpec, reply_json):\n pass\n\n def process_string(self, api: AdminAPISpec, item: str):\n pass\n\n def process_success(self, api: AdminAPISpec, item: str):\n pass\n\n def process_error(self, api: AdminAPISpec, err: str):\n pass\n\n def process_table(self, api: AdminAPISpec, table: Table):\n pass\n\n def process_shutdown(self, api: AdminAPISpec, msg: str):\n pass\n\n def process_token(self, api: AdminAPISpec, token: str):\n pass\n\n def protocol_error(self, api: AdminAPISpec, err: str):\n pass\n\n def reply_done(self, api: AdminAPISpec):\n pass\n\n\nclass AdminAPISpec(ABC):\n def __init__(self):\n self.reply_processor = None\n self.command_result = None\n\n @abstractmethod\n def server_execute(self, command: str, reply_processor: Optional[ReplyProcessor] = None):\n \"\"\"Executes a command on server side.\n\n Args:\n command: The command to be executed.\n reply_processor: Reply callback to use.\n \"\"\"\n pass\n\n def set_command_result(self, result):\n \"\"\"Sets the result returning from executing the command.\"\"\"\n self.command_result = result\n\n def get_command_result(self):\n \"\"\"Gets the result returning from executing the command.\"\"\"\n return self.command_result\n", "path": "nvflare/fuel/hci/client/api_spec.py"}]}
1,341
131
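The fix above adds a `process_dict` hook to the `ReplyProcessor` base class, so replies that carry dict payloads (as `show_stats` does) no longer fail with `'_DefaultReplyProcessor' object has no attribute 'process_dict'`. A minimal sketch of a processor that uses the new hook; the subclass name and its behaviour are illustrative, not code taken from NVFlare:

```python
from nvflare.fuel.hci.client.api_spec import AdminAPISpec, ReplyProcessor


class StatsReplyProcessor(ReplyProcessor):
    """Illustrative processor that stores a dict reply as the command result."""

    def process_dict(self, api: AdminAPISpec, data: dict):
        api.set_command_result(data)
```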
gh_patches_debug_39747
rasdani/github-patches
git_diff
NVIDIA__NVFlare-359
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Minor inconsistency between study config generation tool and study spec </issue> <code> [start of nvflare/apis/study_manager_spec.py] 1 # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import datetime 16 17 18 class Study: 19 def __init__( 20 self, 21 name: str, 22 description: str, 23 sites: [str], 24 users: [str], 25 start_time: datetime.datetime, 26 end_time: datetime.datetime, 27 reviewers=None, 28 ): 29 self.name = name 30 self.description = description 31 self.sites = sites 32 self.users = users 33 self.start_time = start_time 34 self.end_time = end_time 35 self.reviewers = reviewers 36 self.create_time = None 37 38 39 class StudyManagerSpec(object): 40 def create_study(self, study: Study) -> Study: 41 """Create the study object permanently 42 43 The caller must have validated the sites and users of the study. 44 45 Validate the study before saving: 46 The name of the study must be unique; 47 Sites and users must be defined; 48 Start and end time must make sense. 49 50 Args: 51 study: the caller-provided study info 52 53 Returns: updated study info (e.g. create_time is set) 54 55 """ 56 pass 57 58 def list_studies(self) -> [str]: 59 """ 60 List names of all defined studies 61 62 Returns: list of study names 63 64 """ 65 pass 66 67 def list_active_studies(self) -> [str]: 68 """ 69 List names of all active studies (started but not ended) 70 71 Returns: list of study names 72 73 """ 74 pass 75 76 def get_study(self, name: str) -> Study: 77 """Get the Study object for the specified name. 78 79 Args: 80 name: unique name of the study 81 82 Returns: the Study object 83 84 """ 85 pass 86 [end of nvflare/apis/study_manager_spec.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/nvflare/apis/study_manager_spec.py b/nvflare/apis/study_manager_spec.py --- a/nvflare/apis/study_manager_spec.py +++ b/nvflare/apis/study_manager_spec.py @@ -12,7 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -import datetime +from abc import ABC, abstractmethod +from typing import Dict, List +from datetime import datetime + +from .fl_context import FLContext class Study: @@ -20,32 +24,35 @@ self, name: str, description: str, - sites: [str], - users: [str], - start_time: datetime.datetime, - end_time: datetime.datetime, + contact: str, + participating_clients: List[str], + participating_admins: List[str], + start_date: datetime.date, + end_date: datetime.date, reviewers=None, ): self.name = name self.description = description - self.sites = sites - self.users = users - self.start_time = start_time - self.end_time = end_time + self.contact = contact + self.participating_clients = participating_clients + self.participating_admins = participating_admins + self.start_date = start_date + self.end_date = end_date self.reviewers = reviewers - self.create_time = None + self.created_at = datetime.utcnow().isoformat() -class StudyManagerSpec(object): - def create_study(self, study: Study) -> Study: - """Create the study object permanently +class StudyManagerSpec(ABC): + @abstractmethod + def add_study(self, study: Study, fl_ctx: FLContext) -> Study: + """Add the study object permanently - The caller must have validated the sites and users of the study. + The caller must have validated the participating_clients and participating_admins of the study. Validate the study before saving: The name of the study must be unique; - Sites and users must be defined; - Start and end time must make sense. + participating_clients and participating_admins must be defined; + Start and end date must make sense. Args: study: the caller-provided study info @@ -55,7 +62,8 @@ """ pass - def list_studies(self) -> [str]: + @abstractmethod + def list_studies(self, fl_ctx: FLContext) -> List[str]: """ List names of all defined studies @@ -64,7 +72,8 @@ """ pass - def list_active_studies(self) -> [str]: + @abstractmethod + def list_active_studies(self, fl_ctx: FLContext) -> List[str]: """ List names of all active studies (started but not ended) @@ -73,7 +82,8 @@ """ pass - def get_study(self, name: str) -> Study: + @abstractmethod + def get_study(self, name: str, fl_ctx: FLContext) -> Study: """Get the Study object for the specified name. Args:
{"golden_diff": "diff --git a/nvflare/apis/study_manager_spec.py b/nvflare/apis/study_manager_spec.py\n--- a/nvflare/apis/study_manager_spec.py\n+++ b/nvflare/apis/study_manager_spec.py\n@@ -12,7 +12,11 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-import datetime\n+from abc import ABC, abstractmethod\n+from typing import Dict, List\n+from datetime import datetime\n+\n+from .fl_context import FLContext\n \n \n class Study:\n@@ -20,32 +24,35 @@\n self,\n name: str,\n description: str,\n- sites: [str],\n- users: [str],\n- start_time: datetime.datetime,\n- end_time: datetime.datetime,\n+ contact: str,\n+ participating_clients: List[str],\n+ participating_admins: List[str],\n+ start_date: datetime.date,\n+ end_date: datetime.date,\n reviewers=None,\n ):\n self.name = name\n self.description = description\n- self.sites = sites\n- self.users = users\n- self.start_time = start_time\n- self.end_time = end_time\n+ self.contact = contact\n+ self.participating_clients = participating_clients\n+ self.participating_admins = participating_admins\n+ self.start_date = start_date\n+ self.end_date = end_date\n self.reviewers = reviewers\n- self.create_time = None\n+ self.created_at = datetime.utcnow().isoformat()\n \n \n-class StudyManagerSpec(object):\n- def create_study(self, study: Study) -> Study:\n- \"\"\"Create the study object permanently\n+class StudyManagerSpec(ABC):\n+ @abstractmethod\n+ def add_study(self, study: Study, fl_ctx: FLContext) -> Study:\n+ \"\"\"Add the study object permanently\n \n- The caller must have validated the sites and users of the study.\n+ The caller must have validated the participating_clients and participating_admins of the study.\n \n Validate the study before saving:\n The name of the study must be unique;\n- Sites and users must be defined;\n- Start and end time must make sense.\n+ participating_clients and participating_admins must be defined;\n+ Start and end date must make sense.\n \n Args:\n study: the caller-provided study info\n@@ -55,7 +62,8 @@\n \"\"\"\n pass\n \n- def list_studies(self) -> [str]:\n+ @abstractmethod\n+ def list_studies(self, fl_ctx: FLContext) -> List[str]:\n \"\"\"\n List names of all defined studies\n \n@@ -64,7 +72,8 @@\n \"\"\"\n pass\n \n- def list_active_studies(self) -> [str]:\n+ @abstractmethod\n+ def list_active_studies(self, fl_ctx: FLContext) -> List[str]:\n \"\"\"\n List names of all active studies (started but not ended)\n \n@@ -73,7 +82,8 @@\n \"\"\"\n pass\n \n- def get_study(self, name: str) -> Study:\n+ @abstractmethod\n+ def get_study(self, name: str, fl_ctx: FLContext) -> Study:\n \"\"\"Get the Study object for the specified name.\n \n Args:\n", "issue": "Minor inconsistency between study config generation tool and study spec\n\n", "before_files": [{"content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport datetime\n\n\nclass Study:\n def __init__(\n self,\n name: str,\n description: str,\n sites: [str],\n users: [str],\n start_time: datetime.datetime,\n end_time: datetime.datetime,\n reviewers=None,\n ):\n self.name = name\n self.description = description\n self.sites = sites\n self.users = users\n self.start_time = start_time\n self.end_time = end_time\n self.reviewers = reviewers\n self.create_time = None\n\n\nclass StudyManagerSpec(object):\n def create_study(self, study: Study) -> Study:\n \"\"\"Create the study object permanently\n\n The caller must have validated the sites and users of the study.\n\n Validate the study before saving:\n The name of the study must be unique;\n Sites and users must be defined;\n Start and end time must make sense.\n\n Args:\n study: the caller-provided study info\n\n Returns: updated study info (e.g. create_time is set)\n\n \"\"\"\n pass\n\n def list_studies(self) -> [str]:\n \"\"\"\n List names of all defined studies\n\n Returns: list of study names\n\n \"\"\"\n pass\n\n def list_active_studies(self) -> [str]:\n \"\"\"\n List names of all active studies (started but not ended)\n\n Returns: list of study names\n\n \"\"\"\n pass\n\n def get_study(self, name: str) -> Study:\n \"\"\"Get the Study object for the specified name.\n\n Args:\n name: unique name of the study\n\n Returns: the Study object\n\n \"\"\"\n pass\n", "path": "nvflare/apis/study_manager_spec.py"}]}
1,220
724
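After the change above, `Study` takes `contact`, `participating_clients`, `participating_admins`, `start_date` and `end_date` instead of the old `sites`/`users`/time fields, and `StudyManagerSpec` becomes an ABC whose methods also receive an `FLContext`. A construction sketch with made-up example values:

```python
from datetime import date

from nvflare.apis.study_manager_spec import Study

# All field values below are placeholders; the keyword names follow the
# patched Study.__init__ signature.
study = Study(
    name="example-study",
    description="Example federated study",
    contact="admin@example.com",
    participating_clients=["site-1", "site-2"],
    participating_admins=["admin@example.com"],
    start_date=date(2022, 4, 1),
    end_date=date(2022, 6, 30),
)
```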
gh_patches_debug_7650
rasdani/github-patches
git_diff
electricitymaps__electricitymaps-contrib-2830
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Moldova parser fails due to expired SSL certificate Certificate expired on Friday, 20 November 2020 at 11:15:53 </issue> <code> [start of parsers/MD.py] 1 #!/usr/bin/env python3 2 # coding=utf-8 3 4 """Parser for Moldova.""" 5 6 import arrow 7 from operator import itemgetter 8 import requests 9 10 TYPE_MAPPING = { 11 u'tmva476': 'hydro', # NHE Costeşti (run-of-river) #2 index 12 u'tmva112': 'hydro', # NHE Dubăsari (run-of-river) #4 index 13 u'tmva367': 'gas', # CET Nord (CHPP) #3 index 14 u'tmva42': 'gas', # CET-1 Chişinău (CHPP) #6 index 15 u'tmva378': 'gas', # CET-2 Chişinău (CHPP) #5 index 16 u'tmva1024': 'gas', # CERS Moldovenească (fuel mix 2017 99.92% gas, 0.08% oil) #7 index 17 } 18 19 display_url = 'http://www.moldelectrica.md/ro/activity/system_state' 20 data_url = 'http://www.moldelectrica.md/utils/load4.php' 21 22 23 def get_data(session=None): 24 """ Returns generation data as a list of floats.""" 25 26 s = session or requests.Session() 27 28 #In order for the data url to return data, cookies from the display url must be obtained then reused. 29 response = s.get(display_url) 30 data_response = s.get(data_url) 31 raw_data = data_response.text 32 try: 33 data = [float(i) for i in raw_data.split(',')] 34 except: 35 raise Exception("Not able to parse received data. Check that the specifed URL returns correct data.") 36 37 return data 38 39 40 def fetch_production(zone_key='MD', session=None, target_datetime=None, logger=None): 41 """Requests the last known production mix (in MW) of a given country 42 43 Arguments: 44 zone_key (optional) -- used in case a parser is able to fetch multiple countries 45 session (optional) -- request session passed in order to re-use an existing session 46 47 Return: 48 A dictionary in the form: 49 { 50 'zoneKey': 'FR', 51 'datetime': '2017-01-01T00:00:00Z', 52 'production': { 53 'biomass': 0.0, 54 'coal': 0.0, 55 'gas': 0.0, 56 'hydro': 0.0, 57 'nuclear': null, 58 'oil': 0.0, 59 'solar': 0.0, 60 'wind': 0.0, 61 'geothermal': 0.0, 62 'unknown': 0.0 63 }, 64 'storage': { 65 'hydro': -10.0, 66 }, 67 'source': 'mysource.com' 68 } 69 """ 70 if target_datetime: 71 raise NotImplementedError('This parser is not yet able to parse past dates') 72 73 grid_status = get_data(session=session) 74 production = {'solar': None, 'wind': None, 'biomass': None, 'nuclear': 0.0} 75 76 production['gas'] = sum(itemgetter(3, 5, 6)(grid_status)) 77 production['hydro'] = sum(itemgetter(2, 4)(grid_status)) 78 production['unknown'] = grid_status[7] 79 80 consumption = grid_status[-5] 81 82 dt = arrow.now('Europe/Chisinau').datetime 83 84 datapoint = { 85 'zoneKey': zone_key, 86 'datetime': dt, 87 'consumption': consumption, 88 'production': production, 89 'storage': {}, 90 'source': 'moldelectrica.md' 91 } 92 93 return datapoint 94 95 96 def fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=None): 97 """Requests the last known power exchange (in MW) between two countries 98 Arguments: 99 zone_key1 -- the first country code 100 zone_key2 -- the second country code; order of the two codes in params doesn't matter 101 session (optional) -- request session passed in order to re-use an existing session 102 Return: 103 A dictionary in the form: 104 { 105 'sortedZoneKeys': 'DK->NO', 106 'datetime': '2017-01-01T00:00:00Z', 107 'netFlow': 0.0, 108 'source': 'mysource.com' 109 } 110 where net flow is from DK into NO 111 """ 112 if target_datetime: 113 
raise NotImplementedError('This parser is not yet able to parse past dates') 114 115 sortedZoneKeys = '->'.join(sorted([zone_key1, zone_key2])) 116 117 exchange_status = get_data(session=session) 118 119 if sortedZoneKeys == 'MD->UA': 120 netflow = -1 * exchange_status[-3] 121 elif sortedZoneKeys == 'MD->RO': 122 netflow = -1 * exchange_status[-2] 123 else: 124 raise NotImplementedError('This exchange pair is not implemented') 125 126 dt = arrow.now('Europe/Chisinau').datetime 127 128 exchange = { 129 'sortedZoneKeys': sortedZoneKeys, 130 'datetime': dt, 131 'netFlow': netflow, 132 'source': 'moldelectrica.md' 133 } 134 135 return exchange 136 137 138 if __name__ == '__main__': 139 """Main method, never used by the Electricity Map backend, but handy for testing.""" 140 141 print('fetch_production() ->') 142 print(fetch_production()) 143 print('fetch_exchange(MD, UA) ->') 144 print(fetch_exchange('MD', 'UA')) 145 print('fetch_exchange(MD, RO) ->') 146 print(fetch_exchange('MD', 'RO')) 147 [end of parsers/MD.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/parsers/MD.py b/parsers/MD.py --- a/parsers/MD.py +++ b/parsers/MD.py @@ -26,8 +26,8 @@ s = session or requests.Session() #In order for the data url to return data, cookies from the display url must be obtained then reused. - response = s.get(display_url) - data_response = s.get(data_url) + response = s.get(display_url, verify=False) + data_response = s.get(data_url, verify=False) raw_data = data_response.text try: data = [float(i) for i in raw_data.split(',')]
{"golden_diff": "diff --git a/parsers/MD.py b/parsers/MD.py\n--- a/parsers/MD.py\n+++ b/parsers/MD.py\n@@ -26,8 +26,8 @@\n s = session or requests.Session()\n \n #In order for the data url to return data, cookies from the display url must be obtained then reused.\n- response = s.get(display_url)\n- data_response = s.get(data_url)\n+ response = s.get(display_url, verify=False)\n+ data_response = s.get(data_url, verify=False)\n raw_data = data_response.text\n try:\n data = [float(i) for i in raw_data.split(',')]\n", "issue": "Moldova parser fails due to expired SSL certificate\nCertificate expired on Friday, 20 November 2020 at 11:15:53\n", "before_files": [{"content": "#!/usr/bin/env python3\n# coding=utf-8\n\n\"\"\"Parser for Moldova.\"\"\"\n\nimport arrow\nfrom operator import itemgetter\nimport requests\n\nTYPE_MAPPING = {\n u'tmva476': 'hydro', # NHE Coste\u015fti (run-of-river) #2 index\n u'tmva112': 'hydro', # NHE Dub\u0103sari (run-of-river) #4 index\n u'tmva367': 'gas', # CET Nord (CHPP) #3 index\n u'tmva42': 'gas', # CET-1 Chi\u015fin\u0103u (CHPP) #6 index\n u'tmva378': 'gas', # CET-2 Chi\u015fin\u0103u (CHPP) #5 index\n u'tmva1024': 'gas', # CERS Moldoveneasc\u0103 (fuel mix 2017 99.92% gas, 0.08% oil) #7 index\n}\n\ndisplay_url = 'http://www.moldelectrica.md/ro/activity/system_state'\ndata_url = 'http://www.moldelectrica.md/utils/load4.php'\n\n\ndef get_data(session=None):\n \"\"\" Returns generation data as a list of floats.\"\"\"\n\n s = session or requests.Session()\n\n #In order for the data url to return data, cookies from the display url must be obtained then reused.\n response = s.get(display_url)\n data_response = s.get(data_url)\n raw_data = data_response.text\n try:\n data = [float(i) for i in raw_data.split(',')]\n except:\n raise Exception(\"Not able to parse received data. 
Check that the specifed URL returns correct data.\")\n\n return data\n\n\ndef fetch_production(zone_key='MD', session=None, target_datetime=None, logger=None):\n \"\"\"Requests the last known production mix (in MW) of a given country\n\n Arguments:\n zone_key (optional) -- used in case a parser is able to fetch multiple countries\n session (optional) -- request session passed in order to re-use an existing session\n\n Return:\n A dictionary in the form:\n {\n 'zoneKey': 'FR',\n 'datetime': '2017-01-01T00:00:00Z',\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': null,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {\n 'hydro': -10.0,\n },\n 'source': 'mysource.com'\n }\n \"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n grid_status = get_data(session=session)\n production = {'solar': None, 'wind': None, 'biomass': None, 'nuclear': 0.0}\n\n production['gas'] = sum(itemgetter(3, 5, 6)(grid_status))\n production['hydro'] = sum(itemgetter(2, 4)(grid_status))\n production['unknown'] = grid_status[7]\n\n consumption = grid_status[-5]\n\n dt = arrow.now('Europe/Chisinau').datetime\n\n datapoint = {\n 'zoneKey': zone_key,\n 'datetime': dt,\n 'consumption': consumption,\n 'production': production,\n 'storage': {},\n 'source': 'moldelectrica.md'\n }\n\n return datapoint\n\n\ndef fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=None):\n \"\"\"Requests the last known power exchange (in MW) between two countries\n Arguments:\n zone_key1 -- the first country code\n zone_key2 -- the second country code; order of the two codes in params doesn't matter\n session (optional) -- request session passed in order to re-use an existing session\n Return:\n A dictionary in the form:\n {\n 'sortedZoneKeys': 'DK->NO',\n 'datetime': '2017-01-01T00:00:00Z',\n 'netFlow': 0.0,\n 'source': 'mysource.com'\n }\n where net flow is from DK into NO\n \"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n sortedZoneKeys = '->'.join(sorted([zone_key1, zone_key2]))\n\n exchange_status = get_data(session=session)\n\n if sortedZoneKeys == 'MD->UA':\n netflow = -1 * exchange_status[-3]\n elif sortedZoneKeys == 'MD->RO':\n netflow = -1 * exchange_status[-2]\n else:\n raise NotImplementedError('This exchange pair is not implemented')\n\n dt = arrow.now('Europe/Chisinau').datetime\n\n exchange = {\n 'sortedZoneKeys': sortedZoneKeys,\n 'datetime': dt,\n 'netFlow': netflow,\n 'source': 'moldelectrica.md'\n }\n\n return exchange\n\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n\n print('fetch_production() ->')\n print(fetch_production())\n print('fetch_exchange(MD, UA) ->')\n print(fetch_exchange('MD', 'UA'))\n print('fetch_exchange(MD, RO) ->')\n print(fetch_exchange('MD', 'RO'))\n", "path": "parsers/MD.py"}]}
2183
146
gh_patches_debug_420
rasdani/github-patches
git_diff
fidals__shopelectro-209
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Move CI to our server Трэвис на беспланом серве билдит не очень. Сейчас у нас один travis ci идёт ~20 мин. Учитывая, что мы будем оперировать задачами с оценкой по 15-30 мин, это слишком долго. Кроме того, Трэвис часто отваливается по ресурсам, что плодит нам в ci рандомные ошибки. А это очень дорого, конечно же. Исследуй разные коробки. Например travis, jenkins, team-city и тд. Нам нужно что-то простенькое, позже развернём коробку на своём серве. Результат задачи - отчёт по исследованию здесь в комментах, решение по инструменту принято, создана новая задача по внедрению инструмента Концы от нашего хоста бери у @duker33 </issue> <code> [start of shopelectro/settings/base.py] 1 """ 2 Django settings for shopelectro project. 3 4 Generated by 'django-admin startproject' using Django 1.9.5. 5 6 For more information on this file, see 7 https://docs.djangoproject.com/en/1.9/topics/settings/ 8 9 For the full list of settings and their values, see 10 https://docs.djangoproject.com/en/1.9/ref/settings/ 11 """ 12 13 import os 14 from datetime import datetime 15 16 import dj_database_url 17 18 # Build paths inside the project like this: os.path.join(BASE_DIR, ...) 19 BASE_DIR = os.path.dirname(os.path.dirname( 20 os.path.dirname(os.path.abspath(__file__)))) 21 22 # Quick-start development settings - unsuitable for production 23 # See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/ 24 25 # SECURITY WARNING: keep the secret key used in production secret! 26 SECRET_KEY = os.environ.get('SECRET_KEY', 'so_secret_key') 27 28 # SECURITY WARNING: don't run with debug turned on in production! 29 DEBUG = True 30 31 # http://bit.ly/sorl-thumbnail-docs 32 THUMBNAIL_DEBUG = False 33 34 ALLOWED_HOSTS = ['*'] 35 36 if os.environ.get('TEST_ENV', False): 37 # disable https in CI 38 # https://docs.djangoproject.com/en/1.9/ref/settings/#secure-proxy-ssl-header 39 SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'http') 40 41 # Enable in frame loading for Ya.Metric 42 # https://docs.djangoproject.com/es/1.10/ref/clickjacking/ 43 # https://yandex.ru/support/metrika/general/counter-webvisor.xml#download-page 44 X_FRAME_OPTIONS = 'ALLOW-FROM http://webvisor.com' 45 46 # Application definition 47 INSTALLED_APPS = [ 48 # https://docs.djangoproject.com/en/1.9/ref/contrib/admin/#django.contrib.admin.autodiscover 49 'django.contrib.contenttypes', 50 'django.contrib.auth', 51 'django.contrib.messages', 52 'django.contrib.redirects', 53 'django.contrib.sessions', 54 'django.contrib.sitemaps', 55 'django.contrib.sites', 56 'django.contrib.staticfiles', 57 'django.contrib.humanize', 58 'django_user_agents', 59 'generic_admin', 60 'django.contrib.admin.apps.SimpleAdminConfig', 61 'debug_toolbar', 62 'mptt', 63 'widget_tweaks', 64 'sorl.thumbnail', 65 'images', 66 'pages', 67 'catalog', 68 'ecommerce', 69 'shopelectro', 70 ] 71 72 MIDDLEWARE = [ 73 'django.middleware.security.SecurityMiddleware', 74 'django.contrib.sessions.middleware.SessionMiddleware', 75 'django.middleware.common.CommonMiddleware', 76 'django.middleware.csrf.CsrfViewMiddleware', 77 'django.contrib.auth.middleware.AuthenticationMiddleware', 78 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 79 'django.contrib.messages.middleware.MessageMiddleware', 80 'django.middleware.clickjacking.XFrameOptionsMiddleware', 81 'django.contrib.redirects.middleware.RedirectFallbackMiddleware', 82 
'django.middleware.locale.LocaleMiddleware', 83 'django_user_agents.middleware.UserAgentMiddleware', 84 'debug_toolbar.middleware.DebugToolbarMiddleware', 85 ] 86 87 ROOT_URLCONF = 'shopelectro.urls' 88 89 TEMPLATES = [ 90 { 91 'BACKEND': 'django.template.backends.django.DjangoTemplates', 92 'DIRS': [os.path.join(BASE_DIR, 'templates')], 93 'APP_DIRS': True, 94 'OPTIONS': { 95 'context_processors': [ 96 'django.template.context_processors.debug', 97 'django.template.context_processors.media', 98 'django.template.context_processors.request', 99 'django.template.context_processors.static', 100 'django.contrib.auth.context_processors.auth', 101 'django.contrib.messages.context_processors.messages', 102 'ecommerce.context_processors.cart', 103 'shopelectro.context_processors.shop', 104 ], 105 }, 106 }, 107 ] 108 109 WSGI_APPLICATION = 'shopelectro.wsgi.application' 110 111 # Password validation 112 # https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators 113 114 AUTH_PASSWORD_VALIDATORS = [ 115 { 116 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', 117 }, 118 { 119 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 120 }, 121 { 122 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', 123 }, 124 { 125 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', 126 }, 127 ] 128 129 # Internationalization 130 # https://docs.djangoproject.com/en/1.9/topics/i18n/ 131 132 LOCALE_NAME = 'en_US' 133 TIME_ZONE = 'UTC' 134 135 USE_I18N = True 136 USE_L10N = True 137 USE_TZ = True 138 139 LOCALE_PATHS = [os.path.join(BASE_DIR, 'shopelectro/locale')] 140 FORMAT_MODULE_PATH = [ 141 'shopelectro.formats', 142 ] 143 144 # Static files (CSS, JavaScript, Images) 145 # https://docs.djangoproject.com/en/1.9/howto/static-files/ 146 STATIC_URL = '/static/' 147 STATIC_ROOT = os.path.join(BASE_DIR, 'static') 148 ASSETS_DIR = os.path.join(BASE_DIR, 'assets') 149 150 STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage' 151 152 STATICFILES_DIRS = [ 153 os.path.join(BASE_DIR, 'front/build'), 154 ASSETS_DIR, 155 ] 156 157 MEDIA_URL = '/media/' 158 MEDIA_ROOT = os.path.join(BASE_DIR, 'media') 159 160 # It is fake-url. 
Correct url will be created on `docker-compose up` stage from `docker/.env` 161 DATABASE_URL = 'postgres://user:pass@db_name/table' 162 DATABASES = { 163 'default': dj_database_url.config( 164 env='DATABASE_URL', 165 default=DATABASE_URL, 166 ) 167 } 168 169 LOGGING = { 170 'version': 1, 171 'disable_existing_loggers': False, 172 'handlers': { 173 'console': { 174 'class': 'logging.StreamHandler', 175 }, 176 }, 177 'loggers': { 178 'django': { 179 'handlers': ['console'], 180 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'), 181 }, 182 }, 183 } 184 185 SITE_CREATED = datetime(2013, 1, 1) 186 187 LOCALHOST = 'http://127.0.0.1:8000/' 188 BASE_URL = 'https://www.shopelectro.ru' 189 190 PLACEHOLDER_IMAGE = 'images/logo.png' 191 PLACEHOLDER_ALT = 'Логотип компании Shopelectro' 192 193 # Autocomplete and search settings 194 SEARCH_SEE_ALL_LABEL = 'Смотреть все результаты' 195 196 # For sitemaps and sites framework 197 SITE_ID = 1 198 SITE_DOMAIN_NAME = 'www.shopelectro.ru' 199 200 # Used to retrieve instances in ecommerce.Cart 201 CART_ID = 'cart' 202 203 # Used to define choices attr in definition of Order.payment_type field 204 PAYMENT_OPTIONS = ( 205 ('cash', 'Наличные'), 206 ('cashless', 'Безналичные и денежные переводы'), 207 ('AC', 'Банковская карта'), 208 ('PC', 'Яндекс.Деньги'), 209 ('GP', 'Связной (терминал)'), 210 ('AB', 'Альфа-Клик'), 211 ) 212 213 # It is fake-pass. Correct pass will be created on `docker-compose up` stage from `docker/.env` 214 YANDEX_SHOP_PASS = os.environ.get('YANDEX_SHOP_PASS', 'so_secret_pass') 215 216 # Used for order's email in ecommerce app 217 FAKE_ORDER_NUMBER = 6000 218 219 # Subjects for different types of emails sent from SE. 220 EMAIL_SUBJECTS = { 221 'call': 'Обратный звонок', 222 'order': 'Заказ №{0.fake_order_number}', 223 'yandex_order': 'Заказ №{0.fake_order_number} | Яндекс.Касса', 224 'one_click': 'Заказ в один клик №{0.fake_order_number}', 225 'ya_feedback_request': 'Оцените нас на Яндекс.Маркете', 226 } 227 228 # Email configs 229 # It is fake-pass. Correct pass will be created on `docker-compose up` stage from `docker/.env` 230 EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD', 'so_secret_pass') 231 EMAIL_HOST_USER = '[email protected]' 232 EMAIL_USE_TLS = True 233 EMAIL_HOST = 'smtp.yandex.ru' 234 EMAIL_PORT = 587 235 EMAIL_SENDER = '[email protected]' 236 EMAIL_RECIPIENT = '[email protected]' 237 SHOP_EMAIL = '[email protected]' 238 239 # FTP configs 240 FTP_USER = os.environ.get('FTP_USER', 'user') 241 FTP_PASS = os.environ.get('FTP_PASS', 'pass') 242 FTP_IP = os.environ.get('FTP_IP', '0.0.0.0') 243 244 # Used in admin image uploads 245 MODEL_TYPES = { 246 'Product': { 247 'app_name': 'shopelectro', 248 'dir_name': 'products', 249 }, 250 'Category': { 251 'app_name': 'shopelectro', 252 'dir_name': 'categories', 253 } 254 } 255 256 # This need for using {% debug %} variable in templates. 
257 INTERNAL_IPS = ( 258 '127.0.0.1', 259 ) 260 261 TOP_PRODUCTS = [291, 438, 1137, 2166, 2725, 2838, 3288, 3884, 3959, 2764] 262 263 SHOP = { 264 'id': '69886', 265 'scid': '64788', 266 'success_url': BASE_URL + '/shop/order-success/', 267 'fail_url': BASE_URL + '/', 268 'cps_phone': '+78124163200', 269 'cps_email': '[email protected]', 270 'local_delivery_cost': 300, 271 'local_delivery_cost_threshold': 5000, 272 } 273 274 # used in data-migrations and tests 275 CUSTOM_PAGES = { 276 'index': { 277 'slug': '', 278 'name': 'Интернет-магазин элементов питания "ShopElectro"', 279 'menu_title': 'Главная', 280 'title': 'Интернет-магазин Элементов питания с доставкой по России', 281 }, 282 'sitemap': { 283 'slug': 'sitemap', 284 'h1': 'Карта сайта', 285 'name': 'Карта сайта', 286 }, 287 'order': { 288 'slug': 'order', 289 'name': 'Оформление заказа', 290 'title': 'Корзина Интернет-магазин shopelectro.ru Санкт-Петербург', 291 }, 292 'search': { 293 'slug': 'search', 294 'name': 'Результаты поиска', 295 }, 296 'catalog': { 297 'slug': 'catalog', 298 'name': 'Каталог товаров', 299 'menu_title': 'Каталог', 300 }, 301 'order_success': { 302 'slug': 'order-success', 303 'name': 'Заказ принят', 304 } 305 } 306 [end of shopelectro/settings/base.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/shopelectro/settings/base.py b/shopelectro/settings/base.py --- a/shopelectro/settings/base.py +++ b/shopelectro/settings/base.py @@ -182,6 +182,8 @@ }, } +SELENIUM_URL = os.environ.get('SELENIUM_URL', 'http://se-selenium:4444/wd/hub') + SITE_CREATED = datetime(2013, 1, 1) LOCALHOST = 'http://127.0.0.1:8000/'
{"golden_diff": "diff --git a/shopelectro/settings/base.py b/shopelectro/settings/base.py\n--- a/shopelectro/settings/base.py\n+++ b/shopelectro/settings/base.py\n@@ -182,6 +182,8 @@\n },\n }\n \n+SELENIUM_URL = os.environ.get('SELENIUM_URL', 'http://se-selenium:4444/wd/hub')\n+\n SITE_CREATED = datetime(2013, 1, 1)\n \n LOCALHOST = 'http://127.0.0.1:8000/'\n", "issue": "Move CI to our server\n\u0422\u0440\u044d\u0432\u0438\u0441 \u043d\u0430 \u0431\u0435\u0441\u043f\u043b\u0430\u043d\u043e\u043c \u0441\u0435\u0440\u0432\u0435 \u0431\u0438\u043b\u0434\u0438\u0442 \u043d\u0435 \u043e\u0447\u0435\u043d\u044c.\r\n\u0421\u0435\u0439\u0447\u0430\u0441 \u0443 \u043d\u0430\u0441 \u043e\u0434\u0438\u043d travis ci \u0438\u0434\u0451\u0442 ~20 \u043c\u0438\u043d. \u0423\u0447\u0438\u0442\u044b\u0432\u0430\u044f, \u0447\u0442\u043e \u043c\u044b \u0431\u0443\u0434\u0435\u043c \u043e\u043f\u0435\u0440\u0438\u0440\u043e\u0432\u0430\u0442\u044c \u0437\u0430\u0434\u0430\u0447\u0430\u043c\u0438 \u0441 \u043e\u0446\u0435\u043d\u043a\u043e\u0439 \u043f\u043e 15-30 \u043c\u0438\u043d, \u044d\u0442\u043e \u0441\u043b\u0438\u0448\u043a\u043e\u043c \u0434\u043e\u043b\u0433\u043e.\r\n\u041a\u0440\u043e\u043c\u0435 \u0442\u043e\u0433\u043e, \u0422\u0440\u044d\u0432\u0438\u0441 \u0447\u0430\u0441\u0442\u043e \u043e\u0442\u0432\u0430\u043b\u0438\u0432\u0430\u0435\u0442\u0441\u044f \u043f\u043e \u0440\u0435\u0441\u0443\u0440\u0441\u0430\u043c, \u0447\u0442\u043e \u043f\u043b\u043e\u0434\u0438\u0442 \u043d\u0430\u043c \u0432 ci \u0440\u0430\u043d\u0434\u043e\u043c\u043d\u044b\u0435 \u043e\u0448\u0438\u0431\u043a\u0438. \u0410 \u044d\u0442\u043e \u043e\u0447\u0435\u043d\u044c \u0434\u043e\u0440\u043e\u0433\u043e, \u043a\u043e\u043d\u0435\u0447\u043d\u043e \u0436\u0435.\r\n\r\n\u0418\u0441\u0441\u043b\u0435\u0434\u0443\u0439 \u0440\u0430\u0437\u043d\u044b\u0435 \u043a\u043e\u0440\u043e\u0431\u043a\u0438. 
\u041d\u0430\u043f\u0440\u0438\u043c\u0435\u0440 travis, jenkins, team-city \u0438 \u0442\u0434.\r\n\u041d\u0430\u043c \u043d\u0443\u0436\u043d\u043e \u0447\u0442\u043e-\u0442\u043e \u043f\u0440\u043e\u0441\u0442\u0435\u043d\u044c\u043a\u043e\u0435, \u043f\u043e\u0437\u0436\u0435 \u0440\u0430\u0437\u0432\u0435\u0440\u043d\u0451\u043c \u043a\u043e\u0440\u043e\u0431\u043a\u0443 \u043d\u0430 \u0441\u0432\u043e\u0451\u043c \u0441\u0435\u0440\u0432\u0435.\r\n\r\n\u0420\u0435\u0437\u0443\u043b\u044c\u0442\u0430\u0442 \u0437\u0430\u0434\u0430\u0447\u0438 - \u043e\u0442\u0447\u0451\u0442 \u043f\u043e \u0438\u0441\u0441\u043b\u0435\u0434\u043e\u0432\u0430\u043d\u0438\u044e \u0437\u0434\u0435\u0441\u044c \u0432 \u043a\u043e\u043c\u043c\u0435\u043d\u0442\u0430\u0445, \u0440\u0435\u0448\u0435\u043d\u0438\u0435 \u043f\u043e \u0438\u043d\u0441\u0442\u0440\u0443\u043c\u0435\u043d\u0442\u0443 \u043f\u0440\u0438\u043d\u044f\u0442\u043e, \u0441\u043e\u0437\u0434\u0430\u043d\u0430 \u043d\u043e\u0432\u0430\u044f \u0437\u0430\u0434\u0430\u0447\u0430 \u043f\u043e \u0432\u043d\u0435\u0434\u0440\u0435\u043d\u0438\u044e \u0438\u043d\u0441\u0442\u0440\u0443\u043c\u0435\u043d\u0442\u0430\r\n\r\n\u041a\u043e\u043d\u0446\u044b \u043e\u0442 \u043d\u0430\u0448\u0435\u0433\u043e \u0445\u043e\u0441\u0442\u0430 \u0431\u0435\u0440\u0438 \u0443 @duker33\n", "before_files": [{"content": "\"\"\"\nDjango settings for shopelectro project.\n\nGenerated by 'django-admin startproject' using Django 1.9.5.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.9/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.9/ref/settings/\n\"\"\"\n\nimport os\nfrom datetime import datetime\n\nimport dj_database_url\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(\n os.path.dirname(os.path.abspath(__file__))))\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get('SECRET_KEY', 'so_secret_key')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\n# http://bit.ly/sorl-thumbnail-docs\nTHUMBNAIL_DEBUG = False\n\nALLOWED_HOSTS = ['*']\n\nif os.environ.get('TEST_ENV', False):\n # disable https in CI\n # https://docs.djangoproject.com/en/1.9/ref/settings/#secure-proxy-ssl-header\n SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'http')\n\n# Enable in frame loading for Ya.Metric\n# https://docs.djangoproject.com/es/1.10/ref/clickjacking/\n# https://yandex.ru/support/metrika/general/counter-webvisor.xml#download-page\nX_FRAME_OPTIONS = 'ALLOW-FROM http://webvisor.com'\n\n# Application definition\nINSTALLED_APPS = [\n # https://docs.djangoproject.com/en/1.9/ref/contrib/admin/#django.contrib.admin.autodiscover\n 'django.contrib.contenttypes',\n 'django.contrib.auth',\n 'django.contrib.messages',\n 'django.contrib.redirects',\n 'django.contrib.sessions',\n 'django.contrib.sitemaps',\n 'django.contrib.sites',\n 'django.contrib.staticfiles',\n 'django.contrib.humanize',\n 'django_user_agents',\n 'generic_admin',\n 'django.contrib.admin.apps.SimpleAdminConfig',\n 'debug_toolbar',\n 'mptt',\n 'widget_tweaks',\n 'sorl.thumbnail',\n 'images',\n 'pages',\n 'catalog',\n 'ecommerce',\n 'shopelectro',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 
'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.contrib.redirects.middleware.RedirectFallbackMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django_user_agents.middleware.UserAgentMiddleware',\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n]\n\nROOT_URLCONF = 'shopelectro.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(BASE_DIR, 'templates')],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.media',\n 'django.template.context_processors.request',\n 'django.template.context_processors.static',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'ecommerce.context_processors.cart',\n 'shopelectro.context_processors.shop',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'shopelectro.wsgi.application'\n\n# Password validation\n# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.9/topics/i18n/\n\nLOCALE_NAME = 'en_US'\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, 'shopelectro/locale')]\nFORMAT_MODULE_PATH = [\n 'shopelectro.formats',\n]\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.9/howto/static-files/\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nASSETS_DIR = os.path.join(BASE_DIR, 'assets')\n\nSTATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'\n\nSTATICFILES_DIRS = [\n os.path.join(BASE_DIR, 'front/build'),\n ASSETS_DIR,\n]\n\nMEDIA_URL = '/media/'\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\n\n# It is fake-url. 
Correct url will be created on `docker-compose up` stage from `docker/.env`\nDATABASE_URL = 'postgres://user:pass@db_name/table'\nDATABASES = {\n 'default': dj_database_url.config(\n env='DATABASE_URL',\n default=DATABASE_URL,\n )\n}\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n },\n}\n\nSITE_CREATED = datetime(2013, 1, 1)\n\nLOCALHOST = 'http://127.0.0.1:8000/'\nBASE_URL = 'https://www.shopelectro.ru'\n\nPLACEHOLDER_IMAGE = 'images/logo.png'\nPLACEHOLDER_ALT = '\u041b\u043e\u0433\u043e\u0442\u0438\u043f \u043a\u043e\u043c\u043f\u0430\u043d\u0438\u0438 Shopelectro'\n\n# Autocomplete and search settings\nSEARCH_SEE_ALL_LABEL = '\u0421\u043c\u043e\u0442\u0440\u0435\u0442\u044c \u0432\u0441\u0435 \u0440\u0435\u0437\u0443\u043b\u044c\u0442\u0430\u0442\u044b'\n\n# For sitemaps and sites framework\nSITE_ID = 1\nSITE_DOMAIN_NAME = 'www.shopelectro.ru'\n\n# Used to retrieve instances in ecommerce.Cart\nCART_ID = 'cart'\n\n# Used to define choices attr in definition of Order.payment_type field\nPAYMENT_OPTIONS = (\n ('cash', '\u041d\u0430\u043b\u0438\u0447\u043d\u044b\u0435'),\n ('cashless', '\u0411\u0435\u0437\u043d\u0430\u043b\u0438\u0447\u043d\u044b\u0435 \u0438 \u0434\u0435\u043d\u0435\u0436\u043d\u044b\u0435 \u043f\u0435\u0440\u0435\u0432\u043e\u0434\u044b'),\n ('AC', '\u0411\u0430\u043d\u043a\u043e\u0432\u0441\u043a\u0430\u044f \u043a\u0430\u0440\u0442\u0430'),\n ('PC', '\u042f\u043d\u0434\u0435\u043a\u0441.\u0414\u0435\u043d\u044c\u0433\u0438'),\n ('GP', '\u0421\u0432\u044f\u0437\u043d\u043e\u0439 (\u0442\u0435\u0440\u043c\u0438\u043d\u0430\u043b)'),\n ('AB', '\u0410\u043b\u044c\u0444\u0430-\u041a\u043b\u0438\u043a'),\n)\n\n# It is fake-pass. Correct pass will be created on `docker-compose up` stage from `docker/.env`\nYANDEX_SHOP_PASS = os.environ.get('YANDEX_SHOP_PASS', 'so_secret_pass')\n\n# Used for order's email in ecommerce app\nFAKE_ORDER_NUMBER = 6000\n\n# Subjects for different types of emails sent from SE.\nEMAIL_SUBJECTS = {\n 'call': '\u041e\u0431\u0440\u0430\u0442\u043d\u044b\u0439 \u0437\u0432\u043e\u043d\u043e\u043a',\n 'order': '\u0417\u0430\u043a\u0430\u0437 \u2116{0.fake_order_number}',\n 'yandex_order': '\u0417\u0430\u043a\u0430\u0437 \u2116{0.fake_order_number} | \u042f\u043d\u0434\u0435\u043a\u0441.\u041a\u0430\u0441\u0441\u0430',\n 'one_click': '\u0417\u0430\u043a\u0430\u0437 \u0432 \u043e\u0434\u0438\u043d \u043a\u043b\u0438\u043a \u2116{0.fake_order_number}',\n 'ya_feedback_request': '\u041e\u0446\u0435\u043d\u0438\u0442\u0435 \u043d\u0430\u0441 \u043d\u0430 \u042f\u043d\u0434\u0435\u043a\u0441.\u041c\u0430\u0440\u043a\u0435\u0442\u0435',\n}\n\n# Email configs\n# It is fake-pass. 
Correct pass will be created on `docker-compose up` stage from `docker/.env`\nEMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD', 'so_secret_pass')\nEMAIL_HOST_USER = '[email protected]'\nEMAIL_USE_TLS = True\nEMAIL_HOST = 'smtp.yandex.ru'\nEMAIL_PORT = 587\nEMAIL_SENDER = '[email protected]'\nEMAIL_RECIPIENT = '[email protected]'\nSHOP_EMAIL = '[email protected]'\n\n# FTP configs\nFTP_USER = os.environ.get('FTP_USER', 'user')\nFTP_PASS = os.environ.get('FTP_PASS', 'pass')\nFTP_IP = os.environ.get('FTP_IP', '0.0.0.0')\n\n# Used in admin image uploads\nMODEL_TYPES = {\n 'Product': {\n 'app_name': 'shopelectro',\n 'dir_name': 'products',\n },\n 'Category': {\n 'app_name': 'shopelectro',\n 'dir_name': 'categories',\n }\n}\n\n# This need for using {% debug %} variable in templates.\nINTERNAL_IPS = (\n '127.0.0.1',\n)\n\nTOP_PRODUCTS = [291, 438, 1137, 2166, 2725, 2838, 3288, 3884, 3959, 2764]\n\nSHOP = {\n 'id': '69886',\n 'scid': '64788',\n 'success_url': BASE_URL + '/shop/order-success/',\n 'fail_url': BASE_URL + '/',\n 'cps_phone': '+78124163200',\n 'cps_email': '[email protected]',\n 'local_delivery_cost': 300,\n 'local_delivery_cost_threshold': 5000,\n}\n\n# used in data-migrations and tests\nCUSTOM_PAGES = {\n 'index': {\n 'slug': '',\n 'name': '\u0418\u043d\u0442\u0435\u0440\u043d\u0435\u0442-\u043c\u0430\u0433\u0430\u0437\u0438\u043d \u044d\u043b\u0435\u043c\u0435\u043d\u0442\u043e\u0432 \u043f\u0438\u0442\u0430\u043d\u0438\u044f \"ShopElectro\"',\n 'menu_title': '\u0413\u043b\u0430\u0432\u043d\u0430\u044f',\n 'title': '\u0418\u043d\u0442\u0435\u0440\u043d\u0435\u0442-\u043c\u0430\u0433\u0430\u0437\u0438\u043d \u042d\u043b\u0435\u043c\u0435\u043d\u0442\u043e\u0432 \u043f\u0438\u0442\u0430\u043d\u0438\u044f \u0441 \u0434\u043e\u0441\u0442\u0430\u0432\u043a\u043e\u0439 \u043f\u043e \u0420\u043e\u0441\u0441\u0438\u0438',\n },\n 'sitemap': {\n 'slug': 'sitemap',\n 'h1': '\u041a\u0430\u0440\u0442\u0430 \u0441\u0430\u0439\u0442\u0430',\n 'name': '\u041a\u0430\u0440\u0442\u0430 \u0441\u0430\u0439\u0442\u0430',\n },\n 'order': {\n 'slug': 'order',\n 'name': '\u041e\u0444\u043e\u0440\u043c\u043b\u0435\u043d\u0438\u0435 \u0437\u0430\u043a\u0430\u0437\u0430',\n 'title': '\u041a\u043e\u0440\u0437\u0438\u043d\u0430 \u0418\u043d\u0442\u0435\u0440\u043d\u0435\u0442-\u043c\u0430\u0433\u0430\u0437\u0438\u043d shopelectro.ru \u0421\u0430\u043d\u043a\u0442-\u041f\u0435\u0442\u0435\u0440\u0431\u0443\u0440\u0433',\n },\n 'search': {\n 'slug': 'search',\n 'name': '\u0420\u0435\u0437\u0443\u043b\u044c\u0442\u0430\u0442\u044b \u043f\u043e\u0438\u0441\u043a\u0430',\n },\n 'catalog': {\n 'slug': 'catalog',\n 'name': '\u041a\u0430\u0442\u0430\u043b\u043e\u0433 \u0442\u043e\u0432\u0430\u0440\u043e\u0432',\n 'menu_title': '\u041a\u0430\u0442\u0430\u043b\u043e\u0433',\n },\n 'order_success': {\n 'slug': 'order-success',\n 'name': '\u0417\u0430\u043a\u0430\u0437 \u043f\u0440\u0438\u043d\u044f\u0442',\n }\n}\n", "path": "shopelectro/settings/base.py"}]}
3944
130
gh_patches_debug_42518
rasdani/github-patches
git_diff
keras-team__autokeras-479
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> feature_request: Support multicore/multiprocess for read_images function ### Feature Description Support multicore for `read_images` function in `image/image_supervised.py`. ### Reason As [read_images function in image_supervised.py](https://github.com/jhfjhfj1/autokeras/blob/36752fcbce58bd1a26c17144637cc0ecbef83da0/autokeras/image/image_supervised.py#L14) does not support multi-core(just read files one by one), so it's **too slow** when reading image files more than 10k files. ### Solution Use multiprocessing or equivalent one in `read_images` function as a default, or optional. </issue> <code> [start of autokeras/image/image_supervised.py] 1 import os 2 from abc import ABC 3 import numpy as np 4 5 from autokeras.constant import Constant 6 from autokeras.nn.loss_function import classification_loss, regression_loss 7 from autokeras.nn.metric import Accuracy, MSE 8 from autokeras.preprocessor import OneHotEncoder, ImageDataTransformer 9 from autokeras.supervised import PortableDeepSupervised, DeepSupervised 10 from autokeras.utils import pickle_to_file, \ 11 read_csv_file, read_image, compute_image_resize_params, resize_image_data 12 13 14 def read_images(img_file_names, images_dir_path): 15 """Read the images from the path and return their numpy.ndarray instance. 16 Return a numpy.ndarray instance containing the training data. 17 18 Args: 19 img_file_names: List containing images names. 20 images_dir_path: Path to the directory containing images. 21 """ 22 x_train = [] 23 if os.path.isdir(images_dir_path): 24 for img_file in img_file_names: 25 img_path = os.path.join(images_dir_path, img_file) 26 if os.path.exists(img_path): 27 img = read_image(img_path) 28 if len(img.shape) < 3: 29 img = img[..., np.newaxis] 30 x_train.append(img) 31 else: 32 raise ValueError("%s image does not exist" % img_file) 33 else: 34 raise ValueError("Directory containing images does not exist") 35 return np.asanyarray(x_train) 36 37 38 def load_image_dataset(csv_file_path, images_path): 39 """Load images from the files and labels from a csv file. 40 41 Second, the dataset is a set of images and the labels are in a CSV file. 42 The CSV file should contain two columns whose names are 'File Name' and 'Label'. 43 The file names in the first column should match the file names of the images with extensions, 44 e.g., .jpg, .png. 45 The path to the CSV file should be passed through the `csv_file_path`. 46 The path to the directory containing all the images should be passed through `image_path`. 47 48 Args: 49 csv_file_path: CSV file path. 50 images_path: Path where images exist. 51 52 Returns: 53 x: Four dimensional numpy.ndarray. The channel dimension is the last dimension. 54 y: The labels. 55 """ 56 img_file_name, y = read_csv_file(csv_file_path) 57 x = read_images(img_file_name, images_path) 58 return np.array(x), np.array(y) 59 60 61 class ImageSupervised(DeepSupervised, ABC): 62 """Abstract image supervised class. 63 64 Attributes: 65 path: A path to the directory to save the classifier as well as intermediate results. 66 cnn: CNN module from net_module.py. 67 y_encoder: Label encoder, used in transform_y or inverse_transform_y for encode the label. For example, 68 if one hot encoder needed, y_encoder can be OneHotEncoder. 69 data_transformer: A transformer class to process the data. See example as ImageDataTransformer. 
70 verbose: A boolean value indicating the verbosity mode which determines whether the search process 71 will be printed to stdout. 72 augment: A boolean value indicating whether the data needs augmentation. If not define, then it 73 will use the value of Constant.DATA_AUGMENTATION which is True by default. 74 searcher_args: A dictionary containing the parameters for the searcher's __init__ function. 75 resize_height: resize image height. 76 resize_width: resize image width. 77 """ 78 79 def __init__(self, augment=None, **kwargs): 80 """Initialize the instance. 81 The classifier will be loaded from the files in 'path' if parameter 'resume' is True. 82 Otherwise it would create a new one. 83 Args: 84 verbose: A boolean of whether the search process will be printed to stdout. 85 path: A string. The path to a directory, where the intermediate results are saved. 86 resume: A boolean. If True, the classifier will continue to previous work saved in path. 87 Otherwise, the classifier will start a new search. 88 searcher_args: A dictionary containing the parameters for the searcher's __init__ function. 89 augment: A boolean value indicating whether the data needs augmentation. If not define, then it 90 will use the value of Constant.DATA_AUGMENTATION which is True by default. 91 """ 92 self.augment = augment if augment is not None else Constant.DATA_AUGMENTATION 93 self.resize_shape = [] 94 95 super().__init__(**kwargs) 96 97 def fit(self, x, y, time_limit=None): 98 x = np.array(x) 99 y = np.array(y) 100 101 if self.verbose: 102 print("Preprocessing the images.") 103 104 self.resize_shape = compute_image_resize_params(x) 105 106 x = resize_image_data(x, self.resize_shape) 107 108 if self.verbose: 109 print("Preprocessing finished.") 110 111 super().fit(x, y, time_limit) 112 113 def init_transformer(self, x): 114 if self.data_transformer is None: 115 self.data_transformer = ImageDataTransformer(x, augment=self.augment) 116 117 def preprocess(self, x): 118 return resize_image_data(x, self.resize_shape) 119 120 121 class ImageClassifier(ImageSupervised): 122 """ImageClassifier class. 123 124 It is used for image classification. It searches convolutional neural network architectures 125 for the best configuration for the image dataset. 126 """ 127 128 @property 129 def loss(self): 130 return classification_loss 131 132 @property 133 def metric(self): 134 return Accuracy 135 136 def transform_y(self, y_train): 137 # Transform y_train. 138 if self.y_encoder is None: 139 self.y_encoder = OneHotEncoder() 140 self.y_encoder.fit(y_train) 141 y_train = self.y_encoder.transform(y_train) 142 return y_train 143 144 def inverse_transform_y(self, output): 145 return self.y_encoder.inverse_transform(output) 146 147 def get_n_output_node(self): 148 return self.y_encoder.n_classes 149 150 def export_autokeras_model(self, model_file_name): 151 """ Creates and Exports the AutoKeras model to the given filename. """ 152 portable_model = PortableImageClassifier(graph=self.cnn.best_model, 153 y_encoder=self.y_encoder, 154 data_transformer=self.data_transformer, 155 resize_params=self.resize_shape, 156 path=self.path) 157 pickle_to_file(portable_model, model_file_name) 158 159 160 class ImageClassifier1D(ImageClassifier): 161 """ ImageClassifier1D class. 162 163 It is used for 1D image classification. It searches convolutional neural network architectures 164 for the best configuration for the 1D image dataset. 
165 """ 166 167 def __init__(self, **kwargs): 168 kwargs['augment'] = False 169 super().__init__(**kwargs) 170 171 172 class ImageClassifier3D(ImageClassifier): 173 """ ImageClassifier3D class. 174 175 It is used for 3D image classification. It searches convolutional neural network architectures 176 for the best configuration for the 1D image dataset. 177 """ 178 179 def __init__(self, **kwargs): 180 kwargs['augment'] = False 181 super().__init__(**kwargs) 182 183 184 class ImageRegressor(ImageSupervised): 185 """ImageRegressor class. 186 187 It is used for image regression. It searches convolutional neural network architectures 188 for the best configuration for the image dataset. 189 """ 190 191 @property 192 def loss(self): 193 return regression_loss 194 195 @property 196 def metric(self): 197 return MSE 198 199 def get_n_output_node(self): 200 return 1 201 202 def transform_y(self, y_train): 203 return y_train.flatten().reshape(len(y_train), 1) 204 205 def inverse_transform_y(self, output): 206 return output.flatten() 207 208 def export_autokeras_model(self, model_file_name): 209 """ Creates and Exports the AutoKeras model to the given filename. """ 210 portable_model = PortableImageRegressor(graph=self.cnn.best_model, 211 y_encoder=self.y_encoder, 212 data_transformer=self.data_transformer, 213 resize_params=self.resize_shape, 214 path=self.path) 215 pickle_to_file(portable_model, model_file_name) 216 217 218 class ImageRegressor1D(ImageRegressor): 219 """ ImageRegressor1D class. 220 221 It is used for 1D image regression. It searches convolutional neural network architectures 222 for the best configuration for the 1D image dataset. 223 """ 224 225 def __init__(self, **kwargs): 226 kwargs['augment'] = False 227 super().__init__(**kwargs) 228 229 230 class ImageRegressor3D(ImageRegressor): 231 """ ImageRegressor3D class. 232 233 It is used for 3D image regression. It searches convolutional neural network architectures 234 for the best configuration for the 1D image dataset. 235 """ 236 237 def __init__(self, **kwargs): 238 kwargs['augment'] = False 239 super().__init__(**kwargs) 240 241 242 class PortableImageSupervised(PortableDeepSupervised): 243 def __init__(self, graph, y_encoder, data_transformer, resize_params, verbose=False, path=None): 244 """Initialize the instance. 245 Args: 246 graph: The graph form of the learned model 247 """ 248 super().__init__(graph, y_encoder, data_transformer, verbose, path) 249 self.resize_shape = resize_params 250 251 def preprocess(self, x): 252 return resize_image_data(x, self.resize_shape) 253 254 255 class PortableImageClassifier(PortableImageSupervised): 256 @property 257 def loss(self): 258 return classification_loss 259 260 @property 261 def metric(self): 262 return Accuracy 263 264 def transform_y(self, y_train): 265 return self.y_encoder.transform(y_train) 266 267 def inverse_transform_y(self, output): 268 return self.y_encoder.inverse_transform(output) 269 270 271 class PortableImageRegressor(PortableImageSupervised): 272 @property 273 def loss(self): 274 return regression_loss 275 276 @property 277 def metric(self): 278 return MSE 279 280 def transform_y(self, y_train): 281 return y_train.flatten().reshape(len(y_train), 1) 282 283 def inverse_transform_y(self, output): 284 return output.flatten() 285 [end of autokeras/image/image_supervised.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/autokeras/image/image_supervised.py b/autokeras/image/image_supervised.py --- a/autokeras/image/image_supervised.py +++ b/autokeras/image/image_supervised.py @@ -1,6 +1,7 @@ import os from abc import ABC import numpy as np +from multiprocessing import Pool, cpu_count from autokeras.constant import Constant from autokeras.nn.loss_function import classification_loss, regression_loss @@ -11,31 +12,48 @@ read_csv_file, read_image, compute_image_resize_params, resize_image_data -def read_images(img_file_names, images_dir_path): +def _image_to_array(img_path): + """Read the image from the path and return image object. + Return an image object. + + Args: + img_path: image file name in images_dir_path. + """ + if os.path.exists(img_path): + img = read_image(img_path) + if len(img.shape) < 3: + img = img[..., np.newaxis] + return img + else: + raise ValueError("%s image does not exist" % img_path) + + +def read_images(img_file_names, images_dir_path, parallel=True): """Read the images from the path and return their numpy.ndarray instance. Return a numpy.ndarray instance containing the training data. Args: img_file_names: List containing images names. images_dir_path: Path to the directory containing images. + parallel: (Default: True) Run _image_to_array will use multiprocessing. """ - x_train = [] + img_paths = [os.path.join(images_dir_path, img_file) + for img_file in img_file_names] + if os.path.isdir(images_dir_path): - for img_file in img_file_names: - img_path = os.path.join(images_dir_path, img_file) - if os.path.exists(img_path): - img = read_image(img_path) - if len(img.shape) < 3: - img = img[..., np.newaxis] - x_train.append(img) - else: - raise ValueError("%s image does not exist" % img_file) + if parallel: + pool = Pool(processes=cpu_count()) + x_train = pool.map(_image_to_array, img_paths) + pool.close() + pool.join() + else: + x_train = [_image_to_array(img_path) for img_path in img_paths] else: raise ValueError("Directory containing images does not exist") return np.asanyarray(x_train) -def load_image_dataset(csv_file_path, images_path): +def load_image_dataset(csv_file_path, images_path, parallel=True): """Load images from the files and labels from a csv file. Second, the dataset is a set of images and the labels are in a CSV file. @@ -48,13 +66,14 @@ Args: csv_file_path: CSV file path. images_path: Path where images exist. + parallel: (Default: True) Load dataset with multiprocessing. Returns: x: Four dimensional numpy.ndarray. The channel dimension is the last dimension. y: The labels. """ img_file_name, y = read_csv_file(csv_file_path) - x = read_images(img_file_name, images_path) + x = read_images(img_file_name, images_path, parallel) return np.array(x), np.array(y) @@ -112,7 +131,8 @@ def init_transformer(self, x): if self.data_transformer is None: - self.data_transformer = ImageDataTransformer(x, augment=self.augment) + self.data_transformer = ImageDataTransformer( + x, augment=self.augment) def preprocess(self, x): return resize_image_data(x, self.resize_shape)
{"golden_diff": "diff --git a/autokeras/image/image_supervised.py b/autokeras/image/image_supervised.py\n--- a/autokeras/image/image_supervised.py\n+++ b/autokeras/image/image_supervised.py\n@@ -1,6 +1,7 @@\n import os\n from abc import ABC\n import numpy as np\n+from multiprocessing import Pool, cpu_count\n \n from autokeras.constant import Constant\n from autokeras.nn.loss_function import classification_loss, regression_loss\n@@ -11,31 +12,48 @@\n read_csv_file, read_image, compute_image_resize_params, resize_image_data\n \n \n-def read_images(img_file_names, images_dir_path):\n+def _image_to_array(img_path):\n+ \"\"\"Read the image from the path and return image object.\n+ Return an image object.\n+\n+ Args:\n+ img_path: image file name in images_dir_path.\n+ \"\"\"\n+ if os.path.exists(img_path):\n+ img = read_image(img_path)\n+ if len(img.shape) < 3:\n+ img = img[..., np.newaxis]\n+ return img\n+ else:\n+ raise ValueError(\"%s image does not exist\" % img_path)\n+\n+\n+def read_images(img_file_names, images_dir_path, parallel=True):\n \"\"\"Read the images from the path and return their numpy.ndarray instance.\n Return a numpy.ndarray instance containing the training data.\n \n Args:\n img_file_names: List containing images names.\n images_dir_path: Path to the directory containing images.\n+ parallel: (Default: True) Run _image_to_array will use multiprocessing.\n \"\"\"\n- x_train = []\n+ img_paths = [os.path.join(images_dir_path, img_file)\n+ for img_file in img_file_names]\n+\n if os.path.isdir(images_dir_path):\n- for img_file in img_file_names:\n- img_path = os.path.join(images_dir_path, img_file)\n- if os.path.exists(img_path):\n- img = read_image(img_path)\n- if len(img.shape) < 3:\n- img = img[..., np.newaxis]\n- x_train.append(img)\n- else:\n- raise ValueError(\"%s image does not exist\" % img_file)\n+ if parallel:\n+ pool = Pool(processes=cpu_count())\n+ x_train = pool.map(_image_to_array, img_paths)\n+ pool.close()\n+ pool.join()\n+ else:\n+ x_train = [_image_to_array(img_path) for img_path in img_paths]\n else:\n raise ValueError(\"Directory containing images does not exist\")\n return np.asanyarray(x_train)\n \n \n-def load_image_dataset(csv_file_path, images_path):\n+def load_image_dataset(csv_file_path, images_path, parallel=True):\n \"\"\"Load images from the files and labels from a csv file.\n \n Second, the dataset is a set of images and the labels are in a CSV file.\n@@ -48,13 +66,14 @@\n Args:\n csv_file_path: CSV file path.\n images_path: Path where images exist.\n+ parallel: (Default: True) Load dataset with multiprocessing.\n \n Returns:\n x: Four dimensional numpy.ndarray. 
The channel dimension is the last dimension.\n y: The labels.\n \"\"\"\n img_file_name, y = read_csv_file(csv_file_path)\n- x = read_images(img_file_name, images_path)\n+ x = read_images(img_file_name, images_path, parallel)\n return np.array(x), np.array(y)\n \n \n@@ -112,7 +131,8 @@\n \n def init_transformer(self, x):\n if self.data_transformer is None:\n- self.data_transformer = ImageDataTransformer(x, augment=self.augment)\n+ self.data_transformer = ImageDataTransformer(\n+ x, augment=self.augment)\n \n def preprocess(self, x):\n return resize_image_data(x, self.resize_shape)\n", "issue": "feature_request: Support multicore/multiprocess for read_images function\n### Feature Description\r\nSupport multicore for `read_images` function in `image/image_supervised.py`.\r\n\r\n### Reason\r\nAs [read_images function in image_supervised.py](https://github.com/jhfjhfj1/autokeras/blob/36752fcbce58bd1a26c17144637cc0ecbef83da0/autokeras/image/image_supervised.py#L14) does not support multi-core(just read files one by one), so it's **too slow** when reading image files more than 10k files.\r\n\r\n### Solution\r\nUse multiprocessing or equivalent one in `read_images` function as a default, or optional.\r\n\n", "before_files": [{"content": "import os\nfrom abc import ABC\nimport numpy as np\n\nfrom autokeras.constant import Constant\nfrom autokeras.nn.loss_function import classification_loss, regression_loss\nfrom autokeras.nn.metric import Accuracy, MSE\nfrom autokeras.preprocessor import OneHotEncoder, ImageDataTransformer\nfrom autokeras.supervised import PortableDeepSupervised, DeepSupervised\nfrom autokeras.utils import pickle_to_file, \\\n read_csv_file, read_image, compute_image_resize_params, resize_image_data\n\n\ndef read_images(img_file_names, images_dir_path):\n \"\"\"Read the images from the path and return their numpy.ndarray instance.\n Return a numpy.ndarray instance containing the training data.\n\n Args:\n img_file_names: List containing images names.\n images_dir_path: Path to the directory containing images.\n \"\"\"\n x_train = []\n if os.path.isdir(images_dir_path):\n for img_file in img_file_names:\n img_path = os.path.join(images_dir_path, img_file)\n if os.path.exists(img_path):\n img = read_image(img_path)\n if len(img.shape) < 3:\n img = img[..., np.newaxis]\n x_train.append(img)\n else:\n raise ValueError(\"%s image does not exist\" % img_file)\n else:\n raise ValueError(\"Directory containing images does not exist\")\n return np.asanyarray(x_train)\n\n\ndef load_image_dataset(csv_file_path, images_path):\n \"\"\"Load images from the files and labels from a csv file.\n\n Second, the dataset is a set of images and the labels are in a CSV file.\n The CSV file should contain two columns whose names are 'File Name' and 'Label'.\n The file names in the first column should match the file names of the images with extensions,\n e.g., .jpg, .png.\n The path to the CSV file should be passed through the `csv_file_path`.\n The path to the directory containing all the images should be passed through `image_path`.\n\n Args:\n csv_file_path: CSV file path.\n images_path: Path where images exist.\n\n Returns:\n x: Four dimensional numpy.ndarray. 
The channel dimension is the last dimension.\n y: The labels.\n \"\"\"\n img_file_name, y = read_csv_file(csv_file_path)\n x = read_images(img_file_name, images_path)\n return np.array(x), np.array(y)\n\n\nclass ImageSupervised(DeepSupervised, ABC):\n \"\"\"Abstract image supervised class.\n\n Attributes:\n path: A path to the directory to save the classifier as well as intermediate results.\n cnn: CNN module from net_module.py.\n y_encoder: Label encoder, used in transform_y or inverse_transform_y for encode the label. For example,\n if one hot encoder needed, y_encoder can be OneHotEncoder.\n data_transformer: A transformer class to process the data. See example as ImageDataTransformer.\n verbose: A boolean value indicating the verbosity mode which determines whether the search process\n will be printed to stdout.\n augment: A boolean value indicating whether the data needs augmentation. If not define, then it\n will use the value of Constant.DATA_AUGMENTATION which is True by default.\n searcher_args: A dictionary containing the parameters for the searcher's __init__ function.\n resize_height: resize image height.\n resize_width: resize image width.\n \"\"\"\n\n def __init__(self, augment=None, **kwargs):\n \"\"\"Initialize the instance.\n The classifier will be loaded from the files in 'path' if parameter 'resume' is True.\n Otherwise it would create a new one.\n Args:\n verbose: A boolean of whether the search process will be printed to stdout.\n path: A string. The path to a directory, where the intermediate results are saved.\n resume: A boolean. If True, the classifier will continue to previous work saved in path.\n Otherwise, the classifier will start a new search.\n searcher_args: A dictionary containing the parameters for the searcher's __init__ function.\n augment: A boolean value indicating whether the data needs augmentation. If not define, then it\n will use the value of Constant.DATA_AUGMENTATION which is True by default.\n \"\"\"\n self.augment = augment if augment is not None else Constant.DATA_AUGMENTATION\n self.resize_shape = []\n\n super().__init__(**kwargs)\n\n def fit(self, x, y, time_limit=None):\n x = np.array(x)\n y = np.array(y)\n\n if self.verbose:\n print(\"Preprocessing the images.\")\n\n self.resize_shape = compute_image_resize_params(x)\n\n x = resize_image_data(x, self.resize_shape)\n\n if self.verbose:\n print(\"Preprocessing finished.\")\n\n super().fit(x, y, time_limit)\n\n def init_transformer(self, x):\n if self.data_transformer is None:\n self.data_transformer = ImageDataTransformer(x, augment=self.augment)\n\n def preprocess(self, x):\n return resize_image_data(x, self.resize_shape)\n\n\nclass ImageClassifier(ImageSupervised):\n \"\"\"ImageClassifier class.\n\n It is used for image classification. It searches convolutional neural network architectures\n for the best configuration for the image dataset.\n \"\"\"\n\n @property\n def loss(self):\n return classification_loss\n\n @property\n def metric(self):\n return Accuracy\n\n def transform_y(self, y_train):\n # Transform y_train.\n if self.y_encoder is None:\n self.y_encoder = OneHotEncoder()\n self.y_encoder.fit(y_train)\n y_train = self.y_encoder.transform(y_train)\n return y_train\n\n def inverse_transform_y(self, output):\n return self.y_encoder.inverse_transform(output)\n\n def get_n_output_node(self):\n return self.y_encoder.n_classes\n\n def export_autokeras_model(self, model_file_name):\n \"\"\" Creates and Exports the AutoKeras model to the given filename. 
\"\"\"\n portable_model = PortableImageClassifier(graph=self.cnn.best_model,\n y_encoder=self.y_encoder,\n data_transformer=self.data_transformer,\n resize_params=self.resize_shape,\n path=self.path)\n pickle_to_file(portable_model, model_file_name)\n\n\nclass ImageClassifier1D(ImageClassifier):\n \"\"\" ImageClassifier1D class.\n\n It is used for 1D image classification. It searches convolutional neural network architectures\n for the best configuration for the 1D image dataset.\n \"\"\"\n\n def __init__(self, **kwargs):\n kwargs['augment'] = False\n super().__init__(**kwargs)\n\n\nclass ImageClassifier3D(ImageClassifier):\n \"\"\" ImageClassifier3D class.\n\n It is used for 3D image classification. It searches convolutional neural network architectures\n for the best configuration for the 1D image dataset.\n \"\"\"\n\n def __init__(self, **kwargs):\n kwargs['augment'] = False\n super().__init__(**kwargs)\n\n\nclass ImageRegressor(ImageSupervised):\n \"\"\"ImageRegressor class.\n\n It is used for image regression. It searches convolutional neural network architectures\n for the best configuration for the image dataset.\n \"\"\"\n\n @property\n def loss(self):\n return regression_loss\n\n @property\n def metric(self):\n return MSE\n\n def get_n_output_node(self):\n return 1\n\n def transform_y(self, y_train):\n return y_train.flatten().reshape(len(y_train), 1)\n\n def inverse_transform_y(self, output):\n return output.flatten()\n\n def export_autokeras_model(self, model_file_name):\n \"\"\" Creates and Exports the AutoKeras model to the given filename. \"\"\"\n portable_model = PortableImageRegressor(graph=self.cnn.best_model,\n y_encoder=self.y_encoder,\n data_transformer=self.data_transformer,\n resize_params=self.resize_shape,\n path=self.path)\n pickle_to_file(portable_model, model_file_name)\n\n\nclass ImageRegressor1D(ImageRegressor):\n \"\"\" ImageRegressor1D class.\n\n It is used for 1D image regression. It searches convolutional neural network architectures\n for the best configuration for the 1D image dataset.\n \"\"\"\n\n def __init__(self, **kwargs):\n kwargs['augment'] = False\n super().__init__(**kwargs)\n\n\nclass ImageRegressor3D(ImageRegressor):\n \"\"\" ImageRegressor3D class.\n\n It is used for 3D image regression. 
It searches convolutional neural network architectures\n for the best configuration for the 1D image dataset.\n \"\"\"\n\n def __init__(self, **kwargs):\n kwargs['augment'] = False\n super().__init__(**kwargs)\n\n\nclass PortableImageSupervised(PortableDeepSupervised):\n def __init__(self, graph, y_encoder, data_transformer, resize_params, verbose=False, path=None):\n \"\"\"Initialize the instance.\n Args:\n graph: The graph form of the learned model\n \"\"\"\n super().__init__(graph, y_encoder, data_transformer, verbose, path)\n self.resize_shape = resize_params\n\n def preprocess(self, x):\n return resize_image_data(x, self.resize_shape)\n\n\nclass PortableImageClassifier(PortableImageSupervised):\n @property\n def loss(self):\n return classification_loss\n\n @property\n def metric(self):\n return Accuracy\n\n def transform_y(self, y_train):\n return self.y_encoder.transform(y_train)\n\n def inverse_transform_y(self, output):\n return self.y_encoder.inverse_transform(output)\n\n\nclass PortableImageRegressor(PortableImageSupervised):\n @property\n def loss(self):\n return regression_loss\n\n @property\n def metric(self):\n return MSE\n\n def transform_y(self, y_train):\n return y_train.flatten().reshape(len(y_train), 1)\n\n def inverse_transform_y(self, output):\n return output.flatten()\n", "path": "autokeras/image/image_supervised.py"}]}
3,580
847
gh_patches_debug_40015
rasdani/github-patches
git_diff
mathesar-foundation__mathesar-1091
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Implement display options for Duration type ## Problem The Duration Mathesar type supports the following display options: - Min unit - Max unit - Whether to show unit labels We will combine this into a single `format` string. ## Solution (1) We should store these display options in the following format in the `display_options` field of the corresponding column. ``` { "format": "HH:mm:ss.SSS" } ``` (2) We should also validate these so that: - Only columns of this type can have these display options. They should not be able to be set if the column is of a different type. - `format` should be a valid JavaScript duration format. (3) If the column type is changed, the display options should be deleted. (4) We should add supported display options to the `types` endpoint. ## Additional Context - [Design of Duration type options on Figma](https://www.figma.com/proto/Uaf1ntcldzK2U41Jhw6vS2/Mathesar-MVP?page-id=4260%3A37440&node-id=4270%3A41231&viewport=324%2C48%2C0.29&scaling=contain&starting-point-node-id=4270%3A41231&show-proto-sidebar=1) - Blocked by #658 - #392 </issue> <code> [start of mathesar/api/display_options.py] 1 from mathesar.database.types import MathesarTypeIdentifier 2 3 DISPLAY_OPTIONS_BY_TYPE_IDENTIFIER = { 4 MathesarTypeIdentifier.BOOLEAN.value: 5 { 6 "options": [{"name": "input", "type": "string", 7 "enum": ['dropdown', 'checkbox']}, 8 {'name': "custom_labels", "type": "object", 9 "items": [{"name": "TRUE", "type": "string"}, 10 {'name': "FALSE", "type": "string"}]}] 11 12 }, 13 MathesarTypeIdentifier.NUMBER.value: 14 { 15 "options": [{"name": "show_as_percentage", "type": "boolean"}, 16 {"name": "locale", "type": "string"}] 17 }, 18 MathesarTypeIdentifier.DATETIME.value: 19 { 20 "options": [{"name": "format", "type": "string"}] 21 } 22 } 23 [end of mathesar/api/display_options.py] [start of mathesar/api/serializers/shared_serializers.py] 1 from abc import ABC, abstractmethod 2 3 import arrow 4 from django.core.exceptions import ImproperlyConfigured 5 from rest_framework import serializers 6 7 from mathesar.api.exceptions.mixins import MathesarErrorMessageMixin 8 from mathesar.database.types import MathesarTypeIdentifier, get_mathesar_type_from_db_type 9 10 11 class ReadOnlyPolymorphicSerializerMappingMixin: 12 """ 13 This serializer mixin is helpful in serializing polymorphic models, 14 by switching to correct serializer based on the mapping field value. 
15 """ 16 17 def __new__(cls, *args, **kwargs): 18 if cls.serializers_mapping is None: 19 raise ImproperlyConfigured( 20 '`{cls}` is missing a ' 21 '`{cls}.model_serializer_mapping` attribute'.format(cls=cls.__name__) 22 ) 23 return super().__new__(cls, *args, **kwargs) 24 25 def __init__(self, *args, **kwargs): 26 super().__init__(*args, **kwargs) 27 self.serializers_cls_mapping = {} 28 serializers_mapping = self.serializers_mapping 29 self.serializers_mapping = {} 30 for identifier, serializer_cls in serializers_mapping.items(): 31 if callable(serializer_cls): 32 serializer = serializer_cls(*args, **kwargs) 33 serializer.parent = self 34 else: 35 serializer = serializer_cls 36 self.serializers_mapping[identifier] = serializer 37 self.serializers_cls_mapping[identifier] = serializer_cls 38 39 def to_representation(self, instance): 40 serializer = self.serializers_mapping.get(self.get_mapping_field(), None) 41 if serializer is not None: 42 self.__class__ = self.serializers_cls_mapping.get(self.get_mapping_field()) 43 return serializer.to_representation(instance) 44 else: 45 raise Exception(f"Cannot find a matching serializer for the specified type {self.get_mapping_field()}") 46 47 def get_mapping_field(self): 48 mapping_field = getattr(self, "mapping_field", None) 49 if mapping_field is None: 50 raise Exception( 51 "Add a `mapping_field` to be used as a identifier" 52 "or override this method to return a identifier to identify a proper serializer" 53 ) 54 return mapping_field 55 56 57 class ReadWritePolymorphicSerializerMappingMixin(ReadOnlyPolymorphicSerializerMappingMixin): 58 def to_internal_value(self, data): 59 serializer = self.serializers_mapping.get(self.get_mapping_field()) 60 if serializer is not None: 61 self.__class__ = self.serializers_cls_mapping.get(self.get_mapping_field()) 62 return serializer.to_internal_value(data=data) 63 else: 64 raise Exception(f"Cannot find a matching serializer for the specified type {self.get_mapping_field()}") 65 66 67 class MonkeyPatchPartial: 68 """ 69 Work around bug #3847 in djangorestframework by monkey-patching the partial 70 attribute of the root serializer during the call to validate_empty_values. 71 https://github.com/encode/django-rest-framework/issues/3847 72 """ 73 74 def __init__(self, root): 75 self._root = root 76 77 def __enter__(self): 78 self._old = getattr(self._root, 'partial') 79 setattr(self._root, 'partial', False) 80 81 def __exit__(self, *args): 82 setattr(self._root, 'partial', self._old) 83 84 85 class OverrideRootPartialMixin: 86 """ 87 This mixin is used to convert a serializer into a partial serializer, 88 based on the serializer `partial` property rather than the parent's `partial` property. 
89 Refer to the issue 90 https://github.com/encode/django-rest-framework/issues/3847 91 """ 92 93 def run_validation(self, *args, **kwargs): 94 if not self.partial: 95 with MonkeyPatchPartial(self.root): 96 return super().run_validation(*args, **kwargs) 97 return super().run_validation(*args, **kwargs) 98 99 100 class CustomBooleanLabelSerializer(MathesarErrorMessageMixin, serializers.Serializer): 101 TRUE = serializers.CharField() 102 FALSE = serializers.CharField() 103 104 105 DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY = 'mathesar_type' 106 107 108 class BooleanDisplayOptionSerializer(MathesarErrorMessageMixin, OverrideRootPartialMixin, serializers.Serializer): 109 input = serializers.ChoiceField(choices=[("dropdown", 1), ("checkbox", 2)]) 110 custom_labels = CustomBooleanLabelSerializer(required=False) 111 112 113 class NumberDisplayOptionSerializer(MathesarErrorMessageMixin, OverrideRootPartialMixin, serializers.Serializer): 114 show_as_percentage = serializers.BooleanField(default=False) 115 locale = serializers.CharField(required=False) 116 117 118 class AbstractDateTimeFormatValidator(ABC): 119 requires_context = True 120 121 def __init__(self): 122 pass 123 124 def __call__(self, value, serializer_field): 125 self.date_format_validator(value, serializer_field) 126 127 def date_format_validator(self, value, serializer_field): 128 try: 129 timestamp_with_tz_obj = arrow.get('2013-09-30T15:34:00.000-07:00') 130 parsed_datetime_str = timestamp_with_tz_obj.format(value) 131 datetime_object = arrow.get(parsed_datetime_str, value) 132 except ValueError: 133 raise serializers.ValidationError(f"{value} is not a valid format used for parsing a datetime.") 134 else: 135 self.validate(datetime_object, value, serializer_field) 136 137 @abstractmethod 138 def validate(self, datetime_obj, display_format, serializer_field): 139 pass 140 141 142 class TimestampWithTimeZoneFormatValidator(AbstractDateTimeFormatValidator): 143 144 def validate(self, datetime_obj, display_format, serializer_field): 145 pass 146 147 148 class TimestampWithoutTimeZoneFormatValidator(AbstractDateTimeFormatValidator): 149 150 def validate(self, datetime_obj, display_format, serializer_field): 151 if 'z' in display_format.lower(): 152 raise serializers.ValidationError( 153 "Timestamp without timezone column cannot contain timezone display format" 154 ) 155 156 157 class DateFormatValidator(AbstractDateTimeFormatValidator): 158 159 def validate(self, datetime_obj, display_format, serializer_field): 160 date_obj = arrow.get('2013-09-30') 161 if datetime_obj.time() != date_obj.time(): 162 raise serializers.ValidationError("Date column cannot contain time or timezone display format") 163 164 165 class TimeWithTimeZoneFormatValidator(AbstractDateTimeFormatValidator): 166 167 def validate(self, datetime_obj, display_format, serializer_field): 168 time_only_format = 'HH:mm:ssZZ' 169 time_str = arrow.get('2013-09-30T15:34:00.000-07:00').format(time_only_format) 170 parsed_time_str = arrow.get(time_str, time_only_format) 171 if parsed_time_str.date() != datetime_obj.date(): 172 raise serializers.ValidationError("Time column cannot contain date display format") 173 174 175 class TimeWithoutTimeZoneFormatValidator(TimeWithTimeZoneFormatValidator): 176 177 def validate(self, datetime_obj, display_format, serializer_field): 178 if 'z' in display_format.lower(): 179 raise serializers.ValidationError("Time without timezone column cannot contain timezone display format") 180 return super().validate(datetime_obj, display_format, 
serializer_field) 181 182 183 class DateDisplayOptionSerializer(MathesarErrorMessageMixin, OverrideRootPartialMixin, serializers.Serializer): 184 format = serializers.CharField(validators=[DateFormatValidator()]) 185 186 187 class TimestampWithoutTimezoneDisplayOptionSerializer( 188 MathesarErrorMessageMixin, 189 OverrideRootPartialMixin, 190 serializers.Serializer 191 ): 192 format = serializers.CharField(validators=[TimestampWithoutTimeZoneFormatValidator()]) 193 194 195 class TimestampWithTimezoneDisplayOptionSerializer( 196 MathesarErrorMessageMixin, 197 OverrideRootPartialMixin, 198 serializers.Serializer 199 ): 200 format = serializers.CharField(validators=[TimestampWithTimeZoneFormatValidator()]) 201 202 203 class TimeWithTimezoneDisplayOptionSerializer( 204 MathesarErrorMessageMixin, 205 OverrideRootPartialMixin, 206 serializers.Serializer 207 ): 208 format = serializers.CharField(validators=[TimeWithTimeZoneFormatValidator()]) 209 210 211 class TimeWithoutTimezoneDisplayOptionSerializer( 212 MathesarErrorMessageMixin, 213 OverrideRootPartialMixin, 214 serializers.Serializer 215 ): 216 format = serializers.CharField(validators=[TimeWithoutTimeZoneFormatValidator()]) 217 218 219 class DisplayOptionsMappingSerializer( 220 MathesarErrorMessageMixin, 221 ReadWritePolymorphicSerializerMappingMixin, 222 serializers.Serializer 223 ): 224 serializers_mapping = { 225 MathesarTypeIdentifier.BOOLEAN.value: BooleanDisplayOptionSerializer, 226 MathesarTypeIdentifier.NUMBER.value: NumberDisplayOptionSerializer, 227 ('timestamp with time zone', 228 MathesarTypeIdentifier.DATETIME.value): TimestampWithTimezoneDisplayOptionSerializer, 229 ('timestamp without time zone', 230 MathesarTypeIdentifier.DATETIME.value): TimestampWithoutTimezoneDisplayOptionSerializer, 231 ('date', MathesarTypeIdentifier.DATETIME.value): DateDisplayOptionSerializer, 232 ('time with time zone', MathesarTypeIdentifier.DATETIME.value): TimeWithTimezoneDisplayOptionSerializer, 233 ('time without time zone', MathesarTypeIdentifier.DATETIME.value): TimeWithoutTimezoneDisplayOptionSerializer, 234 } 235 236 def get_mapping_field(self): 237 mathesar_type = get_mathesar_type_from_db_type(self.context[DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY]) 238 if mathesar_type == MathesarTypeIdentifier.DATETIME.value: 239 return self.context[DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY].lower(), mathesar_type 240 else: 241 return mathesar_type 242 [end of mathesar/api/serializers/shared_serializers.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mathesar/api/display_options.py b/mathesar/api/display_options.py --- a/mathesar/api/display_options.py +++ b/mathesar/api/display_options.py @@ -16,6 +16,10 @@ {"name": "locale", "type": "string"}] }, MathesarTypeIdentifier.DATETIME.value: + { + "options": [{"name": "format", "type": "string"}] + }, + MathesarTypeIdentifier.DURATION.value: { "options": [{"name": "format", "type": "string"}] } diff --git a/mathesar/api/serializers/shared_serializers.py b/mathesar/api/serializers/shared_serializers.py --- a/mathesar/api/serializers/shared_serializers.py +++ b/mathesar/api/serializers/shared_serializers.py @@ -165,7 +165,7 @@ class TimeWithTimeZoneFormatValidator(AbstractDateTimeFormatValidator): def validate(self, datetime_obj, display_format, serializer_field): - time_only_format = 'HH:mm:ssZZ' + time_only_format = 'HH:mm:ss.SSSZZ' time_str = arrow.get('2013-09-30T15:34:00.000-07:00').format(time_only_format) parsed_time_str = arrow.get(time_str, time_only_format) if parsed_time_str.date() != datetime_obj.date(): @@ -180,6 +180,15 @@ return super().validate(datetime_obj, display_format, serializer_field) +class DurationFormatValidator(AbstractDateTimeFormatValidator): + + def validate(self, datetime_obj, display_format, serializer_field): + if 'z' in display_format.lower(): + raise serializers.ValidationError( + "Duration column cannot contain timezone display format" + ) + + class DateDisplayOptionSerializer(MathesarErrorMessageMixin, OverrideRootPartialMixin, serializers.Serializer): format = serializers.CharField(validators=[DateFormatValidator()]) @@ -216,6 +225,10 @@ format = serializers.CharField(validators=[TimeWithoutTimeZoneFormatValidator()]) +class DurationDisplayOptionSerializer(MathesarErrorMessageMixin, OverrideRootPartialMixin, serializers.Serializer): + format = serializers.CharField(validators=[DurationFormatValidator()]) + + class DisplayOptionsMappingSerializer( MathesarErrorMessageMixin, ReadWritePolymorphicSerializerMappingMixin, @@ -231,6 +244,7 @@ ('date', MathesarTypeIdentifier.DATETIME.value): DateDisplayOptionSerializer, ('time with time zone', MathesarTypeIdentifier.DATETIME.value): TimeWithTimezoneDisplayOptionSerializer, ('time without time zone', MathesarTypeIdentifier.DATETIME.value): TimeWithoutTimezoneDisplayOptionSerializer, + MathesarTypeIdentifier.DURATION.value: DurationDisplayOptionSerializer, } def get_mapping_field(self):
{"golden_diff": "diff --git a/mathesar/api/display_options.py b/mathesar/api/display_options.py\n--- a/mathesar/api/display_options.py\n+++ b/mathesar/api/display_options.py\n@@ -16,6 +16,10 @@\n {\"name\": \"locale\", \"type\": \"string\"}]\n },\n MathesarTypeIdentifier.DATETIME.value:\n+ {\n+ \"options\": [{\"name\": \"format\", \"type\": \"string\"}]\n+ },\n+ MathesarTypeIdentifier.DURATION.value:\n {\n \"options\": [{\"name\": \"format\", \"type\": \"string\"}]\n }\ndiff --git a/mathesar/api/serializers/shared_serializers.py b/mathesar/api/serializers/shared_serializers.py\n--- a/mathesar/api/serializers/shared_serializers.py\n+++ b/mathesar/api/serializers/shared_serializers.py\n@@ -165,7 +165,7 @@\n class TimeWithTimeZoneFormatValidator(AbstractDateTimeFormatValidator):\n \n def validate(self, datetime_obj, display_format, serializer_field):\n- time_only_format = 'HH:mm:ssZZ'\n+ time_only_format = 'HH:mm:ss.SSSZZ'\n time_str = arrow.get('2013-09-30T15:34:00.000-07:00').format(time_only_format)\n parsed_time_str = arrow.get(time_str, time_only_format)\n if parsed_time_str.date() != datetime_obj.date():\n@@ -180,6 +180,15 @@\n return super().validate(datetime_obj, display_format, serializer_field)\n \n \n+class DurationFormatValidator(AbstractDateTimeFormatValidator):\n+\n+ def validate(self, datetime_obj, display_format, serializer_field):\n+ if 'z' in display_format.lower():\n+ raise serializers.ValidationError(\n+ \"Duration column cannot contain timezone display format\"\n+ )\n+\n+\n class DateDisplayOptionSerializer(MathesarErrorMessageMixin, OverrideRootPartialMixin, serializers.Serializer):\n format = serializers.CharField(validators=[DateFormatValidator()])\n \n@@ -216,6 +225,10 @@\n format = serializers.CharField(validators=[TimeWithoutTimeZoneFormatValidator()])\n \n \n+class DurationDisplayOptionSerializer(MathesarErrorMessageMixin, OverrideRootPartialMixin, serializers.Serializer):\n+ format = serializers.CharField(validators=[DurationFormatValidator()])\n+\n+\n class DisplayOptionsMappingSerializer(\n MathesarErrorMessageMixin,\n ReadWritePolymorphicSerializerMappingMixin,\n@@ -231,6 +244,7 @@\n ('date', MathesarTypeIdentifier.DATETIME.value): DateDisplayOptionSerializer,\n ('time with time zone', MathesarTypeIdentifier.DATETIME.value): TimeWithTimezoneDisplayOptionSerializer,\n ('time without time zone', MathesarTypeIdentifier.DATETIME.value): TimeWithoutTimezoneDisplayOptionSerializer,\n+ MathesarTypeIdentifier.DURATION.value: DurationDisplayOptionSerializer,\n }\n \n def get_mapping_field(self):\n", "issue": "Implement display options for Duration type\n## Problem\r\nThe Duration Mathesar type supports the following display options:\r\n- Min unit\r\n- Max unit\r\n- Whether to show unit labels\r\n\r\nWe will combine this into a single `format` string.\r\n\r\n## Solution\r\n(1) We should store these display options in the following format in the `display_options` field of the corresponding column.\r\n```\r\n{\r\n \"format\": \"HH:mm:ss.SSS\"\r\n}\r\n```\r\n\r\n(2) We should also validate these so that:\r\n- Only columns of this type can have these display options. 
They should not be able to be set if the column is of a different type.\r\n- `format` should be a valid JavaScript duration format.\r\n\r\n(3) If the column type is changed, the display options should be deleted.\r\n\r\n(4) We should add supported display options to the `types` endpoint.\r\n\r\n## Additional Context\r\n- [Design of Duration type options on Figma](https://www.figma.com/proto/Uaf1ntcldzK2U41Jhw6vS2/Mathesar-MVP?page-id=4260%3A37440&node-id=4270%3A41231&viewport=324%2C48%2C0.29&scaling=contain&starting-point-node-id=4270%3A41231&show-proto-sidebar=1)\r\n- Blocked by #658\r\n- #392 \n", "before_files": [{"content": "from mathesar.database.types import MathesarTypeIdentifier\n\nDISPLAY_OPTIONS_BY_TYPE_IDENTIFIER = {\n MathesarTypeIdentifier.BOOLEAN.value:\n {\n \"options\": [{\"name\": \"input\", \"type\": \"string\",\n \"enum\": ['dropdown', 'checkbox']},\n {'name': \"custom_labels\", \"type\": \"object\",\n \"items\": [{\"name\": \"TRUE\", \"type\": \"string\"},\n {'name': \"FALSE\", \"type\": \"string\"}]}]\n\n },\n MathesarTypeIdentifier.NUMBER.value:\n {\n \"options\": [{\"name\": \"show_as_percentage\", \"type\": \"boolean\"},\n {\"name\": \"locale\", \"type\": \"string\"}]\n },\n MathesarTypeIdentifier.DATETIME.value:\n {\n \"options\": [{\"name\": \"format\", \"type\": \"string\"}]\n }\n}\n", "path": "mathesar/api/display_options.py"}, {"content": "from abc import ABC, abstractmethod\n\nimport arrow\nfrom django.core.exceptions import ImproperlyConfigured\nfrom rest_framework import serializers\n\nfrom mathesar.api.exceptions.mixins import MathesarErrorMessageMixin\nfrom mathesar.database.types import MathesarTypeIdentifier, get_mathesar_type_from_db_type\n\n\nclass ReadOnlyPolymorphicSerializerMappingMixin:\n \"\"\"\n This serializer mixin is helpful in serializing polymorphic models,\n by switching to correct serializer based on the mapping field value.\n \"\"\"\n\n def __new__(cls, *args, **kwargs):\n if cls.serializers_mapping is None:\n raise ImproperlyConfigured(\n '`{cls}` is missing a '\n '`{cls}.model_serializer_mapping` attribute'.format(cls=cls.__name__)\n )\n return super().__new__(cls, *args, **kwargs)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.serializers_cls_mapping = {}\n serializers_mapping = self.serializers_mapping\n self.serializers_mapping = {}\n for identifier, serializer_cls in serializers_mapping.items():\n if callable(serializer_cls):\n serializer = serializer_cls(*args, **kwargs)\n serializer.parent = self\n else:\n serializer = serializer_cls\n self.serializers_mapping[identifier] = serializer\n self.serializers_cls_mapping[identifier] = serializer_cls\n\n def to_representation(self, instance):\n serializer = self.serializers_mapping.get(self.get_mapping_field(), None)\n if serializer is not None:\n self.__class__ = self.serializers_cls_mapping.get(self.get_mapping_field())\n return serializer.to_representation(instance)\n else:\n raise Exception(f\"Cannot find a matching serializer for the specified type {self.get_mapping_field()}\")\n\n def get_mapping_field(self):\n mapping_field = getattr(self, \"mapping_field\", None)\n if mapping_field is None:\n raise Exception(\n \"Add a `mapping_field` to be used as a identifier\"\n \"or override this method to return a identifier to identify a proper serializer\"\n )\n return mapping_field\n\n\nclass ReadWritePolymorphicSerializerMappingMixin(ReadOnlyPolymorphicSerializerMappingMixin):\n def to_internal_value(self, data):\n serializer = 
self.serializers_mapping.get(self.get_mapping_field())\n if serializer is not None:\n self.__class__ = self.serializers_cls_mapping.get(self.get_mapping_field())\n return serializer.to_internal_value(data=data)\n else:\n raise Exception(f\"Cannot find a matching serializer for the specified type {self.get_mapping_field()}\")\n\n\nclass MonkeyPatchPartial:\n \"\"\"\n Work around bug #3847 in djangorestframework by monkey-patching the partial\n attribute of the root serializer during the call to validate_empty_values.\n https://github.com/encode/django-rest-framework/issues/3847\n \"\"\"\n\n def __init__(self, root):\n self._root = root\n\n def __enter__(self):\n self._old = getattr(self._root, 'partial')\n setattr(self._root, 'partial', False)\n\n def __exit__(self, *args):\n setattr(self._root, 'partial', self._old)\n\n\nclass OverrideRootPartialMixin:\n \"\"\"\n This mixin is used to convert a serializer into a partial serializer,\n based on the serializer `partial` property rather than the parent's `partial` property.\n Refer to the issue\n https://github.com/encode/django-rest-framework/issues/3847\n \"\"\"\n\n def run_validation(self, *args, **kwargs):\n if not self.partial:\n with MonkeyPatchPartial(self.root):\n return super().run_validation(*args, **kwargs)\n return super().run_validation(*args, **kwargs)\n\n\nclass CustomBooleanLabelSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n TRUE = serializers.CharField()\n FALSE = serializers.CharField()\n\n\nDISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY = 'mathesar_type'\n\n\nclass BooleanDisplayOptionSerializer(MathesarErrorMessageMixin, OverrideRootPartialMixin, serializers.Serializer):\n input = serializers.ChoiceField(choices=[(\"dropdown\", 1), (\"checkbox\", 2)])\n custom_labels = CustomBooleanLabelSerializer(required=False)\n\n\nclass NumberDisplayOptionSerializer(MathesarErrorMessageMixin, OverrideRootPartialMixin, serializers.Serializer):\n show_as_percentage = serializers.BooleanField(default=False)\n locale = serializers.CharField(required=False)\n\n\nclass AbstractDateTimeFormatValidator(ABC):\n requires_context = True\n\n def __init__(self):\n pass\n\n def __call__(self, value, serializer_field):\n self.date_format_validator(value, serializer_field)\n\n def date_format_validator(self, value, serializer_field):\n try:\n timestamp_with_tz_obj = arrow.get('2013-09-30T15:34:00.000-07:00')\n parsed_datetime_str = timestamp_with_tz_obj.format(value)\n datetime_object = arrow.get(parsed_datetime_str, value)\n except ValueError:\n raise serializers.ValidationError(f\"{value} is not a valid format used for parsing a datetime.\")\n else:\n self.validate(datetime_object, value, serializer_field)\n\n @abstractmethod\n def validate(self, datetime_obj, display_format, serializer_field):\n pass\n\n\nclass TimestampWithTimeZoneFormatValidator(AbstractDateTimeFormatValidator):\n\n def validate(self, datetime_obj, display_format, serializer_field):\n pass\n\n\nclass TimestampWithoutTimeZoneFormatValidator(AbstractDateTimeFormatValidator):\n\n def validate(self, datetime_obj, display_format, serializer_field):\n if 'z' in display_format.lower():\n raise serializers.ValidationError(\n \"Timestamp without timezone column cannot contain timezone display format\"\n )\n\n\nclass DateFormatValidator(AbstractDateTimeFormatValidator):\n\n def validate(self, datetime_obj, display_format, serializer_field):\n date_obj = arrow.get('2013-09-30')\n if datetime_obj.time() != date_obj.time():\n raise serializers.ValidationError(\"Date column cannot 
contain time or timezone display format\")\n\n\nclass TimeWithTimeZoneFormatValidator(AbstractDateTimeFormatValidator):\n\n def validate(self, datetime_obj, display_format, serializer_field):\n time_only_format = 'HH:mm:ssZZ'\n time_str = arrow.get('2013-09-30T15:34:00.000-07:00').format(time_only_format)\n parsed_time_str = arrow.get(time_str, time_only_format)\n if parsed_time_str.date() != datetime_obj.date():\n raise serializers.ValidationError(\"Time column cannot contain date display format\")\n\n\nclass TimeWithoutTimeZoneFormatValidator(TimeWithTimeZoneFormatValidator):\n\n def validate(self, datetime_obj, display_format, serializer_field):\n if 'z' in display_format.lower():\n raise serializers.ValidationError(\"Time without timezone column cannot contain timezone display format\")\n return super().validate(datetime_obj, display_format, serializer_field)\n\n\nclass DateDisplayOptionSerializer(MathesarErrorMessageMixin, OverrideRootPartialMixin, serializers.Serializer):\n format = serializers.CharField(validators=[DateFormatValidator()])\n\n\nclass TimestampWithoutTimezoneDisplayOptionSerializer(\n MathesarErrorMessageMixin,\n OverrideRootPartialMixin,\n serializers.Serializer\n):\n format = serializers.CharField(validators=[TimestampWithoutTimeZoneFormatValidator()])\n\n\nclass TimestampWithTimezoneDisplayOptionSerializer(\n MathesarErrorMessageMixin,\n OverrideRootPartialMixin,\n serializers.Serializer\n):\n format = serializers.CharField(validators=[TimestampWithTimeZoneFormatValidator()])\n\n\nclass TimeWithTimezoneDisplayOptionSerializer(\n MathesarErrorMessageMixin,\n OverrideRootPartialMixin,\n serializers.Serializer\n):\n format = serializers.CharField(validators=[TimeWithTimeZoneFormatValidator()])\n\n\nclass TimeWithoutTimezoneDisplayOptionSerializer(\n MathesarErrorMessageMixin,\n OverrideRootPartialMixin,\n serializers.Serializer\n):\n format = serializers.CharField(validators=[TimeWithoutTimeZoneFormatValidator()])\n\n\nclass DisplayOptionsMappingSerializer(\n MathesarErrorMessageMixin,\n ReadWritePolymorphicSerializerMappingMixin,\n serializers.Serializer\n):\n serializers_mapping = {\n MathesarTypeIdentifier.BOOLEAN.value: BooleanDisplayOptionSerializer,\n MathesarTypeIdentifier.NUMBER.value: NumberDisplayOptionSerializer,\n ('timestamp with time zone',\n MathesarTypeIdentifier.DATETIME.value): TimestampWithTimezoneDisplayOptionSerializer,\n ('timestamp without time zone',\n MathesarTypeIdentifier.DATETIME.value): TimestampWithoutTimezoneDisplayOptionSerializer,\n ('date', MathesarTypeIdentifier.DATETIME.value): DateDisplayOptionSerializer,\n ('time with time zone', MathesarTypeIdentifier.DATETIME.value): TimeWithTimezoneDisplayOptionSerializer,\n ('time without time zone', MathesarTypeIdentifier.DATETIME.value): TimeWithoutTimezoneDisplayOptionSerializer,\n }\n\n def get_mapping_field(self):\n mathesar_type = get_mathesar_type_from_db_type(self.context[DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY])\n if mathesar_type == MathesarTypeIdentifier.DATETIME.value:\n return self.context[DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY].lower(), mathesar_type\n else:\n return mathesar_type\n", "path": "mathesar/api/serializers/shared_serializers.py"}]}
3,632
626
gh_patches_debug_31667
rasdani/github-patches
git_diff
networkx__networkx-1317
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> xrange vs python 3 Searching the networkx code for `xrange` I see it's used in some "Shapefile" related code and tests. Should this be updated for python 3 compatibility, and is it not tested in the TravisCI testing? </issue> <code> [start of networkx/readwrite/nx_shp.py] 1 """ 2 ********* 3 Shapefile 4 ********* 5 6 Generates a networkx.DiGraph from point and line shapefiles. 7 8 "The Esri Shapefile or simply a shapefile is a popular geospatial vector 9 data format for geographic information systems software. It is developed 10 and regulated by Esri as a (mostly) open specification for data 11 interoperability among Esri and other software products." 12 See http://en.wikipedia.org/wiki/Shapefile for additional information. 13 """ 14 # Copyright (C) 2004-2010 by 15 # Ben Reilly <[email protected]> 16 # Aric Hagberg <[email protected]> 17 # Dan Schult <[email protected]> 18 # Pieter Swart <[email protected]> 19 # All rights reserved. 20 # BSD license. 21 import networkx as nx 22 __author__ = """Ben Reilly ([email protected])""" 23 __all__ = ['read_shp', 'write_shp'] 24 25 26 def read_shp(path): 27 """Generates a networkx.DiGraph from shapefiles. Point geometries are 28 translated into nodes, lines into edges. Coordinate tuples are used as 29 keys. Attributes are preserved, line geometries are simplified into start 30 and end coordinates. Accepts a single shapefile or directory of many 31 shapefiles. 32 33 "The Esri Shapefile or simply a shapefile is a popular geospatial vector 34 data format for geographic information systems software [1]_." 35 36 Parameters 37 ---------- 38 path : file or string 39 File, directory, or filename to read. 40 41 Returns 42 ------- 43 G : NetworkX graph 44 45 Examples 46 -------- 47 >>> G=nx.read_shp('test.shp') # doctest: +SKIP 48 49 References 50 ---------- 51 .. [1] http://en.wikipedia.org/wiki/Shapefile 52 """ 53 try: 54 from osgeo import ogr 55 except ImportError: 56 raise ImportError("read_shp requires OGR: http://www.gdal.org/") 57 58 net = nx.DiGraph() 59 60 def getfieldinfo(lyr, feature, flds): 61 f = feature 62 return [f.GetField(f.GetFieldIndex(x)) for x in flds] 63 64 def addlyr(lyr, fields): 65 for findex in range(lyr.GetFeatureCount()): 66 f = lyr.GetFeature(findex) 67 flddata = getfieldinfo(lyr, f, fields) 68 g = f.geometry() 69 attributes = dict(zip(fields, flddata)) 70 attributes["ShpName"] = lyr.GetName() 71 if g.GetGeometryType() == 1: # point 72 net.add_node((g.GetPoint_2D(0)), attributes) 73 if g.GetGeometryType() == 2: # linestring 74 attributes["Wkb"] = g.ExportToWkb() 75 attributes["Wkt"] = g.ExportToWkt() 76 attributes["Json"] = g.ExportToJson() 77 last = g.GetPointCount() - 1 78 net.add_edge(g.GetPoint_2D(0), g.GetPoint_2D(last), attributes) 79 80 if isinstance(path, str): 81 shp = ogr.Open(path) 82 lyrcount = shp.GetLayerCount() # multiple layers indicate a directory 83 for lyrindex in range(lyrcount): 84 lyr = shp.GetLayerByIndex(lyrindex) 85 flds = [x.GetName() for x in lyr.schema] 86 addlyr(lyr, flds) 87 return net 88 89 90 def write_shp(G, outdir): 91 """Writes a networkx.DiGraph to two shapefiles, edges and nodes. 92 Nodes and edges are expected to have a Well Known Binary (Wkb) or 93 Well Known Text (Wkt) key in order to generate geometries. Also 94 acceptable are nodes with a numeric tuple key (x,y). 
95 96 "The Esri Shapefile or simply a shapefile is a popular geospatial vector 97 data format for geographic information systems software [1]_." 98 99 Parameters 100 ---------- 101 outdir : directory path 102 Output directory for the two shapefiles. 103 104 Returns 105 ------- 106 None 107 108 Examples 109 -------- 110 nx.write_shp(digraph, '/shapefiles') # doctest +SKIP 111 112 References 113 ---------- 114 .. [1] http://en.wikipedia.org/wiki/Shapefile 115 """ 116 try: 117 from osgeo import ogr 118 except ImportError: 119 raise ImportError("write_shp requires OGR: http://www.gdal.org/") 120 # easier to debug in python if ogr throws exceptions 121 ogr.UseExceptions() 122 123 def netgeometry(key, data): 124 if 'Wkb' in data: 125 geom = ogr.CreateGeometryFromWkb(data['Wkb']) 126 elif 'Wkt' in data: 127 geom = ogr.CreateGeometryFromWkt(data['Wkt']) 128 elif type(key[0]).__name__ == 'tuple': # edge keys are packed tuples 129 geom = ogr.Geometry(ogr.wkbLineString) 130 _from, _to = key[0], key[1] 131 try: 132 geom.SetPoint(0, *_from) 133 geom.SetPoint(1, *_to) 134 except TypeError: 135 # assume user used tuple of int and choked ogr 136 _ffrom = [float(x) for x in _from] 137 _fto = [float(x) for x in _to] 138 geom.SetPoint(0, *_ffrom) 139 geom.SetPoint(1, *_fto) 140 else: 141 geom = ogr.Geometry(ogr.wkbPoint) 142 try: 143 geom.SetPoint(0, *key) 144 except TypeError: 145 # assume user used tuple of int and choked ogr 146 fkey = [float(x) for x in key] 147 geom.SetPoint(0, *fkey) 148 149 return geom 150 151 # Create_feature with new optional attributes arg (should be dict type) 152 def create_feature(geometry, lyr, attributes=None): 153 feature = ogr.Feature(lyr.GetLayerDefn()) 154 feature.SetGeometry(g) 155 if attributes != None: 156 # Loop through attributes, assigning data to each field 157 for field, data in attributes.items(): 158 feature.SetField(field, data) 159 lyr.CreateFeature(feature) 160 feature.Destroy() 161 162 drv = ogr.GetDriverByName("ESRI Shapefile") 163 shpdir = drv.CreateDataSource(outdir) 164 # delete pre-existing output first otherwise ogr chokes 165 try: 166 shpdir.DeleteLayer("nodes") 167 except: 168 pass 169 nodes = shpdir.CreateLayer("nodes", None, ogr.wkbPoint) 170 for n in G: 171 data = G.node[n] or {} 172 g = netgeometry(n, data) 173 create_feature(g, nodes) 174 try: 175 shpdir.DeleteLayer("edges") 176 except: 177 pass 178 edges = shpdir.CreateLayer("edges", None, ogr.wkbLineString) 179 180 # New edge attribute write support merged into edge loop 181 fields = {} # storage for field names and their data types 182 attributes = {} # storage for attribute data (indexed by field names) 183 184 # Conversion dict between python and ogr types 185 OGRTypes = {int: ogr.OFTInteger, str: ogr.OFTString, float: ogr.OFTReal} 186 187 # Edge loop 188 for e in G.edges(data=True): 189 data = G.get_edge_data(*e) 190 g = netgeometry(e, data) 191 # Loop through attribute data in edges 192 for key, data in e[2].items(): 193 # Reject spatial data not required for attribute table 194 if (key != 'Json' and key != 'Wkt' and key != 'Wkb' 195 and key != 'ShpName'): 196 # For all edges check/add field and data type to fields dict 197 if key not in fields: 198 # Field not in previous edges so add to dict 199 if type(data) in OGRTypes: 200 fields[key] = OGRTypes[type(data)] 201 else: 202 # Data type not supported, default to string (char 80) 203 fields[key] = ogr.OFTString 204 # Create the new field 205 newfield = ogr.FieldDefn(key, fields[key]) 206 edges.CreateField(newfield) 207 # Store the data 
from new field to dict for CreateLayer() 208 attributes[key] = data 209 else: 210 # Field already exists, add data to dict for CreateLayer() 211 attributes[key] = data 212 # Create the feature with, passing new attribute data 213 create_feature(g, edges, attributes) 214 215 nodes, edges = None, None 216 217 218 # fixture for nose tests 219 def setup_module(module): 220 from nose import SkipTest 221 try: 222 import ogr 223 except: 224 raise SkipTest("OGR not available") 225 [end of networkx/readwrite/nx_shp.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/networkx/readwrite/nx_shp.py b/networkx/readwrite/nx_shp.py --- a/networkx/readwrite/nx_shp.py +++ b/networkx/readwrite/nx_shp.py @@ -55,16 +55,15 @@ except ImportError: raise ImportError("read_shp requires OGR: http://www.gdal.org/") - net = nx.DiGraph() - - def getfieldinfo(lyr, feature, flds): - f = feature - return [f.GetField(f.GetFieldIndex(x)) for x in flds] + if not isinstance(path, str): + return - def addlyr(lyr, fields): - for findex in range(lyr.GetFeatureCount()): - f = lyr.GetFeature(findex) - flddata = getfieldinfo(lyr, f, fields) + net = nx.DiGraph() + shp = ogr.Open(path) + for lyr in shp: + fields = [x.GetName() for x in lyr.schema] + for f in lyr: + flddata = [f.GetField(f.GetFieldIndex(x)) for x in fields] g = f.geometry() attributes = dict(zip(fields, flddata)) attributes["ShpName"] = lyr.GetName() @@ -76,14 +75,6 @@ attributes["Json"] = g.ExportToJson() last = g.GetPointCount() - 1 net.add_edge(g.GetPoint_2D(0), g.GetPoint_2D(last), attributes) - - if isinstance(path, str): - shp = ogr.Open(path) - lyrcount = shp.GetLayerCount() # multiple layers indicate a directory - for lyrindex in range(lyrcount): - lyr = shp.GetLayerByIndex(lyrindex) - flds = [x.GetName() for x in lyr.schema] - addlyr(lyr, flds) return net @@ -168,7 +159,7 @@ pass nodes = shpdir.CreateLayer("nodes", None, ogr.wkbPoint) for n in G: - data = G.node[n] or {} + data = G.node[n] g = netgeometry(n, data) create_feature(g, nodes) try:
{"golden_diff": "diff --git a/networkx/readwrite/nx_shp.py b/networkx/readwrite/nx_shp.py\n--- a/networkx/readwrite/nx_shp.py\n+++ b/networkx/readwrite/nx_shp.py\n@@ -55,16 +55,15 @@\n except ImportError:\n raise ImportError(\"read_shp requires OGR: http://www.gdal.org/\")\n \n- net = nx.DiGraph()\n-\n- def getfieldinfo(lyr, feature, flds):\n- f = feature\n- return [f.GetField(f.GetFieldIndex(x)) for x in flds]\n+ if not isinstance(path, str):\n+ return\n \n- def addlyr(lyr, fields):\n- for findex in range(lyr.GetFeatureCount()):\n- f = lyr.GetFeature(findex)\n- flddata = getfieldinfo(lyr, f, fields)\n+ net = nx.DiGraph()\n+ shp = ogr.Open(path)\n+ for lyr in shp:\n+ fields = [x.GetName() for x in lyr.schema]\n+ for f in lyr:\n+ flddata = [f.GetField(f.GetFieldIndex(x)) for x in fields]\n g = f.geometry()\n attributes = dict(zip(fields, flddata))\n attributes[\"ShpName\"] = lyr.GetName()\n@@ -76,14 +75,6 @@\n attributes[\"Json\"] = g.ExportToJson()\n last = g.GetPointCount() - 1\n net.add_edge(g.GetPoint_2D(0), g.GetPoint_2D(last), attributes)\n-\n- if isinstance(path, str):\n- shp = ogr.Open(path)\n- lyrcount = shp.GetLayerCount() # multiple layers indicate a directory\n- for lyrindex in range(lyrcount):\n- lyr = shp.GetLayerByIndex(lyrindex)\n- flds = [x.GetName() for x in lyr.schema]\n- addlyr(lyr, flds)\n return net\n \n \n@@ -168,7 +159,7 @@\n pass\n nodes = shpdir.CreateLayer(\"nodes\", None, ogr.wkbPoint)\n for n in G:\n- data = G.node[n] or {}\n+ data = G.node[n]\n g = netgeometry(n, data)\n create_feature(g, nodes)\n try:\n", "issue": "xrange vs python 3\nSearching the networkx code for `xrange` I see it's used in some \"Shapefile\" related code and tests. Should this be updated for python 3 compatibility, and is it not tested in the TravisCI testing?\n\n", "before_files": [{"content": "\"\"\"\n*********\nShapefile\n*********\n\nGenerates a networkx.DiGraph from point and line shapefiles.\n\n\"The Esri Shapefile or simply a shapefile is a popular geospatial vector\ndata format for geographic information systems software. It is developed\nand regulated by Esri as a (mostly) open specification for data\ninteroperability among Esri and other software products.\"\nSee http://en.wikipedia.org/wiki/Shapefile for additional information.\n\"\"\"\n# Copyright (C) 2004-2010 by\n# Ben Reilly <[email protected]>\n# Aric Hagberg <[email protected]>\n# Dan Schult <[email protected]>\n# Pieter Swart <[email protected]>\n# All rights reserved.\n# BSD license.\nimport networkx as nx\n__author__ = \"\"\"Ben Reilly ([email protected])\"\"\"\n__all__ = ['read_shp', 'write_shp']\n\n\ndef read_shp(path):\n \"\"\"Generates a networkx.DiGraph from shapefiles. Point geometries are\n translated into nodes, lines into edges. Coordinate tuples are used as\n keys. Attributes are preserved, line geometries are simplified into start\n and end coordinates. Accepts a single shapefile or directory of many\n shapefiles.\n\n \"The Esri Shapefile or simply a shapefile is a popular geospatial vector\n data format for geographic information systems software [1]_.\"\n\n Parameters\n ----------\n path : file or string\n File, directory, or filename to read.\n\n Returns\n -------\n G : NetworkX graph\n\n Examples\n --------\n >>> G=nx.read_shp('test.shp') # doctest: +SKIP\n\n References\n ----------\n .. 
[1] http://en.wikipedia.org/wiki/Shapefile\n \"\"\"\n try:\n from osgeo import ogr\n except ImportError:\n raise ImportError(\"read_shp requires OGR: http://www.gdal.org/\")\n\n net = nx.DiGraph()\n\n def getfieldinfo(lyr, feature, flds):\n f = feature\n return [f.GetField(f.GetFieldIndex(x)) for x in flds]\n\n def addlyr(lyr, fields):\n for findex in range(lyr.GetFeatureCount()):\n f = lyr.GetFeature(findex)\n flddata = getfieldinfo(lyr, f, fields)\n g = f.geometry()\n attributes = dict(zip(fields, flddata))\n attributes[\"ShpName\"] = lyr.GetName()\n if g.GetGeometryType() == 1: # point\n net.add_node((g.GetPoint_2D(0)), attributes)\n if g.GetGeometryType() == 2: # linestring\n attributes[\"Wkb\"] = g.ExportToWkb()\n attributes[\"Wkt\"] = g.ExportToWkt()\n attributes[\"Json\"] = g.ExportToJson()\n last = g.GetPointCount() - 1\n net.add_edge(g.GetPoint_2D(0), g.GetPoint_2D(last), attributes)\n\n if isinstance(path, str):\n shp = ogr.Open(path)\n lyrcount = shp.GetLayerCount() # multiple layers indicate a directory\n for lyrindex in range(lyrcount):\n lyr = shp.GetLayerByIndex(lyrindex)\n flds = [x.GetName() for x in lyr.schema]\n addlyr(lyr, flds)\n return net\n\n\ndef write_shp(G, outdir):\n \"\"\"Writes a networkx.DiGraph to two shapefiles, edges and nodes.\n Nodes and edges are expected to have a Well Known Binary (Wkb) or\n Well Known Text (Wkt) key in order to generate geometries. Also\n acceptable are nodes with a numeric tuple key (x,y).\n\n \"The Esri Shapefile or simply a shapefile is a popular geospatial vector\n data format for geographic information systems software [1]_.\"\n\n Parameters\n ----------\n outdir : directory path\n Output directory for the two shapefiles.\n\n Returns\n -------\n None\n\n Examples\n --------\n nx.write_shp(digraph, '/shapefiles') # doctest +SKIP\n\n References\n ----------\n .. 
[1] http://en.wikipedia.org/wiki/Shapefile\n \"\"\"\n try:\n from osgeo import ogr\n except ImportError:\n raise ImportError(\"write_shp requires OGR: http://www.gdal.org/\")\n # easier to debug in python if ogr throws exceptions\n ogr.UseExceptions()\n\n def netgeometry(key, data):\n if 'Wkb' in data:\n geom = ogr.CreateGeometryFromWkb(data['Wkb'])\n elif 'Wkt' in data:\n geom = ogr.CreateGeometryFromWkt(data['Wkt'])\n elif type(key[0]).__name__ == 'tuple': # edge keys are packed tuples\n geom = ogr.Geometry(ogr.wkbLineString)\n _from, _to = key[0], key[1]\n try:\n geom.SetPoint(0, *_from)\n geom.SetPoint(1, *_to)\n except TypeError:\n # assume user used tuple of int and choked ogr\n _ffrom = [float(x) for x in _from]\n _fto = [float(x) for x in _to]\n geom.SetPoint(0, *_ffrom)\n geom.SetPoint(1, *_fto)\n else:\n geom = ogr.Geometry(ogr.wkbPoint)\n try:\n geom.SetPoint(0, *key)\n except TypeError:\n # assume user used tuple of int and choked ogr\n fkey = [float(x) for x in key]\n geom.SetPoint(0, *fkey)\n\n return geom\n\n # Create_feature with new optional attributes arg (should be dict type)\n def create_feature(geometry, lyr, attributes=None):\n feature = ogr.Feature(lyr.GetLayerDefn())\n feature.SetGeometry(g)\n if attributes != None:\n # Loop through attributes, assigning data to each field\n for field, data in attributes.items():\n feature.SetField(field, data)\n lyr.CreateFeature(feature)\n feature.Destroy()\n\n drv = ogr.GetDriverByName(\"ESRI Shapefile\")\n shpdir = drv.CreateDataSource(outdir)\n # delete pre-existing output first otherwise ogr chokes\n try:\n shpdir.DeleteLayer(\"nodes\")\n except:\n pass\n nodes = shpdir.CreateLayer(\"nodes\", None, ogr.wkbPoint)\n for n in G:\n data = G.node[n] or {}\n g = netgeometry(n, data)\n create_feature(g, nodes)\n try:\n shpdir.DeleteLayer(\"edges\")\n except:\n pass\n edges = shpdir.CreateLayer(\"edges\", None, ogr.wkbLineString)\n\n # New edge attribute write support merged into edge loop\n fields = {} # storage for field names and their data types\n attributes = {} # storage for attribute data (indexed by field names)\n\n # Conversion dict between python and ogr types\n OGRTypes = {int: ogr.OFTInteger, str: ogr.OFTString, float: ogr.OFTReal}\n\n # Edge loop\n for e in G.edges(data=True):\n data = G.get_edge_data(*e)\n g = netgeometry(e, data)\n # Loop through attribute data in edges\n for key, data in e[2].items():\n # Reject spatial data not required for attribute table\n if (key != 'Json' and key != 'Wkt' and key != 'Wkb'\n and key != 'ShpName'):\n # For all edges check/add field and data type to fields dict\n if key not in fields:\n # Field not in previous edges so add to dict\n if type(data) in OGRTypes:\n fields[key] = OGRTypes[type(data)]\n else:\n # Data type not supported, default to string (char 80)\n fields[key] = ogr.OFTString\n # Create the new field\n newfield = ogr.FieldDefn(key, fields[key])\n edges.CreateField(newfield)\n # Store the data from new field to dict for CreateLayer()\n attributes[key] = data\n else:\n # Field already exists, add data to dict for CreateLayer()\n attributes[key] = data\n # Create the feature with, passing new attribute data\n create_feature(g, edges, attributes)\n\n nodes, edges = None, None\n\n\n# fixture for nose tests\ndef setup_module(module):\n from nose import SkipTest\n try:\n import ogr\n except:\n raise SkipTest(\"OGR not available\")\n", "path": "networkx/readwrite/nx_shp.py"}]}
3,080
525
gh_patches_debug_27616
rasdani/github-patches
git_diff
NVIDIA__NVFlare-383
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Yaml loader should be replaced by safe_loader or other more secure loader To load yaml files from unknown source, we should avoid using yaml's loader. A better way is to use either safe_loader or other mechanism. </issue> <code> [start of nvflare/ha/overseer/utils.py] 1 # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import os 16 import uuid 17 from datetime import datetime, timedelta 18 19 import yaml 20 21 OVERSEER_STORE = os.environ.get("OVERSEER_STORE") 22 23 if OVERSEER_STORE == "REDIS": 24 from .redis_store import do_refresh, get_all_sp, get_primary_sp, get_sp_by, update_sp 25 elif OVERSEER_STORE == "SQL": 26 from .sql_store import do_refresh, get_all_sp, get_primary_sp, get_sp_by, update_sp 27 elif OVERSEER_STORE == "MEM": 28 from .mem_store import do_refresh, get_all_sp, get_primary_sp, get_sp_by, update_sp 29 else: 30 print("Using default STORE (MEM)") 31 from .mem_store import do_refresh, get_all_sp, get_primary_sp, get_sp_by, update_sp # noqa 32 33 34 def load_privilege(): 35 privilege_file = os.environ.get("AUTHZ_FILE", "privilege.yml") 36 try: 37 privilege = yaml.load(open(privilege_file, "tr"), Loader=yaml.Loader) 38 except: 39 privilege = dict() 40 return privilege 41 42 43 def update_sp_state(project, now, heartbeat_timeout=10): 44 valid_starting = now - timedelta(seconds=heartbeat_timeout) 45 # mark all late SP as offline and not primary 46 # print(f"{now=} {valid_starting=}") 47 for sp in get_all_sp(project): 48 if datetime.fromisoformat(sp["last_heartbeat"]) < valid_starting: 49 sp["state"] = "offline" 50 sp["primary"] = False 51 else: 52 sp["state"] = "online" 53 update_sp(sp) 54 55 56 def simple_PSP_policy(incoming_sp, now): 57 """Find the primary SP (PSP). 58 59 If there is no PSP or current PSP timeout, choose one without heartbeat timeout. 60 """ 61 project = incoming_sp["project"] 62 sp = get_sp_by(dict(project=project, sp_end_point=incoming_sp["sp_end_point"])) 63 if sp: 64 sp["last_heartbeat"] = now.isoformat() 65 update_sp(sp) 66 else: 67 update_sp( 68 dict( 69 project=incoming_sp["project"], 70 sp_end_point=incoming_sp["sp_end_point"], 71 last_heartbeat=now.isoformat(), 72 state="online", 73 primary=False, 74 ) 75 ) 76 77 psp = get_primary_sp(project) 78 if not psp: 79 psp = get_sp_by(dict(project=project, state="online")) 80 if psp: 81 print(f"{psp['sp_end_point']} online") 82 psp["primary"] = True 83 psp["service_session_id"] = str(uuid.uuid4()) 84 update_sp(psp) 85 86 return psp 87 88 89 def promote_sp(sp): 90 psp = get_sp_by(sp) 91 project = sp["project"] 92 sp_end_point = sp["sp_end_point"] 93 if psp and psp["state"] == "online": 94 current_psp = get_primary_sp(project) 95 if all(current_psp[k] == v for k, v in sp.items()): 96 return True, f"Same sp_end_point, no need to promote {sp_end_point}." 
97 psp["primary"] = True 98 current_psp["primary"] = False 99 psp["service_session_id"] = str(uuid.uuid4()) 100 print(f"{psp['sp_end_point']} promoted") 101 print(f"{current_psp['sp_end_point']} demoted") 102 update_sp(psp) 103 update_sp(current_psp) 104 return False, psp 105 else: 106 return True, f"Unable to promote {sp_end_point}, either offline or not registered." 107 [end of nvflare/ha/overseer/utils.py] [start of nvflare/lighter/study.py] 1 # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from __future__ import absolute_import 16 17 import argparse 18 import json 19 import os 20 from datetime import datetime 21 22 import yaml 23 24 25 def get_input(prompt, item_list, multiple=False): 26 while True: 27 answer = input(prompt) 28 result = None 29 if multiple: 30 try: 31 if answer == "": 32 print("None of the choices is selected.") 33 result = [] 34 else: 35 trimmed = set(answer.split(",")) 36 result = [item_list[int(i)] for i in trimmed] 37 print(f"{result} selected after duplicate inputs removed.") 38 except BaseException: 39 print("Input contains errors (non-integer or out of index range)") 40 else: 41 try: 42 result = item_list[int(answer)] 43 except ValueError: 44 print(f"Expect integer but got {answer.__class__.__name__}") 45 except IndexError: 46 print("Number out of index range") 47 if result is not None: 48 break 49 return result 50 51 52 def get_date_input(prompt): 53 while True: 54 answer = input(prompt) 55 try: 56 result = datetime.strptime(answer, "%m/%d/%Y").date().isoformat() 57 break 58 except: 59 print(f"Expect MM/DD/YYYY but got {answer}") 60 return result 61 62 63 def main(): 64 parser = argparse.ArgumentParser() 65 parser.add_argument("-p", "--project_file", type=str, default="project.yml", help="file to describe FL project") 66 67 args = parser.parse_args() 68 69 current_path = os.getcwd() 70 71 # main project file 72 project_file = args.project_file 73 project_full_path = os.path.join(current_path, project_file) 74 if not os.path.exists(project_full_path): 75 print(f"{project_full_path} not found. 
Running study requires that file.") 76 exit(0) 77 78 project = yaml.load(open(project_full_path, "r"), Loader=yaml.Loader) 79 api_version = project.get("api_version") 80 if api_version not in [3]: 81 raise ValueError(f"API version expected 3 but found {api_version}") 82 83 admin_list = list() 84 client_list = list() 85 for p in project.get("participants"): 86 if p.get("type") == "admin": 87 admin_list.append(p.get("name")) 88 elif p.get("type") == "client": 89 client_list.append(p.get("name")) 90 91 admin_list_string = ", ".join([f"{i}:{v}" for i, v in enumerate(admin_list)]) 92 client_list_string = ", ".join([f"{i}:{v}" for i, v in enumerate(client_list)]) 93 94 name = input("Please enter the name of this study: ") 95 description = input("and brief description: ") 96 contact = get_input(f"select one admin for contact {admin_list_string}: ", admin_list) 97 98 participating_admins = get_input( 99 f"select participating_admins admins (separated by ',') {admin_list_string} ", admin_list, multiple=True 100 ) 101 participating_clients = get_input( 102 f"select participating clients (separated by ',') {client_list_string} ", client_list, multiple=True 103 ) 104 participating_clients_string = ", ".join([f"{i}:{v}" for i, v in enumerate(participating_clients)]) 105 # reviewer_dict = dict() 106 # for admin in participating_admins: 107 # reviewed_clients = get_input( 108 # f"what clients will reviewer {admin} review {participating_clients_string} ", 109 # participating_clients, 110 # multiple=True, 111 # ) 112 # reviewer_dict[admin] = reviewed_clients 113 start_date = get_date_input("input start date of this study (MM/DD/YYYY): ") 114 end_date = get_date_input("input end date of this study (MM/DD/YYYY): ") 115 116 study_config = dict( 117 name=name, 118 description=description, 119 contact=contact, 120 participating_admins=participating_admins, 121 participating_clients=participating_clients, 122 # reviewers=reviewer_dict, 123 start_date=start_date, 124 end_date=end_date, 125 ) 126 with open(f"{name}.json", "wt") as f: 127 f.write(json.dumps(study_config, indent=2)) 128 print(f"study config file was generated at {name}.json") 129 130 131 if __name__ == "__main__": 132 main() 133 [end of nvflare/lighter/study.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/nvflare/ha/overseer/utils.py b/nvflare/ha/overseer/utils.py --- a/nvflare/ha/overseer/utils.py +++ b/nvflare/ha/overseer/utils.py @@ -16,7 +16,7 @@ import uuid from datetime import datetime, timedelta -import yaml +from nvflare.lighter.utils import load_yaml OVERSEER_STORE = os.environ.get("OVERSEER_STORE") @@ -34,7 +34,7 @@ def load_privilege(): privilege_file = os.environ.get("AUTHZ_FILE", "privilege.yml") try: - privilege = yaml.load(open(privilege_file, "tr"), Loader=yaml.Loader) + privilege = load_yaml(privilege_file) except: privilege = dict() return privilege diff --git a/nvflare/lighter/study.py b/nvflare/lighter/study.py --- a/nvflare/lighter/study.py +++ b/nvflare/lighter/study.py @@ -19,7 +19,7 @@ import os from datetime import datetime -import yaml +from nvflare.lighter.utils import load_yaml def get_input(prompt, item_list, multiple=False): @@ -75,7 +75,7 @@ print(f"{project_full_path} not found. Running study requires that file.") exit(0) - project = yaml.load(open(project_full_path, "r"), Loader=yaml.Loader) + project = load_yaml(project_full_path) api_version = project.get("api_version") if api_version not in [3]: raise ValueError(f"API version expected 3 but found {api_version}")
{"golden_diff": "diff --git a/nvflare/ha/overseer/utils.py b/nvflare/ha/overseer/utils.py\n--- a/nvflare/ha/overseer/utils.py\n+++ b/nvflare/ha/overseer/utils.py\n@@ -16,7 +16,7 @@\n import uuid\n from datetime import datetime, timedelta\n \n-import yaml\n+from nvflare.lighter.utils import load_yaml\n \n OVERSEER_STORE = os.environ.get(\"OVERSEER_STORE\")\n \n@@ -34,7 +34,7 @@\n def load_privilege():\n privilege_file = os.environ.get(\"AUTHZ_FILE\", \"privilege.yml\")\n try:\n- privilege = yaml.load(open(privilege_file, \"tr\"), Loader=yaml.Loader)\n+ privilege = load_yaml(privilege_file)\n except:\n privilege = dict()\n return privilege\ndiff --git a/nvflare/lighter/study.py b/nvflare/lighter/study.py\n--- a/nvflare/lighter/study.py\n+++ b/nvflare/lighter/study.py\n@@ -19,7 +19,7 @@\n import os\n from datetime import datetime\n \n-import yaml\n+from nvflare.lighter.utils import load_yaml\n \n \n def get_input(prompt, item_list, multiple=False):\n@@ -75,7 +75,7 @@\n print(f\"{project_full_path} not found. Running study requires that file.\")\n exit(0)\n \n- project = yaml.load(open(project_full_path, \"r\"), Loader=yaml.Loader)\n+ project = load_yaml(project_full_path)\n api_version = project.get(\"api_version\")\n if api_version not in [3]:\n raise ValueError(f\"API version expected 3 but found {api_version}\")\n", "issue": "Yaml loader should be replaced by safe_loader or other more secure loader\nTo load yaml files from unknown source, we should avoid using yaml's loader. A better way is to use either safe_loader or other mechanism.\n", "before_files": [{"content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport uuid\nfrom datetime import datetime, timedelta\n\nimport yaml\n\nOVERSEER_STORE = os.environ.get(\"OVERSEER_STORE\")\n\nif OVERSEER_STORE == \"REDIS\":\n from .redis_store import do_refresh, get_all_sp, get_primary_sp, get_sp_by, update_sp\nelif OVERSEER_STORE == \"SQL\":\n from .sql_store import do_refresh, get_all_sp, get_primary_sp, get_sp_by, update_sp\nelif OVERSEER_STORE == \"MEM\":\n from .mem_store import do_refresh, get_all_sp, get_primary_sp, get_sp_by, update_sp\nelse:\n print(\"Using default STORE (MEM)\")\n from .mem_store import do_refresh, get_all_sp, get_primary_sp, get_sp_by, update_sp # noqa\n\n\ndef load_privilege():\n privilege_file = os.environ.get(\"AUTHZ_FILE\", \"privilege.yml\")\n try:\n privilege = yaml.load(open(privilege_file, \"tr\"), Loader=yaml.Loader)\n except:\n privilege = dict()\n return privilege\n\n\ndef update_sp_state(project, now, heartbeat_timeout=10):\n valid_starting = now - timedelta(seconds=heartbeat_timeout)\n # mark all late SP as offline and not primary\n # print(f\"{now=} {valid_starting=}\")\n for sp in get_all_sp(project):\n if datetime.fromisoformat(sp[\"last_heartbeat\"]) < valid_starting:\n sp[\"state\"] = \"offline\"\n sp[\"primary\"] = False\n else:\n sp[\"state\"] = \"online\"\n update_sp(sp)\n\n\ndef simple_PSP_policy(incoming_sp, 
now):\n \"\"\"Find the primary SP (PSP).\n\n If there is no PSP or current PSP timeout, choose one without heartbeat timeout.\n \"\"\"\n project = incoming_sp[\"project\"]\n sp = get_sp_by(dict(project=project, sp_end_point=incoming_sp[\"sp_end_point\"]))\n if sp:\n sp[\"last_heartbeat\"] = now.isoformat()\n update_sp(sp)\n else:\n update_sp(\n dict(\n project=incoming_sp[\"project\"],\n sp_end_point=incoming_sp[\"sp_end_point\"],\n last_heartbeat=now.isoformat(),\n state=\"online\",\n primary=False,\n )\n )\n\n psp = get_primary_sp(project)\n if not psp:\n psp = get_sp_by(dict(project=project, state=\"online\"))\n if psp:\n print(f\"{psp['sp_end_point']} online\")\n psp[\"primary\"] = True\n psp[\"service_session_id\"] = str(uuid.uuid4())\n update_sp(psp)\n\n return psp\n\n\ndef promote_sp(sp):\n psp = get_sp_by(sp)\n project = sp[\"project\"]\n sp_end_point = sp[\"sp_end_point\"]\n if psp and psp[\"state\"] == \"online\":\n current_psp = get_primary_sp(project)\n if all(current_psp[k] == v for k, v in sp.items()):\n return True, f\"Same sp_end_point, no need to promote {sp_end_point}.\"\n psp[\"primary\"] = True\n current_psp[\"primary\"] = False\n psp[\"service_session_id\"] = str(uuid.uuid4())\n print(f\"{psp['sp_end_point']} promoted\")\n print(f\"{current_psp['sp_end_point']} demoted\")\n update_sp(psp)\n update_sp(current_psp)\n return False, psp\n else:\n return True, f\"Unable to promote {sp_end_point}, either offline or not registered.\"\n", "path": "nvflare/ha/overseer/utils.py"}, {"content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nimport argparse\nimport json\nimport os\nfrom datetime import datetime\n\nimport yaml\n\n\ndef get_input(prompt, item_list, multiple=False):\n while True:\n answer = input(prompt)\n result = None\n if multiple:\n try:\n if answer == \"\":\n print(\"None of the choices is selected.\")\n result = []\n else:\n trimmed = set(answer.split(\",\"))\n result = [item_list[int(i)] for i in trimmed]\n print(f\"{result} selected after duplicate inputs removed.\")\n except BaseException:\n print(\"Input contains errors (non-integer or out of index range)\")\n else:\n try:\n result = item_list[int(answer)]\n except ValueError:\n print(f\"Expect integer but got {answer.__class__.__name__}\")\n except IndexError:\n print(\"Number out of index range\")\n if result is not None:\n break\n return result\n\n\ndef get_date_input(prompt):\n while True:\n answer = input(prompt)\n try:\n result = datetime.strptime(answer, \"%m/%d/%Y\").date().isoformat()\n break\n except:\n print(f\"Expect MM/DD/YYYY but got {answer}\")\n return result\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-p\", \"--project_file\", type=str, default=\"project.yml\", help=\"file to describe FL project\")\n\n args = parser.parse_args()\n\n current_path = os.getcwd()\n\n # main project file\n project_file = args.project_file\n project_full_path = 
os.path.join(current_path, project_file)\n if not os.path.exists(project_full_path):\n print(f\"{project_full_path} not found. Running study requires that file.\")\n exit(0)\n\n project = yaml.load(open(project_full_path, \"r\"), Loader=yaml.Loader)\n api_version = project.get(\"api_version\")\n if api_version not in [3]:\n raise ValueError(f\"API version expected 3 but found {api_version}\")\n\n admin_list = list()\n client_list = list()\n for p in project.get(\"participants\"):\n if p.get(\"type\") == \"admin\":\n admin_list.append(p.get(\"name\"))\n elif p.get(\"type\") == \"client\":\n client_list.append(p.get(\"name\"))\n\n admin_list_string = \", \".join([f\"{i}:{v}\" for i, v in enumerate(admin_list)])\n client_list_string = \", \".join([f\"{i}:{v}\" for i, v in enumerate(client_list)])\n\n name = input(\"Please enter the name of this study: \")\n description = input(\"and brief description: \")\n contact = get_input(f\"select one admin for contact {admin_list_string}: \", admin_list)\n\n participating_admins = get_input(\n f\"select participating_admins admins (separated by ',') {admin_list_string} \", admin_list, multiple=True\n )\n participating_clients = get_input(\n f\"select participating clients (separated by ',') {client_list_string} \", client_list, multiple=True\n )\n participating_clients_string = \", \".join([f\"{i}:{v}\" for i, v in enumerate(participating_clients)])\n # reviewer_dict = dict()\n # for admin in participating_admins:\n # reviewed_clients = get_input(\n # f\"what clients will reviewer {admin} review {participating_clients_string} \",\n # participating_clients,\n # multiple=True,\n # )\n # reviewer_dict[admin] = reviewed_clients\n start_date = get_date_input(\"input start date of this study (MM/DD/YYYY): \")\n end_date = get_date_input(\"input end date of this study (MM/DD/YYYY): \")\n\n study_config = dict(\n name=name,\n description=description,\n contact=contact,\n participating_admins=participating_admins,\n participating_clients=participating_clients,\n # reviewers=reviewer_dict,\n start_date=start_date,\n end_date=end_date,\n )\n with open(f\"{name}.json\", \"wt\") as f:\n f.write(json.dumps(study_config, indent=2))\n print(f\"study config file was generated at {name}.json\")\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "nvflare/lighter/study.py"}]}
3,097
375
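The nvflare patch above routes YAML parsing through a shared `load_yaml` helper instead of calling `yaml.load(..., Loader=yaml.Loader)` directly, since the files being read can come from untrusted sources. As a rough illustration of the idea (the real `nvflare.lighter.utils.load_yaml` may be implemented differently), a minimal helper built on PyYAML's safe loader could look like this:

```python
# Illustrative sketch only; the actual nvflare.lighter.utils.load_yaml may differ.
import yaml


def load_yaml(path):
    """Load a YAML file with the safe loader.

    yaml.safe_load only constructs plain Python types (dict, list, str, numbers),
    so a crafted document from an untrusted source cannot trigger arbitrary
    object construction the way the full yaml.Loader can.
    """
    with open(path, "r") as f:
        return yaml.safe_load(f)


# Usage mirroring the patched call sites:
# project = load_yaml(project_full_path)
# privilege = load_yaml(privilege_file)
```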
gh_patches_debug_17658
rasdani/github-patches
git_diff
pantsbuild__pants-12060
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `./pants run ...` does not work for non-venv-mode `pex_binary` targets that re-exec sys.argv[0]. In short, the `run` goal executes via ~: ``` export PEX_PATH=/path/to/requirements.pex export PEX_EXTRA_SYS_PATH=/path/to/source_root_1:/path/to/cource_root2 ./only-contains-entry-point-metadata.pex ``` If the executed code then tries to re-execute via argv[0] (the PEX file itself), then sys.path scrubbing is engaged which strips back off the PEX_PATH and PEX_EXTRA_SYS_PATH triggered sys.path additions since those two env vars are also stripped by default. Either Pants needs to expose the `--no-strip-pex-env` option as a `pex_binary` parameter or else it needs to set this option for `pants run` unconditionally. The concrete example of apps that re-exec via sys.argv[0] are django manage.py apps. See https://github.com/pantsbuild/pex/issues/1349 where @asherf discovered the issue and the mechanism behind it was all worked out. </issue> <code> [start of src/python/pants/backend/python/goals/run_pex_binary.py] 1 # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 import os 5 6 from pants.backend.python.goals.package_pex_binary import PexBinaryFieldSet 7 from pants.backend.python.target_types import ( 8 PexBinaryDefaults, 9 ResolvedPexEntryPoint, 10 ResolvePexEntryPointRequest, 11 ) 12 from pants.backend.python.util_rules.pex import Pex, PexRequest 13 from pants.backend.python.util_rules.pex_environment import PexEnvironment 14 from pants.backend.python.util_rules.pex_from_targets import PexFromTargetsRequest 15 from pants.backend.python.util_rules.python_sources import ( 16 PythonSourceFiles, 17 PythonSourceFilesRequest, 18 ) 19 from pants.core.goals.run import RunFieldSet, RunRequest 20 from pants.engine.fs import Digest, MergeDigests 21 from pants.engine.rules import Get, MultiGet, collect_rules, rule 22 from pants.engine.target import TransitiveTargets, TransitiveTargetsRequest 23 from pants.engine.unions import UnionRule 24 from pants.util.logging import LogLevel 25 26 27 @rule(level=LogLevel.DEBUG) 28 async def create_pex_binary_run_request( 29 field_set: PexBinaryFieldSet, 30 pex_binary_defaults: PexBinaryDefaults, 31 pex_env: PexEnvironment, 32 ) -> RunRequest: 33 entry_point, transitive_targets = await MultiGet( 34 Get( 35 ResolvedPexEntryPoint, 36 ResolvePexEntryPointRequest(field_set.entry_point), 37 ), 38 Get(TransitiveTargets, TransitiveTargetsRequest([field_set.address])), 39 ) 40 41 # Note that we get an intermediate PexRequest here (instead of going straight to a Pex) 42 # so that we can get the interpreter constraints for use in runner_pex_request. 
43 requirements_pex_request = await Get( 44 PexRequest, 45 PexFromTargetsRequest, 46 PexFromTargetsRequest.for_requirements([field_set.address], internal_only=True), 47 ) 48 49 requirements_request = Get(Pex, PexRequest, requirements_pex_request) 50 51 sources_request = Get( 52 PythonSourceFiles, PythonSourceFilesRequest(transitive_targets.closure, include_files=True) 53 ) 54 55 output_filename = f"{field_set.address.target_name}.pex" 56 runner_pex_request = Get( 57 Pex, 58 PexRequest( 59 output_filename=output_filename, 60 interpreter_constraints=requirements_pex_request.interpreter_constraints, 61 additional_args=field_set.generate_additional_args(pex_binary_defaults), 62 internal_only=True, 63 # Note that the entry point file is not in the PEX itself. It's loaded by setting 64 # `PEX_EXTRA_SYS_PATH`. 65 # TODO(John Sirois): Support ConsoleScript in PexBinary targets: 66 # https://github.com/pantsbuild/pants/issues/11619 67 main=entry_point.val, 68 ), 69 ) 70 71 requirements, sources, runner_pex = await MultiGet( 72 requirements_request, sources_request, runner_pex_request 73 ) 74 75 merged_digest = await Get( 76 Digest, 77 MergeDigests( 78 [requirements.digest, sources.source_files.snapshot.digest, runner_pex.digest] 79 ), 80 ) 81 82 def in_chroot(relpath: str) -> str: 83 return os.path.join("{chroot}", relpath) 84 85 args = pex_env.create_argv(in_chroot(runner_pex.name), python=runner_pex.python) 86 87 chrooted_source_roots = [in_chroot(sr) for sr in sources.source_roots] 88 extra_env = { 89 **pex_env.environment_dict(python_configured=runner_pex.python is not None), 90 "PEX_PATH": in_chroot(requirements_pex_request.output_filename), 91 "PEX_EXTRA_SYS_PATH": ":".join(chrooted_source_roots), 92 } 93 94 return RunRequest(digest=merged_digest, args=args, extra_env=extra_env) 95 96 97 def rules(): 98 return [*collect_rules(), UnionRule(RunFieldSet, PexBinaryFieldSet)] 99 [end of src/python/pants/backend/python/goals/run_pex_binary.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/python/pants/backend/python/goals/run_pex_binary.py b/src/python/pants/backend/python/goals/run_pex_binary.py --- a/src/python/pants/backend/python/goals/run_pex_binary.py +++ b/src/python/pants/backend/python/goals/run_pex_binary.py @@ -58,7 +58,13 @@ PexRequest( output_filename=output_filename, interpreter_constraints=requirements_pex_request.interpreter_constraints, - additional_args=field_set.generate_additional_args(pex_binary_defaults), + additional_args=( + *field_set.generate_additional_args(pex_binary_defaults), + # N.B.: Since we cobble together the runtime environment via PEX_PATH and + # PEX_EXTRA_SYS_PATH below, it's important for any app that re-executes itself that + # these environment variables are not stripped. + "--no-strip-pex-env", + ), internal_only=True, # Note that the entry point file is not in the PEX itself. It's loaded by setting # `PEX_EXTRA_SYS_PATH`.
{"golden_diff": "diff --git a/src/python/pants/backend/python/goals/run_pex_binary.py b/src/python/pants/backend/python/goals/run_pex_binary.py\n--- a/src/python/pants/backend/python/goals/run_pex_binary.py\n+++ b/src/python/pants/backend/python/goals/run_pex_binary.py\n@@ -58,7 +58,13 @@\n PexRequest(\n output_filename=output_filename,\n interpreter_constraints=requirements_pex_request.interpreter_constraints,\n- additional_args=field_set.generate_additional_args(pex_binary_defaults),\n+ additional_args=(\n+ *field_set.generate_additional_args(pex_binary_defaults),\n+ # N.B.: Since we cobble together the runtime environment via PEX_PATH and\n+ # PEX_EXTRA_SYS_PATH below, it's important for any app that re-executes itself that\n+ # these environment variables are not stripped.\n+ \"--no-strip-pex-env\",\n+ ),\n internal_only=True,\n # Note that the entry point file is not in the PEX itself. It's loaded by setting\n # `PEX_EXTRA_SYS_PATH`.\n", "issue": "`./pants run ...` does not work for non-venv-mode `pex_binary` targets that re-exec sys.argv[0].\nIn short, the `run` goal executes via ~:\r\n```\r\nexport PEX_PATH=/path/to/requirements.pex\r\nexport PEX_EXTRA_SYS_PATH=/path/to/source_root_1:/path/to/cource_root2\r\n./only-contains-entry-point-metadata.pex\r\n```\r\n\r\nIf the executed code then tries to re-execute via argv[0] (the PEX file itself), then sys.path scrubbing is engaged which strips back off the PEX_PATH and PEX_EXTRA_SYS_PATH triggered sys.path additions since those two env vars are also stripped by default. Either Pants needs to expose the `--no-strip-pex-env` option as a `pex_binary` parameter or else it needs to set this option for `pants run` unconditionally.\r\n\r\nThe concrete example of apps that re-exec via sys.argv[0] are django manage.py apps.\r\n\r\nSee https://github.com/pantsbuild/pex/issues/1349 where @asherf discovered the issue and the mechanism behind it was all worked out.\n", "before_files": [{"content": "# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nimport os\n\nfrom pants.backend.python.goals.package_pex_binary import PexBinaryFieldSet\nfrom pants.backend.python.target_types import (\n PexBinaryDefaults,\n ResolvedPexEntryPoint,\n ResolvePexEntryPointRequest,\n)\nfrom pants.backend.python.util_rules.pex import Pex, PexRequest\nfrom pants.backend.python.util_rules.pex_environment import PexEnvironment\nfrom pants.backend.python.util_rules.pex_from_targets import PexFromTargetsRequest\nfrom pants.backend.python.util_rules.python_sources import (\n PythonSourceFiles,\n PythonSourceFilesRequest,\n)\nfrom pants.core.goals.run import RunFieldSet, RunRequest\nfrom pants.engine.fs import Digest, MergeDigests\nfrom pants.engine.rules import Get, MultiGet, collect_rules, rule\nfrom pants.engine.target import TransitiveTargets, TransitiveTargetsRequest\nfrom pants.engine.unions import UnionRule\nfrom pants.util.logging import LogLevel\n\n\n@rule(level=LogLevel.DEBUG)\nasync def create_pex_binary_run_request(\n field_set: PexBinaryFieldSet,\n pex_binary_defaults: PexBinaryDefaults,\n pex_env: PexEnvironment,\n) -> RunRequest:\n entry_point, transitive_targets = await MultiGet(\n Get(\n ResolvedPexEntryPoint,\n ResolvePexEntryPointRequest(field_set.entry_point),\n ),\n Get(TransitiveTargets, TransitiveTargetsRequest([field_set.address])),\n )\n\n # Note that we get an intermediate PexRequest here (instead of going straight to a Pex)\n # so that we can get the interpreter 
constraints for use in runner_pex_request.\n requirements_pex_request = await Get(\n PexRequest,\n PexFromTargetsRequest,\n PexFromTargetsRequest.for_requirements([field_set.address], internal_only=True),\n )\n\n requirements_request = Get(Pex, PexRequest, requirements_pex_request)\n\n sources_request = Get(\n PythonSourceFiles, PythonSourceFilesRequest(transitive_targets.closure, include_files=True)\n )\n\n output_filename = f\"{field_set.address.target_name}.pex\"\n runner_pex_request = Get(\n Pex,\n PexRequest(\n output_filename=output_filename,\n interpreter_constraints=requirements_pex_request.interpreter_constraints,\n additional_args=field_set.generate_additional_args(pex_binary_defaults),\n internal_only=True,\n # Note that the entry point file is not in the PEX itself. It's loaded by setting\n # `PEX_EXTRA_SYS_PATH`.\n # TODO(John Sirois): Support ConsoleScript in PexBinary targets:\n # https://github.com/pantsbuild/pants/issues/11619\n main=entry_point.val,\n ),\n )\n\n requirements, sources, runner_pex = await MultiGet(\n requirements_request, sources_request, runner_pex_request\n )\n\n merged_digest = await Get(\n Digest,\n MergeDigests(\n [requirements.digest, sources.source_files.snapshot.digest, runner_pex.digest]\n ),\n )\n\n def in_chroot(relpath: str) -> str:\n return os.path.join(\"{chroot}\", relpath)\n\n args = pex_env.create_argv(in_chroot(runner_pex.name), python=runner_pex.python)\n\n chrooted_source_roots = [in_chroot(sr) for sr in sources.source_roots]\n extra_env = {\n **pex_env.environment_dict(python_configured=runner_pex.python is not None),\n \"PEX_PATH\": in_chroot(requirements_pex_request.output_filename),\n \"PEX_EXTRA_SYS_PATH\": \":\".join(chrooted_source_roots),\n }\n\n return RunRequest(digest=merged_digest, args=args, extra_env=extra_env)\n\n\ndef rules():\n return [*collect_rules(), UnionRule(RunFieldSet, PexBinaryFieldSet)]\n", "path": "src/python/pants/backend/python/goals/run_pex_binary.py"}]}
1,826
234
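The Pants patch above builds the runner PEX with `--no-strip-pex-env` so that `PEX_PATH` and `PEX_EXTRA_SYS_PATH` survive into processes that re-execute themselves. A hypothetical reproduction sketch of the failure mode described in the issue (not code from the Pants repository) is an entry point that re-execs via `sys.argv[0]`, roughly the pattern a `manage.py`-style autoreloader follows:

```python
# Hypothetical reproduction sketch; not from the Pants code base.
import os
import sys


def main():
    # Under `./pants run`, these are injected so the thin runner PEX can find the
    # requirements PEX and the first-party source roots.
    print("PEX_PATH           =", os.environ.get("PEX_PATH"))
    print("PEX_EXTRA_SYS_PATH =", os.environ.get("PEX_EXTRA_SYS_PATH"))

    if os.environ.get("ALREADY_REEXECED") != "1":
        os.environ["ALREADY_REEXECED"] = "1"
        # argv[0] is the PEX file itself. With the default env stripping, the PEX
        # runtime removes the PEX_* variables before user code runs, so this child
        # never receives them and loses the injected sys.path entries.
        os.execve(sys.argv[0], sys.argv, os.environ)


if __name__ == "__main__":
    main()
```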
gh_patches_debug_8408
rasdani/github-patches
git_diff
ckan__ckan-3735
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Tolerate missing system_info table If you have an old or incomplete database some commands (such as db clean) will fail because they can't find the system_info table. ```python-traceback $ cd ckan; paster db clean -c test-core.ini; paster db init -c test-core.ini Traceback (most recent call last): File "/home/ubuntu/virtualenvs/venv-system/bin/paster", line 11, in <module> sys.exit(run()) File "/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/paste/script/command.py", line 102, in run invoke(command, command_name, options, args[1:]) File "/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/paste/script/command.py", line 141, in invoke exit_code = runner.run(args) File "/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/paste/script/command.py", line 236, in run result = self.command() File "/home/ubuntu/ckanext-scheming/ckan/ckan/lib/cli.py", line 217, in command self._load_config(cmd!='upgrade') File "/home/ubuntu/ckanext-scheming/ckan/ckan/lib/cli.py", line 161, in _load_config load_environment(conf.global_conf, conf.local_conf) File "/home/ubuntu/ckanext-scheming/ckan/ckan/config/environment.py", line 99, in load_environment app_globals.reset() File "/home/ubuntu/ckanext-scheming/ckan/ckan/lib/app_globals.py", line 172, in reset get_config_value(key) File "/home/ubuntu/ckanext-scheming/ckan/ckan/lib/app_globals.py", line 139, in get_config_value value = model.get_system_info(key) File "/home/ubuntu/ckanext-scheming/ckan/ckan/model/system_info.py", line 56, in get_system_info obj = meta.Session.query(SystemInfo).filter_by(key=key).first() File "/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/orm/query.py", line 2334, in first ret = list(self[0:1]) File "/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/orm/query.py", line 2201, in __getitem__ return list(res) File "/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/orm/query.py", line 2405, in __iter__ return self._execute_and_instances(context) File "/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/orm/query.py", line 2420, in _execute_and_instances result = conn.execute(querycontext.statement, self._params) File "/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/engine/base.py", line 727, in execute return meth(self, multiparams, params) File "/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/sql/elements.py", line 322, in _execute_on_connection return connection._execute_clauseelement(self, multiparams, params) File "/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/engine/base.py", line 824, in _execute_clauseelement compiled_sql, distilled_params File "/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/engine/base.py", line 954, in _execute_context context) File "/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/engine/base.py", line 1116, in _handle_dbapi_exception exc_info File "/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/util/compat.py", line 189, in raise_from_cause reraise(type(exception), exception, tb=exc_tb) File "/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/engine/base.py", line 947, in 
_execute_context context) File "/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/engine/default.py", line 435, in do_execute cursor.execute(statement, parameters) sqlalchemy.exc.ProgrammingError: (ProgrammingError) column system_info.state does not exist LINE 1: ...info_key, system_info.value AS system_info_value, system_inf... ^ 'SELECT system_info.id AS system_info_id, system_info.key AS system_info_key, system_info.value AS system_info_value, system_info.state AS system_info_state, system_info.revision_id AS system_info_revision_id \nFROM system_info \nWHERE system_info.key = %(key_1)s \n LIMIT %(param_1)s' {'param_1': 1, 'key_1': 'ckan.site_description'} ``` This change treats a missing system_info table the same as no overridden configuration. </issue> <code> [start of ckan/model/system_info.py] 1 # encoding: utf-8 2 3 ''' 4 The system_info table and SystemInfo mapped class store runtime-editable 5 configuration options. 6 7 For more details, check :doc:`maintaining/configuration`. 8 ''' 9 10 from sqlalchemy import types, Column, Table 11 12 import vdm.sqlalchemy 13 import meta 14 import core 15 import domain_object 16 17 __all__ = ['system_info_revision_table', 'system_info_table', 'SystemInfo', 18 'SystemInfoRevision', 'get_system_info', 'set_system_info'] 19 20 system_info_table = Table( 21 'system_info', meta.metadata, 22 Column('id', types.Integer(), primary_key=True, nullable=False), 23 Column('key', types.Unicode(100), unique=True, nullable=False), 24 Column('value', types.UnicodeText), 25 ) 26 27 vdm.sqlalchemy.make_table_stateful(system_info_table) 28 system_info_revision_table = core.make_revisioned_table(system_info_table) 29 30 31 class SystemInfo(vdm.sqlalchemy.RevisionedObjectMixin, 32 vdm.sqlalchemy.StatefulObjectMixin, 33 domain_object.DomainObject): 34 35 def __init__(self, key, value): 36 37 super(SystemInfo, self).__init__() 38 39 self.key = key 40 self.value = unicode(value) 41 42 43 meta.mapper(SystemInfo, system_info_table, 44 extension=[ 45 vdm.sqlalchemy.Revisioner(system_info_revision_table), 46 ]) 47 48 vdm.sqlalchemy.modify_base_object_mapper(SystemInfo, core.Revision, core.State) 49 SystemInfoRevision = vdm.sqlalchemy.create_object_version(meta.mapper, 50 SystemInfo, 51 system_info_revision_table) 52 53 54 def get_system_info(key, default=None): 55 ''' get data from system_info table ''' 56 obj = meta.Session.query(SystemInfo).filter_by(key=key).first() 57 if obj: 58 return obj.value 59 else: 60 return default 61 62 63 def delete_system_info(key, default=None): 64 ''' delete data from system_info table ''' 65 obj = meta.Session.query(SystemInfo).filter_by(key=key).first() 66 if obj: 67 meta.Session.delete(obj) 68 meta.Session.commit() 69 70 71 def set_system_info(key, value): 72 ''' save data in the system_info table ''' 73 obj = None 74 obj = meta.Session.query(SystemInfo).filter_by(key=key).first() 75 if obj and obj.value == unicode(value): 76 return 77 if not obj: 78 obj = SystemInfo(key, value) 79 else: 80 obj.value = unicode(value) 81 82 from ckan.model import repo 83 rev = repo.new_revision() 84 rev.message = 'Set {0} setting in system_info table'.format(key) 85 meta.Session.add(obj) 86 meta.Session.commit() 87 88 return True 89 [end of ckan/model/system_info.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ckan/model/system_info.py b/ckan/model/system_info.py --- a/ckan/model/system_info.py +++ b/ckan/model/system_info.py @@ -53,11 +53,14 @@ def get_system_info(key, default=None): ''' get data from system_info table ''' - obj = meta.Session.query(SystemInfo).filter_by(key=key).first() - if obj: - return obj.value - else: - return default + from sqlalchemy.exc import ProgrammingError + try: + obj = meta.Session.query(SystemInfo).filter_by(key=key).first() + if obj: + return obj.value + except ProgrammingError: + meta.Session.rollback() + return default def delete_system_info(key, default=None):
{"golden_diff": "diff --git a/ckan/model/system_info.py b/ckan/model/system_info.py\n--- a/ckan/model/system_info.py\n+++ b/ckan/model/system_info.py\n@@ -53,11 +53,14 @@\n \n def get_system_info(key, default=None):\n ''' get data from system_info table '''\n- obj = meta.Session.query(SystemInfo).filter_by(key=key).first()\n- if obj:\n- return obj.value\n- else:\n- return default\n+ from sqlalchemy.exc import ProgrammingError\n+ try:\n+ obj = meta.Session.query(SystemInfo).filter_by(key=key).first()\n+ if obj:\n+ return obj.value\n+ except ProgrammingError:\n+ meta.Session.rollback()\n+ return default\n \n \n def delete_system_info(key, default=None):\n", "issue": "Tolerate missing system_info table\nIf you have an old or incomplete database some commands (such as db clean) will fail because they can't find the system_info table.\r\n\r\n```python-traceback\r\n$ cd ckan; paster db clean -c test-core.ini; paster db init -c test-core.ini\r\nTraceback (most recent call last):\r\n File \"/home/ubuntu/virtualenvs/venv-system/bin/paster\", line 11, in <module>\r\n sys.exit(run())\r\n File \"/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/paste/script/command.py\", line 102, in run\r\n invoke(command, command_name, options, args[1:])\r\n File \"/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/paste/script/command.py\", line 141, in invoke\r\n exit_code = runner.run(args)\r\n File \"/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/paste/script/command.py\", line 236, in run\r\n result = self.command()\r\n File \"/home/ubuntu/ckanext-scheming/ckan/ckan/lib/cli.py\", line 217, in command\r\n self._load_config(cmd!='upgrade')\r\n File \"/home/ubuntu/ckanext-scheming/ckan/ckan/lib/cli.py\", line 161, in _load_config\r\n load_environment(conf.global_conf, conf.local_conf)\r\n File \"/home/ubuntu/ckanext-scheming/ckan/ckan/config/environment.py\", line 99, in load_environment\r\n app_globals.reset()\r\n File \"/home/ubuntu/ckanext-scheming/ckan/ckan/lib/app_globals.py\", line 172, in reset\r\n get_config_value(key)\r\n File \"/home/ubuntu/ckanext-scheming/ckan/ckan/lib/app_globals.py\", line 139, in get_config_value\r\n value = model.get_system_info(key)\r\n File \"/home/ubuntu/ckanext-scheming/ckan/ckan/model/system_info.py\", line 56, in get_system_info\r\n obj = meta.Session.query(SystemInfo).filter_by(key=key).first()\r\n File \"/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/orm/query.py\", line 2334, in first\r\n ret = list(self[0:1])\r\n File \"/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/orm/query.py\", line 2201, in __getitem__\r\n return list(res)\r\n File \"/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/orm/query.py\", line 2405, in __iter__\r\n return self._execute_and_instances(context)\r\n File \"/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/orm/query.py\", line 2420, in _execute_and_instances\r\n result = conn.execute(querycontext.statement, self._params)\r\n File \"/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/engine/base.py\", line 727, in execute\r\n return meth(self, multiparams, params)\r\n File \"/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/sql/elements.py\", line 322, in _execute_on_connection\r\n return connection._execute_clauseelement(self, multiparams, params)\r\n File 
\"/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/engine/base.py\", line 824, in _execute_clauseelement\r\n compiled_sql, distilled_params\r\n File \"/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/engine/base.py\", line 954, in _execute_context\r\n context)\r\n File \"/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/engine/base.py\", line 1116, in _handle_dbapi_exception\r\n exc_info\r\n File \"/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/util/compat.py\", line 189, in raise_from_cause\r\n reraise(type(exception), exception, tb=exc_tb)\r\n File \"/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/engine/base.py\", line 947, in _execute_context\r\n context)\r\n File \"/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/engine/default.py\", line 435, in do_execute\r\n cursor.execute(statement, parameters)\r\nsqlalchemy.exc.ProgrammingError: (ProgrammingError) column system_info.state does not exist\r\nLINE 1: ...info_key, system_info.value AS system_info_value, system_inf...\r\n ^\r\n 'SELECT system_info.id AS system_info_id, system_info.key AS system_info_key, system_info.value AS system_info_value, system_info.state AS system_info_state, system_info.revision_id AS system_info_revision_id \\nFROM system_info \\nWHERE system_info.key = %(key_1)s \\n LIMIT %(param_1)s' {'param_1': 1, 'key_1': 'ckan.site_description'}\r\n```\r\n\r\nThis change treats a missing system_info table the same as no overridden configuration.\n", "before_files": [{"content": "# encoding: utf-8\n\n'''\nThe system_info table and SystemInfo mapped class store runtime-editable\nconfiguration options.\n\nFor more details, check :doc:`maintaining/configuration`.\n'''\n\nfrom sqlalchemy import types, Column, Table\n\nimport vdm.sqlalchemy\nimport meta\nimport core\nimport domain_object\n\n__all__ = ['system_info_revision_table', 'system_info_table', 'SystemInfo',\n 'SystemInfoRevision', 'get_system_info', 'set_system_info']\n\nsystem_info_table = Table(\n 'system_info', meta.metadata,\n Column('id', types.Integer(), primary_key=True, nullable=False),\n Column('key', types.Unicode(100), unique=True, nullable=False),\n Column('value', types.UnicodeText),\n)\n\nvdm.sqlalchemy.make_table_stateful(system_info_table)\nsystem_info_revision_table = core.make_revisioned_table(system_info_table)\n\n\nclass SystemInfo(vdm.sqlalchemy.RevisionedObjectMixin,\n vdm.sqlalchemy.StatefulObjectMixin,\n domain_object.DomainObject):\n\n def __init__(self, key, value):\n\n super(SystemInfo, self).__init__()\n\n self.key = key\n self.value = unicode(value)\n\n\nmeta.mapper(SystemInfo, system_info_table,\n extension=[\n vdm.sqlalchemy.Revisioner(system_info_revision_table),\n ])\n\nvdm.sqlalchemy.modify_base_object_mapper(SystemInfo, core.Revision, core.State)\nSystemInfoRevision = vdm.sqlalchemy.create_object_version(meta.mapper,\n SystemInfo,\n system_info_revision_table)\n\n\ndef get_system_info(key, default=None):\n ''' get data from system_info table '''\n obj = meta.Session.query(SystemInfo).filter_by(key=key).first()\n if obj:\n return obj.value\n else:\n return default\n\n\ndef delete_system_info(key, default=None):\n ''' delete data from system_info table '''\n obj = meta.Session.query(SystemInfo).filter_by(key=key).first()\n if obj:\n meta.Session.delete(obj)\n meta.Session.commit()\n\n\ndef set_system_info(key, value):\n ''' save data in the 
system_info table '''\n obj = None\n obj = meta.Session.query(SystemInfo).filter_by(key=key).first()\n if obj and obj.value == unicode(value):\n return\n if not obj:\n obj = SystemInfo(key, value)\n else:\n obj.value = unicode(value)\n\n from ckan.model import repo\n rev = repo.new_revision()\n rev.message = 'Set {0} setting in system_info table'.format(key)\n meta.Session.add(obj)\n meta.Session.commit()\n\n return True\n", "path": "ckan/model/system_info.py"}]}
2,473
178
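The CKAN patch above wraps the `system_info` lookup in a `try/except ProgrammingError` and rolls the session back, so a database that lacks the table behaves as if no overridden configuration exists. The same "missing table means fall back to the default" pattern, written against plain SQLAlchemy with illustrative names rather than the CKAN models, looks roughly like this:

```python
# Generic sketch of the pattern; the model and session names are illustrative.
from sqlalchemy.exc import ProgrammingError


def get_setting(session, model, key, default=None):
    try:
        obj = session.query(model).filter_by(key=key).first()
        if obj:
            return obj.value
    except ProgrammingError:
        # On PostgreSQL the failed SELECT leaves the transaction aborted, so roll
        # the session back before anything else tries to use it.
        session.rollback()
    return default
```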
gh_patches_debug_5087
rasdani/github-patches
git_diff
readthedocs__readthedocs.org-3641
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Improve security contact webpage We need to improve our documentation about a user who found a security issue could contact us to report this vulnerability in a confidential way. This page should be clear regarding how to report the issue, how submit a patch (wihtout making it public) and what to do receive feedback / discuss about the solution. There is a page already but it's too poor: https://readthedocs.org/security/ </issue> <code> [start of readthedocs/urls.py] 1 # pylint: disable=missing-docstring 2 from __future__ import absolute_import 3 4 from functools import reduce 5 from operator import add 6 7 from django.conf.urls import url, include 8 from django.contrib import admin 9 from django.conf import settings 10 from django.conf.urls.static import static 11 from django.views.generic.base import TemplateView 12 from tastypie.api import Api 13 14 from readthedocs.api.base import (ProjectResource, UserResource, 15 VersionResource, FileResource) 16 from readthedocs.core.urls import docs_urls, core_urls, deprecated_urls 17 from readthedocs.core.views import (HomepageView, SupportView, 18 server_error_404, server_error_500) 19 from readthedocs.search import views as search_views 20 21 22 v1_api = Api(api_name='v1') 23 v1_api.register(UserResource()) 24 v1_api.register(ProjectResource()) 25 v1_api.register(VersionResource()) 26 v1_api.register(FileResource()) 27 28 admin.autodiscover() 29 30 handler404 = server_error_404 31 handler500 = server_error_500 32 33 basic_urls = [ 34 url(r'^$', HomepageView.as_view(), name='homepage'), 35 url(r'^support/', SupportView.as_view(), name='support'), 36 url(r'^security/', TemplateView.as_view(template_name='security.html')), 37 ] 38 39 rtd_urls = [ 40 url(r'^bookmarks/', include('readthedocs.bookmarks.urls')), 41 url(r'^search/$', search_views.elastic_search, name='search'), 42 url(r'^dashboard/', include('readthedocs.projects.urls.private')), 43 url(r'^profiles/', include('readthedocs.profiles.urls.public')), 44 url(r'^accounts/', include('readthedocs.profiles.urls.private')), 45 url(r'^accounts/', include('allauth.urls')), 46 url(r'^notifications/', include('readthedocs.notifications.urls')), 47 url(r'^accounts/gold/', include('readthedocs.gold.urls')), 48 # For redirects 49 url(r'^builds/', include('readthedocs.builds.urls')), 50 # For testing the 404's with DEBUG on. 51 url(r'^404/$', handler404), 52 # For testing the 500's with DEBUG on. 
53 url(r'^500/$', handler500), 54 ] 55 56 project_urls = [ 57 url(r'^projects/', include('readthedocs.projects.urls.public')), 58 ] 59 60 api_urls = [ 61 url(r'^api/', include(v1_api.urls)), 62 url(r'^api/v2/', include('readthedocs.restapi.urls')), 63 url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')), 64 url(r'^websupport/', include('readthedocs.comments.urls')), 65 ] 66 67 i18n_urls = [ 68 url(r'^i18n/', include('django.conf.urls.i18n')), 69 ] 70 71 admin_urls = [ 72 url(r'^admin/', include(admin.site.urls)), 73 ] 74 75 debug_urls = add( 76 [ 77 url('style-catalog/$', 78 TemplateView.as_view(template_name='style_catalog.html')), 79 ], 80 static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) 81 ) 82 83 # Export URLs 84 groups = [basic_urls, rtd_urls, project_urls, api_urls, core_urls, i18n_urls, 85 deprecated_urls] 86 87 if settings.USE_PROMOS: 88 # Include donation URL's 89 groups.append([ 90 url(r'^sustainability/', include('readthedocsext.donate.urls')), 91 ]) 92 93 if 'readthedocsext.embed' in settings.INSTALLED_APPS: 94 api_urls.insert( 95 0, 96 url(r'^api/v1/embed/', include('readthedocsext.embed.urls')) 97 ) 98 99 if not getattr(settings, 'USE_SUBDOMAIN', False) or settings.DEBUG: 100 groups.insert(0, docs_urls) 101 if getattr(settings, 'ALLOW_ADMIN', True): 102 groups.append(admin_urls) 103 if getattr(settings, 'DEBUG', False): 104 groups.append(debug_urls) 105 106 urlpatterns = reduce(add, groups) 107 [end of readthedocs/urls.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/readthedocs/urls.py b/readthedocs/urls.py --- a/readthedocs/urls.py +++ b/readthedocs/urls.py @@ -34,6 +34,8 @@ url(r'^$', HomepageView.as_view(), name='homepage'), url(r'^support/', SupportView.as_view(), name='support'), url(r'^security/', TemplateView.as_view(template_name='security.html')), + url(r'^.well-known/security.txt', + TemplateView.as_view(template_name='security.txt', content_type='text/plain')), ] rtd_urls = [
{"golden_diff": "diff --git a/readthedocs/urls.py b/readthedocs/urls.py\n--- a/readthedocs/urls.py\n+++ b/readthedocs/urls.py\n@@ -34,6 +34,8 @@\n url(r'^$', HomepageView.as_view(), name='homepage'),\n url(r'^support/', SupportView.as_view(), name='support'),\n url(r'^security/', TemplateView.as_view(template_name='security.html')),\n+ url(r'^.well-known/security.txt',\n+ TemplateView.as_view(template_name='security.txt', content_type='text/plain')),\n ]\n \n rtd_urls = [\n", "issue": "Improve security contact webpage\nWe need to improve our documentation about a user who found a security issue could contact us to report this vulnerability in a confidential way.\r\n\r\nThis page should be clear regarding how to report the issue, how submit a patch (wihtout making it public) and what to do receive feedback / discuss about the solution.\r\n\r\nThere is a page already but it's too poor: https://readthedocs.org/security/\n", "before_files": [{"content": "# pylint: disable=missing-docstring\nfrom __future__ import absolute_import\n\nfrom functools import reduce\nfrom operator import add\n\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.views.generic.base import TemplateView\nfrom tastypie.api import Api\n\nfrom readthedocs.api.base import (ProjectResource, UserResource,\n VersionResource, FileResource)\nfrom readthedocs.core.urls import docs_urls, core_urls, deprecated_urls\nfrom readthedocs.core.views import (HomepageView, SupportView,\n server_error_404, server_error_500)\nfrom readthedocs.search import views as search_views\n\n\nv1_api = Api(api_name='v1')\nv1_api.register(UserResource())\nv1_api.register(ProjectResource())\nv1_api.register(VersionResource())\nv1_api.register(FileResource())\n\nadmin.autodiscover()\n\nhandler404 = server_error_404\nhandler500 = server_error_500\n\nbasic_urls = [\n url(r'^$', HomepageView.as_view(), name='homepage'),\n url(r'^support/', SupportView.as_view(), name='support'),\n url(r'^security/', TemplateView.as_view(template_name='security.html')),\n]\n\nrtd_urls = [\n url(r'^bookmarks/', include('readthedocs.bookmarks.urls')),\n url(r'^search/$', search_views.elastic_search, name='search'),\n url(r'^dashboard/', include('readthedocs.projects.urls.private')),\n url(r'^profiles/', include('readthedocs.profiles.urls.public')),\n url(r'^accounts/', include('readthedocs.profiles.urls.private')),\n url(r'^accounts/', include('allauth.urls')),\n url(r'^notifications/', include('readthedocs.notifications.urls')),\n url(r'^accounts/gold/', include('readthedocs.gold.urls')),\n # For redirects\n url(r'^builds/', include('readthedocs.builds.urls')),\n # For testing the 404's with DEBUG on.\n url(r'^404/$', handler404),\n # For testing the 500's with DEBUG on.\n url(r'^500/$', handler500),\n]\n\nproject_urls = [\n url(r'^projects/', include('readthedocs.projects.urls.public')),\n]\n\napi_urls = [\n url(r'^api/', include(v1_api.urls)),\n url(r'^api/v2/', include('readthedocs.restapi.urls')),\n url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),\n url(r'^websupport/', include('readthedocs.comments.urls')),\n]\n\ni18n_urls = [\n url(r'^i18n/', include('django.conf.urls.i18n')),\n]\n\nadmin_urls = [\n url(r'^admin/', include(admin.site.urls)),\n]\n\ndebug_urls = add(\n [\n url('style-catalog/$',\n TemplateView.as_view(template_name='style_catalog.html')),\n ],\n static(settings.MEDIA_URL, 
document_root=settings.MEDIA_ROOT)\n)\n\n# Export URLs\ngroups = [basic_urls, rtd_urls, project_urls, api_urls, core_urls, i18n_urls,\n deprecated_urls]\n\nif settings.USE_PROMOS:\n # Include donation URL's\n groups.append([\n url(r'^sustainability/', include('readthedocsext.donate.urls')),\n ])\n\nif 'readthedocsext.embed' in settings.INSTALLED_APPS:\n api_urls.insert(\n 0,\n url(r'^api/v1/embed/', include('readthedocsext.embed.urls'))\n )\n\nif not getattr(settings, 'USE_SUBDOMAIN', False) or settings.DEBUG:\n groups.insert(0, docs_urls)\nif getattr(settings, 'ALLOW_ADMIN', True):\n groups.append(admin_urls)\nif getattr(settings, 'DEBUG', False):\n groups.append(debug_urls)\n\nurlpatterns = reduce(add, groups)\n", "path": "readthedocs/urls.py"}]}
1,680
128
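The Read the Docs patch above adds a plain-text route at `/.well-known/security.txt` next to the existing `/security/` page. A hypothetical test sketch for the new route (assumed, not taken from the Read the Docs test suite) could look like:

```python
# Hypothetical test sketch; class and method names are illustrative.
from django.test import TestCase


class SecurityTxtTests(TestCase):
    def test_security_txt_served_as_plain_text(self):
        response = self.client.get("/.well-known/security.txt")
        self.assertEqual(response.status_code, 200)
        # The urlconf passes content_type='text/plain' to TemplateView, so the
        # response should not come back as HTML.
        self.assertTrue(response["Content-Type"].startswith("text/plain"))
```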
gh_patches_debug_876
rasdani/github-patches
git_diff
microsoft__Qcodes-867
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> missing dependency`jsonschema` in requirements.txt The latest pip installable version of QCoDeS does not list jsonschema as a dependency but requires it. This problem came to light when running tests on a project that depeneds on QCoDeS. Part of my build script installs qcodes (pip install qcodes). Importing qcodes then raises an exception because jsonschema is missing. </issue> <code> [start of setup.py] 1 from setuptools import setup, find_packages 2 from distutils.version import StrictVersion 3 from importlib import import_module 4 import re 5 6 def get_version(verbose=1): 7 """ Extract version information from source code """ 8 9 try: 10 with open('qcodes/version.py', 'r') as f: 11 ln = f.readline() 12 # print(ln) 13 m = re.search('.* ''(.*)''', ln) 14 version = (m.group(1)).strip('\'') 15 except Exception as E: 16 print(E) 17 version = 'none' 18 if verbose: 19 print('get_version: %s' % version) 20 return version 21 22 23 def readme(): 24 with open('README.rst') as f: 25 return f.read() 26 27 extras = { 28 'MatPlot': ('matplotlib', '2.0.2'), 29 'QtPlot': ('pyqtgraph', '0.10.0'), 30 'coverage tests': ('coverage', '4.0'), 31 'Slack': ('slacker', '0.9.42') 32 } 33 extras_require = {k: '>='.join(v) for k, v in extras.items()} 34 35 setup(name='qcodes', 36 version=get_version(), 37 use_2to3=False, 38 39 maintainer='Jens H Nielsen', 40 maintainer_email='[email protected]', 41 description='Python-based data acquisition framework developed by the ' 42 'Copenhagen / Delft / Sydney / Microsoft quantum computing ' 43 'consortium', 44 long_description=readme(), 45 url='https://github.com/QCoDeS/Qcodes', 46 classifiers=[ 47 'Development Status :: 3 - Alpha', 48 'Intended Audience :: Science/Research', 49 'Programming Language :: Python :: 3 :: Only', 50 'Programming Language :: Python :: 3.5', 51 'Programming Language :: Python :: 3.6', 52 'Topic :: Scientific/Engineering' 53 ], 54 license='MIT', 55 # if we want to install without tests: 56 # packages=find_packages(exclude=["*.tests", "tests"]), 57 packages=find_packages(), 58 package_data={'qcodes': ['monitor/dist/*', 'monitor/dist/js/*', 59 'monitor/dist/css/*', 'config/*.json']}, 60 install_requires=[ 61 'numpy>=1.10', 62 'pyvisa>=1.8', 63 'h5py>=2.6', 64 'websockets>=3.2,<3.4' 65 ], 66 67 test_suite='qcodes.tests', 68 extras_require=extras_require, 69 70 # I think the only part of qcodes that would care about zip_safe 71 # is utils.helpers.reload_code; users of a zip-installed package 72 # shouldn't be needing to do this anyway, but we should test first. 73 zip_safe=False) 74 75 version_template = ''' 76 ***** 77 ***** package {0} must be at least version {1}. 
78 ***** Please upgrade it (pip install -U {0} or conda install {0}) 79 ***** in order to use {2} 80 ***** 81 ''' 82 83 missing_template = ''' 84 ***** 85 ***** package {0} not found 86 ***** Please install it (pip install {0} or conda install {0}) 87 ***** in order to use {1} 88 ***** 89 ''' 90 91 valueerror_template = ''' 92 ***** 93 ***** package {0} version not understood 94 ***** Please make sure the installed version ({1}) 95 ***** is compatible with the minimum required version ({2}) 96 ***** in order to use {3} 97 ***** 98 ''' 99 100 # now test the versions of extras 101 for extra, (module_name, min_version) in extras.items(): 102 try: 103 module = import_module(module_name) 104 if StrictVersion(module.__version__) < StrictVersion(min_version): 105 print(version_template.format(module_name, min_version, extra)) 106 except ImportError: 107 print(missing_template.format(module_name, extra)) 108 except ValueError: 109 print(valueerror_template.format( 110 module_name, module.__version__, min_version, extra)) 111 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -61,7 +61,8 @@ 'numpy>=1.10', 'pyvisa>=1.8', 'h5py>=2.6', - 'websockets>=3.2,<3.4' + 'websockets>=3.2,<3.4', + 'jsonschema' ], test_suite='qcodes.tests',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -61,7 +61,8 @@\n 'numpy>=1.10',\n 'pyvisa>=1.8',\n 'h5py>=2.6',\n- 'websockets>=3.2,<3.4'\n+ 'websockets>=3.2,<3.4',\n+ 'jsonschema'\n ],\n \n test_suite='qcodes.tests',\n", "issue": "missing dependency`jsonschema` in requirements.txt\nThe latest pip installable version of QCoDeS does not list jsonschema as a dependency but requires it. \r\n\r\nThis problem came to light when running tests on a project that depeneds on QCoDeS. Part of my build script installs qcodes (pip install qcodes). Importing qcodes then raises an exception because jsonschema is missing. \n", "before_files": [{"content": "from setuptools import setup, find_packages\nfrom distutils.version import StrictVersion\nfrom importlib import import_module\nimport re\n\ndef get_version(verbose=1):\n \"\"\" Extract version information from source code \"\"\"\n\n try:\n with open('qcodes/version.py', 'r') as f:\n ln = f.readline()\n # print(ln)\n m = re.search('.* ''(.*)''', ln)\n version = (m.group(1)).strip('\\'')\n except Exception as E:\n print(E)\n version = 'none'\n if verbose:\n print('get_version: %s' % version)\n return version\n\n\ndef readme():\n with open('README.rst') as f:\n return f.read()\n\nextras = {\n 'MatPlot': ('matplotlib', '2.0.2'),\n 'QtPlot': ('pyqtgraph', '0.10.0'),\n 'coverage tests': ('coverage', '4.0'),\n 'Slack': ('slacker', '0.9.42')\n}\nextras_require = {k: '>='.join(v) for k, v in extras.items()}\n\nsetup(name='qcodes',\n version=get_version(),\n use_2to3=False,\n\n maintainer='Jens H Nielsen',\n maintainer_email='[email protected]',\n description='Python-based data acquisition framework developed by the '\n 'Copenhagen / Delft / Sydney / Microsoft quantum computing '\n 'consortium',\n long_description=readme(),\n url='https://github.com/QCoDeS/Qcodes',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Science/Research',\n 'Programming Language :: Python :: 3 :: Only',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Scientific/Engineering'\n ],\n license='MIT',\n # if we want to install without tests:\n # packages=find_packages(exclude=[\"*.tests\", \"tests\"]),\n packages=find_packages(),\n package_data={'qcodes': ['monitor/dist/*', 'monitor/dist/js/*',\n 'monitor/dist/css/*', 'config/*.json']},\n install_requires=[\n 'numpy>=1.10',\n 'pyvisa>=1.8',\n 'h5py>=2.6',\n 'websockets>=3.2,<3.4'\n ],\n\n test_suite='qcodes.tests',\n extras_require=extras_require,\n\n # I think the only part of qcodes that would care about zip_safe\n # is utils.helpers.reload_code; users of a zip-installed package\n # shouldn't be needing to do this anyway, but we should test first.\n zip_safe=False)\n\nversion_template = '''\n*****\n***** package {0} must be at least version {1}.\n***** Please upgrade it (pip install -U {0} or conda install {0})\n***** in order to use {2}\n*****\n'''\n\nmissing_template = '''\n*****\n***** package {0} not found\n***** Please install it (pip install {0} or conda install {0})\n***** in order to use {1}\n*****\n'''\n\nvalueerror_template = '''\n*****\n***** package {0} version not understood\n***** Please make sure the installed version ({1})\n***** is compatible with the minimum required version ({2})\n***** in order to use {3}\n*****\n'''\n\n# now test the versions of extras\nfor extra, (module_name, min_version) in extras.items():\n try:\n module = import_module(module_name)\n if StrictVersion(module.__version__) < 
StrictVersion(min_version):\n print(version_template.format(module_name, min_version, extra))\n except ImportError:\n print(missing_template.format(module_name, extra))\n except ValueError:\n print(valueerror_template.format(\n module_name, module.__version__, min_version, extra))\n", "path": "setup.py"}]}
num_tokens_prompt: 1,676
num_tokens_diff: 105
gh_patches_debug_15498
rasdani/github-patches
git_diff
obspy__obspy-3407
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Possible typo on the documentation page of `obspy.signal.rotate.rotate_ne_rt` On its documentation page: https://docs.obspy.org/packages/autogen/obspy.signal.rotate.rotate_rt_ne.html the inputs of the `obspy.signal.rotate_rt_ne()` are shown as the north and east components, which should be the radial and transverse components instead. </issue> <code> [start of obspy/signal/rotate.py] 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 # ------------------------------------------------------------------ 4 # Filename: rotate.py 5 # Purpose: Various Seismogram Rotation Functions 6 # Author: Tobias Megies, Tom Richter, Lion Krischer 7 # Email: [email protected] 8 # 9 # Copyright (C) 2009-2013 Tobias Megies, Tom Richter, Lion Krischer 10 # -------------------------------------------------------------------- 11 """ 12 Various Seismogram Rotation Functions 13 14 :copyright: 15 The ObsPy Development Team ([email protected]) 16 :license: 17 GNU Lesser General Public License, Version 3 18 (https://www.gnu.org/copyleft/lesser.html) 19 """ 20 import warnings 21 from math import cos, sin, radians 22 23 import numpy as np 24 25 26 def rotate_ne_rt(n, e, ba): 27 """ 28 Rotates horizontal components of a seismogram. 29 30 The North- and East-Component of a seismogram will be rotated in Radial 31 and Transversal Component. The angle is given as the back-azimuth, that is 32 defined as the angle measured between the vector pointing from the station 33 to the source and the vector pointing from the station to the North. 34 35 :type n: :class:`~numpy.ndarray` 36 :param n: Data of the North component of the seismogram. 37 :type e: :class:`~numpy.ndarray` 38 :param e: Data of the East component of the seismogram. 39 :type ba: float 40 :param ba: The back azimuth from station to source in degrees. 41 :return: Radial and Transversal component of seismogram. 42 """ 43 if len(n) != len(e): 44 raise TypeError("North and East component have different length.") 45 if ba < 0 or ba > 360: 46 raise ValueError("Back Azimuth should be between 0 and 360 degrees.") 47 ba = radians(ba) 48 r = - e * sin(ba) - n * cos(ba) 49 t = - e * cos(ba) + n * sin(ba) 50 return r, t 51 52 53 def rotate_rt_ne(n, e, ba): 54 """ 55 Rotates horizontal components of a seismogram. 56 57 Rotates from radial and transversal components to North and East 58 components. 59 60 This is the inverse transformation of the transformation described 61 in :func:`rotate_ne_rt`. 62 """ 63 ba = 360.0 - ba 64 return rotate_ne_rt(n, e, ba) 65 66 67 def rotate_zne_lqt(z, n, e, ba, inc): 68 """ 69 Rotates all components of a seismogram. 70 71 The components will be rotated from ZNE (Z, North, East, left-handed) to 72 LQT (e.g. ray coordinate system, right-handed). The rotation angles are 73 given as the back-azimuth and inclination. 74 75 The transformation consists of 3 steps:: 76 77 1. mirroring of E-component at ZN plain: ZNE -> ZNW 78 2. negative rotation of coordinate system around Z-axis with angle ba: 79 ZNW -> ZRT 80 3. negative rotation of coordinate system around T-axis with angle inc: 81 ZRT -> LQT 82 83 :type z: :class:`~numpy.ndarray` 84 :param z: Data of the Z component of the seismogram. 85 :type n: :class:`~numpy.ndarray` 86 :param n: Data of the North component of the seismogram. 87 :type e: :class:`~numpy.ndarray` 88 :param e: Data of the East component of the seismogram. 
89 :type ba: float 90 :param ba: The back azimuth from station to source in degrees. 91 :type inc: float 92 :param inc: The inclination of the ray at the station in degrees. 93 :return: L-, Q- and T-component of seismogram. 94 """ 95 if len(z) != len(n) or len(z) != len(e): 96 raise TypeError("Z, North and East component have different length!?!") 97 if ba < 0 or ba > 360: 98 raise ValueError("Back Azimuth should be between 0 and 360 degrees!") 99 if inc < 0 or inc > 360: 100 raise ValueError("Inclination should be between 0 and 360 degrees!") 101 ba = radians(ba) 102 inc = radians(inc) 103 l = z * cos(inc) - n * sin(inc) * cos(ba) - e * sin(inc) * sin(ba) # NOQA 104 q = z * sin(inc) + n * cos(inc) * cos(ba) + e * cos(inc) * sin(ba) # NOQA 105 t = n * sin(ba) - e * cos(ba) # NOQA 106 return l, q, t 107 108 109 def rotate_lqt_zne(l, q, t, ba, inc): # NOQA 110 """ 111 Rotates all components of a seismogram. 112 113 The components will be rotated from LQT to ZNE. 114 This is the inverse transformation of the transformation described 115 in :func:`rotate_zne_lqt`. 116 """ 117 if len(l) != len(q) or len(l) != len(t): 118 raise TypeError("L, Q and T component have different length!?!") 119 if ba < 0 or ba > 360: 120 raise ValueError("Back Azimuth should be between 0 and 360 degrees!") 121 if inc < 0 or inc > 360: 122 raise ValueError("Inclination should be between 0 and 360 degrees!") 123 ba = radians(ba) 124 inc = radians(inc) 125 z = l * cos(inc) + q * sin(inc) 126 n = -l * sin(inc) * cos(ba) + q * cos(inc) * cos(ba) + t * sin(ba) 127 e = -l * sin(inc) * sin(ba) + q * cos(inc) * sin(ba) - t * cos(ba) 128 return z, n, e 129 130 131 def _dip_azimuth2zne_base_vector(dip, azimuth): 132 """ 133 Helper function converting a vector described with azimuth and dip of unit 134 length to a vector in the ZNE (Vertical, North, East) base. 135 136 The definition of azimuth and dip is according to the SEED reference 137 manual. 138 """ 139 dip = np.deg2rad(dip) 140 azimuth = np.deg2rad(azimuth) 141 142 return np.array([-np.sin(dip), 143 np.cos(azimuth) * np.cos(dip), 144 np.sin(azimuth) * np.cos(dip)]) 145 146 147 def rotate2zne(data_1, azimuth_1, dip_1, data_2, azimuth_2, dip_2, data_3, 148 azimuth_3, dip_3, inverse=False): 149 """ 150 Rotates an arbitrarily oriented three-component vector to ZNE. 151 152 Each components orientation is described with a azimuth and a dip. The 153 azimuth is defined as the degrees from North, clockwise and the dip is the 154 defined as the number of degrees, down from horizontal. Both definitions 155 are according to the SEED standard. 156 157 The three components need not be orthogonal to each other but the 158 components have to be linearly independent. The function performs a full 159 base change to orthogonal Vertical, North, and East orientations. 160 161 :param data_1: Data component 1. 162 :param azimuth_1: The azimuth of component 1. 163 :param dip_1: The dip of component 1. 164 :param data_2: Data component 2. 165 :param azimuth_2: The azimuth of component 2. 166 :param dip_2: The dip of component 2. 167 :param data_3: Data component 3. 168 :param azimuth_3: The azimuth of component 3. 169 :param dip_3: The dip of component 3. 170 :param inverse: If `True`, the data arrays will be converted from ZNE to 171 whatever coordinate system the azimuths and dips specify. In that 172 case data_1, data_2, data_3 have to be data arrays for Z, N, 173 and E and the dips and azimuths specify where to transform to. 
174 :type inverse: bool 175 176 :rtype: tuple(:class:`numpy.ndarray`, :class:`numpy.ndarray`, 177 :class:`numpy.ndarray`) 178 :returns: The three rotated components, oriented in Z, N, and E if 179 `inverse` is `False`. Otherwise they will be oriented as specified 180 by the dips and azimuths. 181 182 An input of ZNE yields an output of ZNE 183 184 >>> rotate2zne(np.arange(3), 0, -90, np.arange(3) * 2, 0, 0, \ 185 np.arange(3) * 3, 90, 0) # doctest: +NORMALIZE_WHITESPACE 186 (array([ 0., 1., 2.]), array([ 0., 2., 4.]), array([ 0., 3., 6.])) 187 188 An input of ZSE yields an output of ZNE 189 190 >>> rotate2zne(np.arange(3), 0, -90, np.arange(3) * 2, 180, 0, \ 191 np.arange(3) * 3, 90, 0) # doctest: +NORMALIZE_WHITESPACE 192 (array([ 0., 1., 2.]), array([ 0., -2., -4.]), array([ 0., 3., 6.])) 193 194 Mixed up components should get rotated to ZNE. 195 196 >>> rotate2zne(np.arange(3), 0, 0, np.arange(3) * 2, 90, 0, \ 197 np.arange(3) * 3, 0, -90) # doctest: +NORMALIZE_WHITESPACE 198 (array([ 0., 3., 6.]), array([ 0., 1., 2.]), array([ 0., 2., 4.])) 199 """ 200 if len(set(len(i_) for i_ in (data_1, data_2, data_3))) != 1: 201 msg = "All three data arrays must be of same length." 202 raise ValueError(msg) 203 204 # Define the base vectors of the old base in terms of the new base vectors. 205 base_vector_1 = _dip_azimuth2zne_base_vector(dip_1, azimuth_1) 206 base_vector_2 = _dip_azimuth2zne_base_vector(dip_2, azimuth_2) 207 base_vector_3 = _dip_azimuth2zne_base_vector(dip_3, azimuth_3) 208 209 # Base change matrix. 210 m = np.array([base_vector_1, 211 base_vector_2, 212 base_vector_3]) 213 214 # Determinant gives the volume change of a unit cube going from one 215 # basis to the next. It should neither be too small nor to large. These 216 # here are arbitrary limits. 217 with warnings.catch_warnings(): 218 warnings.filterwarnings('ignore', 219 '.*invalid value encountered in det.*') 220 det = np.linalg.det(m) 221 if not (1E-6 < abs(det) < 1E6): 222 raise ValueError("The given directions are not linearly independent, " 223 "at least within numerical precision. Determinant " 224 "of the base change matrix: %g" % det) 225 226 if not inverse: 227 m = np.linalg.inv(m) 228 229 z, n, e = np.dot(m, [data_1, data_2, data_3]) 230 231 # Replace all negative zeros. These might confuse some further 232 # processing programs. 233 z = np.array(z).ravel() 234 z[z == -0.0] = 0 235 n = np.array(n).ravel() 236 n[n == -0.0] = 0 237 e = np.array(e).ravel() 238 e[e == -0.0] = 0 239 240 return z, n, e 241 242 243 if __name__ == '__main__': 244 import doctest 245 doctest.testmod(exclude_empty=True) 246 [end of obspy/signal/rotate.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/obspy/signal/rotate.py b/obspy/signal/rotate.py --- a/obspy/signal/rotate.py +++ b/obspy/signal/rotate.py @@ -50,7 +50,7 @@ return r, t -def rotate_rt_ne(n, e, ba): +def rotate_rt_ne(r, t, ba): """ Rotates horizontal components of a seismogram. @@ -59,9 +59,15 @@ This is the inverse transformation of the transformation described in :func:`rotate_ne_rt`. + + :type r: :class:`~numpy.ndarray` + :param r: Data of the Radial component of the seismogram. + :type t: :class:`~numpy.ndarray` + :param t: Data of the Transverse component of the seismogram. + :returns: North and East component of seismogram. """ ba = 360.0 - ba - return rotate_ne_rt(n, e, ba) + return rotate_ne_rt(r, t, ba) def rotate_zne_lqt(z, n, e, ba, inc):
{"golden_diff": "diff --git a/obspy/signal/rotate.py b/obspy/signal/rotate.py\n--- a/obspy/signal/rotate.py\n+++ b/obspy/signal/rotate.py\n@@ -50,7 +50,7 @@\n return r, t\n \n \n-def rotate_rt_ne(n, e, ba):\n+def rotate_rt_ne(r, t, ba):\n \"\"\"\n Rotates horizontal components of a seismogram.\n \n@@ -59,9 +59,15 @@\n \n This is the inverse transformation of the transformation described\n in :func:`rotate_ne_rt`.\n+\n+ :type r: :class:`~numpy.ndarray`\n+ :param r: Data of the Radial component of the seismogram.\n+ :type t: :class:`~numpy.ndarray`\n+ :param t: Data of the Transverse component of the seismogram.\n+ :returns: North and East component of seismogram.\n \"\"\"\n ba = 360.0 - ba\n- return rotate_ne_rt(n, e, ba)\n+ return rotate_ne_rt(r, t, ba)\n \n \n def rotate_zne_lqt(z, n, e, ba, inc):\n", "issue": "Possible typo on the documentation page of `obspy.signal.rotate.rotate_ne_rt`\nOn its documentation page: https://docs.obspy.org/packages/autogen/obspy.signal.rotate.rotate_rt_ne.html\r\nthe inputs of the `obspy.signal.rotate_rt_ne()` are shown as the north and east components, which should be the radial and transverse components instead. \n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ------------------------------------------------------------------\n# Filename: rotate.py\n# Purpose: Various Seismogram Rotation Functions\n# Author: Tobias Megies, Tom Richter, Lion Krischer\n# Email: [email protected]\n#\n# Copyright (C) 2009-2013 Tobias Megies, Tom Richter, Lion Krischer\n# --------------------------------------------------------------------\n\"\"\"\nVarious Seismogram Rotation Functions\n\n:copyright:\n The ObsPy Development Team ([email protected])\n:license:\n GNU Lesser General Public License, Version 3\n (https://www.gnu.org/copyleft/lesser.html)\n\"\"\"\nimport warnings\nfrom math import cos, sin, radians\n\nimport numpy as np\n\n\ndef rotate_ne_rt(n, e, ba):\n \"\"\"\n Rotates horizontal components of a seismogram.\n\n The North- and East-Component of a seismogram will be rotated in Radial\n and Transversal Component. The angle is given as the back-azimuth, that is\n defined as the angle measured between the vector pointing from the station\n to the source and the vector pointing from the station to the North.\n\n :type n: :class:`~numpy.ndarray`\n :param n: Data of the North component of the seismogram.\n :type e: :class:`~numpy.ndarray`\n :param e: Data of the East component of the seismogram.\n :type ba: float\n :param ba: The back azimuth from station to source in degrees.\n :return: Radial and Transversal component of seismogram.\n \"\"\"\n if len(n) != len(e):\n raise TypeError(\"North and East component have different length.\")\n if ba < 0 or ba > 360:\n raise ValueError(\"Back Azimuth should be between 0 and 360 degrees.\")\n ba = radians(ba)\n r = - e * sin(ba) - n * cos(ba)\n t = - e * cos(ba) + n * sin(ba)\n return r, t\n\n\ndef rotate_rt_ne(n, e, ba):\n \"\"\"\n Rotates horizontal components of a seismogram.\n\n Rotates from radial and transversal components to North and East\n components.\n\n This is the inverse transformation of the transformation described\n in :func:`rotate_ne_rt`.\n \"\"\"\n ba = 360.0 - ba\n return rotate_ne_rt(n, e, ba)\n\n\ndef rotate_zne_lqt(z, n, e, ba, inc):\n \"\"\"\n Rotates all components of a seismogram.\n\n The components will be rotated from ZNE (Z, North, East, left-handed) to\n LQT (e.g. ray coordinate system, right-handed). 
The rotation angles are\n given as the back-azimuth and inclination.\n\n The transformation consists of 3 steps::\n\n 1. mirroring of E-component at ZN plain: ZNE -> ZNW\n 2. negative rotation of coordinate system around Z-axis with angle ba:\n ZNW -> ZRT\n 3. negative rotation of coordinate system around T-axis with angle inc:\n ZRT -> LQT\n\n :type z: :class:`~numpy.ndarray`\n :param z: Data of the Z component of the seismogram.\n :type n: :class:`~numpy.ndarray`\n :param n: Data of the North component of the seismogram.\n :type e: :class:`~numpy.ndarray`\n :param e: Data of the East component of the seismogram.\n :type ba: float\n :param ba: The back azimuth from station to source in degrees.\n :type inc: float\n :param inc: The inclination of the ray at the station in degrees.\n :return: L-, Q- and T-component of seismogram.\n \"\"\"\n if len(z) != len(n) or len(z) != len(e):\n raise TypeError(\"Z, North and East component have different length!?!\")\n if ba < 0 or ba > 360:\n raise ValueError(\"Back Azimuth should be between 0 and 360 degrees!\")\n if inc < 0 or inc > 360:\n raise ValueError(\"Inclination should be between 0 and 360 degrees!\")\n ba = radians(ba)\n inc = radians(inc)\n l = z * cos(inc) - n * sin(inc) * cos(ba) - e * sin(inc) * sin(ba) # NOQA\n q = z * sin(inc) + n * cos(inc) * cos(ba) + e * cos(inc) * sin(ba) # NOQA\n t = n * sin(ba) - e * cos(ba) # NOQA\n return l, q, t\n\n\ndef rotate_lqt_zne(l, q, t, ba, inc): # NOQA\n \"\"\"\n Rotates all components of a seismogram.\n\n The components will be rotated from LQT to ZNE.\n This is the inverse transformation of the transformation described\n in :func:`rotate_zne_lqt`.\n \"\"\"\n if len(l) != len(q) or len(l) != len(t):\n raise TypeError(\"L, Q and T component have different length!?!\")\n if ba < 0 or ba > 360:\n raise ValueError(\"Back Azimuth should be between 0 and 360 degrees!\")\n if inc < 0 or inc > 360:\n raise ValueError(\"Inclination should be between 0 and 360 degrees!\")\n ba = radians(ba)\n inc = radians(inc)\n z = l * cos(inc) + q * sin(inc)\n n = -l * sin(inc) * cos(ba) + q * cos(inc) * cos(ba) + t * sin(ba)\n e = -l * sin(inc) * sin(ba) + q * cos(inc) * sin(ba) - t * cos(ba)\n return z, n, e\n\n\ndef _dip_azimuth2zne_base_vector(dip, azimuth):\n \"\"\"\n Helper function converting a vector described with azimuth and dip of unit\n length to a vector in the ZNE (Vertical, North, East) base.\n\n The definition of azimuth and dip is according to the SEED reference\n manual.\n \"\"\"\n dip = np.deg2rad(dip)\n azimuth = np.deg2rad(azimuth)\n\n return np.array([-np.sin(dip),\n np.cos(azimuth) * np.cos(dip),\n np.sin(azimuth) * np.cos(dip)])\n\n\ndef rotate2zne(data_1, azimuth_1, dip_1, data_2, azimuth_2, dip_2, data_3,\n azimuth_3, dip_3, inverse=False):\n \"\"\"\n Rotates an arbitrarily oriented three-component vector to ZNE.\n\n Each components orientation is described with a azimuth and a dip. The\n azimuth is defined as the degrees from North, clockwise and the dip is the\n defined as the number of degrees, down from horizontal. Both definitions\n are according to the SEED standard.\n\n The three components need not be orthogonal to each other but the\n components have to be linearly independent. 
The function performs a full\n base change to orthogonal Vertical, North, and East orientations.\n\n :param data_1: Data component 1.\n :param azimuth_1: The azimuth of component 1.\n :param dip_1: The dip of component 1.\n :param data_2: Data component 2.\n :param azimuth_2: The azimuth of component 2.\n :param dip_2: The dip of component 2.\n :param data_3: Data component 3.\n :param azimuth_3: The azimuth of component 3.\n :param dip_3: The dip of component 3.\n :param inverse: If `True`, the data arrays will be converted from ZNE to\n whatever coordinate system the azimuths and dips specify. In that\n case data_1, data_2, data_3 have to be data arrays for Z, N,\n and E and the dips and azimuths specify where to transform to.\n :type inverse: bool\n\n :rtype: tuple(:class:`numpy.ndarray`, :class:`numpy.ndarray`,\n :class:`numpy.ndarray`)\n :returns: The three rotated components, oriented in Z, N, and E if\n `inverse` is `False`. Otherwise they will be oriented as specified\n by the dips and azimuths.\n\n An input of ZNE yields an output of ZNE\n\n >>> rotate2zne(np.arange(3), 0, -90, np.arange(3) * 2, 0, 0, \\\n np.arange(3) * 3, 90, 0) # doctest: +NORMALIZE_WHITESPACE\n (array([ 0., 1., 2.]), array([ 0., 2., 4.]), array([ 0., 3., 6.]))\n\n An input of ZSE yields an output of ZNE\n\n >>> rotate2zne(np.arange(3), 0, -90, np.arange(3) * 2, 180, 0, \\\n np.arange(3) * 3, 90, 0) # doctest: +NORMALIZE_WHITESPACE\n (array([ 0., 1., 2.]), array([ 0., -2., -4.]), array([ 0., 3., 6.]))\n\n Mixed up components should get rotated to ZNE.\n\n >>> rotate2zne(np.arange(3), 0, 0, np.arange(3) * 2, 90, 0, \\\n np.arange(3) * 3, 0, -90) # doctest: +NORMALIZE_WHITESPACE\n (array([ 0., 3., 6.]), array([ 0., 1., 2.]), array([ 0., 2., 4.]))\n \"\"\"\n if len(set(len(i_) for i_ in (data_1, data_2, data_3))) != 1:\n msg = \"All three data arrays must be of same length.\"\n raise ValueError(msg)\n\n # Define the base vectors of the old base in terms of the new base vectors.\n base_vector_1 = _dip_azimuth2zne_base_vector(dip_1, azimuth_1)\n base_vector_2 = _dip_azimuth2zne_base_vector(dip_2, azimuth_2)\n base_vector_3 = _dip_azimuth2zne_base_vector(dip_3, azimuth_3)\n\n # Base change matrix.\n m = np.array([base_vector_1,\n base_vector_2,\n base_vector_3])\n\n # Determinant gives the volume change of a unit cube going from one\n # basis to the next. It should neither be too small nor to large. These\n # here are arbitrary limits.\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore',\n '.*invalid value encountered in det.*')\n det = np.linalg.det(m)\n if not (1E-6 < abs(det) < 1E6):\n raise ValueError(\"The given directions are not linearly independent, \"\n \"at least within numerical precision. Determinant \"\n \"of the base change matrix: %g\" % det)\n\n if not inverse:\n m = np.linalg.inv(m)\n\n z, n, e = np.dot(m, [data_1, data_2, data_3])\n\n # Replace all negative zeros. These might confuse some further\n # processing programs.\n z = np.array(z).ravel()\n z[z == -0.0] = 0\n n = np.array(n).ravel()\n n[n == -0.0] = 0\n e = np.array(e).ravel()\n e[e == -0.0] = 0\n\n return z, n, e\n\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod(exclude_empty=True)\n", "path": "obspy/signal/rotate.py"}]}
num_tokens_prompt: 3,969
num_tokens_diff: 262
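
As an aside on the rotation pair this fix concerns: the sketch below is not taken from ObsPy; it re-implements the same back-azimuth trigonometry quoted in the docstrings above (the helper names `ne_to_rt` and `rt_to_ne` are invented for the demo) and checks that the R/T-to-N/E transform really inverts the forward one, which is why the only substantive change in the patch is the parameter naming and documentation.

```python
import numpy as np

def ne_to_rt(n, e, ba_deg):
    # Same formulas as the rotate_ne_rt source shown in the row above.
    ba = np.radians(ba_deg)
    r = -e * np.sin(ba) - n * np.cos(ba)
    t = -e * np.cos(ba) + n * np.sin(ba)
    return r, t

def rt_to_ne(r, t, ba_deg):
    # The inverse reuses the forward formula with angle 360 - ba, mirroring
    # rotate_rt_ne; its first two arguments are R and T, which is exactly
    # what the patch renames them to.
    return ne_to_rt(r, t, 360.0 - ba_deg)

n = np.array([0.0, 1.0, 2.0])
e = np.array([0.0, 2.0, 4.0])
r, t = ne_to_rt(n, e, 30.0)
n_back, e_back = rt_to_ne(r, t, 30.0)
assert np.allclose(n, n_back) and np.allclose(e, e_back)  # round trip recovers N/E
```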
gh_patches_debug_60939
rasdani/github-patches
git_diff
Netflix__lemur-796
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Adding domain fails on unselectable "sensitive" Client side ![screenshot_2017-05-12_11-06-54](https://cloud.githubusercontent.com/assets/445200/25991405/528d417a-3703-11e7-9e6c-d70beb6d38e2.png) Server side ``` May 12 09:05:48 lemur supervisord: lemur-web [2017-05-12 09:05:48,892] ERROR in schema: 'sensitive' May 12 09:05:48 lemur supervisord: lemur-web Traceback (most recent call last): May 12 09:05:48 lemur supervisord: lemur-web File "/var/www/lemur/lemur/common/schema.py", line 158, in decorated_function May 12 09:05:48 lemur supervisord: lemur-web resp = f(*args, **kwargs) May 12 09:05:48 lemur supervisord: lemur-web File "/var/www/lemur/lemur/domains/views.py", line 126, in post May 12 09:05:48 lemur supervisord: lemur-web return service.create(data['name'], data['sensitive']) May 12 09:05:48 lemur supervisord: lemur-web KeyError: 'sensitive' May 12 09:05:48 lemur supervisord: lemur-web May 12 09:05:48 lemur supervisord: lemur-web 'sensitive' May 12 09:05:48 lemur supervisord: lemur-web Traceback (most recent call last): May 12 09:05:48 lemur supervisord: lemur-web File "/var/www/lemur/lemur/common/schema.py", line 158, in decorated_function May 12 09:05:48 lemur supervisord: lemur-web resp = f(*args, **kwargs) May 12 09:05:48 lemur supervisord: lemur-web File "/var/www/lemur/lemur/domains/views.py", line 126, in post May 12 09:05:48 lemur supervisord: lemur-web return service.create(data['name'], data['sensitive']) May 12 09:05:48 lemur supervisord: lemur-web KeyError: 'sensitive' ``` </issue> <code> [start of lemur/domains/schemas.py] 1 """ 2 .. module: lemur.domains.schemas 3 :platform: unix 4 :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more 5 :license: Apache, see LICENSE for more details. 6 .. moduleauthor:: Kevin Glisson <[email protected]> 7 """ 8 from marshmallow import fields 9 from lemur.common.schema import LemurInputSchema, LemurOutputSchema 10 from lemur.schemas import AssociatedCertificateSchema 11 12 # from lemur.certificates.schemas import CertificateNestedOutputSchema 13 14 15 class DomainInputSchema(LemurInputSchema): 16 id = fields.Integer() 17 name = fields.String(required=True) 18 sensitive = fields.Boolean() 19 certificates = fields.Nested(AssociatedCertificateSchema, many=True, missing=[]) 20 21 22 class DomainOutputSchema(LemurOutputSchema): 23 id = fields.Integer() 24 name = fields.String() 25 sensitive = fields.Boolean() 26 # certificates = fields.Nested(CertificateNestedOutputSchema, many=True, missing=[]) 27 28 29 class DomainNestedOutputSchema(DomainOutputSchema): 30 __envelope__ = False 31 32 33 domain_input_schema = DomainInputSchema() 34 domain_output_schema = DomainOutputSchema() 35 domains_output_schema = DomainOutputSchema(many=True) 36 [end of lemur/domains/schemas.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lemur/domains/schemas.py b/lemur/domains/schemas.py --- a/lemur/domains/schemas.py +++ b/lemur/domains/schemas.py @@ -15,7 +15,7 @@ class DomainInputSchema(LemurInputSchema): id = fields.Integer() name = fields.String(required=True) - sensitive = fields.Boolean() + sensitive = fields.Boolean(missing=False) certificates = fields.Nested(AssociatedCertificateSchema, many=True, missing=[])
{"golden_diff": "diff --git a/lemur/domains/schemas.py b/lemur/domains/schemas.py\n--- a/lemur/domains/schemas.py\n+++ b/lemur/domains/schemas.py\n@@ -15,7 +15,7 @@\n class DomainInputSchema(LemurInputSchema):\n id = fields.Integer()\n name = fields.String(required=True)\n- sensitive = fields.Boolean()\n+ sensitive = fields.Boolean(missing=False)\n certificates = fields.Nested(AssociatedCertificateSchema, many=True, missing=[])\n", "issue": "Adding domain fails on unselectable \"sensitive\"\nClient side\r\n\r\n![screenshot_2017-05-12_11-06-54](https://cloud.githubusercontent.com/assets/445200/25991405/528d417a-3703-11e7-9e6c-d70beb6d38e2.png)\r\n\r\n\r\nServer side\r\n\r\n```\r\nMay 12 09:05:48 lemur supervisord: lemur-web [2017-05-12 09:05:48,892] ERROR in schema: 'sensitive'\r\nMay 12 09:05:48 lemur supervisord: lemur-web Traceback (most recent call last):\r\nMay 12 09:05:48 lemur supervisord: lemur-web File \"/var/www/lemur/lemur/common/schema.py\", line 158, in decorated_function\r\nMay 12 09:05:48 lemur supervisord: lemur-web resp = f(*args, **kwargs)\r\nMay 12 09:05:48 lemur supervisord: lemur-web File \"/var/www/lemur/lemur/domains/views.py\", line 126, in post\r\nMay 12 09:05:48 lemur supervisord: lemur-web return service.create(data['name'], data['sensitive'])\r\nMay 12 09:05:48 lemur supervisord: lemur-web KeyError: 'sensitive'\r\nMay 12 09:05:48 lemur supervisord: lemur-web\r\nMay 12 09:05:48 lemur supervisord: lemur-web 'sensitive'\r\nMay 12 09:05:48 lemur supervisord: lemur-web Traceback (most recent call last):\r\nMay 12 09:05:48 lemur supervisord: lemur-web File \"/var/www/lemur/lemur/common/schema.py\", line 158, in decorated_function\r\nMay 12 09:05:48 lemur supervisord: lemur-web resp = f(*args, **kwargs)\r\nMay 12 09:05:48 lemur supervisord: lemur-web File \"/var/www/lemur/lemur/domains/views.py\", line 126, in post\r\nMay 12 09:05:48 lemur supervisord: lemur-web return service.create(data['name'], data['sensitive'])\r\nMay 12 09:05:48 lemur supervisord: lemur-web KeyError: 'sensitive'\r\n```\n", "before_files": [{"content": "\"\"\"\n.. module: lemur.domains.schemas\n :platform: unix\n :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more\n :license: Apache, see LICENSE for more details.\n.. moduleauthor:: Kevin Glisson <[email protected]>\n\"\"\"\nfrom marshmallow import fields\nfrom lemur.common.schema import LemurInputSchema, LemurOutputSchema\nfrom lemur.schemas import AssociatedCertificateSchema\n\n# from lemur.certificates.schemas import CertificateNestedOutputSchema\n\n\nclass DomainInputSchema(LemurInputSchema):\n id = fields.Integer()\n name = fields.String(required=True)\n sensitive = fields.Boolean()\n certificates = fields.Nested(AssociatedCertificateSchema, many=True, missing=[])\n\n\nclass DomainOutputSchema(LemurOutputSchema):\n id = fields.Integer()\n name = fields.String()\n sensitive = fields.Boolean()\n # certificates = fields.Nested(CertificateNestedOutputSchema, many=True, missing=[])\n\n\nclass DomainNestedOutputSchema(DomainOutputSchema):\n __envelope__ = False\n\n\ndomain_input_schema = DomainInputSchema()\ndomain_output_schema = DomainOutputSchema()\ndomains_output_schema = DomainOutputSchema(many=True)\n", "path": "lemur/domains/schemas.py"}]}
num_tokens_prompt: 1,510
num_tokens_diff: 116
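
The `missing=False` change above relies on marshmallow filling in a deserialization default when the client omits a key. A minimal sketch of that behavior, assuming marshmallow 3.x (where `Schema.load` returns a dict; later releases rename the parameter to `load_default`), with a schema name made up for the demo:

```python
from marshmallow import Schema, fields

class DomainExampleSchema(Schema):
    name = fields.String(required=True)
    sensitive = fields.Boolean(missing=False)  # filled in when the client omits it

data = DomainExampleSchema().load({"name": "example.com"})
print(data["sensitive"])  # False -- a plain fields.Boolean() would leave the key out,
                          # reproducing the KeyError from the traceback above
```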
gh_patches_debug_10372
rasdani/github-patches
git_diff
scrapy__scrapy-4170
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Handle it gracefully when start_url is used instead of start_urls Over the last year I’ve seen a few cases ([recent example](https://stackoverflow.com/q/58664004/939364)) of this, people missing the `s` at the end of the `start_urls`. It may be nice to find a way to gracefully let the developer know where the issue is, why there is no crawling happening. </issue> <code> [start of scrapy/spiders/__init__.py] 1 """ 2 Base class for Scrapy spiders 3 4 See documentation in docs/topics/spiders.rst 5 """ 6 import logging 7 import warnings 8 9 from scrapy import signals 10 from scrapy.http import Request 11 from scrapy.utils.trackref import object_ref 12 from scrapy.utils.url import url_is_from_spider 13 from scrapy.exceptions import ScrapyDeprecationWarning 14 from scrapy.utils.deprecate import method_is_overridden 15 16 17 class Spider(object_ref): 18 """Base class for scrapy spiders. All spiders must inherit from this 19 class. 20 """ 21 22 name = None 23 custom_settings = None 24 25 def __init__(self, name=None, **kwargs): 26 if name is not None: 27 self.name = name 28 elif not getattr(self, 'name', None): 29 raise ValueError("%s must have a name" % type(self).__name__) 30 self.__dict__.update(kwargs) 31 if not hasattr(self, 'start_urls'): 32 self.start_urls = [] 33 34 @property 35 def logger(self): 36 logger = logging.getLogger(self.name) 37 return logging.LoggerAdapter(logger, {'spider': self}) 38 39 def log(self, message, level=logging.DEBUG, **kw): 40 """Log the given message at the given log level 41 42 This helper wraps a log call to the logger within the spider, but you 43 can use it directly (e.g. Spider.logger.info('msg')) or use any other 44 Python logger too. 45 """ 46 self.logger.log(level, message, **kw) 47 48 @classmethod 49 def from_crawler(cls, crawler, *args, **kwargs): 50 spider = cls(*args, **kwargs) 51 spider._set_crawler(crawler) 52 return spider 53 54 def _set_crawler(self, crawler): 55 self.crawler = crawler 56 self.settings = crawler.settings 57 crawler.signals.connect(self.close, signals.spider_closed) 58 59 def start_requests(self): 60 cls = self.__class__ 61 if method_is_overridden(cls, Spider, 'make_requests_from_url'): 62 warnings.warn( 63 "Spider.make_requests_from_url method is deprecated; it " 64 "won't be called in future Scrapy releases. Please " 65 "override Spider.start_requests method instead (see %s.%s)." % ( 66 cls.__module__, cls.__name__ 67 ), 68 ) 69 for url in self.start_urls: 70 yield self.make_requests_from_url(url) 71 else: 72 for url in self.start_urls: 73 yield Request(url, dont_filter=True) 74 75 def make_requests_from_url(self, url): 76 """ This method is deprecated. 
""" 77 return Request(url, dont_filter=True) 78 79 def parse(self, response): 80 raise NotImplementedError('{}.parse callback is not defined'.format(self.__class__.__name__)) 81 82 @classmethod 83 def update_settings(cls, settings): 84 settings.setdict(cls.custom_settings or {}, priority='spider') 85 86 @classmethod 87 def handles_request(cls, request): 88 return url_is_from_spider(request.url, cls) 89 90 @staticmethod 91 def close(spider, reason): 92 closed = getattr(spider, 'closed', None) 93 if callable(closed): 94 return closed(reason) 95 96 def __str__(self): 97 return "<%s %r at 0x%0x>" % (type(self).__name__, self.name, id(self)) 98 99 __repr__ = __str__ 100 101 102 # Top-level imports 103 from scrapy.spiders.crawl import CrawlSpider, Rule 104 from scrapy.spiders.feed import XMLFeedSpider, CSVFeedSpider 105 from scrapy.spiders.sitemap import SitemapSpider 106 [end of scrapy/spiders/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scrapy/spiders/__init__.py b/scrapy/spiders/__init__.py --- a/scrapy/spiders/__init__.py +++ b/scrapy/spiders/__init__.py @@ -58,6 +58,11 @@ def start_requests(self): cls = self.__class__ + if not self.start_urls and hasattr(self, 'start_url'): + raise AttributeError( + "Crawling could not start: 'start_urls' not found " + "or empty (but found 'start_url' attribute instead, " + "did you miss an 's'?)") if method_is_overridden(cls, Spider, 'make_requests_from_url'): warnings.warn( "Spider.make_requests_from_url method is deprecated; it "
{"golden_diff": "diff --git a/scrapy/spiders/__init__.py b/scrapy/spiders/__init__.py\n--- a/scrapy/spiders/__init__.py\n+++ b/scrapy/spiders/__init__.py\n@@ -58,6 +58,11 @@\n \n def start_requests(self):\n cls = self.__class__\n+ if not self.start_urls and hasattr(self, 'start_url'):\n+ raise AttributeError(\n+ \"Crawling could not start: 'start_urls' not found \"\n+ \"or empty (but found 'start_url' attribute instead, \"\n+ \"did you miss an 's'?)\")\n if method_is_overridden(cls, Spider, 'make_requests_from_url'):\n warnings.warn(\n \"Spider.make_requests_from_url method is deprecated; it \"\n", "issue": "Handle it gracefully when start_url is used instead of start_urls\nOver the last year I\u2019ve seen a few cases ([recent example](https://stackoverflow.com/q/58664004/939364)) of this, people missing the `s` at the end of the `start_urls`.\r\n\r\nIt may be nice to find a way to gracefully let the developer know where the issue is, why there is no crawling happening.\n", "before_files": [{"content": "\"\"\"\nBase class for Scrapy spiders\n\nSee documentation in docs/topics/spiders.rst\n\"\"\"\nimport logging\nimport warnings\n\nfrom scrapy import signals\nfrom scrapy.http import Request\nfrom scrapy.utils.trackref import object_ref\nfrom scrapy.utils.url import url_is_from_spider\nfrom scrapy.exceptions import ScrapyDeprecationWarning\nfrom scrapy.utils.deprecate import method_is_overridden\n\n\nclass Spider(object_ref):\n \"\"\"Base class for scrapy spiders. All spiders must inherit from this\n class.\n \"\"\"\n\n name = None\n custom_settings = None\n\n def __init__(self, name=None, **kwargs):\n if name is not None:\n self.name = name\n elif not getattr(self, 'name', None):\n raise ValueError(\"%s must have a name\" % type(self).__name__)\n self.__dict__.update(kwargs)\n if not hasattr(self, 'start_urls'):\n self.start_urls = []\n\n @property\n def logger(self):\n logger = logging.getLogger(self.name)\n return logging.LoggerAdapter(logger, {'spider': self})\n\n def log(self, message, level=logging.DEBUG, **kw):\n \"\"\"Log the given message at the given log level\n\n This helper wraps a log call to the logger within the spider, but you\n can use it directly (e.g. Spider.logger.info('msg')) or use any other\n Python logger too.\n \"\"\"\n self.logger.log(level, message, **kw)\n\n @classmethod\n def from_crawler(cls, crawler, *args, **kwargs):\n spider = cls(*args, **kwargs)\n spider._set_crawler(crawler)\n return spider\n\n def _set_crawler(self, crawler):\n self.crawler = crawler\n self.settings = crawler.settings\n crawler.signals.connect(self.close, signals.spider_closed)\n\n def start_requests(self):\n cls = self.__class__\n if method_is_overridden(cls, Spider, 'make_requests_from_url'):\n warnings.warn(\n \"Spider.make_requests_from_url method is deprecated; it \"\n \"won't be called in future Scrapy releases. Please \"\n \"override Spider.start_requests method instead (see %s.%s).\" % (\n cls.__module__, cls.__name__\n ),\n )\n for url in self.start_urls:\n yield self.make_requests_from_url(url)\n else:\n for url in self.start_urls:\n yield Request(url, dont_filter=True)\n\n def make_requests_from_url(self, url):\n \"\"\" This method is deprecated. 
\"\"\"\n return Request(url, dont_filter=True)\n\n def parse(self, response):\n raise NotImplementedError('{}.parse callback is not defined'.format(self.__class__.__name__))\n\n @classmethod\n def update_settings(cls, settings):\n settings.setdict(cls.custom_settings or {}, priority='spider')\n\n @classmethod\n def handles_request(cls, request):\n return url_is_from_spider(request.url, cls)\n\n @staticmethod\n def close(spider, reason):\n closed = getattr(spider, 'closed', None)\n if callable(closed):\n return closed(reason)\n\n def __str__(self):\n return \"<%s %r at 0x%0x>\" % (type(self).__name__, self.name, id(self))\n\n __repr__ = __str__\n\n\n# Top-level imports\nfrom scrapy.spiders.crawl import CrawlSpider, Rule\nfrom scrapy.spiders.feed import XMLFeedSpider, CSVFeedSpider\nfrom scrapy.spiders.sitemap import SitemapSpider\n", "path": "scrapy/spiders/__init__.py"}]}
num_tokens_prompt: 1,595
num_tokens_diff: 169
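
The guard added in this patch is a general pattern: detect the likely misspelling and fail loudly instead of silently crawling nothing. A self-contained toy version of the same idea (no Scrapy required; the class names here are invented for the demo):

```python
class SpiderLike:
    start_urls = []

    def start_requests(self):
        # Same check the patch adds: empty start_urls plus a stray start_url
        # attribute almost certainly means a missing 's'.
        if not self.start_urls and hasattr(self, "start_url"):
            raise AttributeError(
                "Crawling could not start: 'start_urls' not found or empty "
                "(but found 'start_url' attribute instead, did you miss an 's'?)")
        for url in self.start_urls:
            yield url

class TypoSpider(SpiderLike):
    start_url = ["https://example.com"]  # the typo the issue describes

try:
    list(TypoSpider().start_requests())
except AttributeError as err:
    print(err)  # points the developer straight at the missing 's'
```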
gh_patches_debug_866
rasdani/github-patches
git_diff
streamlit__streamlit-5184
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> It should be : https://github.com/streamlit/streamlit/blob/535f11765817657892506d6904bbbe04908dbdf3/lib/streamlit/elements/alert.py#L145 </issue> <code> [start of lib/streamlit/elements/alert.py] 1 # Copyright 2018-2022 Streamlit Inc. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from typing import cast, Optional, TYPE_CHECKING 16 17 from streamlit.errors import StreamlitAPIException 18 from streamlit.proto.Alert_pb2 import Alert as AlertProto 19 from streamlit.string_util import clean_text, is_emoji 20 21 if TYPE_CHECKING: 22 from streamlit.delta_generator import DeltaGenerator 23 from streamlit.type_util import SupportsStr 24 25 26 def validate_emoji(maybe_emoji: Optional[str]) -> str: 27 if maybe_emoji is None: 28 return "" 29 elif is_emoji(maybe_emoji): 30 return maybe_emoji 31 else: 32 raise StreamlitAPIException( 33 f'The value "{maybe_emoji}" is not a valid emoji. Shortcodes are not allowed, please use a single character instead.' 34 ) 35 36 37 class AlertMixin: 38 def error( 39 self, 40 body: "SupportsStr", 41 *, # keyword-only args: 42 icon: Optional[str] = None, 43 ) -> "DeltaGenerator": 44 """Display error message. 45 46 Parameters 47 ---------- 48 icon : None 49 An optional parameter, that adds an emoji to the alert. 50 The default is None. 51 This argument can only be supplied by keyword. 52 body : str 53 The error text to display. 54 55 Example 56 ------- 57 >>> st.error('This is an error', icon="🚨") 58 59 """ 60 alert_proto = AlertProto() 61 alert_proto.icon = validate_emoji(icon) 62 alert_proto.body = clean_text(body) 63 alert_proto.format = AlertProto.ERROR 64 return self.dg._enqueue("alert", alert_proto) 65 66 def warning( 67 self, 68 body: "SupportsStr", 69 *, # keyword-only args: 70 icon: Optional[str] = None, 71 ) -> "DeltaGenerator": 72 """Display warning message. 73 74 Parameters 75 ---------- 76 icon : None 77 An optional parameter, that adds an emoji to the alert. 78 The default is None. 79 This argument can only be supplied by keyword. 80 81 body : str 82 The warning text to display. 83 84 Example 85 ------- 86 >>> st.warning('This is a warning', icon="⚠️") 87 88 """ 89 alert_proto = AlertProto() 90 alert_proto.body = clean_text(body) 91 alert_proto.icon = validate_emoji(icon) 92 alert_proto.format = AlertProto.WARNING 93 return self.dg._enqueue("alert", alert_proto) 94 95 def info( 96 self, 97 body: "SupportsStr", 98 *, # keyword-only args: 99 icon: Optional[str] = None, 100 ) -> "DeltaGenerator": 101 """Display an informational message. 102 103 Parameters 104 ---------- 105 icon : None 106 An optional parameter, that adds an emoji to the alert. 107 The default is None. 108 This argument can only be supplied by keyword. 109 110 body : str 111 The info text to display. 
112 113 Example 114 ------- 115 >>> st.info('This is a purely informational message', icon="ℹ️") 116 117 """ 118 119 alert_proto = AlertProto() 120 alert_proto.body = clean_text(body) 121 alert_proto.icon = validate_emoji(icon) 122 alert_proto.format = AlertProto.INFO 123 return self.dg._enqueue("alert", alert_proto) 124 125 def success( 126 self, 127 body: "SupportsStr", 128 *, # keyword-only args: 129 icon: Optional[str] = None, 130 ) -> "DeltaGenerator": 131 """Display a success message. 132 133 Parameters 134 ---------- 135 icon : None 136 An optional parameter, that adds an emoji to the alert. 137 The default is None. 138 This argument can only be supplied by keyword. 139 140 body : str 141 The success text to display. 142 143 Example 144 ------- 145 >>> st.success('This is a success message!', icon:"✅") 146 147 """ 148 alert_proto = AlertProto() 149 alert_proto.body = clean_text(body) 150 alert_proto.icon = validate_emoji(icon) 151 alert_proto.format = AlertProto.SUCCESS 152 return self.dg._enqueue("alert", alert_proto) 153 154 @property 155 def dg(self) -> "DeltaGenerator": 156 """Get our DeltaGenerator.""" 157 return cast("DeltaGenerator", self) 158 [end of lib/streamlit/elements/alert.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lib/streamlit/elements/alert.py b/lib/streamlit/elements/alert.py --- a/lib/streamlit/elements/alert.py +++ b/lib/streamlit/elements/alert.py @@ -142,7 +142,7 @@ Example ------- - >>> st.success('This is a success message!', icon:"✅") + >>> st.success('This is a success message!', icon="✅") """ alert_proto = AlertProto()
{"golden_diff": "diff --git a/lib/streamlit/elements/alert.py b/lib/streamlit/elements/alert.py\n--- a/lib/streamlit/elements/alert.py\n+++ b/lib/streamlit/elements/alert.py\n@@ -142,7 +142,7 @@\n \n Example\n -------\n- >>> st.success('This is a success message!', icon:\"\u2705\")\n+ >>> st.success('This is a success message!', icon=\"\u2705\")\n \n \"\"\"\n alert_proto = AlertProto()\n", "issue": "It should be :\nhttps://github.com/streamlit/streamlit/blob/535f11765817657892506d6904bbbe04908dbdf3/lib/streamlit/elements/alert.py#L145\r\n\n", "before_files": [{"content": "# Copyright 2018-2022 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import cast, Optional, TYPE_CHECKING\n\nfrom streamlit.errors import StreamlitAPIException\nfrom streamlit.proto.Alert_pb2 import Alert as AlertProto\nfrom streamlit.string_util import clean_text, is_emoji\n\nif TYPE_CHECKING:\n from streamlit.delta_generator import DeltaGenerator\n from streamlit.type_util import SupportsStr\n\n\ndef validate_emoji(maybe_emoji: Optional[str]) -> str:\n if maybe_emoji is None:\n return \"\"\n elif is_emoji(maybe_emoji):\n return maybe_emoji\n else:\n raise StreamlitAPIException(\n f'The value \"{maybe_emoji}\" is not a valid emoji. Shortcodes are not allowed, please use a single character instead.'\n )\n\n\nclass AlertMixin:\n def error(\n self,\n body: \"SupportsStr\",\n *, # keyword-only args:\n icon: Optional[str] = None,\n ) -> \"DeltaGenerator\":\n \"\"\"Display error message.\n\n Parameters\n ----------\n icon : None\n An optional parameter, that adds an emoji to the alert.\n The default is None.\n This argument can only be supplied by keyword.\n body : str\n The error text to display.\n\n Example\n -------\n >>> st.error('This is an error', icon=\"\ud83d\udea8\")\n\n \"\"\"\n alert_proto = AlertProto()\n alert_proto.icon = validate_emoji(icon)\n alert_proto.body = clean_text(body)\n alert_proto.format = AlertProto.ERROR\n return self.dg._enqueue(\"alert\", alert_proto)\n\n def warning(\n self,\n body: \"SupportsStr\",\n *, # keyword-only args:\n icon: Optional[str] = None,\n ) -> \"DeltaGenerator\":\n \"\"\"Display warning message.\n\n Parameters\n ----------\n icon : None\n An optional parameter, that adds an emoji to the alert.\n The default is None.\n This argument can only be supplied by keyword.\n\n body : str\n The warning text to display.\n\n Example\n -------\n >>> st.warning('This is a warning', icon=\"\u26a0\ufe0f\")\n\n \"\"\"\n alert_proto = AlertProto()\n alert_proto.body = clean_text(body)\n alert_proto.icon = validate_emoji(icon)\n alert_proto.format = AlertProto.WARNING\n return self.dg._enqueue(\"alert\", alert_proto)\n\n def info(\n self,\n body: \"SupportsStr\",\n *, # keyword-only args:\n icon: Optional[str] = None,\n ) -> \"DeltaGenerator\":\n \"\"\"Display an informational message.\n\n Parameters\n ----------\n icon : None\n An optional parameter, that adds an emoji to the alert.\n The default is None.\n This argument can only be supplied by keyword.\n\n body : 
str\n The info text to display.\n\n Example\n -------\n >>> st.info('This is a purely informational message', icon=\"\u2139\ufe0f\")\n\n \"\"\"\n\n alert_proto = AlertProto()\n alert_proto.body = clean_text(body)\n alert_proto.icon = validate_emoji(icon)\n alert_proto.format = AlertProto.INFO\n return self.dg._enqueue(\"alert\", alert_proto)\n\n def success(\n self,\n body: \"SupportsStr\",\n *, # keyword-only args:\n icon: Optional[str] = None,\n ) -> \"DeltaGenerator\":\n \"\"\"Display a success message.\n\n Parameters\n ----------\n icon : None\n An optional parameter, that adds an emoji to the alert.\n The default is None.\n This argument can only be supplied by keyword.\n\n body : str\n The success text to display.\n\n Example\n -------\n >>> st.success('This is a success message!', icon:\"\u2705\")\n\n \"\"\"\n alert_proto = AlertProto()\n alert_proto.body = clean_text(body)\n alert_proto.icon = validate_emoji(icon)\n alert_proto.format = AlertProto.SUCCESS\n return self.dg._enqueue(\"alert\", alert_proto)\n\n @property\n def dg(self) -> \"DeltaGenerator\":\n \"\"\"Get our DeltaGenerator.\"\"\"\n return cast(\"DeltaGenerator\", self)\n", "path": "lib/streamlit/elements/alert.py"}]}
num_tokens_prompt: 1,988
num_tokens_diff: 102
gh_patches_debug_8773
rasdani/github-patches
git_diff
google__fuzzbench-72
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Eclipser maxfilelen value I experienced the same problem that you had in choosing maxfilelen to evaluate Eclipser some time ago. I found that they used 1048576 in their experiments (https://github.com/SoftSec-KAIST/Eclipser-Artifact/blob/master/docker-scripts/experiment-scripts/package-exp/run_eclipser.sh#L25), so maybe you want to use this value to produce consistent results with the paper. </issue> <code> [start of fuzzers/eclipser/fuzzer.py] 1 # Copyright 2020 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 """Integration code for Eclipser fuzzer.""" 15 16 import os 17 import subprocess 18 import time 19 from multiprocessing import Process 20 21 from fuzzers import utils 22 23 24 def build(): 25 """Build fuzzer.""" 26 # QEMU does not work with sanitizers, so skip -fsanitize=. See 27 # https://github.com/SoftSec-KAIST/Eclipser/issues/5 28 utils.set_no_sanitizer_compilation_flags() 29 cflags = [ 30 '-O2', 31 '-fno-omit-frame-pointer', 32 ] 33 utils.append_flags('CFLAGS', cflags) 34 utils.append_flags('CXXFLAGS', cflags) 35 36 os.environ['CC'] = 'clang' 37 os.environ['CXX'] = 'clang++' 38 os.environ['FUZZER_LIB'] = '/libStandaloneFuzzTarget.a' 39 40 utils.build_benchmark() 41 42 43 def fuzz(input_corpus, output_corpus, target_binary): 44 """Run fuzzer.""" 45 # Create an encoded temp corpus directory. 46 encoded_temp_corpus = os.path.join(os.path.dirname(input_corpus), 47 'temp-corpus') 48 if not os.path.exists(encoded_temp_corpus): 49 os.mkdir(encoded_temp_corpus) 50 51 print('[run_fuzzer] Running target with Eclipser') 52 command = [ 53 'dotnet', 54 '/Eclipser/build/Eclipser.dll', 55 'fuzz', 56 '-p', 57 target_binary, 58 '-t', 59 '1048576', # FIXME: Find the max value allowed here. 60 '-o', 61 encoded_temp_corpus, 62 '--src', 63 'file', 64 '--initarg', 65 'foo', # Specifies how command line argument is passed, just a file. 66 '-f', 67 'foo', 68 '--maxfilelen', 69 str(10 * 1024 * 1024), # Increase since default is too low (8 bytes). 70 ] 71 if os.listdir(input_corpus): # Important, otherwise Eclipser crashes. 72 command += ['-i', input_corpus] 73 subprocess.Popen(command) 74 75 process = Process(target=copy_corpus_directory, 76 args=( 77 encoded_temp_corpus, 78 output_corpus, 79 )) 80 process.start() 81 82 83 def copy_corpus_directory(encoded_temp_corpus, output_corpus): 84 """Copies corpus periodically from encoded corpus directory into output 85 directory.""" 86 while True: 87 # Wait for initial fuzzer initialization, and after every copy. 88 time.sleep(120) 89 90 subprocess.call([ 91 'dotnet', 92 '/Eclipser/build/Eclipser.dll', 93 'decode', 94 '-i', 95 os.path.join(encoded_temp_corpus, 'testcase'), 96 '-o', 97 output_corpus, 98 ]) 99 [end of fuzzers/eclipser/fuzzer.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/fuzzers/eclipser/fuzzer.py b/fuzzers/eclipser/fuzzer.py --- a/fuzzers/eclipser/fuzzer.py +++ b/fuzzers/eclipser/fuzzer.py @@ -66,7 +66,9 @@ '-f', 'foo', '--maxfilelen', - str(10 * 1024 * 1024), # Increase since default is too low (8 bytes). + # Default is too low (8 bytes), match experiment config at: + # https://github.com/SoftSec-KAIST/Eclipser-Artifact/blob/6aadf02eeadb0416bd4c5edeafc8627bc24ebc82/docker-scripts/experiment-scripts/package-exp/run_eclipser.sh#L25 + '1048576', ] if os.listdir(input_corpus): # Important, otherwise Eclipser crashes. command += ['-i', input_corpus]
{"golden_diff": "diff --git a/fuzzers/eclipser/fuzzer.py b/fuzzers/eclipser/fuzzer.py\n--- a/fuzzers/eclipser/fuzzer.py\n+++ b/fuzzers/eclipser/fuzzer.py\n@@ -66,7 +66,9 @@\n '-f',\n 'foo',\n '--maxfilelen',\n- str(10 * 1024 * 1024), # Increase since default is too low (8 bytes).\n+ # Default is too low (8 bytes), match experiment config at:\n+ # https://github.com/SoftSec-KAIST/Eclipser-Artifact/blob/6aadf02eeadb0416bd4c5edeafc8627bc24ebc82/docker-scripts/experiment-scripts/package-exp/run_eclipser.sh#L25\n+ '1048576',\n ]\n if os.listdir(input_corpus): # Important, otherwise Eclipser crashes.\n command += ['-i', input_corpus]\n", "issue": "Eclipser maxfilelen value\nI experienced the same problem that you had in choosing maxfilelen to evaluate Eclipser some time ago.\r\nI found that they used 1048576 in their experiments (https://github.com/SoftSec-KAIST/Eclipser-Artifact/blob/master/docker-scripts/experiment-scripts/package-exp/run_eclipser.sh#L25), so maybe you want to use this value to produce consistent results with the paper.\n", "before_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Integration code for Eclipser fuzzer.\"\"\"\n\nimport os\nimport subprocess\nimport time\nfrom multiprocessing import Process\n\nfrom fuzzers import utils\n\n\ndef build():\n \"\"\"Build fuzzer.\"\"\"\n # QEMU does not work with sanitizers, so skip -fsanitize=. 
See\n # https://github.com/SoftSec-KAIST/Eclipser/issues/5\n utils.set_no_sanitizer_compilation_flags()\n cflags = [\n '-O2',\n '-fno-omit-frame-pointer',\n ]\n utils.append_flags('CFLAGS', cflags)\n utils.append_flags('CXXFLAGS', cflags)\n\n os.environ['CC'] = 'clang'\n os.environ['CXX'] = 'clang++'\n os.environ['FUZZER_LIB'] = '/libStandaloneFuzzTarget.a'\n\n utils.build_benchmark()\n\n\ndef fuzz(input_corpus, output_corpus, target_binary):\n \"\"\"Run fuzzer.\"\"\"\n # Create an encoded temp corpus directory.\n encoded_temp_corpus = os.path.join(os.path.dirname(input_corpus),\n 'temp-corpus')\n if not os.path.exists(encoded_temp_corpus):\n os.mkdir(encoded_temp_corpus)\n\n print('[run_fuzzer] Running target with Eclipser')\n command = [\n 'dotnet',\n '/Eclipser/build/Eclipser.dll',\n 'fuzz',\n '-p',\n target_binary,\n '-t',\n '1048576', # FIXME: Find the max value allowed here.\n '-o',\n encoded_temp_corpus,\n '--src',\n 'file',\n '--initarg',\n 'foo', # Specifies how command line argument is passed, just a file.\n '-f',\n 'foo',\n '--maxfilelen',\n str(10 * 1024 * 1024), # Increase since default is too low (8 bytes).\n ]\n if os.listdir(input_corpus): # Important, otherwise Eclipser crashes.\n command += ['-i', input_corpus]\n subprocess.Popen(command)\n\n process = Process(target=copy_corpus_directory,\n args=(\n encoded_temp_corpus,\n output_corpus,\n ))\n process.start()\n\n\ndef copy_corpus_directory(encoded_temp_corpus, output_corpus):\n \"\"\"Copies corpus periodically from encoded corpus directory into output\n directory.\"\"\"\n while True:\n # Wait for initial fuzzer initialization, and after every copy.\n time.sleep(120)\n\n subprocess.call([\n 'dotnet',\n '/Eclipser/build/Eclipser.dll',\n 'decode',\n '-i',\n os.path.join(encoded_temp_corpus, 'testcase'),\n '-o',\n output_corpus,\n ])\n", "path": "fuzzers/eclipser/fuzzer.py"}]}
num_tokens_prompt: 1531
num_tokens_diff: 226
problem_id: gh_patches_debug_9888
source: rasdani/github-patches
task_type: git_diff
in_source_id: DDMAL__CantusDB-1415
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Use Django Extensions, deprecate and remove ptvsd [Django Extensions](https://django-extensions.readthedocs.io/en/latest/) are a really useful set of management and development tools. One of the most useful ones (I find) is `runserver_plus`, which you can run instead of the normal `runserver` when developing. This gives you access to an in-browser debugger tool, replacing the standard Django error pages with an interactive traceback and debugger. Another useful one is `shell_plus` which can pre-load all of your models into an interactive Python shell. If you also have iPython installed it will use that, making the Python repl much easier to use. With a move to these tools, I think [the modifications](https://github.com/DDMAL/CantusDB/blob/develop/django/cantusdb_project/manage.py#L9-L18) to `manage.py` can be un-done, and the dependency on the ptvsd module can be removed. This module anyway [seems to be deprecated](https://github.com/microsoft/ptvsd). </issue> <code> [start of django/cantusdb_project/manage.py] 1 #!/usr/bin/env python 2 """Django's command-line utility for administrative tasks.""" 3 import os 4 import sys 5 6 7 def main(): 8 os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cantusdb.settings") 9 # start new section 10 from django.conf import settings 11 12 if settings.DEBUG: 13 if os.environ.get("RUN_MAIN") or os.environ.get("WERKZEUG_RUN_MAIN"): 14 import ptvsd 15 16 ptvsd.enable_attach(address=("0.0.0.0", 3000)) 17 print("Attached!") 18 # end new section 19 20 try: 21 from django.core.management import execute_from_command_line 22 except ImportError as exc: 23 raise ImportError( 24 "Couldn't import Django. Are you sure it's installed and " 25 "available on your PYTHONPATH environment variable? Did you " 26 "forget to activate a virtual environment?" 27 ) from exc 28 execute_from_command_line(sys.argv) 29 30 31 if __name__ == "__main__": 32 main() 33 [end of django/cantusdb_project/manage.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/django/cantusdb_project/manage.py b/django/cantusdb_project/manage.py --- a/django/cantusdb_project/manage.py +++ b/django/cantusdb_project/manage.py @@ -6,17 +6,6 @@ def main(): os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cantusdb.settings") - # start new section - from django.conf import settings - - if settings.DEBUG: - if os.environ.get("RUN_MAIN") or os.environ.get("WERKZEUG_RUN_MAIN"): - import ptvsd - - ptvsd.enable_attach(address=("0.0.0.0", 3000)) - print("Attached!") - # end new section - try: from django.core.management import execute_from_command_line except ImportError as exc:
{"golden_diff": "diff --git a/django/cantusdb_project/manage.py b/django/cantusdb_project/manage.py\n--- a/django/cantusdb_project/manage.py\n+++ b/django/cantusdb_project/manage.py\n@@ -6,17 +6,6 @@\n \n def main():\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"cantusdb.settings\")\n- # start new section\n- from django.conf import settings\n-\n- if settings.DEBUG:\n- if os.environ.get(\"RUN_MAIN\") or os.environ.get(\"WERKZEUG_RUN_MAIN\"):\n- import ptvsd\n-\n- ptvsd.enable_attach(address=(\"0.0.0.0\", 3000))\n- print(\"Attached!\")\n- # end new section\n-\n try:\n from django.core.management import execute_from_command_line\n except ImportError as exc:\n", "issue": "Use Django Extensions, deprecate and remove ptvsd\n[Django Extensions](https://django-extensions.readthedocs.io/en/latest/) are a really useful set of management and development tools. One of the most useful ones (I find) is `runserver_plus`, which you can run instead of the normal `runserver` when developing. This gives you access to an in-browser debugger tool, replacing the standard Django error pages with an interactive traceback and debugger.\r\n\r\nAnother useful one is `shell_plus` which can pre-load all of your models into an interactive Python shell. If you also have iPython installed it will use that, making the Python repl much easier to use.\r\n\r\nWith a move to these tools, I think [the modifications](https://github.com/DDMAL/CantusDB/blob/develop/django/cantusdb_project/manage.py#L9-L18) to `manage.py` can be un-done, and the dependency on the ptvsd module can be removed. This module anyway [seems to be deprecated](https://github.com/microsoft/ptvsd). \n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"Django's command-line utility for administrative tasks.\"\"\"\nimport os\nimport sys\n\n\ndef main():\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"cantusdb.settings\")\n # start new section\n from django.conf import settings\n\n if settings.DEBUG:\n if os.environ.get(\"RUN_MAIN\") or os.environ.get(\"WERKZEUG_RUN_MAIN\"):\n import ptvsd\n\n ptvsd.enable_attach(address=(\"0.0.0.0\", 3000))\n print(\"Attached!\")\n # end new section\n\n try:\n from django.core.management import execute_from_command_line\n except ImportError as exc:\n raise ImportError(\n \"Couldn't import Django. Are you sure it's installed and \"\n \"available on your PYTHONPATH environment variable? Did you \"\n \"forget to activate a virtual environment?\"\n ) from exc\n execute_from_command_line(sys.argv)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "django/cantusdb_project/manage.py"}]}
num_tokens_prompt: 1037
num_tokens_diff: 188
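
Editor's note on the row above: the golden diff only removes the ptvsd hook from manage.py; the Django Extensions workflow the issue recommends is not shown anywhere in the row. The fragment below is a hedged illustration of how django-extensions is normally enabled — the settings layout and the command invocations are assumptions based on the library's documented usage, not code taken from CantusDB.

```python
# Illustrative only: standard django-extensions setup, not CantusDB's actual
# settings module. Listing the app is what exposes runserver_plus / shell_plus.
INSTALLED_APPS = [
    # ... project apps ...
    "django_extensions",
]

# Typical invocations once the app is installed (run from the project root):
#   python manage.py runserver_plus          # Werkzeug-backed server with in-browser debugger
#   python manage.py shell_plus --ipython    # shell with models auto-imported
```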
problem_id: gh_patches_debug_23871
source: rasdani/github-patches
task_type: git_diff
in_source_id: learningequality__kolibri-6104
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> import is broken ### Observed behavior trying to import doesn't work: ![import](https://user-images.githubusercontent.com/2367265/69167242-a0724d00-0ac2-11ea-9a37-a21040f7f812.gif) ### Expected behavior import should work ### User-facing consequences cannot import ### Errors and logs none observed ### Steps to reproduce unsure. it happened during all imports, but then I cleared my `KOLIBRI_HOME` folder and things started working again ### Context 0.13.0 alpha 3 </issue> <code> [start of kolibri/core/utils/cache.py] 1 from django.core.cache import caches 2 3 from kolibri.utils.conf import OPTIONS 4 5 6 cache_options = OPTIONS["Cache"] 7 8 9 class CrossProcessCache(object): 10 def __init__(self, default_timeout=cache_options["CACHE_TIMEOUT"]): 11 self.default_timeout = default_timeout 12 13 def __contains__(self, key): 14 if key in caches["default"]: 15 return True 16 if cache_options["CACHE_BACKEND"] != "redis" and key in caches["process_cache"]: 17 return True 18 return False 19 20 def get(self, key, default=None, version=None): 21 if key in caches["default"] or cache_options["CACHE_BACKEND"] == "redis": 22 return caches["default"].get(key, default=default, version=version) 23 item = caches["process_cache"].get(key, default=None, version=None) 24 caches["default"].set(key, item, timeout=self.default_timeout, version=version) 25 return item 26 27 def set(self, key, value, timeout=None, version=None): 28 caches["default"].set( 29 key, value, timeout=timeout or self.default_timeout, version=version 30 ) 31 if cache_options["CACHE_BACKEND"] != "redis": 32 caches["process_cache"].set( 33 key, value, timeout=timeout or self.default_timeout, version=version 34 ) 35 [end of kolibri/core/utils/cache.py] [start of kolibri/core/content/utils/import_export_content.py] 1 import hashlib 2 3 from django.db.models import Sum 4 from requests.exceptions import ChunkedEncodingError 5 from requests.exceptions import ConnectionError 6 from requests.exceptions import HTTPError 7 from requests.exceptions import Timeout 8 9 from kolibri.core.content.models import ContentNode 10 from kolibri.core.content.models import LocalFile 11 from kolibri.core.content.utils.content_types_tools import ( 12 renderable_contentnodes_q_filter, 13 ) 14 from kolibri.core.content.utils.importability_annotation import ( 15 get_channel_stats_from_disk, 16 ) 17 from kolibri.core.content.utils.importability_annotation import ( 18 get_channel_stats_from_peer, 19 ) 20 21 try: 22 import OpenSSL 23 24 SSLERROR = OpenSSL.SSL.Error 25 except ImportError: 26 import requests 27 28 SSLERROR = requests.exceptions.SSLError 29 30 RETRY_STATUS_CODE = [502, 503, 504, 521, 522, 523, 524] 31 32 33 def get_nodes_to_transfer( 34 channel_id, 35 node_ids, 36 exclude_node_ids, 37 available, 38 renderable_only=True, 39 drive_id=None, 40 peer_id=None, 41 ): 42 nodes_to_include = ContentNode.objects.filter(channel_id=channel_id) 43 44 # if requested, filter down to only include particular topics/nodes 45 if node_ids: 46 nodes_to_include = nodes_to_include.filter(pk__in=node_ids).get_descendants( 47 include_self=True 48 ) 49 50 # if requested, filter out nodes we're not able to render 51 if renderable_only: 52 nodes_to_include = nodes_to_include.filter(renderable_contentnodes_q_filter) 53 54 # filter down the query to remove files associated with nodes we've specifically been asked to exclude 55 if exclude_node_ids: 56 nodes_to_exclude = 
ContentNode.objects.filter( 57 pk__in=exclude_node_ids 58 ).get_descendants(include_self=True) 59 60 nodes_to_include = nodes_to_include.order_by().difference( 61 nodes_to_exclude.order_by() 62 ) 63 64 # By default don't filter node ids by their underlying file importability 65 file_based_node_id_list = None 66 if drive_id: 67 file_based_node_id_list = get_channel_stats_from_disk( 68 channel_id, drive_id 69 ).keys() 70 71 if peer_id: 72 file_based_node_id_list = get_channel_stats_from_peer( 73 channel_id, peer_id 74 ).keys() 75 if file_based_node_id_list is not None: 76 nodes_to_include = nodes_to_include.filter(pk__in=file_based_node_id_list) 77 return nodes_to_include.filter(available=available).order_by() 78 79 80 def get_files_to_transfer( 81 channel_id, 82 node_ids, 83 exclude_node_ids, 84 available, 85 renderable_only=True, 86 drive_id=None, 87 peer_id=None, 88 ): 89 90 nodes_to_include = get_nodes_to_transfer( 91 channel_id, 92 node_ids, 93 exclude_node_ids, 94 renderable_only, 95 available, 96 drive_id=drive_id, 97 peer_id=peer_id, 98 ) 99 return calculate_files_to_transfer(nodes_to_include, available) 100 101 102 def calculate_files_to_transfer(nodes_to_include, available): 103 files_to_transfer = LocalFile.objects.filter( 104 available=available, files__contentnode__in=nodes_to_include 105 ) 106 107 # Make sure the files are unique, to avoid duplicating downloads 108 files_to_transfer = files_to_transfer.distinct() 109 110 # calculate the total file sizes across all files being returned in the queryset 111 total_bytes_to_transfer = ( 112 files_to_transfer.aggregate(Sum("file_size"))["file_size__sum"] or 0 113 ) 114 115 return files_to_transfer, total_bytes_to_transfer 116 117 118 def _get_node_ids(node_ids): 119 120 return ( 121 ContentNode.objects.filter(pk__in=node_ids) 122 .get_descendants(include_self=True) 123 .values_list("id", flat=True) 124 ) 125 126 127 def retry_import(e, **kwargs): 128 """ 129 When an exception occurs during channel/content import, if 130 * there is an Internet connection error or timeout error, 131 or HTTPError where the error code is one of the RETRY_STATUS_CODE, 132 return return True to retry the file transfer 133 * the file does not exist on the server or disk, skip the file and return False. 134 This only applies to content import not channel import. 135 * otherwise, raise the exception. 136 return value: 137 * True - needs retry. 138 * False - file is skipped. Does not need retry. 139 """ 140 141 skip_404 = kwargs.pop("skip_404") 142 143 if ( 144 isinstance(e, ConnectionError) 145 or isinstance(e, Timeout) 146 or isinstance(e, ChunkedEncodingError) 147 or (isinstance(e, HTTPError) and e.response.status_code in RETRY_STATUS_CODE) 148 or (isinstance(e, SSLERROR) and "decryption failed or bad record mac" in str(e)) 149 ): 150 return True 151 152 elif skip_404 and ( 153 (isinstance(e, HTTPError) and e.response.status_code == 404) 154 or (isinstance(e, OSError) and e.errno == 2) 155 ): 156 return False 157 158 else: 159 raise e 160 161 162 def compare_checksums(file_name, file_id): 163 hasher = hashlib.md5() 164 with open(file_name, "rb") as f: 165 # Read chunks of 4096 bytes for memory efficiency 166 for chunk in iter(lambda: f.read(4096), b""): 167 hasher.update(chunk) 168 checksum = hasher.hexdigest() 169 return checksum == file_id 170 [end of kolibri/core/content/utils/import_export_content.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kolibri/core/content/utils/import_export_content.py b/kolibri/core/content/utils/import_export_content.py --- a/kolibri/core/content/utils/import_export_content.py +++ b/kolibri/core/content/utils/import_export_content.py @@ -91,8 +91,8 @@ channel_id, node_ids, exclude_node_ids, - renderable_only, available, + renderable_only=renderable_only, drive_id=drive_id, peer_id=peer_id, ) diff --git a/kolibri/core/utils/cache.py b/kolibri/core/utils/cache.py --- a/kolibri/core/utils/cache.py +++ b/kolibri/core/utils/cache.py @@ -20,9 +20,13 @@ def get(self, key, default=None, version=None): if key in caches["default"] or cache_options["CACHE_BACKEND"] == "redis": return caches["default"].get(key, default=default, version=version) - item = caches["process_cache"].get(key, default=None, version=None) - caches["default"].set(key, item, timeout=self.default_timeout, version=version) - return item + if key in caches["process_cache"]: + item = caches["process_cache"].get(key, default=None, version=None) + caches["default"].set( + key, item, timeout=self.default_timeout, version=version + ) + return item + return default def set(self, key, value, timeout=None, version=None): caches["default"].set(
{"golden_diff": "diff --git a/kolibri/core/content/utils/import_export_content.py b/kolibri/core/content/utils/import_export_content.py\n--- a/kolibri/core/content/utils/import_export_content.py\n+++ b/kolibri/core/content/utils/import_export_content.py\n@@ -91,8 +91,8 @@\n channel_id,\n node_ids,\n exclude_node_ids,\n- renderable_only,\n available,\n+ renderable_only=renderable_only,\n drive_id=drive_id,\n peer_id=peer_id,\n )\ndiff --git a/kolibri/core/utils/cache.py b/kolibri/core/utils/cache.py\n--- a/kolibri/core/utils/cache.py\n+++ b/kolibri/core/utils/cache.py\n@@ -20,9 +20,13 @@\n def get(self, key, default=None, version=None):\n if key in caches[\"default\"] or cache_options[\"CACHE_BACKEND\"] == \"redis\":\n return caches[\"default\"].get(key, default=default, version=version)\n- item = caches[\"process_cache\"].get(key, default=None, version=None)\n- caches[\"default\"].set(key, item, timeout=self.default_timeout, version=version)\n- return item\n+ if key in caches[\"process_cache\"]:\n+ item = caches[\"process_cache\"].get(key, default=None, version=None)\n+ caches[\"default\"].set(\n+ key, item, timeout=self.default_timeout, version=version\n+ )\n+ return item\n+ return default\n \n def set(self, key, value, timeout=None, version=None):\n caches[\"default\"].set(\n", "issue": "import is broken\n### Observed behavior\r\n\r\ntrying to import doesn't work:\r\n\r\n![import](https://user-images.githubusercontent.com/2367265/69167242-a0724d00-0ac2-11ea-9a37-a21040f7f812.gif)\r\n\r\n\r\n### Expected behavior\r\n\r\nimport should work\r\n\r\n### User-facing consequences\r\n\r\ncannot import\r\n\r\n### Errors and logs\r\n\r\nnone observed\r\n\r\n### Steps to reproduce\r\n\r\nunsure. it happened during all imports, but then I cleared my `KOLIBRI_HOME` folder and things started working again\r\n\r\n### Context\r\n\r\n0.13.0 alpha 3\r\n\n", "before_files": [{"content": "from django.core.cache import caches\n\nfrom kolibri.utils.conf import OPTIONS\n\n\ncache_options = OPTIONS[\"Cache\"]\n\n\nclass CrossProcessCache(object):\n def __init__(self, default_timeout=cache_options[\"CACHE_TIMEOUT\"]):\n self.default_timeout = default_timeout\n\n def __contains__(self, key):\n if key in caches[\"default\"]:\n return True\n if cache_options[\"CACHE_BACKEND\"] != \"redis\" and key in caches[\"process_cache\"]:\n return True\n return False\n\n def get(self, key, default=None, version=None):\n if key in caches[\"default\"] or cache_options[\"CACHE_BACKEND\"] == \"redis\":\n return caches[\"default\"].get(key, default=default, version=version)\n item = caches[\"process_cache\"].get(key, default=None, version=None)\n caches[\"default\"].set(key, item, timeout=self.default_timeout, version=version)\n return item\n\n def set(self, key, value, timeout=None, version=None):\n caches[\"default\"].set(\n key, value, timeout=timeout or self.default_timeout, version=version\n )\n if cache_options[\"CACHE_BACKEND\"] != \"redis\":\n caches[\"process_cache\"].set(\n key, value, timeout=timeout or self.default_timeout, version=version\n )\n", "path": "kolibri/core/utils/cache.py"}, {"content": "import hashlib\n\nfrom django.db.models import Sum\nfrom requests.exceptions import ChunkedEncodingError\nfrom requests.exceptions import ConnectionError\nfrom requests.exceptions import HTTPError\nfrom requests.exceptions import Timeout\n\nfrom kolibri.core.content.models import ContentNode\nfrom kolibri.core.content.models import LocalFile\nfrom kolibri.core.content.utils.content_types_tools import (\n 
renderable_contentnodes_q_filter,\n)\nfrom kolibri.core.content.utils.importability_annotation import (\n get_channel_stats_from_disk,\n)\nfrom kolibri.core.content.utils.importability_annotation import (\n get_channel_stats_from_peer,\n)\n\ntry:\n import OpenSSL\n\n SSLERROR = OpenSSL.SSL.Error\nexcept ImportError:\n import requests\n\n SSLERROR = requests.exceptions.SSLError\n\nRETRY_STATUS_CODE = [502, 503, 504, 521, 522, 523, 524]\n\n\ndef get_nodes_to_transfer(\n channel_id,\n node_ids,\n exclude_node_ids,\n available,\n renderable_only=True,\n drive_id=None,\n peer_id=None,\n):\n nodes_to_include = ContentNode.objects.filter(channel_id=channel_id)\n\n # if requested, filter down to only include particular topics/nodes\n if node_ids:\n nodes_to_include = nodes_to_include.filter(pk__in=node_ids).get_descendants(\n include_self=True\n )\n\n # if requested, filter out nodes we're not able to render\n if renderable_only:\n nodes_to_include = nodes_to_include.filter(renderable_contentnodes_q_filter)\n\n # filter down the query to remove files associated with nodes we've specifically been asked to exclude\n if exclude_node_ids:\n nodes_to_exclude = ContentNode.objects.filter(\n pk__in=exclude_node_ids\n ).get_descendants(include_self=True)\n\n nodes_to_include = nodes_to_include.order_by().difference(\n nodes_to_exclude.order_by()\n )\n\n # By default don't filter node ids by their underlying file importability\n file_based_node_id_list = None\n if drive_id:\n file_based_node_id_list = get_channel_stats_from_disk(\n channel_id, drive_id\n ).keys()\n\n if peer_id:\n file_based_node_id_list = get_channel_stats_from_peer(\n channel_id, peer_id\n ).keys()\n if file_based_node_id_list is not None:\n nodes_to_include = nodes_to_include.filter(pk__in=file_based_node_id_list)\n return nodes_to_include.filter(available=available).order_by()\n\n\ndef get_files_to_transfer(\n channel_id,\n node_ids,\n exclude_node_ids,\n available,\n renderable_only=True,\n drive_id=None,\n peer_id=None,\n):\n\n nodes_to_include = get_nodes_to_transfer(\n channel_id,\n node_ids,\n exclude_node_ids,\n renderable_only,\n available,\n drive_id=drive_id,\n peer_id=peer_id,\n )\n return calculate_files_to_transfer(nodes_to_include, available)\n\n\ndef calculate_files_to_transfer(nodes_to_include, available):\n files_to_transfer = LocalFile.objects.filter(\n available=available, files__contentnode__in=nodes_to_include\n )\n\n # Make sure the files are unique, to avoid duplicating downloads\n files_to_transfer = files_to_transfer.distinct()\n\n # calculate the total file sizes across all files being returned in the queryset\n total_bytes_to_transfer = (\n files_to_transfer.aggregate(Sum(\"file_size\"))[\"file_size__sum\"] or 0\n )\n\n return files_to_transfer, total_bytes_to_transfer\n\n\ndef _get_node_ids(node_ids):\n\n return (\n ContentNode.objects.filter(pk__in=node_ids)\n .get_descendants(include_self=True)\n .values_list(\"id\", flat=True)\n )\n\n\ndef retry_import(e, **kwargs):\n \"\"\"\n When an exception occurs during channel/content import, if\n * there is an Internet connection error or timeout error,\n or HTTPError where the error code is one of the RETRY_STATUS_CODE,\n return return True to retry the file transfer\n * the file does not exist on the server or disk, skip the file and return False.\n This only applies to content import not channel import.\n * otherwise, raise the exception.\n return value:\n * True - needs retry.\n * False - file is skipped. 
Does not need retry.\n \"\"\"\n\n skip_404 = kwargs.pop(\"skip_404\")\n\n if (\n isinstance(e, ConnectionError)\n or isinstance(e, Timeout)\n or isinstance(e, ChunkedEncodingError)\n or (isinstance(e, HTTPError) and e.response.status_code in RETRY_STATUS_CODE)\n or (isinstance(e, SSLERROR) and \"decryption failed or bad record mac\" in str(e))\n ):\n return True\n\n elif skip_404 and (\n (isinstance(e, HTTPError) and e.response.status_code == 404)\n or (isinstance(e, OSError) and e.errno == 2)\n ):\n return False\n\n else:\n raise e\n\n\ndef compare_checksums(file_name, file_id):\n hasher = hashlib.md5()\n with open(file_name, \"rb\") as f:\n # Read chunks of 4096 bytes for memory efficiency\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hasher.update(chunk)\n checksum = hasher.hexdigest()\n return checksum == file_id\n", "path": "kolibri/core/content/utils/import_export_content.py"}]}
num_tokens_prompt: 2636
num_tokens_diff: 342
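
Editor's note on the row above: the kolibri golden diff fixes two things — the argument order passed to `get_nodes_to_transfer` and the miss-handling in `CrossProcessCache.get`. The sketch below models only the cache fix, using plain dicts as a simplified stand-in for Kolibri's Django cache backends, to show why the original code poisoned the shared cache with `None` on a miss and ignored the caller's default.

```python
# Simplified model, not Kolibri's real caches: "shared" stands in for the
# default cache, process_cache for the per-process cache.
process_cache = {}

def broken_get(shared, key, default=None):
    if key in shared:
        return shared[key]
    item = process_cache.get(key)   # None on a miss
    shared[key] = item              # the miss itself gets cached as None
    return item                     # caller's default is never used

def fixed_get(shared, key, default=None):
    if key in shared:
        return shared[key]
    if key in process_cache:        # only fall through when the key exists
        item = process_cache[key]
        shared[key] = item
        return item
    return default

assert broken_get({}, "missing", default="fallback") is None
assert fixed_get({}, "missing", default="fallback") == "fallback"
```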
problem_id: gh_patches_debug_24173
source: rasdani/github-patches
task_type: git_diff
in_source_id: getnikola__nikola-2000
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add support for cssminifier.com and javascript-minifier.com They offer a nice service to minify CSS/JS and it's easy to use. </issue> <code> [start of nikola/filters.py] 1 # -*- coding: utf-8 -*- 2 3 # Copyright © 2012-2015 Roberto Alsina and others. 4 5 # Permission is hereby granted, free of charge, to any 6 # person obtaining a copy of this software and associated 7 # documentation files (the "Software"), to deal in the 8 # Software without restriction, including without limitation 9 # the rights to use, copy, modify, merge, publish, 10 # distribute, sublicense, and/or sell copies of the 11 # Software, and to permit persons to whom the Software is 12 # furnished to do so, subject to the following conditions: 13 # 14 # The above copyright notice and this permission notice 15 # shall be included in all copies or substantial portions of 16 # the Software. 17 # 18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY 19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE 20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR 21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS 22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR 23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 27 """Utility functions to help run filters on files.""" 28 29 from .utils import req_missing 30 from functools import wraps 31 import os 32 import io 33 import shutil 34 import subprocess 35 import tempfile 36 import shlex 37 38 try: 39 import typogrify.filters as typo 40 except ImportError: 41 typo = None # NOQA 42 43 44 def apply_to_binary_file(f): 45 """Apply a filter to a binary file. 46 47 Take a function f that transforms a data argument, and returns 48 a function that takes a filename and applies f to the contents, 49 in place. Reads files in binary mode. 50 """ 51 @wraps(f) 52 def f_in_file(fname): 53 with open(fname, 'rb') as inf: 54 data = inf.read() 55 data = f(data) 56 with open(fname, 'wb+') as outf: 57 outf.write(data) 58 59 return f_in_file 60 61 62 def apply_to_text_file(f): 63 """Apply a filter to a text file. 64 65 Take a function f that transforms a data argument, and returns 66 a function that takes a filename and applies f to the contents, 67 in place. Reads files in UTF-8. 68 """ 69 @wraps(f) 70 def f_in_file(fname): 71 with io.open(fname, 'r', encoding='utf-8') as inf: 72 data = inf.read() 73 data = f(data) 74 with io.open(fname, 'w+', encoding='utf-8') as outf: 75 outf.write(data) 76 77 return f_in_file 78 79 80 def list_replace(the_list, find, replacement): 81 """Replace all occurrences of ``find`` with ``replacement`` in ``the_list``.""" 82 for i, v in enumerate(the_list): 83 if v == find: 84 the_list[i] = replacement 85 86 87 def runinplace(command, infile): 88 """Run a command in-place on a file. 89 90 command is a string of the form: "commandname %1 %2" and 91 it will be execed with infile as %1 and a temporary file 92 as %2. Then, that temporary file will be moved over %1. 93 94 Example usage: 95 96 runinplace("yui-compressor %1 -o %2", "myfile.css") 97 98 That will replace myfile.css with a minified version. 99 100 You can also supply command as a list. 
101 """ 102 if not isinstance(command, list): 103 command = shlex.split(command) 104 105 tmpdir = None 106 107 if "%2" in command: 108 tmpdir = tempfile.mkdtemp(prefix="nikola") 109 tmpfname = os.path.join(tmpdir, os.path.basename(infile)) 110 111 try: 112 list_replace(command, "%1", infile) 113 if tmpdir: 114 list_replace(command, "%2", tmpfname) 115 116 subprocess.check_call(command) 117 118 if tmpdir: 119 shutil.move(tmpfname, infile) 120 finally: 121 if tmpdir: 122 shutil.rmtree(tmpdir) 123 124 125 def yui_compressor(infile): 126 """Run YUI Compressor on a file.""" 127 yuicompressor = False 128 try: 129 subprocess.call('yui-compressor', stdout=open(os.devnull, 'w'), stderr=open(os.devnull, 'w')) 130 yuicompressor = 'yui-compressor' 131 except Exception: 132 pass 133 if not yuicompressor: 134 try: 135 subprocess.call('yuicompressor', stdout=open(os.devnull, 'w'), stderr=open(os.devnull, 'w')) 136 yuicompressor = 'yuicompressor' 137 except: 138 raise Exception("yui-compressor is not installed.") 139 return False 140 141 return runinplace(r'{} --nomunge %1 -o %2'.format(yuicompressor), infile) 142 143 144 def closure_compiler(infile): 145 """Run closure-compiler on a file.""" 146 return runinplace(r'closure-compiler --warning_level QUIET --js %1 --js_output_file %2', infile) 147 148 149 def optipng(infile): 150 """Run optipng on a file.""" 151 return runinplace(r"optipng -preserve -o2 -quiet %1", infile) 152 153 154 def jpegoptim(infile): 155 """Run jpegoptim on a file.""" 156 return runinplace(r"jpegoptim -p --strip-all -q %1", infile) 157 158 159 def html_tidy_withconfig(infile): 160 """Run HTML Tidy with tidy5.conf as config file.""" 161 return _html_tidy_runner(infile, r"-quiet --show-info no --show-warnings no -utf8 -indent -config tidy5.conf -modify %1") 162 163 164 def html_tidy_nowrap(infile): 165 """Run HTML Tidy without line wrapping.""" 166 return _html_tidy_runner(infile, r"-quiet --show-info no --show-warnings no -utf8 -indent --indent-attributes no --sort-attributes alpha --wrap 0 --wrap-sections no --drop-empty-elements no --tidy-mark no -modify %1") 167 168 169 def html_tidy_wrap(infile): 170 """Run HTML Tidy with line wrapping.""" 171 return _html_tidy_runner(infile, r"-quiet --show-info no --show-warnings no -utf8 -indent --indent-attributes no --sort-attributes alpha --wrap 80 --wrap-sections no --drop-empty-elements no --tidy-mark no -modify %1") 172 173 174 def html_tidy_wrap_attr(infile): 175 """Run HTML tidy with line wrapping and attribute indentation.""" 176 return _html_tidy_runner(infile, r"-quiet --show-info no --show-warnings no -utf8 -indent --indent-attributes yes --sort-attributes alpha --wrap 80 --wrap-sections no --drop-empty-elements no --tidy-mark no -modify %1") 177 178 179 def html_tidy_mini(infile): 180 """Run HTML tidy with minimal settings.""" 181 return _html_tidy_runner(infile, r"-quiet --show-info no --show-warnings no -utf8 --indent-attributes no --sort-attributes alpha --wrap 0 --wrap-sections no --tidy-mark no --drop-empty-elements no -modify %1") 182 183 184 def _html_tidy_runner(infile, options): 185 """Run HTML Tidy.""" 186 # Warnings (returncode 1) are not critical, and *everything* is a warning. 
187 try: 188 status = runinplace(r"tidy5 " + options, infile) 189 except subprocess.CalledProcessError as err: 190 status = 0 if err.returncode == 1 else err.returncode 191 return status 192 193 194 @apply_to_text_file 195 def html5lib_minify(data): 196 """Minify with html5lib.""" 197 import html5lib 198 import html5lib.serializer 199 data = html5lib.serializer.serialize(html5lib.parse(data, treebuilder='lxml'), 200 tree='lxml', 201 quote_attr_values=False, 202 omit_optional_tags=True, 203 minimize_boolean_attributes=True, 204 strip_whitespace=True, 205 alphabetical_attributes=True, 206 escape_lt_in_attrs=True) 207 return data 208 209 210 @apply_to_text_file 211 def html5lib_xmllike(data): 212 """Transform document to an XML-like form with html5lib.""" 213 import html5lib 214 import html5lib.serializer 215 data = html5lib.serializer.serialize(html5lib.parse(data, treebuilder='lxml'), 216 tree='lxml', 217 quote_attr_values=True, 218 omit_optional_tags=False, 219 strip_whitespace=False, 220 alphabetical_attributes=True, 221 escape_lt_in_attrs=True) 222 return data 223 224 225 @apply_to_text_file 226 def minify_lines(data): 227 """Do nothing -- deprecated filter.""" 228 return data 229 230 231 @apply_to_text_file 232 def typogrify(data): 233 """Prettify text with typogrify.""" 234 if typo is None: 235 req_missing(['typogrify'], 'use the typogrify filter') 236 237 data = typo.amp(data) 238 data = typo.widont(data) 239 data = typo.smartypants(data) 240 # Disabled because of typogrify bug where it breaks <title> 241 # data = typo.caps(data) 242 data = typo.initial_quotes(data) 243 return data 244 245 246 @apply_to_text_file 247 def typogrify_sans_widont(data): 248 """Prettify text with typogrify, skipping the widont filter.""" 249 # typogrify with widont disabled because it caused broken headline 250 # wrapping, see issue #1465 251 if typo is None: 252 req_missing(['typogrify'], 'use the typogrify_sans_widont filter') 253 254 data = typo.amp(data) 255 data = typo.smartypants(data) 256 # Disabled because of typogrify bug where it breaks <title> 257 # data = typo.caps(data) 258 data = typo.initial_quotes(data) 259 return data 260 261 262 @apply_to_text_file 263 def php_template_injection(data): 264 """Insert PHP code into Nikola templates.""" 265 import re 266 template = re.search('<\!-- __NIKOLA_PHP_TEMPLATE_INJECTION source\:(.*) checksum\:(.*)__ -->', data) 267 if template: 268 source = template.group(1) 269 with io.open(source, "r", encoding="utf-8") as in_file: 270 phpdata = in_file.read() 271 _META_SEPARATOR = '(' + os.linesep * 2 + '|' + ('\n' * 2) + '|' + ("\r\n" * 2) + ')' 272 phpdata = re.split(_META_SEPARATOR, phpdata, maxsplit=1)[-1] 273 phpdata = re.sub(template.group(0), phpdata, data) 274 return phpdata 275 else: 276 return data 277 [end of nikola/filters.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/nikola/filters.py b/nikola/filters.py --- a/nikola/filters.py +++ b/nikola/filters.py @@ -26,7 +26,6 @@ """Utility functions to help run filters on files.""" -from .utils import req_missing from functools import wraps import os import io @@ -39,6 +38,9 @@ import typogrify.filters as typo except ImportError: typo = None # NOQA +import requests + +from .utils import req_missing, LOGGER def apply_to_binary_file(f): @@ -274,3 +276,29 @@ return phpdata else: return data + + +@apply_to_text_file +def cssminify(data): + """Minify CSS using http://cssminifier.com/.""" + try: + url = 'http://cssminifier.com/raw' + _data = {'input': data} + response = requests.post(url, data=_data) + return response.text + except Exception as exc: + LOGGER.error("can't use cssminifier.com: {}", exc) + return data + + +@apply_to_text_file +def jsminify(data): + """Minify JS using http://javascript-minifier.com/.""" + try: + url = 'http://javascript-minifier.com/raw' + _data = {'input': data} + response = requests.post(url, data=_data) + return response.text + except Exception as exc: + LOGGER.error("can't use javascript-minifier.com: {}", exc) + return data
{"golden_diff": "diff --git a/nikola/filters.py b/nikola/filters.py\n--- a/nikola/filters.py\n+++ b/nikola/filters.py\n@@ -26,7 +26,6 @@\n \n \"\"\"Utility functions to help run filters on files.\"\"\"\n \n-from .utils import req_missing\n from functools import wraps\n import os\n import io\n@@ -39,6 +38,9 @@\n import typogrify.filters as typo\n except ImportError:\n typo = None # NOQA\n+import requests\n+\n+from .utils import req_missing, LOGGER\n \n \n def apply_to_binary_file(f):\n@@ -274,3 +276,29 @@\n return phpdata\n else:\n return data\n+\n+\n+@apply_to_text_file\n+def cssminify(data):\n+ \"\"\"Minify CSS using http://cssminifier.com/.\"\"\"\n+ try:\n+ url = 'http://cssminifier.com/raw'\n+ _data = {'input': data}\n+ response = requests.post(url, data=_data)\n+ return response.text\n+ except Exception as exc:\n+ LOGGER.error(\"can't use cssminifier.com: {}\", exc)\n+ return data\n+\n+\n+@apply_to_text_file\n+def jsminify(data):\n+ \"\"\"Minify JS using http://javascript-minifier.com/.\"\"\"\n+ try:\n+ url = 'http://javascript-minifier.com/raw'\n+ _data = {'input': data}\n+ response = requests.post(url, data=_data)\n+ return response.text\n+ except Exception as exc:\n+ LOGGER.error(\"can't use javascript-minifier.com: {}\", exc)\n+ return data\n", "issue": "Add support for cssminifier.com and javascript-minifier.com\nThey offer a nice service to minify CSS/JS and it's easy to use.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2015 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Utility functions to help run filters on files.\"\"\"\n\nfrom .utils import req_missing\nfrom functools import wraps\nimport os\nimport io\nimport shutil\nimport subprocess\nimport tempfile\nimport shlex\n\ntry:\n import typogrify.filters as typo\nexcept ImportError:\n typo = None # NOQA\n\n\ndef apply_to_binary_file(f):\n \"\"\"Apply a filter to a binary file.\n\n Take a function f that transforms a data argument, and returns\n a function that takes a filename and applies f to the contents,\n in place. 
Reads files in binary mode.\n \"\"\"\n @wraps(f)\n def f_in_file(fname):\n with open(fname, 'rb') as inf:\n data = inf.read()\n data = f(data)\n with open(fname, 'wb+') as outf:\n outf.write(data)\n\n return f_in_file\n\n\ndef apply_to_text_file(f):\n \"\"\"Apply a filter to a text file.\n\n Take a function f that transforms a data argument, and returns\n a function that takes a filename and applies f to the contents,\n in place. Reads files in UTF-8.\n \"\"\"\n @wraps(f)\n def f_in_file(fname):\n with io.open(fname, 'r', encoding='utf-8') as inf:\n data = inf.read()\n data = f(data)\n with io.open(fname, 'w+', encoding='utf-8') as outf:\n outf.write(data)\n\n return f_in_file\n\n\ndef list_replace(the_list, find, replacement):\n \"\"\"Replace all occurrences of ``find`` with ``replacement`` in ``the_list``.\"\"\"\n for i, v in enumerate(the_list):\n if v == find:\n the_list[i] = replacement\n\n\ndef runinplace(command, infile):\n \"\"\"Run a command in-place on a file.\n\n command is a string of the form: \"commandname %1 %2\" and\n it will be execed with infile as %1 and a temporary file\n as %2. Then, that temporary file will be moved over %1.\n\n Example usage:\n\n runinplace(\"yui-compressor %1 -o %2\", \"myfile.css\")\n\n That will replace myfile.css with a minified version.\n\n You can also supply command as a list.\n \"\"\"\n if not isinstance(command, list):\n command = shlex.split(command)\n\n tmpdir = None\n\n if \"%2\" in command:\n tmpdir = tempfile.mkdtemp(prefix=\"nikola\")\n tmpfname = os.path.join(tmpdir, os.path.basename(infile))\n\n try:\n list_replace(command, \"%1\", infile)\n if tmpdir:\n list_replace(command, \"%2\", tmpfname)\n\n subprocess.check_call(command)\n\n if tmpdir:\n shutil.move(tmpfname, infile)\n finally:\n if tmpdir:\n shutil.rmtree(tmpdir)\n\n\ndef yui_compressor(infile):\n \"\"\"Run YUI Compressor on a file.\"\"\"\n yuicompressor = False\n try:\n subprocess.call('yui-compressor', stdout=open(os.devnull, 'w'), stderr=open(os.devnull, 'w'))\n yuicompressor = 'yui-compressor'\n except Exception:\n pass\n if not yuicompressor:\n try:\n subprocess.call('yuicompressor', stdout=open(os.devnull, 'w'), stderr=open(os.devnull, 'w'))\n yuicompressor = 'yuicompressor'\n except:\n raise Exception(\"yui-compressor is not installed.\")\n return False\n\n return runinplace(r'{} --nomunge %1 -o %2'.format(yuicompressor), infile)\n\n\ndef closure_compiler(infile):\n \"\"\"Run closure-compiler on a file.\"\"\"\n return runinplace(r'closure-compiler --warning_level QUIET --js %1 --js_output_file %2', infile)\n\n\ndef optipng(infile):\n \"\"\"Run optipng on a file.\"\"\"\n return runinplace(r\"optipng -preserve -o2 -quiet %1\", infile)\n\n\ndef jpegoptim(infile):\n \"\"\"Run jpegoptim on a file.\"\"\"\n return runinplace(r\"jpegoptim -p --strip-all -q %1\", infile)\n\n\ndef html_tidy_withconfig(infile):\n \"\"\"Run HTML Tidy with tidy5.conf as config file.\"\"\"\n return _html_tidy_runner(infile, r\"-quiet --show-info no --show-warnings no -utf8 -indent -config tidy5.conf -modify %1\")\n\n\ndef html_tidy_nowrap(infile):\n \"\"\"Run HTML Tidy without line wrapping.\"\"\"\n return _html_tidy_runner(infile, r\"-quiet --show-info no --show-warnings no -utf8 -indent --indent-attributes no --sort-attributes alpha --wrap 0 --wrap-sections no --drop-empty-elements no --tidy-mark no -modify %1\")\n\n\ndef html_tidy_wrap(infile):\n \"\"\"Run HTML Tidy with line wrapping.\"\"\"\n return _html_tidy_runner(infile, r\"-quiet --show-info no --show-warnings no -utf8 -indent 
--indent-attributes no --sort-attributes alpha --wrap 80 --wrap-sections no --drop-empty-elements no --tidy-mark no -modify %1\")\n\n\ndef html_tidy_wrap_attr(infile):\n \"\"\"Run HTML tidy with line wrapping and attribute indentation.\"\"\"\n return _html_tidy_runner(infile, r\"-quiet --show-info no --show-warnings no -utf8 -indent --indent-attributes yes --sort-attributes alpha --wrap 80 --wrap-sections no --drop-empty-elements no --tidy-mark no -modify %1\")\n\n\ndef html_tidy_mini(infile):\n \"\"\"Run HTML tidy with minimal settings.\"\"\"\n return _html_tidy_runner(infile, r\"-quiet --show-info no --show-warnings no -utf8 --indent-attributes no --sort-attributes alpha --wrap 0 --wrap-sections no --tidy-mark no --drop-empty-elements no -modify %1\")\n\n\ndef _html_tidy_runner(infile, options):\n \"\"\"Run HTML Tidy.\"\"\"\n # Warnings (returncode 1) are not critical, and *everything* is a warning.\n try:\n status = runinplace(r\"tidy5 \" + options, infile)\n except subprocess.CalledProcessError as err:\n status = 0 if err.returncode == 1 else err.returncode\n return status\n\n\n@apply_to_text_file\ndef html5lib_minify(data):\n \"\"\"Minify with html5lib.\"\"\"\n import html5lib\n import html5lib.serializer\n data = html5lib.serializer.serialize(html5lib.parse(data, treebuilder='lxml'),\n tree='lxml',\n quote_attr_values=False,\n omit_optional_tags=True,\n minimize_boolean_attributes=True,\n strip_whitespace=True,\n alphabetical_attributes=True,\n escape_lt_in_attrs=True)\n return data\n\n\n@apply_to_text_file\ndef html5lib_xmllike(data):\n \"\"\"Transform document to an XML-like form with html5lib.\"\"\"\n import html5lib\n import html5lib.serializer\n data = html5lib.serializer.serialize(html5lib.parse(data, treebuilder='lxml'),\n tree='lxml',\n quote_attr_values=True,\n omit_optional_tags=False,\n strip_whitespace=False,\n alphabetical_attributes=True,\n escape_lt_in_attrs=True)\n return data\n\n\n@apply_to_text_file\ndef minify_lines(data):\n \"\"\"Do nothing -- deprecated filter.\"\"\"\n return data\n\n\n@apply_to_text_file\ndef typogrify(data):\n \"\"\"Prettify text with typogrify.\"\"\"\n if typo is None:\n req_missing(['typogrify'], 'use the typogrify filter')\n\n data = typo.amp(data)\n data = typo.widont(data)\n data = typo.smartypants(data)\n # Disabled because of typogrify bug where it breaks <title>\n # data = typo.caps(data)\n data = typo.initial_quotes(data)\n return data\n\n\n@apply_to_text_file\ndef typogrify_sans_widont(data):\n \"\"\"Prettify text with typogrify, skipping the widont filter.\"\"\"\n # typogrify with widont disabled because it caused broken headline\n # wrapping, see issue #1465\n if typo is None:\n req_missing(['typogrify'], 'use the typogrify_sans_widont filter')\n\n data = typo.amp(data)\n data = typo.smartypants(data)\n # Disabled because of typogrify bug where it breaks <title>\n # data = typo.caps(data)\n data = typo.initial_quotes(data)\n return data\n\n\n@apply_to_text_file\ndef php_template_injection(data):\n \"\"\"Insert PHP code into Nikola templates.\"\"\"\n import re\n template = re.search('<\\!-- __NIKOLA_PHP_TEMPLATE_INJECTION source\\:(.*) checksum\\:(.*)__ -->', data)\n if template:\n source = template.group(1)\n with io.open(source, \"r\", encoding=\"utf-8\") as in_file:\n phpdata = in_file.read()\n _META_SEPARATOR = '(' + os.linesep * 2 + '|' + ('\\n' * 2) + '|' + (\"\\r\\n\" * 2) + ')'\n phpdata = re.split(_META_SEPARATOR, phpdata, maxsplit=1)[-1]\n phpdata = re.sub(template.group(0), phpdata, data)\n return phpdata\n else:\n return 
data\n", "path": "nikola/filters.py"}]}
num_tokens_prompt: 3606
num_tokens_diff: 369
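
Editor's note on the row above: the endpoint and payload shape in the sketch below are taken directly from the golden diff (`http://cssminifier.com/raw` with an `input` form field); the helper name and sample stylesheet are illustrative. In a real Nikola site such a filter would normally be wired up through the FILTERS mapping in conf.py, which is not part of this row.

```python
# Standalone sketch of the call the new cssminify filter makes. Requires
# network access and assumes the public cssminifier.com service is reachable.
import requests

def minify_css(css_text):
    response = requests.post("http://cssminifier.com/raw", data={"input": css_text})
    response.raise_for_status()  # the filter in the diff instead logs and returns the input
    return response.text

if __name__ == "__main__":
    print(minify_css("body {  color: #ffffff ;  }"))
```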
problem_id: gh_patches_debug_24404
source: rasdani/github-patches
task_type: git_diff
in_source_id: mdn__kuma-6974
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> T - Add 301 redirect to Subscriptions Management page `/payments/recurring/management` is moving to `/payments/management`, therefore we need to add 301 redirect in kuma/payments/urls.py See https://github.com/mdn/kuma/issues/6703#issuecomment-614246571 for context </issue> <code> [start of kuma/payments/urls.py] 1 from django.urls import path 2 3 from . import views 4 5 lang_urlpatterns = [ 6 path("terms/", views.payment_terms, name="payment_terms"), 7 path("thank-you/", views.thank_you, name="thank_you"), 8 path("management/", views.payment_management, name="payment_management",), 9 path("", views.index, name="payments_index"), 10 ] 11 [end of kuma/payments/urls.py] [start of kuma/payments/views.py] 1 import logging 2 3 from django.shortcuts import render 4 from django.views.decorators.cache import never_cache 5 from waffle.decorators import waffle_flag 6 7 from kuma.users.models import User 8 9 10 log = logging.getLogger("kuma.payments.views") 11 12 13 @never_cache 14 def index(request): 15 highest_subscriber_number = User.get_highest_subscriber_number() 16 # TODO: This is never unit tested because our tests never test SSR rendering. 17 # See https://github.com/mdn/kuma/issues/6797 18 context = {"next_subscriber_number": highest_subscriber_number + 1} 19 return render(request, "payments/index.html", context) 20 21 22 @waffle_flag("subscription") 23 @never_cache 24 def thank_you(request): 25 return render(request, "payments/thank-you.html") 26 27 28 @waffle_flag("subscription") 29 @never_cache 30 def payment_terms(request): 31 return render(request, "payments/terms.html") 32 33 34 @waffle_flag("subscription") 35 @never_cache 36 def payment_management(request): 37 return render(request, "payments/management.html") 38 [end of kuma/payments/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kuma/payments/urls.py b/kuma/payments/urls.py --- a/kuma/payments/urls.py +++ b/kuma/payments/urls.py @@ -1,10 +1,17 @@ from django.urls import path +from django.views.generic import RedirectView from . import views lang_urlpatterns = [ path("terms/", views.payment_terms, name="payment_terms"), path("thank-you/", views.thank_you, name="thank_you"), - path("management/", views.payment_management, name="payment_management",), + path( + # This is the old URL we had for a while + "recurring/management/", + RedirectView.as_view(pattern_name="payment_management", permanent=True), + name="recurring_payment_management", + ), + path("management/", views.payment_management, name="payment_management"), path("", views.index, name="payments_index"), ] diff --git a/kuma/payments/views.py b/kuma/payments/views.py --- a/kuma/payments/views.py +++ b/kuma/payments/views.py @@ -13,8 +13,6 @@ @never_cache def index(request): highest_subscriber_number = User.get_highest_subscriber_number() - # TODO: This is never unit tested because our tests never test SSR rendering. - # See https://github.com/mdn/kuma/issues/6797 context = {"next_subscriber_number": highest_subscriber_number + 1} return render(request, "payments/index.html", context)
{"golden_diff": "diff --git a/kuma/payments/urls.py b/kuma/payments/urls.py\n--- a/kuma/payments/urls.py\n+++ b/kuma/payments/urls.py\n@@ -1,10 +1,17 @@\n from django.urls import path\n+from django.views.generic import RedirectView\n \n from . import views\n \n lang_urlpatterns = [\n path(\"terms/\", views.payment_terms, name=\"payment_terms\"),\n path(\"thank-you/\", views.thank_you, name=\"thank_you\"),\n- path(\"management/\", views.payment_management, name=\"payment_management\",),\n+ path(\n+ # This is the old URL we had for a while\n+ \"recurring/management/\",\n+ RedirectView.as_view(pattern_name=\"payment_management\", permanent=True),\n+ name=\"recurring_payment_management\",\n+ ),\n+ path(\"management/\", views.payment_management, name=\"payment_management\"),\n path(\"\", views.index, name=\"payments_index\"),\n ]\ndiff --git a/kuma/payments/views.py b/kuma/payments/views.py\n--- a/kuma/payments/views.py\n+++ b/kuma/payments/views.py\n@@ -13,8 +13,6 @@\n @never_cache\n def index(request):\n highest_subscriber_number = User.get_highest_subscriber_number()\n- # TODO: This is never unit tested because our tests never test SSR rendering.\n- # See https://github.com/mdn/kuma/issues/6797\n context = {\"next_subscriber_number\": highest_subscriber_number + 1}\n return render(request, \"payments/index.html\", context)\n", "issue": "T - Add 301 redirect to Subscriptions Management page \n`/payments/recurring/management` is moving to `/payments/management`, therefore we need to add 301 redirect in kuma/payments/urls.py\r\n\r\nSee https://github.com/mdn/kuma/issues/6703#issuecomment-614246571 for context \n", "before_files": [{"content": "from django.urls import path\n\nfrom . import views\n\nlang_urlpatterns = [\n path(\"terms/\", views.payment_terms, name=\"payment_terms\"),\n path(\"thank-you/\", views.thank_you, name=\"thank_you\"),\n path(\"management/\", views.payment_management, name=\"payment_management\",),\n path(\"\", views.index, name=\"payments_index\"),\n]\n", "path": "kuma/payments/urls.py"}, {"content": "import logging\n\nfrom django.shortcuts import render\nfrom django.views.decorators.cache import never_cache\nfrom waffle.decorators import waffle_flag\n\nfrom kuma.users.models import User\n\n\nlog = logging.getLogger(\"kuma.payments.views\")\n\n\n@never_cache\ndef index(request):\n highest_subscriber_number = User.get_highest_subscriber_number()\n # TODO: This is never unit tested because our tests never test SSR rendering.\n # See https://github.com/mdn/kuma/issues/6797\n context = {\"next_subscriber_number\": highest_subscriber_number + 1}\n return render(request, \"payments/index.html\", context)\n\n\n@waffle_flag(\"subscription\")\n@never_cache\ndef thank_you(request):\n return render(request, \"payments/thank-you.html\")\n\n\n@waffle_flag(\"subscription\")\n@never_cache\ndef payment_terms(request):\n return render(request, \"payments/terms.html\")\n\n\n@waffle_flag(\"subscription\")\n@never_cache\ndef payment_management(request):\n return render(request, \"payments/management.html\")\n", "path": "kuma/payments/views.py"}]}
num_tokens_prompt: 1018
num_tokens_diff: 339
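
Editor's note on the row above: the kuma diff implements the 301 with Django's `RedirectView`, which answers with a permanent redirect and resolves the target route by URL name. The minimal urls.py below shows that same pattern outside of kuma; the stand-in view and paths are illustrative, and kuma's real routes additionally sit behind locale-prefixed `lang_urlpatterns`.

```python
# Generic illustration of the 301 pattern from the golden diff; not kuma's
# actual urls.py. RedirectView(permanent=True) emits HTTP 301 and reverses
# the named "payment_management" route at request time.
from django.http import HttpResponse
from django.urls import path
from django.views.generic import RedirectView


def payment_management(request):  # stand-in for the real management view
    return HttpResponse("management page")


urlpatterns = [
    path("payments/management/", payment_management, name="payment_management"),
    path(
        "payments/recurring/management/",
        RedirectView.as_view(pattern_name="payment_management", permanent=True),
    ),
]
```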
problem_id: gh_patches_debug_31279
source: rasdani/github-patches
task_type: git_diff
in_source_id: easybuilders__easybuild-easyblocks-1897
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bundle Easyblock does not put patches into root config At https://github.com/easybuilders/easybuild-easyblocks/blob/b99cc5a4dfb98cafbbd4a8827ea9bfb444724e27/easybuild/easyblocks/generic/bundle.py#L156 the patches checksums are added to the root, but the patches are not which makes e.g. the unit tests fail, see https://github.com/easybuilders/easybuild-easyconfigs/pull/9546 Should the patches be added? From the logic in the check_checksums it seems: yes </issue> <code> [start of easybuild/easyblocks/generic/bundle.py] 1 ## 2 # Copyright 2009-2019 Ghent University 3 # 4 # This file is part of EasyBuild, 5 # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en), 6 # with support of Ghent University (http://ugent.be/hpc), 7 # the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be), 8 # Flemish Research Foundation (FWO) (http://www.fwo.be/en) 9 # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en). 10 # 11 # https://github.com/easybuilders/easybuild 12 # 13 # EasyBuild is free software: you can redistribute it and/or modify 14 # it under the terms of the GNU General Public License as published by 15 # the Free Software Foundation v2. 16 # 17 # EasyBuild is distributed in the hope that it will be useful, 18 # but WITHOUT ANY WARRANTY; without even the implied warranty of 19 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 # GNU General Public License for more details. 21 # 22 # You should have received a copy of the GNU General Public License 23 # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>. 24 ## 25 """ 26 EasyBuild support for installing a bundle of modules, implemented as a generic easyblock 27 28 @author: Stijn De Weirdt (Ghent University) 29 @author: Dries Verdegem (Ghent University) 30 @author: Kenneth Hoste (Ghent University) 31 @author: Pieter De Baets (Ghent University) 32 @author: Jens Timmerman (Ghent University) 33 """ 34 import copy 35 import os 36 37 import easybuild.tools.environment as env 38 from easybuild.framework.easyblock import EasyBlock 39 from easybuild.framework.easyconfig import CUSTOM 40 from easybuild.framework.easyconfig.easyconfig import get_easyblock_class 41 from easybuild.tools.build_log import EasyBuildError, print_msg 42 from easybuild.tools.modules import get_software_root, get_software_version 43 from easybuild.tools.py2vs3 import string_type 44 45 46 class Bundle(EasyBlock): 47 """ 48 Bundle of modules: only generate module files, nothing to build/install 49 """ 50 51 @staticmethod 52 def extra_options(extra_vars=None): 53 """Easyconfig parameters specific to bundles.""" 54 if extra_vars is None: 55 extra_vars = {} 56 extra_vars.update({ 57 'altroot': [None, "Software name of dependency to use to define $EBROOT for this bundle", CUSTOM], 58 'altversion': [None, "Software name of dependency to use to define $EBVERSION for this bundle", CUSTOM], 59 'default_component_specs': [{}, "Default specs to use for every component", CUSTOM], 60 'components': [(), "List of components to install: tuples w/ name, version and easyblock to use", CUSTOM], 61 'default_easyblock': [None, "Default easyblock to use for components", CUSTOM], 62 }) 63 return EasyBlock.extra_options(extra_vars) 64 65 def __init__(self, *args, **kwargs): 66 """Initialize easyblock.""" 67 super(Bundle, self).__init__(*args, **kwargs) 68 self.altroot = None 69 self.altversion = 
None 70 71 # list of EasyConfig instances for components 72 self.comp_cfgs = [] 73 74 # list of sources for bundle itself *must* be empty 75 if self.cfg['sources']: 76 raise EasyBuildError("List of sources for bundle itself must be empty, found %s", self.cfg['sources']) 77 78 # disable templating to avoid premature resolving of template values 79 self.cfg.enable_templating = False 80 81 # list of checksums for patches (must be included after checksums for sources) 82 checksums_patches = [] 83 84 for comp in self.cfg['components']: 85 comp_name, comp_version, comp_specs = comp[0], comp[1], {} 86 if len(comp) == 3: 87 comp_specs = comp[2] 88 89 comp_cfg = self.cfg.copy() 90 91 easyblock = comp_specs.get('easyblock') or self.cfg['default_easyblock'] 92 if easyblock is None: 93 raise EasyBuildError("No easyblock specified for component %s v%s", comp_cfg['name'], 94 comp_cfg['version']) 95 elif easyblock == 'Bundle': 96 raise EasyBuildError("The Bundle easyblock can not be used to install components in a bundle") 97 98 comp_cfg.easyblock = get_easyblock_class(easyblock, name=comp_cfg['name']) 99 100 # make sure that extra easyconfig parameters are known, so they can be set 101 extra_opts = comp_cfg.easyblock.extra_options() 102 comp_cfg.extend_params(copy.deepcopy(extra_opts)) 103 104 comp_cfg['name'] = comp_name 105 comp_cfg['version'] = comp_version 106 comp_cfg.generate_template_values() 107 108 # do not inherit easyblock to use from parent (since that would result in an infinite loop in install_step) 109 comp_cfg['easyblock'] = None 110 111 # reset list of sources/source_urls/checksums 112 comp_cfg['sources'] = comp_cfg['source_urls'] = comp_cfg['checksums'] = [] 113 114 for key in self.cfg['default_component_specs']: 115 comp_cfg[key] = self.cfg['default_component_specs'][key] 116 117 for key in comp_specs: 118 comp_cfg[key] = comp_specs[key] 119 120 # enable resolving of templates for component-specific EasyConfig instance 121 comp_cfg.enable_templating = True 122 123 # 'sources' is strictly required 124 if comp_cfg['sources']: 125 # If per-component source URLs are provided, attach them directly to the relevant sources 126 if comp_cfg['source_urls']: 127 for source in comp_cfg['sources']: 128 if isinstance(source, string_type): 129 self.cfg.update('sources', [{'filename': source, 'source_urls': comp_cfg['source_urls']}]) 130 elif isinstance(source, dict): 131 # Update source_urls in the 'source' dict to use the one for the components 132 # (if it doesn't already exist) 133 if 'source_urls' not in source: 134 source['source_urls'] = comp_cfg['source_urls'] 135 self.cfg.update('sources', [source]) 136 else: 137 raise EasyBuildError("Source %s for component %s is neither a string nor a dict, cannot " 138 "process it.", source, comp_cfg['name']) 139 else: 140 # add component sources to list of sources 141 self.cfg.update('sources', comp_cfg['sources']) 142 else: 143 raise EasyBuildError("No sources specification for component %s v%s", comp_name, comp_version) 144 145 if comp_cfg['checksums']: 146 src_cnt = len(comp_cfg['sources']) 147 148 # add per-component checksums for sources to list of checksums 149 self.cfg.update('checksums', comp_cfg['checksums'][:src_cnt]) 150 151 # add per-component checksums for patches to list of checksums for patches 152 checksums_patches.extend(comp_cfg['checksums'][src_cnt:]) 153 154 self.comp_cfgs.append(comp_cfg) 155 156 self.cfg.update('checksums', checksums_patches) 157 158 self.cfg.enable_templating = True 159 160 def check_checksums(self): 161 """ 162 
Check whether a SHA256 checksum is available for all sources & patches (incl. extensions). 163 164 :return: list of strings describing checksum issues (missing checksums, wrong checksum type, etc.) 165 """ 166 checksum_issues = super(Bundle, self).check_checksums() 167 168 for comp in self.comp_cfgs: 169 checksum_issues.extend(self.check_checksums_for(comp, sub="of component %s" % comp['name'])) 170 171 return checksum_issues 172 173 def configure_step(self): 174 """Collect altroot/altversion info.""" 175 # pick up altroot/altversion, if they are defined 176 self.altroot = None 177 if self.cfg['altroot']: 178 self.altroot = get_software_root(self.cfg['altroot']) 179 self.altversion = None 180 if self.cfg['altversion']: 181 self.altversion = get_software_version(self.cfg['altversion']) 182 183 def build_step(self): 184 """Do nothing.""" 185 pass 186 187 def install_step(self): 188 """Install components, if specified.""" 189 comp_cnt = len(self.cfg['components']) 190 for idx, cfg in enumerate(self.comp_cfgs): 191 192 print_msg("installing bundle component %s v%s (%d/%d)..." % (cfg['name'], cfg['version'], idx+1, comp_cnt)) 193 self.log.info("Installing component %s v%s using easyblock %s", cfg['name'], cfg['version'], cfg.easyblock) 194 195 comp = cfg.easyblock(cfg) 196 197 # correct build/install dirs 198 comp.builddir = self.builddir 199 comp.install_subdir, comp.installdir = self.install_subdir, self.installdir 200 201 # make sure we can build in parallel 202 comp.set_parallel() 203 204 # figure out correct start directory 205 comp.guess_start_dir() 206 207 # need to run fetch_patches to ensure per-component patches are applied 208 comp.fetch_patches() 209 210 comp.src = [] 211 212 # find match entries in self.src for this component 213 for source in comp.cfg['sources']: 214 if isinstance(source, string_type): 215 comp_src_fn = source 216 elif isinstance(source, dict): 217 if 'filename' in source: 218 comp_src_fn = source['filename'] 219 else: 220 raise EasyBuildError("Encountered source file specified as dict without 'filename': %s", source) 221 else: 222 raise EasyBuildError("Specification of unknown type for source file: %s", source) 223 224 found = False 225 for src in self.src: 226 if src['name'] == comp_src_fn: 227 self.log.info("Found spec for source %s for component %s: %s", comp_src_fn, comp.name, src) 228 comp.src.append(src) 229 found = True 230 break 231 if not found: 232 raise EasyBuildError("Failed to find spec for source %s for component %s", comp_src_fn, comp.name) 233 234 # location of first unpacked source is used to determine where to apply patch(es) 235 comp.src[-1]['finalpath'] = comp.cfg['start_dir'] 236 237 # run relevant steps 238 for step_name in ['patch', 'configure', 'build', 'install']: 239 if step_name in cfg['skipsteps']: 240 comp.log.info("Skipping '%s' step for component %s v%s", step_name, cfg['name'], cfg['version']) 241 else: 242 comp.run_step(step_name, [lambda x: getattr(x, '%s_step' % step_name)]) 243 244 # update environment to ensure stuff provided by former components can be picked up by latter components 245 # once the installation is finalised, this is handled by the generated module 246 reqs = comp.make_module_req_guess() 247 for envvar in reqs: 248 curr_val = os.getenv(envvar, '') 249 curr_paths = curr_val.split(os.pathsep) 250 for subdir in reqs[envvar]: 251 path = os.path.join(self.installdir, subdir) 252 if path not in curr_paths: 253 if curr_val: 254 new_val = '%s:%s' % (path, curr_val) 255 else: 256 new_val = path 257 env.setvar(envvar, 
new_val) 258 259 def make_module_extra(self, *args, **kwargs): 260 """Set extra stuff in module file, e.g. $EBROOT*, $EBVERSION*, etc.""" 261 if 'altroot' not in kwargs: 262 kwargs['altroot'] = self.altroot 263 if 'altversion' not in kwargs: 264 kwargs['altversion'] = self.altversion 265 return super(Bundle, self).make_module_extra(*args, **kwargs) 266 267 def sanity_check_step(self, *args, **kwargs): 268 """ 269 Nothing is being installed, so just being able to load the (fake) module is sufficient 270 """ 271 if self.cfg['exts_list'] or self.cfg['sanity_check_paths'] or self.cfg['sanity_check_commands']: 272 super(Bundle, self).sanity_check_step(*args, **kwargs) 273 else: 274 self.log.info("Testing loading of module '%s' by means of sanity check" % self.full_mod_name) 275 fake_mod_data = self.load_fake_module(purge=True) 276 self.log.debug("Cleaning up after testing loading of module") 277 self.clean_up_fake_module(fake_mod_data) 278 [end of easybuild/easyblocks/generic/bundle.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/easybuild/easyblocks/generic/bundle.py b/easybuild/easyblocks/generic/bundle.py --- a/easybuild/easyblocks/generic/bundle.py +++ b/easybuild/easyblocks/generic/bundle.py @@ -74,6 +74,8 @@ # list of sources for bundle itself *must* be empty if self.cfg['sources']: raise EasyBuildError("List of sources for bundle itself must be empty, found %s", self.cfg['sources']) + if self.cfg['patches']: + raise EasyBuildError("List of patches for bundle itself must be empty, found %s", self.cfg['patches']) # disable templating to avoid premature resolving of template values self.cfg.enable_templating = False @@ -109,7 +111,7 @@ comp_cfg['easyblock'] = None # reset list of sources/source_urls/checksums - comp_cfg['sources'] = comp_cfg['source_urls'] = comp_cfg['checksums'] = [] + comp_cfg['sources'] = comp_cfg['source_urls'] = comp_cfg['checksums'] = comp_cfg['patches'] = [] for key in self.cfg['default_component_specs']: comp_cfg[key] = self.cfg['default_component_specs'][key] @@ -151,6 +153,9 @@ # add per-component checksums for patches to list of checksums for patches checksums_patches.extend(comp_cfg['checksums'][src_cnt:]) + if comp_cfg['patches']: + self.cfg.update('patches', comp_cfg['patches']) + self.comp_cfgs.append(comp_cfg) self.cfg.update('checksums', checksums_patches)
{"golden_diff": "diff --git a/easybuild/easyblocks/generic/bundle.py b/easybuild/easyblocks/generic/bundle.py\n--- a/easybuild/easyblocks/generic/bundle.py\n+++ b/easybuild/easyblocks/generic/bundle.py\n@@ -74,6 +74,8 @@\n # list of sources for bundle itself *must* be empty\n if self.cfg['sources']:\n raise EasyBuildError(\"List of sources for bundle itself must be empty, found %s\", self.cfg['sources'])\n+ if self.cfg['patches']:\n+ raise EasyBuildError(\"List of patches for bundle itself must be empty, found %s\", self.cfg['patches'])\n \n # disable templating to avoid premature resolving of template values\n self.cfg.enable_templating = False\n@@ -109,7 +111,7 @@\n comp_cfg['easyblock'] = None\n \n # reset list of sources/source_urls/checksums\n- comp_cfg['sources'] = comp_cfg['source_urls'] = comp_cfg['checksums'] = []\n+ comp_cfg['sources'] = comp_cfg['source_urls'] = comp_cfg['checksums'] = comp_cfg['patches'] = []\n \n for key in self.cfg['default_component_specs']:\n comp_cfg[key] = self.cfg['default_component_specs'][key]\n@@ -151,6 +153,9 @@\n # add per-component checksums for patches to list of checksums for patches\n checksums_patches.extend(comp_cfg['checksums'][src_cnt:])\n \n+ if comp_cfg['patches']:\n+ self.cfg.update('patches', comp_cfg['patches'])\n+\n self.comp_cfgs.append(comp_cfg)\n \n self.cfg.update('checksums', checksums_patches)\n", "issue": "Bundle Easyblock does not put patches into root config\nAt https://github.com/easybuilders/easybuild-easyblocks/blob/b99cc5a4dfb98cafbbd4a8827ea9bfb444724e27/easybuild/easyblocks/generic/bundle.py#L156 the patches checksums are added to the root, but the patches are not which makes e.g. the unit tests fail, see https://github.com/easybuilders/easybuild-easyconfigs/pull/9546\r\n\r\nShould the patches be added? From the logic in the check_checksums it seems: yes\n", "before_files": [{"content": "##\n# Copyright 2009-2019 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),\n# Flemish Research Foundation (FWO) (http://www.fwo.be/en)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. 
If not, see <http://www.gnu.org/licenses/>.\n##\n\"\"\"\nEasyBuild support for installing a bundle of modules, implemented as a generic easyblock\n\n@author: Stijn De Weirdt (Ghent University)\n@author: Dries Verdegem (Ghent University)\n@author: Kenneth Hoste (Ghent University)\n@author: Pieter De Baets (Ghent University)\n@author: Jens Timmerman (Ghent University)\n\"\"\"\nimport copy\nimport os\n\nimport easybuild.tools.environment as env\nfrom easybuild.framework.easyblock import EasyBlock\nfrom easybuild.framework.easyconfig import CUSTOM\nfrom easybuild.framework.easyconfig.easyconfig import get_easyblock_class\nfrom easybuild.tools.build_log import EasyBuildError, print_msg\nfrom easybuild.tools.modules import get_software_root, get_software_version\nfrom easybuild.tools.py2vs3 import string_type\n\n\nclass Bundle(EasyBlock):\n \"\"\"\n Bundle of modules: only generate module files, nothing to build/install\n \"\"\"\n\n @staticmethod\n def extra_options(extra_vars=None):\n \"\"\"Easyconfig parameters specific to bundles.\"\"\"\n if extra_vars is None:\n extra_vars = {}\n extra_vars.update({\n 'altroot': [None, \"Software name of dependency to use to define $EBROOT for this bundle\", CUSTOM],\n 'altversion': [None, \"Software name of dependency to use to define $EBVERSION for this bundle\", CUSTOM],\n 'default_component_specs': [{}, \"Default specs to use for every component\", CUSTOM],\n 'components': [(), \"List of components to install: tuples w/ name, version and easyblock to use\", CUSTOM],\n 'default_easyblock': [None, \"Default easyblock to use for components\", CUSTOM],\n })\n return EasyBlock.extra_options(extra_vars)\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize easyblock.\"\"\"\n super(Bundle, self).__init__(*args, **kwargs)\n self.altroot = None\n self.altversion = None\n\n # list of EasyConfig instances for components\n self.comp_cfgs = []\n\n # list of sources for bundle itself *must* be empty\n if self.cfg['sources']:\n raise EasyBuildError(\"List of sources for bundle itself must be empty, found %s\", self.cfg['sources'])\n\n # disable templating to avoid premature resolving of template values\n self.cfg.enable_templating = False\n\n # list of checksums for patches (must be included after checksums for sources)\n checksums_patches = []\n\n for comp in self.cfg['components']:\n comp_name, comp_version, comp_specs = comp[0], comp[1], {}\n if len(comp) == 3:\n comp_specs = comp[2]\n\n comp_cfg = self.cfg.copy()\n\n easyblock = comp_specs.get('easyblock') or self.cfg['default_easyblock']\n if easyblock is None:\n raise EasyBuildError(\"No easyblock specified for component %s v%s\", comp_cfg['name'],\n comp_cfg['version'])\n elif easyblock == 'Bundle':\n raise EasyBuildError(\"The Bundle easyblock can not be used to install components in a bundle\")\n\n comp_cfg.easyblock = get_easyblock_class(easyblock, name=comp_cfg['name'])\n\n # make sure that extra easyconfig parameters are known, so they can be set\n extra_opts = comp_cfg.easyblock.extra_options()\n comp_cfg.extend_params(copy.deepcopy(extra_opts))\n\n comp_cfg['name'] = comp_name\n comp_cfg['version'] = comp_version\n comp_cfg.generate_template_values()\n\n # do not inherit easyblock to use from parent (since that would result in an infinite loop in install_step)\n comp_cfg['easyblock'] = None\n\n # reset list of sources/source_urls/checksums\n comp_cfg['sources'] = comp_cfg['source_urls'] = comp_cfg['checksums'] = []\n\n for key in self.cfg['default_component_specs']:\n comp_cfg[key] = 
self.cfg['default_component_specs'][key]\n\n for key in comp_specs:\n comp_cfg[key] = comp_specs[key]\n\n # enable resolving of templates for component-specific EasyConfig instance\n comp_cfg.enable_templating = True\n\n # 'sources' is strictly required\n if comp_cfg['sources']:\n # If per-component source URLs are provided, attach them directly to the relevant sources\n if comp_cfg['source_urls']:\n for source in comp_cfg['sources']:\n if isinstance(source, string_type):\n self.cfg.update('sources', [{'filename': source, 'source_urls': comp_cfg['source_urls']}])\n elif isinstance(source, dict):\n # Update source_urls in the 'source' dict to use the one for the components\n # (if it doesn't already exist)\n if 'source_urls' not in source:\n source['source_urls'] = comp_cfg['source_urls']\n self.cfg.update('sources', [source])\n else:\n raise EasyBuildError(\"Source %s for component %s is neither a string nor a dict, cannot \"\n \"process it.\", source, comp_cfg['name'])\n else:\n # add component sources to list of sources\n self.cfg.update('sources', comp_cfg['sources'])\n else:\n raise EasyBuildError(\"No sources specification for component %s v%s\", comp_name, comp_version)\n\n if comp_cfg['checksums']:\n src_cnt = len(comp_cfg['sources'])\n\n # add per-component checksums for sources to list of checksums\n self.cfg.update('checksums', comp_cfg['checksums'][:src_cnt])\n\n # add per-component checksums for patches to list of checksums for patches\n checksums_patches.extend(comp_cfg['checksums'][src_cnt:])\n\n self.comp_cfgs.append(comp_cfg)\n\n self.cfg.update('checksums', checksums_patches)\n\n self.cfg.enable_templating = True\n\n def check_checksums(self):\n \"\"\"\n Check whether a SHA256 checksum is available for all sources & patches (incl. extensions).\n\n :return: list of strings describing checksum issues (missing checksums, wrong checksum type, etc.)\n \"\"\"\n checksum_issues = super(Bundle, self).check_checksums()\n\n for comp in self.comp_cfgs:\n checksum_issues.extend(self.check_checksums_for(comp, sub=\"of component %s\" % comp['name']))\n\n return checksum_issues\n\n def configure_step(self):\n \"\"\"Collect altroot/altversion info.\"\"\"\n # pick up altroot/altversion, if they are defined\n self.altroot = None\n if self.cfg['altroot']:\n self.altroot = get_software_root(self.cfg['altroot'])\n self.altversion = None\n if self.cfg['altversion']:\n self.altversion = get_software_version(self.cfg['altversion'])\n\n def build_step(self):\n \"\"\"Do nothing.\"\"\"\n pass\n\n def install_step(self):\n \"\"\"Install components, if specified.\"\"\"\n comp_cnt = len(self.cfg['components'])\n for idx, cfg in enumerate(self.comp_cfgs):\n\n print_msg(\"installing bundle component %s v%s (%d/%d)...\" % (cfg['name'], cfg['version'], idx+1, comp_cnt))\n self.log.info(\"Installing component %s v%s using easyblock %s\", cfg['name'], cfg['version'], cfg.easyblock)\n\n comp = cfg.easyblock(cfg)\n\n # correct build/install dirs\n comp.builddir = self.builddir\n comp.install_subdir, comp.installdir = self.install_subdir, self.installdir\n\n # make sure we can build in parallel\n comp.set_parallel()\n\n # figure out correct start directory\n comp.guess_start_dir()\n\n # need to run fetch_patches to ensure per-component patches are applied\n comp.fetch_patches()\n\n comp.src = []\n\n # find match entries in self.src for this component\n for source in comp.cfg['sources']:\n if isinstance(source, string_type):\n comp_src_fn = source\n elif isinstance(source, dict):\n if 'filename' in source:\n 
comp_src_fn = source['filename']\n else:\n raise EasyBuildError(\"Encountered source file specified as dict without 'filename': %s\", source)\n else:\n raise EasyBuildError(\"Specification of unknown type for source file: %s\", source)\n\n found = False\n for src in self.src:\n if src['name'] == comp_src_fn:\n self.log.info(\"Found spec for source %s for component %s: %s\", comp_src_fn, comp.name, src)\n comp.src.append(src)\n found = True\n break\n if not found:\n raise EasyBuildError(\"Failed to find spec for source %s for component %s\", comp_src_fn, comp.name)\n\n # location of first unpacked source is used to determine where to apply patch(es)\n comp.src[-1]['finalpath'] = comp.cfg['start_dir']\n\n # run relevant steps\n for step_name in ['patch', 'configure', 'build', 'install']:\n if step_name in cfg['skipsteps']:\n comp.log.info(\"Skipping '%s' step for component %s v%s\", step_name, cfg['name'], cfg['version'])\n else:\n comp.run_step(step_name, [lambda x: getattr(x, '%s_step' % step_name)])\n\n # update environment to ensure stuff provided by former components can be picked up by latter components\n # once the installation is finalised, this is handled by the generated module\n reqs = comp.make_module_req_guess()\n for envvar in reqs:\n curr_val = os.getenv(envvar, '')\n curr_paths = curr_val.split(os.pathsep)\n for subdir in reqs[envvar]:\n path = os.path.join(self.installdir, subdir)\n if path not in curr_paths:\n if curr_val:\n new_val = '%s:%s' % (path, curr_val)\n else:\n new_val = path\n env.setvar(envvar, new_val)\n\n def make_module_extra(self, *args, **kwargs):\n \"\"\"Set extra stuff in module file, e.g. $EBROOT*, $EBVERSION*, etc.\"\"\"\n if 'altroot' not in kwargs:\n kwargs['altroot'] = self.altroot\n if 'altversion' not in kwargs:\n kwargs['altversion'] = self.altversion\n return super(Bundle, self).make_module_extra(*args, **kwargs)\n\n def sanity_check_step(self, *args, **kwargs):\n \"\"\"\n Nothing is being installed, so just being able to load the (fake) module is sufficient\n \"\"\"\n if self.cfg['exts_list'] or self.cfg['sanity_check_paths'] or self.cfg['sanity_check_commands']:\n super(Bundle, self).sanity_check_step(*args, **kwargs)\n else:\n self.log.info(\"Testing loading of module '%s' by means of sanity check\" % self.full_mod_name)\n fake_mod_data = self.load_fake_module(purge=True)\n self.log.debug(\"Cleaning up after testing loading of module\")\n self.clean_up_fake_module(fake_mod_data)\n", "path": "easybuild/easyblocks/generic/bundle.py"}]}
num_tokens_prompt: 4,082
num_tokens_diff: 377

problem_id: gh_patches_debug_455
source: rasdani/github-patches
task_type: git_diff
in_source_id: openfun__marsha-2411
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> 🐛(backend) licence not saved during creation video resource ## Bug Report **Problematic Behavior** When we create a video and set a licence, the licence is not saved. [error-licence-2023-09-12 143121.webm](https://github.com/openfun/marsha/assets/25994652/60514ad8-07cd-4390-97c9-21eb3525ecc6) </issue> <code> [start of src/backend/marsha/core/forms.py] 1 """Marsha forms module.""" 2 from django.core.exceptions import ValidationError 3 from django.forms import CharField, ModelForm 4 5 from . import models 6 from .defaults import INITIALIZED 7 8 9 class DocumentForm(ModelForm): 10 """Form to create or update documents.""" 11 12 class Meta: 13 """Meta for DocumentForm.""" 14 15 model = models.Document 16 fields = ["description", "is_public", "lti_id", "playlist", "title"] 17 18 19 class VideoForm(ModelForm): 20 """Form to create or update videos.""" 21 22 upload_state = CharField( 23 max_length=20, 24 required=False, 25 ) 26 27 class Meta: 28 """Meta for VideoForm.""" 29 30 model = models.Video 31 fields = [ 32 "description", 33 "is_public", 34 "lti_id", 35 "playlist", 36 "title", 37 "upload_state", 38 ] 39 40 def clean_upload_state(self): 41 """Check upload_state valid value.""" 42 upload_state = self.cleaned_data["upload_state"] 43 44 if upload_state and upload_state != INITIALIZED: 45 raise ValidationError(f"{INITIALIZED} is the only accepted value") 46 47 return upload_state 48 [end of src/backend/marsha/core/forms.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/backend/marsha/core/forms.py b/src/backend/marsha/core/forms.py --- a/src/backend/marsha/core/forms.py +++ b/src/backend/marsha/core/forms.py @@ -35,6 +35,7 @@ "playlist", "title", "upload_state", + "license", ] def clean_upload_state(self):
{"golden_diff": "diff --git a/src/backend/marsha/core/forms.py b/src/backend/marsha/core/forms.py\n--- a/src/backend/marsha/core/forms.py\n+++ b/src/backend/marsha/core/forms.py\n@@ -35,6 +35,7 @@\n \"playlist\",\n \"title\",\n \"upload_state\",\n+ \"license\",\n ]\n \n def clean_upload_state(self):\n", "issue": "\ud83d\udc1b(backend) licence not saved during creation video resource\n## Bug Report\r\n\r\n**Problematic Behavior**\r\nWhen we create a video and set a licence, the licence is not saved.\r\n\r\n[error-licence-2023-09-12 143121.webm](https://github.com/openfun/marsha/assets/25994652/60514ad8-07cd-4390-97c9-21eb3525ecc6)\r\n\r\n\n", "before_files": [{"content": "\"\"\"Marsha forms module.\"\"\"\nfrom django.core.exceptions import ValidationError\nfrom django.forms import CharField, ModelForm\n\nfrom . import models\nfrom .defaults import INITIALIZED\n\n\nclass DocumentForm(ModelForm):\n \"\"\"Form to create or update documents.\"\"\"\n\n class Meta:\n \"\"\"Meta for DocumentForm.\"\"\"\n\n model = models.Document\n fields = [\"description\", \"is_public\", \"lti_id\", \"playlist\", \"title\"]\n\n\nclass VideoForm(ModelForm):\n \"\"\"Form to create or update videos.\"\"\"\n\n upload_state = CharField(\n max_length=20,\n required=False,\n )\n\n class Meta:\n \"\"\"Meta for VideoForm.\"\"\"\n\n model = models.Video\n fields = [\n \"description\",\n \"is_public\",\n \"lti_id\",\n \"playlist\",\n \"title\",\n \"upload_state\",\n ]\n\n def clean_upload_state(self):\n \"\"\"Check upload_state valid value.\"\"\"\n upload_state = self.cleaned_data[\"upload_state\"]\n\n if upload_state and upload_state != INITIALIZED:\n raise ValidationError(f\"{INITIALIZED} is the only accepted value\")\n\n return upload_state\n", "path": "src/backend/marsha/core/forms.py"}]}
num_tokens_prompt: 987
num_tokens_diff: 84

problem_id: gh_patches_debug_22905
source: rasdani/github-patches
task_type: git_diff
in_source_id: streamlink__streamlink-1511
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Kanal 7 does not show ## **Checklist** - [x] This is a bug report. - [ ] This is a feature request. - [ ] This is a plugin (improvement) request. - [ ] I have read the contribution guidelines. ## **Description** i cant see anything at kanal 7.com . i have test it with this links but i became black screen ## **Reproduction steps / Explicit stream URLs to test** #SERVICE 5002:0:1:1DE6:C544:7E:460000:0:0:0:http%3a//127.0.0.1%3a8088/https%3a//new.10gbps.tv%3a443/live/kanal7LiveDesktop/index.m3u8 #DESCRIPTION KANAL 7 #SERVICE 5002:0:1:1DE6:C544:7E:460000:0:0:0:http%3a//127.0.0.1%3a8088/http%3a//www.kanal7.com/canli-izle #DESCRIPTION KANAL 7 #SERVICE 5002:0:1:1DE6:C544:7E:460000:0:0:0:http%3a//127.0.0.1%3a8088/http%3a//www.izle7.com/canli-yayin-frame?air=1 #DESCRIPTION KANAL 7 </issue> <code> [start of src/streamlink/plugins/kanal7.py] 1 from __future__ import print_function 2 import re 3 4 from streamlink.plugin import Plugin 5 from streamlink.plugin.api import http 6 from streamlink.plugin.api import useragents 7 from streamlink.plugin.api import validate 8 from streamlink.stream import HLSStream 9 10 11 class Kanal7(Plugin): 12 url_re = re.compile(r"https?://(?:www.)?kanal7.com/canli-izle") 13 iframe_re = re.compile(r'iframe .*?src="(http://[^"]*?)"') 14 stream_re = re.compile(r'''tp_file\s+=\s+['"](http[^"]*?)['"]''') 15 16 @classmethod 17 def can_handle_url(cls, url): 18 return cls.url_re.match(url) is not None 19 20 def find_iframe(self, url): 21 res = http.get(url) 22 # find iframe url 23 iframe = self.iframe_re.search(res.text) 24 iframe_url = iframe and iframe.group(1) 25 if iframe_url: 26 self.logger.debug("Found iframe: {}", iframe_url) 27 return iframe_url 28 29 def _get_streams(self): 30 iframe1 = self.find_iframe(self.url) 31 if iframe1: 32 iframe2 = self.find_iframe(iframe1) 33 if iframe2: 34 ires = http.get(iframe2) 35 stream_m = self.stream_re.search(ires.text) 36 stream_url = stream_m and stream_m.group(1) 37 if stream_url: 38 yield "live", HLSStream(self.session, stream_url, headers={"Referer": iframe2}) 39 else: 40 self.logger.error("Could not find second iframe, has the page layout changed?") 41 else: 42 self.logger.error("Could not find iframe, has the page layout changed?") 43 44 45 __plugin__ = Kanal7 46 [end of src/streamlink/plugins/kanal7.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/streamlink/plugins/kanal7.py b/src/streamlink/plugins/kanal7.py --- a/src/streamlink/plugins/kanal7.py +++ b/src/streamlink/plugins/kanal7.py @@ -6,12 +6,13 @@ from streamlink.plugin.api import useragents from streamlink.plugin.api import validate from streamlink.stream import HLSStream +from streamlink.utils import update_scheme class Kanal7(Plugin): url_re = re.compile(r"https?://(?:www.)?kanal7.com/canli-izle") - iframe_re = re.compile(r'iframe .*?src="(http://[^"]*?)"') - stream_re = re.compile(r'''tp_file\s+=\s+['"](http[^"]*?)['"]''') + iframe_re = re.compile(r'iframe .*?src="((?:http:)?//[^"]*?)"') + stream_re = re.compile(r'''video-source\s*=\s*['"](http[^"']*?)['"]''') @classmethod def can_handle_url(cls, url): @@ -23,6 +24,7 @@ iframe = self.iframe_re.search(res.text) iframe_url = iframe and iframe.group(1) if iframe_url: + iframe_url = update_scheme(self.url, iframe_url) self.logger.debug("Found iframe: {}", iframe_url) return iframe_url
{"golden_diff": "diff --git a/src/streamlink/plugins/kanal7.py b/src/streamlink/plugins/kanal7.py\n--- a/src/streamlink/plugins/kanal7.py\n+++ b/src/streamlink/plugins/kanal7.py\n@@ -6,12 +6,13 @@\n from streamlink.plugin.api import useragents\n from streamlink.plugin.api import validate\n from streamlink.stream import HLSStream\n+from streamlink.utils import update_scheme\n \n \n class Kanal7(Plugin):\n url_re = re.compile(r\"https?://(?:www.)?kanal7.com/canli-izle\")\n- iframe_re = re.compile(r'iframe .*?src=\"(http://[^\"]*?)\"')\n- stream_re = re.compile(r'''tp_file\\s+=\\s+['\"](http[^\"]*?)['\"]''')\n+ iframe_re = re.compile(r'iframe .*?src=\"((?:http:)?//[^\"]*?)\"')\n+ stream_re = re.compile(r'''video-source\\s*=\\s*['\"](http[^\"']*?)['\"]''')\n \n @classmethod\n def can_handle_url(cls, url):\n@@ -23,6 +24,7 @@\n iframe = self.iframe_re.search(res.text)\n iframe_url = iframe and iframe.group(1)\n if iframe_url:\n+ iframe_url = update_scheme(self.url, iframe_url)\n self.logger.debug(\"Found iframe: {}\", iframe_url)\n return iframe_url\n", "issue": "Kanal 7 does not show\n## **Checklist**\r\n\r\n- [x] This is a bug report.\r\n- [ ] This is a feature request.\r\n- [ ] This is a plugin (improvement) request.\r\n- [ ] I have read the contribution guidelines.\r\n\r\n## **Description**\r\n\r\n i cant see anything at kanal 7.com . i have test it with this links but i became black screen \r\n\r\n## **Reproduction steps / Explicit stream URLs to test**\r\n\r\n#SERVICE 5002:0:1:1DE6:C544:7E:460000:0:0:0:http%3a//127.0.0.1%3a8088/https%3a//new.10gbps.tv%3a443/live/kanal7LiveDesktop/index.m3u8\r\n#DESCRIPTION KANAL 7 \r\n#SERVICE 5002:0:1:1DE6:C544:7E:460000:0:0:0:http%3a//127.0.0.1%3a8088/http%3a//www.kanal7.com/canli-izle\r\n#DESCRIPTION KANAL 7\r\n#SERVICE 5002:0:1:1DE6:C544:7E:460000:0:0:0:http%3a//127.0.0.1%3a8088/http%3a//www.izle7.com/canli-yayin-frame?air=1\r\n#DESCRIPTION KANAL 7\n", "before_files": [{"content": "from __future__ import print_function\nimport re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http\nfrom streamlink.plugin.api import useragents\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream import HLSStream\n\n\nclass Kanal7(Plugin):\n url_re = re.compile(r\"https?://(?:www.)?kanal7.com/canli-izle\")\n iframe_re = re.compile(r'iframe .*?src=\"(http://[^\"]*?)\"')\n stream_re = re.compile(r'''tp_file\\s+=\\s+['\"](http[^\"]*?)['\"]''')\n\n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n\n def find_iframe(self, url):\n res = http.get(url)\n # find iframe url\n iframe = self.iframe_re.search(res.text)\n iframe_url = iframe and iframe.group(1)\n if iframe_url:\n self.logger.debug(\"Found iframe: {}\", iframe_url)\n return iframe_url\n\n def _get_streams(self):\n iframe1 = self.find_iframe(self.url)\n if iframe1:\n iframe2 = self.find_iframe(iframe1)\n if iframe2:\n ires = http.get(iframe2)\n stream_m = self.stream_re.search(ires.text)\n stream_url = stream_m and stream_m.group(1)\n if stream_url:\n yield \"live\", HLSStream(self.session, stream_url, headers={\"Referer\": iframe2})\n else:\n self.logger.error(\"Could not find second iframe, has the page layout changed?\")\n else:\n self.logger.error(\"Could not find iframe, has the page layout changed?\")\n\n\n__plugin__ = Kanal7\n", "path": "src/streamlink/plugins/kanal7.py"}]}
num_tokens_prompt: 1,367
num_tokens_diff: 312

problem_id: gh_patches_debug_15114
source: rasdani/github-patches
task_type: git_diff
in_source_id: nextcloud__appstore-246
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Email change form Among the account pages should be a page from which a user can change their email address. The email field on the `User` model needs to be updated and a correspondig django-allauth `EmailAddress` object should be associated with the user. django-allauth supports multiple email addresses per user, but we only want one. New email addresses should be verified with [this](http://django-allauth.readthedocs.io/en/latest/views.html#e-mail-verification). The page should be located at `/account/email/`. @BernhardPosselt Do you agree with the above? </issue> <code> [start of nextcloudappstore/core/user/views.py] 1 from allauth.account.views import PasswordChangeView 2 from django.contrib import messages 3 from django.contrib.auth.mixins import LoginRequiredMixin 4 from django.contrib.auth.models import User 5 from django.core.urlresolvers import reverse_lazy 6 from django.shortcuts import redirect, render 7 from django.views.generic import TemplateView 8 from django.views.generic import UpdateView 9 10 from nextcloudappstore.core.user.forms import DeleteAccountForm 11 12 13 class ChangeLanguageView(LoginRequiredMixin, TemplateView): 14 template_name = 'user/set-language.html' 15 16 def get_context_data(self, **kwargs): 17 context = super().get_context_data(**kwargs) 18 context['acc_page'] = 'account-change-language' 19 return context 20 21 22 class DeleteAccountView(LoginRequiredMixin, TemplateView): 23 template_name = 'user/delete-account.html' 24 25 def get_context_data(self, **kwargs): 26 context = super().get_context_data(**kwargs) 27 context['form'] = DeleteAccountForm() 28 context['acc_page'] = 'delete-account' 29 return context 30 31 def post(self, request, *args, **kwargs): 32 form = DeleteAccountForm(request.POST, user=request.user) 33 if form.is_valid(): 34 request.user.delete() 35 return redirect(reverse_lazy('home')) 36 else: 37 return render(request, self.template_name, {'form': form}) 38 39 40 class AccountView(LoginRequiredMixin, UpdateView): 41 """Display and allow changing of the user's name.""" 42 43 template_name = 'user/account.html' 44 template_name_suffix = '' 45 model = User 46 fields = ['first_name', 'last_name'] 47 success_url = reverse_lazy('user:account') 48 49 def get_context_data(self, **kwargs): 50 context = super().get_context_data(**kwargs) 51 context['acc_page'] = 'account' 52 return context 53 54 def form_valid(self, form): 55 messages.success(self.request, 'Name saved.') 56 return super().form_valid(form) 57 58 def get_object(self, queryset=None): 59 return self.request.user 60 61 62 class PasswordView(LoginRequiredMixin, PasswordChangeView): 63 """Allow the user to change their password.""" 64 65 template_name = 'user/password.html' 66 success_url = reverse_lazy('user:account-password') 67 68 def get_context_data(self, **kwargs): 69 context = super().get_context_data(**kwargs) 70 context['acc_page'] = 'password' 71 return context 72 73 74 class APITokenView(LoginRequiredMixin, TemplateView): 75 """Display the user's API token, and allow it to be regenerated.""" 76 77 template_name = 'user/api-token.html' 78 79 def get_context_data(self, **kwargs): 80 context = super().get_context_data(**kwargs) 81 context['acc_page'] = 'api-token' 82 return context 83 [end of nextcloudappstore/core/user/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/nextcloudappstore/core/user/views.py b/nextcloudappstore/core/user/views.py --- a/nextcloudappstore/core/user/views.py +++ b/nextcloudappstore/core/user/views.py @@ -43,7 +43,7 @@ template_name = 'user/account.html' template_name_suffix = '' model = User - fields = ['first_name', 'last_name'] + fields = ['first_name', 'last_name', 'email'] success_url = reverse_lazy('user:account') def get_context_data(self, **kwargs): @@ -52,7 +52,7 @@ return context def form_valid(self, form): - messages.success(self.request, 'Name saved.') + messages.success(self.request, 'Account details saved.') return super().form_valid(form) def get_object(self, queryset=None):
{"golden_diff": "diff --git a/nextcloudappstore/core/user/views.py b/nextcloudappstore/core/user/views.py\n--- a/nextcloudappstore/core/user/views.py\n+++ b/nextcloudappstore/core/user/views.py\n@@ -43,7 +43,7 @@\n template_name = 'user/account.html'\n template_name_suffix = ''\n model = User\n- fields = ['first_name', 'last_name']\n+ fields = ['first_name', 'last_name', 'email']\n success_url = reverse_lazy('user:account')\n \n def get_context_data(self, **kwargs):\n@@ -52,7 +52,7 @@\n return context\n \n def form_valid(self, form):\n- messages.success(self.request, 'Name saved.')\n+ messages.success(self.request, 'Account details saved.')\n return super().form_valid(form)\n \n def get_object(self, queryset=None):\n", "issue": "Email change form\nAmong the account pages should be a page from which a user can change their email address. The email field on the `User` model needs to be updated and a correspondig django-allauth `EmailAddress` object should be associated with the user. django-allauth supports multiple email addresses per user, but we only want one. New email addresses should be verified with [this](http://django-allauth.readthedocs.io/en/latest/views.html#e-mail-verification). The page should be located at `/account/email/`.\n\n@BernhardPosselt Do you agree with the above?\n\n", "before_files": [{"content": "from allauth.account.views import PasswordChangeView\nfrom django.contrib import messages\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.models import User\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.shortcuts import redirect, render\nfrom django.views.generic import TemplateView\nfrom django.views.generic import UpdateView\n\nfrom nextcloudappstore.core.user.forms import DeleteAccountForm\n\n\nclass ChangeLanguageView(LoginRequiredMixin, TemplateView):\n template_name = 'user/set-language.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['acc_page'] = 'account-change-language'\n return context\n\n\nclass DeleteAccountView(LoginRequiredMixin, TemplateView):\n template_name = 'user/delete-account.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['form'] = DeleteAccountForm()\n context['acc_page'] = 'delete-account'\n return context\n\n def post(self, request, *args, **kwargs):\n form = DeleteAccountForm(request.POST, user=request.user)\n if form.is_valid():\n request.user.delete()\n return redirect(reverse_lazy('home'))\n else:\n return render(request, self.template_name, {'form': form})\n\n\nclass AccountView(LoginRequiredMixin, UpdateView):\n \"\"\"Display and allow changing of the user's name.\"\"\"\n\n template_name = 'user/account.html'\n template_name_suffix = ''\n model = User\n fields = ['first_name', 'last_name']\n success_url = reverse_lazy('user:account')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['acc_page'] = 'account'\n return context\n\n def form_valid(self, form):\n messages.success(self.request, 'Name saved.')\n return super().form_valid(form)\n\n def get_object(self, queryset=None):\n return self.request.user\n\n\nclass PasswordView(LoginRequiredMixin, PasswordChangeView):\n \"\"\"Allow the user to change their password.\"\"\"\n\n template_name = 'user/password.html'\n success_url = reverse_lazy('user:account-password')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['acc_page'] = 
'password'\n return context\n\n\nclass APITokenView(LoginRequiredMixin, TemplateView):\n \"\"\"Display the user's API token, and allow it to be regenerated.\"\"\"\n\n template_name = 'user/api-token.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['acc_page'] = 'api-token'\n return context\n", "path": "nextcloudappstore/core/user/views.py"}]}
num_tokens_prompt: 1,395
num_tokens_diff: 194

problem_id: gh_patches_debug_21933
source: rasdani/github-patches
task_type: git_diff
in_source_id: craiga__will-of-the-prophets-31
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Figure out how timezones should work on roll page Times are in UTC. Ben and Adam will be using this from the US. Can we auto-detect the user's timezone? Is this something Django can do for us? </issue> <code> [start of will_of_the_prophets/urls.py] 1 """will_of_the_prophets URL Configuration 2 3 The `urlpatterns` list routes URLs to views. For more information please see: 4 https://docs.djangoproject.com/en/2.0/topics/http/urls/ 5 Examples: 6 Function views 7 1. Add an import: from my_app import views 8 2. Add a URL to urlpatterns: path('', views.home, name='home') 9 Class-based views 10 1. Add an import: from other_app.views import Home 11 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') 12 Including another URLconf 13 1. Import the include() function: from django.urls import include, path 14 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) 15 """ 16 from django.contrib import admin 17 from django.urls import include, path 18 19 20 from will_of_the_prophets import views 21 22 urlpatterns = [ 23 path('admin/', admin.site.urls), 24 path('accounts/', include('django.contrib.auth.urls')), 25 path('roll/', views.RollView.as_view(), name='roll'), 26 path('', views.public_board, name='public_board'), 27 ] 28 [end of will_of_the_prophets/urls.py] [start of will_of_the_prophets/settings/__init__.py] 1 """ 2 Django settings for will_of_the_prophets project. 3 4 Generated by 'django-admin startproject' using Django 2.0.4. 5 6 For more information on this file, see 7 https://docs.djangoproject.com/en/2.0/topics/settings/ 8 9 For the full list of settings and their values, see 10 https://docs.djangoproject.com/en/2.0/ref/settings/ 11 """ 12 13 import os 14 15 import django_heroku 16 17 # Build paths inside the project like this: os.path.join(BASE_DIR, ...) 18 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 19 20 21 # Quick-start development settings - unsuitable for production 22 # See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/ 23 24 # SECURITY WARNING: keep the secret key used in production secret! 25 SECRET_KEY = os.environ.get( 26 'SECRET_KEY', 'bah!osmx@cpaoulc-!eohvd3ldoy*^oki#l25-v6tvq04=6npz') 27 28 # SECURITY WARNING: don't run with debug turned on in production! 
29 DEBUG = os.environ.get('DEBUG', False) 30 31 ALLOWED_HOSTS = ['*.herokuapp.com', 'localhost'] 32 33 34 # Application definition 35 36 INSTALLED_APPS = [ 37 'raven.contrib.django.raven_compat', 38 'django.contrib.admin', 39 'django.contrib.auth', 40 'django.contrib.contenttypes', 41 'django.contrib.sessions', 42 'django.contrib.messages', 43 'django.contrib.staticfiles', 44 'sass_processor', 45 'widget_tweaks', 46 'bootstrap', 47 'will_of_the_prophets', 48 ] 49 50 MIDDLEWARE = [ 51 'django.middleware.security.SecurityMiddleware', 52 'django.contrib.sessions.middleware.SessionMiddleware', 53 'django.middleware.common.CommonMiddleware', 54 'django.middleware.csrf.CsrfViewMiddleware', 55 'django.contrib.auth.middleware.AuthenticationMiddleware', 56 'django.contrib.messages.middleware.MessageMiddleware', 57 'django.middleware.clickjacking.XFrameOptionsMiddleware', 58 ] 59 60 ROOT_URLCONF = 'will_of_the_prophets.urls' 61 62 TEMPLATES = [ 63 { 64 'BACKEND': 'django.template.backends.django.DjangoTemplates', 65 'DIRS': [], 66 'APP_DIRS': True, 67 'OPTIONS': { 68 'context_processors': [ 69 'django.template.context_processors.debug', 70 'django.template.context_processors.request', 71 'django.contrib.auth.context_processors.auth', 72 'django.contrib.messages.context_processors.messages', 73 ], 74 }, 75 }, 76 ] 77 78 WSGI_APPLICATION = 'will_of_the_prophets.wsgi.application' 79 80 81 # Database 82 # https://docs.djangoproject.com/en/2.0/ref/settings/#databases 83 84 DATABASES = { 85 'default': { 86 'ENGINE': 'django.db.backends.sqlite3', 87 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), 88 } 89 } 90 91 92 # Password validation 93 # https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators 94 95 AUTH_PASSWORD_VALIDATORS = [ 96 { 97 'NAME': ('django.contrib.auth.password_validation' 98 '.UserAttributeSimilarityValidator'), 99 }, 100 { 101 'NAME': ('django.contrib.auth.password_validation' 102 '.MinimumLengthValidator'), 103 }, 104 { 105 'NAME': ('django.contrib.auth.password_validation' 106 '.CommonPasswordValidator'), 107 }, 108 { 109 'NAME': ('django.contrib.auth.password_validation' 110 '.NumericPasswordValidator'), 111 }, 112 ] 113 114 115 # Internationalization 116 # https://docs.djangoproject.com/en/2.0/topics/i18n/ 117 118 LANGUAGE_CODE = 'en-us' 119 120 TIME_ZONE = 'UTC' 121 122 USE_I18N = True 123 124 USE_L10N = True 125 126 USE_TZ = True 127 128 129 # Static files (CSS, JavaScript, Images) 130 # https://docs.djangoproject.com/en/2.0/howto/static-files/ 131 132 STATIC_URL = '/static/' 133 134 STATICFILES_FINDERS = [ 135 'django.contrib.staticfiles.finders.FileSystemFinder', 136 'django.contrib.staticfiles.finders.AppDirectoriesFinder', 137 # https://github.com/jrief/django-sass-processor 138 'sass_processor.finders.CssFinder', 139 ] 140 141 142 # django-sass-processor 143 # https://github.com/jrief/django-sass-processor 144 SASS_OUTPUT_STYLE = 'compressed' 145 146 147 # Configure Django App for Heroku. 148 django_heroku.settings(locals()) 149 [end of will_of_the_prophets/settings/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/will_of_the_prophets/settings/__init__.py b/will_of_the_prophets/settings/__init__.py --- a/will_of_the_prophets/settings/__init__.py +++ b/will_of_the_prophets/settings/__init__.py @@ -43,6 +43,7 @@ 'django.contrib.staticfiles', 'sass_processor', 'widget_tweaks', + 'tz_detect', 'bootstrap', 'will_of_the_prophets', ] @@ -144,5 +145,14 @@ SASS_OUTPUT_STYLE = 'compressed' +# django-tz-detect +# https://github.com/adamcharnock/django-tz-detect +MIDDLEWARE += [ + 'tz_detect.middleware.TimezoneMiddleware', +] + +TZ_DETECT_COUNTRIES = ('US', 'CN', 'IN', 'JP', 'BR', 'RU', 'DE', 'FR', 'GB') + + # Configure Django App for Heroku. django_heroku.settings(locals()) diff --git a/will_of_the_prophets/urls.py b/will_of_the_prophets/urls.py --- a/will_of_the_prophets/urls.py +++ b/will_of_the_prophets/urls.py @@ -23,5 +23,6 @@ path('admin/', admin.site.urls), path('accounts/', include('django.contrib.auth.urls')), path('roll/', views.RollView.as_view(), name='roll'), + path('tz_detect/', include('tz_detect.urls')), path('', views.public_board, name='public_board'), ]
{"golden_diff": "diff --git a/will_of_the_prophets/settings/__init__.py b/will_of_the_prophets/settings/__init__.py\n--- a/will_of_the_prophets/settings/__init__.py\n+++ b/will_of_the_prophets/settings/__init__.py\n@@ -43,6 +43,7 @@\n 'django.contrib.staticfiles',\n 'sass_processor',\n 'widget_tweaks',\n+ 'tz_detect',\n 'bootstrap',\n 'will_of_the_prophets',\n ]\n@@ -144,5 +145,14 @@\n SASS_OUTPUT_STYLE = 'compressed'\n \n \n+# django-tz-detect\n+# https://github.com/adamcharnock/django-tz-detect\n+MIDDLEWARE += [\n+ 'tz_detect.middleware.TimezoneMiddleware',\n+]\n+\n+TZ_DETECT_COUNTRIES = ('US', 'CN', 'IN', 'JP', 'BR', 'RU', 'DE', 'FR', 'GB')\n+\n+\n # Configure Django App for Heroku.\n django_heroku.settings(locals())\ndiff --git a/will_of_the_prophets/urls.py b/will_of_the_prophets/urls.py\n--- a/will_of_the_prophets/urls.py\n+++ b/will_of_the_prophets/urls.py\n@@ -23,5 +23,6 @@\n path('admin/', admin.site.urls),\n path('accounts/', include('django.contrib.auth.urls')),\n path('roll/', views.RollView.as_view(), name='roll'),\n+ path('tz_detect/', include('tz_detect.urls')),\n path('', views.public_board, name='public_board'),\n ]\n", "issue": "Figure out how timezones should work on roll page\nTimes are in UTC. Ben and Adam will be using this from the US. Can we auto-detect the user's timezone? Is this something Django can do for us?\n", "before_files": [{"content": "\"\"\"will_of_the_prophets URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import include, path\n\n\nfrom will_of_the_prophets import views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('accounts/', include('django.contrib.auth.urls')),\n path('roll/', views.RollView.as_view(), name='roll'),\n path('', views.public_board, name='public_board'),\n]\n", "path": "will_of_the_prophets/urls.py"}, {"content": "\"\"\"\nDjango settings for will_of_the_prophets project.\n\nGenerated by 'django-admin startproject' using Django 2.0.4.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/2.0/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/2.0/ref/settings/\n\"\"\"\n\nimport os\n\nimport django_heroku\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get(\n 'SECRET_KEY', 'bah!osmx@cpaoulc-!eohvd3ldoy*^oki#l25-v6tvq04=6npz')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = os.environ.get('DEBUG', False)\n\nALLOWED_HOSTS = ['*.herokuapp.com', 'localhost']\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'raven.contrib.django.raven_compat',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'sass_processor',\n 'widget_tweaks',\n 'bootstrap',\n 'will_of_the_prophets',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'will_of_the_prophets.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'will_of_the_prophets.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/2.0/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n\n# Password validation\n# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': ('django.contrib.auth.password_validation'\n '.UserAttributeSimilarityValidator'),\n },\n {\n 'NAME': ('django.contrib.auth.password_validation'\n '.MinimumLengthValidator'),\n },\n {\n 'NAME': ('django.contrib.auth.password_validation'\n '.CommonPasswordValidator'),\n },\n {\n 'NAME': ('django.contrib.auth.password_validation'\n '.NumericPasswordValidator'),\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/2.0/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 
'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/2.0/howto/static-files/\n\nSTATIC_URL = '/static/'\n\nSTATICFILES_FINDERS = [\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n # https://github.com/jrief/django-sass-processor\n 'sass_processor.finders.CssFinder',\n]\n\n\n# django-sass-processor\n# https://github.com/jrief/django-sass-processor\nSASS_OUTPUT_STYLE = 'compressed'\n\n\n# Configure Django App for Heroku.\ndjango_heroku.settings(locals())\n", "path": "will_of_the_prophets/settings/__init__.py"}]}
num_tokens_prompt: 2,171
num_tokens_diff: 355

problem_id: gh_patches_debug_34816
source: rasdani/github-patches
task_type: git_diff
in_source_id: chainer__chainer-2195
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Partial deserialization of links We sometimes want to deserialize a link only partially, i.e. we want to load a smaller set of parameters to a larger link. It happens when we do some surgery to neural nets (e.g. fine-tuning), in which case the set of parameters in the source network does not match to the set of parameters in the target network. One possible design is to introduce a `strict` option to the deserializer, and if that flag is False, any mismatch of the sets of parameters is ignored. I think it is safer to turn on this flag by default (I mean, we should not change the default behavior). </issue> <code> [start of chainer/serializers/npz.py] 1 import numpy 2 3 from chainer import cuda 4 from chainer import serializer 5 6 7 class DictionarySerializer(serializer.Serializer): 8 9 """Serializer for dictionary. 10 11 This is the standard serializer in Chainer. The hierarchy of objects are 12 simply mapped to a flat dictionary with keys representing the paths to 13 objects in the hierarchy. 14 15 .. note:: 16 Despite of its name, this serializer DOES NOT serialize the 17 object into external files. It just build a flat dictionary of arrays 18 that can be fed into :func:`numpy.savez` and 19 :func:`numpy.savez_compressed`. If you want to use this serializer 20 directly, you have to manually send a resulting dictionary to one of 21 these functions. 22 23 Args: 24 target (dict): The dictionary that this serializer saves the objects 25 to. If target is None, then a new dictionary is created. 26 path (str): The base path in the hierarchy that this serializer 27 indicates. 28 29 Attributes: 30 target (dict): The target dictionary. Once the serialization completes, 31 this dictionary can be fed into :func:`numpy.savez` or 32 :func:`numpy.savez_compressed` to serialize it in the NPZ format. 33 34 """ 35 36 def __init__(self, target=None, path=''): 37 self.target = {} if target is None else target 38 self.path = path 39 40 def __getitem__(self, key): 41 key = key.strip('/') 42 return DictionarySerializer(self.target, self.path + key + '/') 43 44 def __call__(self, key, value): 45 key = key.lstrip('/') 46 ret = value 47 if isinstance(value, cuda.ndarray): 48 value = value.get() 49 arr = numpy.asarray(value) 50 self.target[self.path + key] = arr 51 return ret 52 53 54 def save_npz(filename, obj, compression=True): 55 """Saves an object to the file in NPZ format. 56 57 This is a short-cut function to save only one object into an NPZ file. 58 59 Args: 60 filename (str): Target file name. 61 obj: Object to be serialized. It must support serialization protocol. 62 compression (bool): If ``True``, compression in the resulting zip file 63 is enabled. 64 65 """ 66 s = DictionarySerializer() 67 s.save(obj) 68 with open(filename, 'wb') as f: 69 if compression: 70 numpy.savez_compressed(f, **s.target) 71 else: 72 numpy.savez(f, **s.target) 73 74 75 class NpzDeserializer(serializer.Deserializer): 76 77 """Deserializer for NPZ format. 78 79 This is the standard deserializer in Chainer. This deserializer can be used 80 to read an object serialized by :func:`save_npz`. 81 82 Args: 83 npz: `npz` file object. 84 path: The base path that the deserialization starts from. 
85 86 """ 87 88 def __init__(self, npz, path=''): 89 self.npz = npz 90 self.path = path 91 92 def __getitem__(self, key): 93 key = key.strip('/') 94 return NpzDeserializer(self.npz, self.path + key + '/') 95 96 def __call__(self, key, value): 97 key = key.lstrip('/') 98 dataset = self.npz[self.path + key] 99 if value is None: 100 return dataset 101 elif isinstance(value, numpy.ndarray): 102 numpy.copyto(value, dataset) 103 elif isinstance(value, cuda.ndarray): 104 value.set(numpy.asarray(dataset)) 105 else: 106 value = type(value)(numpy.asarray(dataset)) 107 return value 108 109 110 def load_npz(filename, obj): 111 """Loads an object from the file in NPZ format. 112 113 This is a short-cut function to load from an `.npz` file that contains only 114 one object. 115 116 Args: 117 filename (str): Name of the file to be loaded. 118 obj: Object to be deserialized. It must support serialization protocol. 119 120 """ 121 with numpy.load(filename) as f: 122 d = NpzDeserializer(f) 123 d.load(obj) 124 [end of chainer/serializers/npz.py] [start of chainer/serializers/hdf5.py] 1 import numpy 2 3 from chainer import cuda 4 from chainer import serializer 5 6 7 try: 8 import h5py 9 _available = True 10 except ImportError: 11 _available = False 12 13 14 def _check_available(): 15 if not _available: 16 msg = '''h5py is not installed on your environment. 17 Please install h5py to activate hdf5 serializers. 18 19 $ pip install h5py''' 20 raise RuntimeError(msg) 21 22 23 class HDF5Serializer(serializer.Serializer): 24 25 """Serializer for HDF5 format. 26 27 This is the standard serializer in Chainer. The chain hierarchy is simply 28 mapped to HDF5 hierarchical groups. 29 30 Args: 31 group (h5py.Group): The group that this serializer represents. 32 compression (int): Gzip compression level. 33 34 """ 35 36 def __init__(self, group, compression=4): 37 _check_available() 38 39 self.group = group 40 self.compression = compression 41 42 def __getitem__(self, key): 43 name = self.group.name + '/' + key 44 return HDF5Serializer(self.group.require_group(name), self.compression) 45 46 def __call__(self, key, value): 47 ret = value 48 if isinstance(value, cuda.ndarray): 49 value = cuda.to_cpu(value) 50 arr = numpy.asarray(value) 51 compression = None if arr.size <= 1 else self.compression 52 self.group.create_dataset(key, data=arr, compression=compression) 53 return ret 54 55 56 def save_hdf5(filename, obj, compression=4): 57 """Saves an object to the file in HDF5 format. 58 59 This is a short-cut function to save only one object into an HDF5 file. If 60 you want to save multiple objects to one HDF5 file, use 61 :class:`HDF5Serializer` directly by passing appropriate :class:`h5py.Group` 62 objects. 63 64 Args: 65 filename (str): Target file name. 66 obj: Object to be serialized. It must support serialization protocol. 67 compression (int): Gzip compression level. 68 69 """ 70 _check_available() 71 with h5py.File(filename, 'w') as f: 72 s = HDF5Serializer(f, compression=compression) 73 s.save(obj) 74 75 76 class HDF5Deserializer(serializer.Deserializer): 77 78 """Deserializer for HDF5 format. 79 80 This is the standard deserializer in Chainer. This deserializer can be used 81 to read an object serialized by :class:`HDF5Serializer`. 82 83 Args: 84 group (h5py.Group): The group that the deserialization starts from. 
85 86 """ 87 88 def __init__(self, group): 89 _check_available() 90 self.group = group 91 92 def __getitem__(self, key): 93 name = self.group.name + '/' + key 94 return HDF5Deserializer(self.group.require_group(name)) 95 96 def __call__(self, key, value): 97 dataset = self.group[key] 98 if value is None: 99 return numpy.asarray(dataset) 100 elif isinstance(value, numpy.ndarray): 101 dataset.read_direct(value) 102 elif isinstance(value, cuda.ndarray): 103 value.set(numpy.asarray(dataset)) 104 else: 105 value = type(value)(numpy.asarray(dataset)) 106 return value 107 108 109 def load_hdf5(filename, obj): 110 """Loads an object from the file in HDF5 format. 111 112 This is a short-cut function to load from an HDF5 file that contains only 113 one object. If you want to load multiple objects from one HDF5 file, use 114 :class:`HDF5Deserializer` directly by passing appropriate 115 :class:`h5py.Group` objects. 116 117 Args: 118 filename (str): Name of the file to be loaded. 119 obj: Object to be deserialized. It must support serialization protocol. 120 121 """ 122 _check_available() 123 with h5py.File(filename, 'r') as f: 124 d = HDF5Deserializer(f) 125 d.load(obj) 126 [end of chainer/serializers/hdf5.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/chainer/serializers/hdf5.py b/chainer/serializers/hdf5.py --- a/chainer/serializers/hdf5.py +++ b/chainer/serializers/hdf5.py @@ -82,18 +82,26 @@ Args: group (h5py.Group): The group that the deserialization starts from. + strict (bool): If ``True``, the deserializer raises an error when an + expected value is not found in the given HDF5 file. Otherwise, + it ignores the value and skip deserialization. """ - def __init__(self, group): + def __init__(self, group, strict=True): _check_available() self.group = group + self.strict = strict def __getitem__(self, key): name = self.group.name + '/' + key return HDF5Deserializer(self.group.require_group(name)) def __call__(self, key, value): + if not self.strict and key not in self.group: + return value + + self.group.keys dataset = self.group[key] if value is None: return numpy.asarray(dataset) diff --git a/chainer/serializers/npz.py b/chainer/serializers/npz.py --- a/chainer/serializers/npz.py +++ b/chainer/serializers/npz.py @@ -82,20 +82,27 @@ Args: npz: `npz` file object. path: The base path that the deserialization starts from. + strict (bool): If ``True``, the deserializer raises an error when an + expected value is not found in the given NPZ file. Otherwise, + it ignores the value and skip deserialization. """ - def __init__(self, npz, path=''): + def __init__(self, npz, path='', strict=True): self.npz = npz self.path = path + self.strict = strict def __getitem__(self, key): key = key.strip('/') return NpzDeserializer(self.npz, self.path + key + '/') def __call__(self, key, value): - key = key.lstrip('/') - dataset = self.npz[self.path + key] + key = self.path + key.lstrip('/') + if not self.strict and key not in self.npz: + return value + + dataset = self.npz[key] if value is None: return dataset elif isinstance(value, numpy.ndarray):
{"golden_diff": "diff --git a/chainer/serializers/hdf5.py b/chainer/serializers/hdf5.py\n--- a/chainer/serializers/hdf5.py\n+++ b/chainer/serializers/hdf5.py\n@@ -82,18 +82,26 @@\n \n Args:\n group (h5py.Group): The group that the deserialization starts from.\n+ strict (bool): If ``True``, the deserializer raises an error when an\n+ expected value is not found in the given HDF5 file. Otherwise,\n+ it ignores the value and skip deserialization.\n \n \"\"\"\n \n- def __init__(self, group):\n+ def __init__(self, group, strict=True):\n _check_available()\n self.group = group\n+ self.strict = strict\n \n def __getitem__(self, key):\n name = self.group.name + '/' + key\n return HDF5Deserializer(self.group.require_group(name))\n \n def __call__(self, key, value):\n+ if not self.strict and key not in self.group:\n+ return value\n+\n+ self.group.keys\n dataset = self.group[key]\n if value is None:\n return numpy.asarray(dataset)\ndiff --git a/chainer/serializers/npz.py b/chainer/serializers/npz.py\n--- a/chainer/serializers/npz.py\n+++ b/chainer/serializers/npz.py\n@@ -82,20 +82,27 @@\n Args:\n npz: `npz` file object.\n path: The base path that the deserialization starts from.\n+ strict (bool): If ``True``, the deserializer raises an error when an\n+ expected value is not found in the given NPZ file. Otherwise,\n+ it ignores the value and skip deserialization.\n \n \"\"\"\n \n- def __init__(self, npz, path=''):\n+ def __init__(self, npz, path='', strict=True):\n self.npz = npz\n self.path = path\n+ self.strict = strict\n \n def __getitem__(self, key):\n key = key.strip('/')\n return NpzDeserializer(self.npz, self.path + key + '/')\n \n def __call__(self, key, value):\n- key = key.lstrip('/')\n- dataset = self.npz[self.path + key]\n+ key = self.path + key.lstrip('/')\n+ if not self.strict and key not in self.npz:\n+ return value\n+\n+ dataset = self.npz[key]\n if value is None:\n return dataset\n elif isinstance(value, numpy.ndarray):\n", "issue": "Partial deserialization of links\nWe sometimes want to deserialize a link only partially, i.e. we want to load a smaller set of parameters to a larger link. It happens when we do some surgery to neural nets (e.g. fine-tuning), in which case the set of parameters in the source network does not match to the set of parameters in the target network.\r\n\r\nOne possible design is to introduce a `strict` option to the deserializer, and if that flag is False, any mismatch of the sets of parameters is ignored. I think it is safer to turn on this flag by default (I mean, we should not change the default behavior).\n", "before_files": [{"content": "import numpy\n\nfrom chainer import cuda\nfrom chainer import serializer\n\n\nclass DictionarySerializer(serializer.Serializer):\n\n \"\"\"Serializer for dictionary.\n\n This is the standard serializer in Chainer. The hierarchy of objects are\n simply mapped to a flat dictionary with keys representing the paths to\n objects in the hierarchy.\n\n .. note::\n Despite of its name, this serializer DOES NOT serialize the\n object into external files. It just build a flat dictionary of arrays\n that can be fed into :func:`numpy.savez` and\n :func:`numpy.savez_compressed`. If you want to use this serializer\n directly, you have to manually send a resulting dictionary to one of\n these functions.\n\n Args:\n target (dict): The dictionary that this serializer saves the objects\n to. 
If target is None, then a new dictionary is created.\n path (str): The base path in the hierarchy that this serializer\n indicates.\n\n Attributes:\n target (dict): The target dictionary. Once the serialization completes,\n this dictionary can be fed into :func:`numpy.savez` or\n :func:`numpy.savez_compressed` to serialize it in the NPZ format.\n\n \"\"\"\n\n def __init__(self, target=None, path=''):\n self.target = {} if target is None else target\n self.path = path\n\n def __getitem__(self, key):\n key = key.strip('/')\n return DictionarySerializer(self.target, self.path + key + '/')\n\n def __call__(self, key, value):\n key = key.lstrip('/')\n ret = value\n if isinstance(value, cuda.ndarray):\n value = value.get()\n arr = numpy.asarray(value)\n self.target[self.path + key] = arr\n return ret\n\n\ndef save_npz(filename, obj, compression=True):\n \"\"\"Saves an object to the file in NPZ format.\n\n This is a short-cut function to save only one object into an NPZ file.\n\n Args:\n filename (str): Target file name.\n obj: Object to be serialized. It must support serialization protocol.\n compression (bool): If ``True``, compression in the resulting zip file\n is enabled.\n\n \"\"\"\n s = DictionarySerializer()\n s.save(obj)\n with open(filename, 'wb') as f:\n if compression:\n numpy.savez_compressed(f, **s.target)\n else:\n numpy.savez(f, **s.target)\n\n\nclass NpzDeserializer(serializer.Deserializer):\n\n \"\"\"Deserializer for NPZ format.\n\n This is the standard deserializer in Chainer. This deserializer can be used\n to read an object serialized by :func:`save_npz`.\n\n Args:\n npz: `npz` file object.\n path: The base path that the deserialization starts from.\n\n \"\"\"\n\n def __init__(self, npz, path=''):\n self.npz = npz\n self.path = path\n\n def __getitem__(self, key):\n key = key.strip('/')\n return NpzDeserializer(self.npz, self.path + key + '/')\n\n def __call__(self, key, value):\n key = key.lstrip('/')\n dataset = self.npz[self.path + key]\n if value is None:\n return dataset\n elif isinstance(value, numpy.ndarray):\n numpy.copyto(value, dataset)\n elif isinstance(value, cuda.ndarray):\n value.set(numpy.asarray(dataset))\n else:\n value = type(value)(numpy.asarray(dataset))\n return value\n\n\ndef load_npz(filename, obj):\n \"\"\"Loads an object from the file in NPZ format.\n\n This is a short-cut function to load from an `.npz` file that contains only\n one object.\n\n Args:\n filename (str): Name of the file to be loaded.\n obj: Object to be deserialized. It must support serialization protocol.\n\n \"\"\"\n with numpy.load(filename) as f:\n d = NpzDeserializer(f)\n d.load(obj)\n", "path": "chainer/serializers/npz.py"}, {"content": "import numpy\n\nfrom chainer import cuda\nfrom chainer import serializer\n\n\ntry:\n import h5py\n _available = True\nexcept ImportError:\n _available = False\n\n\ndef _check_available():\n if not _available:\n msg = '''h5py is not installed on your environment.\nPlease install h5py to activate hdf5 serializers.\n\n $ pip install h5py'''\n raise RuntimeError(msg)\n\n\nclass HDF5Serializer(serializer.Serializer):\n\n \"\"\"Serializer for HDF5 format.\n\n This is the standard serializer in Chainer. 
The chain hierarchy is simply\n mapped to HDF5 hierarchical groups.\n\n Args:\n group (h5py.Group): The group that this serializer represents.\n compression (int): Gzip compression level.\n\n \"\"\"\n\n def __init__(self, group, compression=4):\n _check_available()\n\n self.group = group\n self.compression = compression\n\n def __getitem__(self, key):\n name = self.group.name + '/' + key\n return HDF5Serializer(self.group.require_group(name), self.compression)\n\n def __call__(self, key, value):\n ret = value\n if isinstance(value, cuda.ndarray):\n value = cuda.to_cpu(value)\n arr = numpy.asarray(value)\n compression = None if arr.size <= 1 else self.compression\n self.group.create_dataset(key, data=arr, compression=compression)\n return ret\n\n\ndef save_hdf5(filename, obj, compression=4):\n \"\"\"Saves an object to the file in HDF5 format.\n\n This is a short-cut function to save only one object into an HDF5 file. If\n you want to save multiple objects to one HDF5 file, use\n :class:`HDF5Serializer` directly by passing appropriate :class:`h5py.Group`\n objects.\n\n Args:\n filename (str): Target file name.\n obj: Object to be serialized. It must support serialization protocol.\n compression (int): Gzip compression level.\n\n \"\"\"\n _check_available()\n with h5py.File(filename, 'w') as f:\n s = HDF5Serializer(f, compression=compression)\n s.save(obj)\n\n\nclass HDF5Deserializer(serializer.Deserializer):\n\n \"\"\"Deserializer for HDF5 format.\n\n This is the standard deserializer in Chainer. This deserializer can be used\n to read an object serialized by :class:`HDF5Serializer`.\n\n Args:\n group (h5py.Group): The group that the deserialization starts from.\n\n \"\"\"\n\n def __init__(self, group):\n _check_available()\n self.group = group\n\n def __getitem__(self, key):\n name = self.group.name + '/' + key\n return HDF5Deserializer(self.group.require_group(name))\n\n def __call__(self, key, value):\n dataset = self.group[key]\n if value is None:\n return numpy.asarray(dataset)\n elif isinstance(value, numpy.ndarray):\n dataset.read_direct(value)\n elif isinstance(value, cuda.ndarray):\n value.set(numpy.asarray(dataset))\n else:\n value = type(value)(numpy.asarray(dataset))\n return value\n\n\ndef load_hdf5(filename, obj):\n \"\"\"Loads an object from the file in HDF5 format.\n\n This is a short-cut function to load from an HDF5 file that contains only\n one object. If you want to load multiple objects from one HDF5 file, use\n :class:`HDF5Deserializer` directly by passing appropriate\n :class:`h5py.Group` objects.\n\n Args:\n filename (str): Name of the file to be loaded.\n obj: Object to be deserialized. It must support serialization protocol.\n\n \"\"\"\n _check_available()\n with h5py.File(filename, 'r') as f:\n d = HDF5Deserializer(f)\n d.load(obj)\n", "path": "chainer/serializers/hdf5.py"}]}
2,932
570
gh_patches_debug_53934
rasdani/github-patches
git_diff
bokeh__bokeh-3570
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ImageURL example in reference guide is broken The example doesn't render an image, just a gridded, empty plot. http://bokeh.pydata.org/en/latest/docs/reference/models/glyphs.html#bokeh.models.glyphs.ImageURL </issue> <code> [start of examples/glyphs/image_url.py] 1 2 import numpy as np 3 4 from bokeh.util.browser import view 5 from bokeh.document import Document 6 from bokeh.embed import file_html 7 from bokeh.models.glyphs import ImageURL 8 from bokeh.models import ColumnDataSource, Range1d, Plot, LinearAxis, Grid 9 from bokeh.resources import INLINE 10 11 url = "http://bokeh.pydata.org/en/latest/_static/bokeh-transparent.png" 12 N = 5 13 14 source = ColumnDataSource(dict( 15 url = [url]*N, 16 x1 = np.linspace( 0, 150, N), 17 y1 = np.linspace( 0, 150, N), 18 w1 = np.linspace( 10, 50, N), 19 h1 = np.linspace( 10, 50, N), 20 x2 = np.linspace(-50, 150, N), 21 y2 = np.linspace( 0, 200, N), 22 )) 23 24 xdr = Range1d(start=-100, end=200) 25 ydr = Range1d(start=-100, end=200) 26 27 plot = Plot(title="ImageURL", x_range=xdr, y_range=ydr) 28 29 image1 = ImageURL(url="url", x="x1", y="y1", w="w1", h="h1", anchor="center", global_alpha=0.2) 30 plot.add_glyph(source, image1) 31 32 image2 = ImageURL(url="url", x="x2", y="y2", w=20, h=20, anchor="top_left") 33 plot.add_glyph(source, image2) 34 35 image3 = ImageURL(url=dict(value=url), x=200, y=-100, anchor="bottom_right") 36 plot.add_glyph(source, image3) 37 38 xaxis = LinearAxis() 39 plot.add_layout(xaxis, 'below') 40 41 yaxis = LinearAxis() 42 plot.add_layout(yaxis,'left') 43 44 plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker)) 45 plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker)) 46 47 doc = Document( ) 48 doc.add_root(plot) 49 50 if __name__ == "__main__": 51 filename = "image_url.html" 52 with open(filename, "w") as f: 53 f.write(file_html(doc, INLINE, "Image URL Example")) 54 print("Wrote %s" % filename) 55 view(filename) 56 [end of examples/glyphs/image_url.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/examples/glyphs/image_url.py b/examples/glyphs/image_url.py --- a/examples/glyphs/image_url.py +++ b/examples/glyphs/image_url.py @@ -8,7 +8,7 @@ from bokeh.models import ColumnDataSource, Range1d, Plot, LinearAxis, Grid from bokeh.resources import INLINE -url = "http://bokeh.pydata.org/en/latest/_static/bokeh-transparent.png" +url = "http://bokeh.pydata.org/en/latest/_static/images/logo.png" N = 5 source = ColumnDataSource(dict(
{"golden_diff": "diff --git a/examples/glyphs/image_url.py b/examples/glyphs/image_url.py\n--- a/examples/glyphs/image_url.py\n+++ b/examples/glyphs/image_url.py\n@@ -8,7 +8,7 @@\n from bokeh.models import ColumnDataSource, Range1d, Plot, LinearAxis, Grid\n from bokeh.resources import INLINE\n \n-url = \"http://bokeh.pydata.org/en/latest/_static/bokeh-transparent.png\"\n+url = \"http://bokeh.pydata.org/en/latest/_static/images/logo.png\"\n N = 5\n \n source = ColumnDataSource(dict(\n", "issue": "ImageURL example in reference guide is broken\nThe example doesn't render an image, just a gridded, empty plot.\n\nhttp://bokeh.pydata.org/en/latest/docs/reference/models/glyphs.html#bokeh.models.glyphs.ImageURL\n\n", "before_files": [{"content": "\nimport numpy as np\n\nfrom bokeh.util.browser import view\nfrom bokeh.document import Document\nfrom bokeh.embed import file_html\nfrom bokeh.models.glyphs import ImageURL\nfrom bokeh.models import ColumnDataSource, Range1d, Plot, LinearAxis, Grid\nfrom bokeh.resources import INLINE\n\nurl = \"http://bokeh.pydata.org/en/latest/_static/bokeh-transparent.png\"\nN = 5\n\nsource = ColumnDataSource(dict(\n url = [url]*N,\n x1 = np.linspace( 0, 150, N),\n y1 = np.linspace( 0, 150, N),\n w1 = np.linspace( 10, 50, N),\n h1 = np.linspace( 10, 50, N),\n x2 = np.linspace(-50, 150, N),\n y2 = np.linspace( 0, 200, N),\n))\n\nxdr = Range1d(start=-100, end=200)\nydr = Range1d(start=-100, end=200)\n\nplot = Plot(title=\"ImageURL\", x_range=xdr, y_range=ydr)\n\nimage1 = ImageURL(url=\"url\", x=\"x1\", y=\"y1\", w=\"w1\", h=\"h1\", anchor=\"center\", global_alpha=0.2)\nplot.add_glyph(source, image1)\n\nimage2 = ImageURL(url=\"url\", x=\"x2\", y=\"y2\", w=20, h=20, anchor=\"top_left\")\nplot.add_glyph(source, image2)\n\nimage3 = ImageURL(url=dict(value=url), x=200, y=-100, anchor=\"bottom_right\")\nplot.add_glyph(source, image3)\n\nxaxis = LinearAxis()\nplot.add_layout(xaxis, 'below')\n\nyaxis = LinearAxis()\nplot.add_layout(yaxis,'left')\n\nplot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))\nplot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))\n\ndoc = Document( )\ndoc.add_root(plot)\n\nif __name__ == \"__main__\":\n filename = \"image_url.html\"\n with open(filename, \"w\") as f:\n f.write(file_html(doc, INLINE, \"Image URL Example\"))\n print(\"Wrote %s\" % filename)\n view(filename)\n", "path": "examples/glyphs/image_url.py"}]}
1,230
126
gh_patches_debug_41745
rasdani/github-patches
git_diff
sql-machine-learning__elasticdl-1051
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Modify elasticdl.layers.Embedding arguments and constructor according to design doc According to [design doc](https://github.com/wangkuiyi/elasticdl/blob/develop/elasticdl/doc/distributed_embedding_layer_design.md#elasticdllayersembedding): ``` __init__( output_dim, embeddings_initializer='uniform', mask_zero=False, input_length=None, combiner=None, ) ``` </issue> <code> [start of elasticdl/python/elasticdl/layers/embedding.py] 1 import tensorflow as tf 2 from tensorflow.python.keras.utils import tf_utils 3 4 5 class Embedding(tf.keras.layers.Layer): 6 """ 7 Input: indexes for the embedding entries 8 shape is (batch_size, input_length) 9 Output: Corresponding embedding vectors of the input indexes 10 shape is (batch_size, input_length, embedding_dim) 11 Arguments: 12 embedding_dim: the dimension of the embedding vector 13 embedding_initializer: Initializer for embedding table 14 """ 15 16 def __init__(self, embedding_dim, embedding_initializer="uniform"): 17 super(Embedding, self).__init__() 18 self.embedding_dim = embedding_dim 19 self.embedding_initializer = embedding_initializer 20 self.tape = None 21 self.worker = None 22 self.bet_ids_pair = [] 23 24 @tf_utils.shape_type_conversion 25 def compute_output_shape(self, input_shape): 26 return input_shape + (self.embedding_dim,) 27 28 @property 29 def name(self): 30 return self._name 31 32 @staticmethod 33 def get_key(name_list): 34 return "-".join(map(str, name_list)) 35 36 def lookup_embedding(self, unique_ids): 37 batch_embedding = self.worker.embedding_lookup( 38 unique_ids, self._name, self.embedding_initializer 39 ) 40 return batch_embedding 41 42 def call(self, input): 43 ids = tf.convert_to_tensor(input, name="embedding_ids") 44 flat_ids = tf.reshape(ids, [-1]) 45 unique_ids, idx = tf.unique(flat_ids) 46 batch_embedding_tensor = tf.py_function( 47 self.lookup_embedding, inp=[unique_ids], Tout=tf.float32 48 ) 49 if self.tape: 50 # tape.watch works with eager mode only 51 if not tf.executing_eagerly(): 52 raise RuntimeError("tape.watch only works with eager mode") 53 self.tape.watch(batch_embedding_tensor) 54 self.bet_ids_pair.append((batch_embedding_tensor, unique_ids)) 55 outputs = tf.gather(batch_embedding_tensor, idx) 56 outputs = tf.reshape( 57 outputs, ids.get_shape().concatenate(self.embedding_dim) 58 ) 59 return outputs 60 61 def reset(self): 62 self.bet_ids_pair = [] 63 self.tape = None 64 65 def set_tape(self, tape): 66 self.tape = tape 67 68 def set_worker(self, worker): 69 self.worker = worker 70 [end of elasticdl/python/elasticdl/layers/embedding.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/elasticdl/python/elasticdl/layers/embedding.py b/elasticdl/python/elasticdl/layers/embedding.py --- a/elasticdl/python/elasticdl/layers/embedding.py +++ b/elasticdl/python/elasticdl/layers/embedding.py @@ -6,24 +6,77 @@ """ Input: indexes for the embedding entries shape is (batch_size, input_length) - Output: Corresponding embedding vectors of the input indexes - shape is (batch_size, input_length, embedding_dim) + Output: + corresponding (combined) embeddings with a shape of + (batch_size, input_length, output_dim) if combiner is None + (batch_size, output_dim) if combiner is not None Arguments: - embedding_dim: the dimension of the embedding vector + output_dim: the dimension of the embedding vector embedding_initializer: Initializer for embedding table + mask_zero: Whether or not the input value 0 is a special "padding" + value that should be masked out. + input_length: Length of input sequences, when it is constant. + This argument is required if you are going to connect + `Flatten` then `Dense` layers upstream + (without it, the shape of the dense outputs cannot be computed). + combiner: A string specifying the reduction op or None if not used. + "mean", "sqrtn" and "sum" are supported for the reduction op. + TODO: support mask_zero + TODO: support combiner + TODO: support sparse input """ - def __init__(self, embedding_dim, embedding_initializer="uniform"): - super(Embedding, self).__init__() - self.embedding_dim = embedding_dim + def __init__( + self, + output_dim, + embedding_initializer="uniform", + mask_zero=False, + input_length=None, + combiner=None, + **kwargs + ): + if "input_shape" not in kwargs and input_length: + kwargs["input_shape"] = (input_length,) + super(Embedding, self).__init__(**kwargs) + + self.output_dim = output_dim self.embedding_initializer = embedding_initializer + self.mask_zero = mask_zero + self.input_length = input_length + self.combiner = combiner self.tape = None self.worker = None self.bet_ids_pair = [] @tf_utils.shape_type_conversion def compute_output_shape(self, input_shape): - return input_shape + (self.embedding_dim,) + # this function is taken from + # tf.keras.layers.Embedding.compute_output_shape + # https://github.com/tensorflow/tensorflow/blob/3f3c728bf80e0fd6653744318cbbfe1454c6ddca/tensorflow/python/keras/layers/embeddings.py#L156 + if self.input_length is None: + return input_shape + (self.output_dim,) + else: + if isinstance(self.input_length, (list, tuple)): + in_lens = list(self.input_length) + else: + in_lens = [self.input_length] + if len(in_lens) != len(input_shape) - 1: + raise ValueError( + '"input_length" is %s, ' + "but received input has shape %s" + % (str(self.input_length), str(input_shape)) + ) + else: + for i, (s1, s2) in enumerate(zip(in_lens, input_shape[1:])): + if s1 is not None and s2 is not None and s1 != s2: + raise ValueError( + '"input_length" is %s, ' + "but received input has shape %s" + % (str(self.input_length), str(input_shape)) + ) + elif s1 is None: + in_lens[i] = s2 + return (input_shape[0],) + tuple(in_lens) + (self.output_dim,) @property def name(self): @@ -54,7 +107,7 @@ self.bet_ids_pair.append((batch_embedding_tensor, unique_ids)) outputs = tf.gather(batch_embedding_tensor, idx) outputs = tf.reshape( - outputs, ids.get_shape().concatenate(self.embedding_dim) + outputs, ids.get_shape().concatenate(self.output_dim) ) return outputs
{"golden_diff": "diff --git a/elasticdl/python/elasticdl/layers/embedding.py b/elasticdl/python/elasticdl/layers/embedding.py\n--- a/elasticdl/python/elasticdl/layers/embedding.py\n+++ b/elasticdl/python/elasticdl/layers/embedding.py\n@@ -6,24 +6,77 @@\n \"\"\"\n Input: indexes for the embedding entries\n shape is (batch_size, input_length)\n- Output: Corresponding embedding vectors of the input indexes\n- shape is (batch_size, input_length, embedding_dim)\n+ Output:\n+ corresponding (combined) embeddings with a shape of\n+ (batch_size, input_length, output_dim) if combiner is None\n+ (batch_size, output_dim) if combiner is not None\n Arguments:\n- embedding_dim: the dimension of the embedding vector\n+ output_dim: the dimension of the embedding vector\n embedding_initializer: Initializer for embedding table\n+ mask_zero: Whether or not the input value 0 is a special \"padding\"\n+ value that should be masked out.\n+ input_length: Length of input sequences, when it is constant.\n+ This argument is required if you are going to connect\n+ `Flatten` then `Dense` layers upstream\n+ (without it, the shape of the dense outputs cannot be computed).\n+ combiner: A string specifying the reduction op or None if not used.\n+ \"mean\", \"sqrtn\" and \"sum\" are supported for the reduction op.\n+ TODO: support mask_zero\n+ TODO: support combiner\n+ TODO: support sparse input\n \"\"\"\n \n- def __init__(self, embedding_dim, embedding_initializer=\"uniform\"):\n- super(Embedding, self).__init__()\n- self.embedding_dim = embedding_dim\n+ def __init__(\n+ self,\n+ output_dim,\n+ embedding_initializer=\"uniform\",\n+ mask_zero=False,\n+ input_length=None,\n+ combiner=None,\n+ **kwargs\n+ ):\n+ if \"input_shape\" not in kwargs and input_length:\n+ kwargs[\"input_shape\"] = (input_length,)\n+ super(Embedding, self).__init__(**kwargs)\n+\n+ self.output_dim = output_dim\n self.embedding_initializer = embedding_initializer\n+ self.mask_zero = mask_zero\n+ self.input_length = input_length\n+ self.combiner = combiner\n self.tape = None\n self.worker = None\n self.bet_ids_pair = []\n \n @tf_utils.shape_type_conversion\n def compute_output_shape(self, input_shape):\n- return input_shape + (self.embedding_dim,)\n+ # this function is taken from\n+ # tf.keras.layers.Embedding.compute_output_shape\n+ # https://github.com/tensorflow/tensorflow/blob/3f3c728bf80e0fd6653744318cbbfe1454c6ddca/tensorflow/python/keras/layers/embeddings.py#L156\n+ if self.input_length is None:\n+ return input_shape + (self.output_dim,)\n+ else:\n+ if isinstance(self.input_length, (list, tuple)):\n+ in_lens = list(self.input_length)\n+ else:\n+ in_lens = [self.input_length]\n+ if len(in_lens) != len(input_shape) - 1:\n+ raise ValueError(\n+ '\"input_length\" is %s, '\n+ \"but received input has shape %s\"\n+ % (str(self.input_length), str(input_shape))\n+ )\n+ else:\n+ for i, (s1, s2) in enumerate(zip(in_lens, input_shape[1:])):\n+ if s1 is not None and s2 is not None and s1 != s2:\n+ raise ValueError(\n+ '\"input_length\" is %s, '\n+ \"but received input has shape %s\"\n+ % (str(self.input_length), str(input_shape))\n+ )\n+ elif s1 is None:\n+ in_lens[i] = s2\n+ return (input_shape[0],) + tuple(in_lens) + (self.output_dim,)\n \n @property\n def name(self):\n@@ -54,7 +107,7 @@\n self.bet_ids_pair.append((batch_embedding_tensor, unique_ids))\n outputs = tf.gather(batch_embedding_tensor, idx)\n outputs = tf.reshape(\n- outputs, ids.get_shape().concatenate(self.embedding_dim)\n+ outputs, ids.get_shape().concatenate(self.output_dim)\n )\n return 
outputs\n", "issue": "Modify elasticdl.layers.Embedding arguments and constructor according to design doc\nAccording to [design doc](https://github.com/wangkuiyi/elasticdl/blob/develop/elasticdl/doc/distributed_embedding_layer_design.md#elasticdllayersembedding):\r\n\r\n```\r\n__init__(\r\n output_dim,\r\n embeddings_initializer='uniform',\r\n mask_zero=False,\r\n input_length=None,\r\n combiner=None,\r\n)\r\n```\n", "before_files": [{"content": "import tensorflow as tf\nfrom tensorflow.python.keras.utils import tf_utils\n\n\nclass Embedding(tf.keras.layers.Layer):\n \"\"\"\n Input: indexes for the embedding entries\n shape is (batch_size, input_length)\n Output: Corresponding embedding vectors of the input indexes\n shape is (batch_size, input_length, embedding_dim)\n Arguments:\n embedding_dim: the dimension of the embedding vector\n embedding_initializer: Initializer for embedding table\n \"\"\"\n\n def __init__(self, embedding_dim, embedding_initializer=\"uniform\"):\n super(Embedding, self).__init__()\n self.embedding_dim = embedding_dim\n self.embedding_initializer = embedding_initializer\n self.tape = None\n self.worker = None\n self.bet_ids_pair = []\n\n @tf_utils.shape_type_conversion\n def compute_output_shape(self, input_shape):\n return input_shape + (self.embedding_dim,)\n\n @property\n def name(self):\n return self._name\n\n @staticmethod\n def get_key(name_list):\n return \"-\".join(map(str, name_list))\n\n def lookup_embedding(self, unique_ids):\n batch_embedding = self.worker.embedding_lookup(\n unique_ids, self._name, self.embedding_initializer\n )\n return batch_embedding\n\n def call(self, input):\n ids = tf.convert_to_tensor(input, name=\"embedding_ids\")\n flat_ids = tf.reshape(ids, [-1])\n unique_ids, idx = tf.unique(flat_ids)\n batch_embedding_tensor = tf.py_function(\n self.lookup_embedding, inp=[unique_ids], Tout=tf.float32\n )\n if self.tape:\n # tape.watch works with eager mode only\n if not tf.executing_eagerly():\n raise RuntimeError(\"tape.watch only works with eager mode\")\n self.tape.watch(batch_embedding_tensor)\n self.bet_ids_pair.append((batch_embedding_tensor, unique_ids))\n outputs = tf.gather(batch_embedding_tensor, idx)\n outputs = tf.reshape(\n outputs, ids.get_shape().concatenate(self.embedding_dim)\n )\n return outputs\n\n def reset(self):\n self.bet_ids_pair = []\n self.tape = None\n\n def set_tape(self, tape):\n self.tape = tape\n\n def set_worker(self, worker):\n self.worker = worker\n", "path": "elasticdl/python/elasticdl/layers/embedding.py"}]}
1,254
1,008
gh_patches_debug_28888
rasdani/github-patches
git_diff
chainer__chainer-1376
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> GPU implementation of transpose_sequence is too slow As @jnory mentioned in #1174, `transpose_sequence` is too slow. We need to make a single kernel to make a transposed matrix. </issue> <code> [start of chainer/functions/array/transpose_sequence.py] 1 import numpy 2 3 from chainer import cuda 4 from chainer import function 5 from chainer.utils import type_check 6 7 8 def _transpose(xs, length): 9 xp = cuda.get_array_module(*xs) 10 lengths = numpy.zeros(length, dtype='i') 11 for i, x in enumerate(xs): 12 lengths[0:len(x)] = i + 1 13 dtype = xs[0].dtype 14 unit = xs[0].shape[1:] 15 outs = tuple([xp.empty((l,) + unit, dtype=dtype) for l in lengths]) 16 17 for i, x in enumerate(xs): 18 for p, xi in enumerate(x): 19 outs[p][i] = xi 20 21 return outs 22 23 24 class TransposeSequence(function.Function): 25 26 """Function that transposes a list of Variables.""" 27 28 def check_type_forward(self, xs_type): 29 for p, n in zip(xs_type, xs_type[1:]): 30 type_check.expect( 31 p.shape[0] >= n.shape[0], 32 p.shape[1:] == n.shape[1:], 33 ) 34 35 def forward(self, xs): 36 if len(xs) == 0: 37 return () 38 return _transpose(xs, len(xs[0])) 39 40 def backward(self, xs, gs): 41 return _transpose(gs, len(xs)) 42 43 44 def transpose_sequence(xs): 45 """Transpose a list of Variables. 46 47 This function transposes a list of :class:`~chainer.Variable` s and returns 48 a list of :class:`Variable` s. 49 For exampe a user gives ``[(0, 1, 2, 3), (4, 5), (6)]``, the function 50 returns ``[(0, 4, 6), (1, 5), (2), (3)]``. 51 Note that a given list needs to be sorted by each length of 52 :class:`~chainer.Variable`. 53 54 Args: 55 xs (list of ~chainer.Variable): Variables to transpose. 56 57 Returns: 58 tuple or Variable: Transposed list. 59 """ 60 ys = TransposeSequence()(*xs) 61 if not isinstance(ys, tuple): 62 ys = (ys,) 63 return ys 64 [end of chainer/functions/array/transpose_sequence.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/chainer/functions/array/transpose_sequence.py b/chainer/functions/array/transpose_sequence.py --- a/chainer/functions/array/transpose_sequence.py +++ b/chainer/functions/array/transpose_sequence.py @@ -6,17 +6,55 @@ def _transpose(xs, length): - xp = cuda.get_array_module(*xs) - lengths = numpy.zeros(length, dtype='i') - for i, x in enumerate(xs): - lengths[0:len(x)] = i + 1 - dtype = xs[0].dtype - unit = xs[0].shape[1:] - outs = tuple([xp.empty((l,) + unit, dtype=dtype) for l in lengths]) + if length == 0: + return () + xp = cuda.get_array_module(*xs) + lengths = numpy.empty(length, dtype='i') + end = length for i, x in enumerate(xs): - for p, xi in enumerate(x): - outs[p][i] = xi + lengths[len(x):end] = i + end = len(x) + lengths[0:end] = len(xs) + + if xp is numpy: + dtype = xs[0].dtype + unit = xs[0].shape[1:] + + outs = tuple([xp.empty((l,) + unit, dtype=dtype) for l in lengths]) + for i, x in enumerate(xs): + for p, xi in enumerate(x): + outs[p][i] = xi + + else: + offsets1 = numpy.empty(len(xs) + 1, dtype='i') + offsets1[0] = 0 + numpy.cumsum([len(x) for x in xs], out=offsets1[1:]) + + offsets2 = numpy.empty(length + 1, dtype='i') + offsets2[0] = 0 + numpy.cumsum(lengths, dtype='i', out=offsets2[1:]) + + x = xp.concatenate(xs, axis=0) + o = xp.empty_like(x) + unit = xs[0].size // len(xs[0]) + size = length * len(xs) * unit + cuda.elementwise( + 'int32 len, int32 unit, raw int32 off1, raw int32 off2, raw T vs', + 'raw T hs', + ''' + int ind = i / unit; + int off = i - ind * unit; + int y = ind / len; + int x = ind - y * len; + if (off2[x] + y < off2[x + 1]) { + hs[(off2[x] + y) * unit + off] = vs[(off1[y] + x) * unit + off]; + } + ''', + 'transpose_sequence' + )(length, unit, cuda.to_gpu(offsets1), cuda.to_gpu(offsets2), x, o, + size=size) + outs = tuple(xp.split(o, offsets2[1:-1])) return outs
{"golden_diff": "diff --git a/chainer/functions/array/transpose_sequence.py b/chainer/functions/array/transpose_sequence.py\n--- a/chainer/functions/array/transpose_sequence.py\n+++ b/chainer/functions/array/transpose_sequence.py\n@@ -6,17 +6,55 @@\n \n \n def _transpose(xs, length):\n- xp = cuda.get_array_module(*xs)\n- lengths = numpy.zeros(length, dtype='i')\n- for i, x in enumerate(xs):\n- lengths[0:len(x)] = i + 1\n- dtype = xs[0].dtype\n- unit = xs[0].shape[1:]\n- outs = tuple([xp.empty((l,) + unit, dtype=dtype) for l in lengths])\n+ if length == 0:\n+ return ()\n \n+ xp = cuda.get_array_module(*xs)\n+ lengths = numpy.empty(length, dtype='i')\n+ end = length\n for i, x in enumerate(xs):\n- for p, xi in enumerate(x):\n- outs[p][i] = xi\n+ lengths[len(x):end] = i\n+ end = len(x)\n+ lengths[0:end] = len(xs)\n+\n+ if xp is numpy:\n+ dtype = xs[0].dtype\n+ unit = xs[0].shape[1:]\n+\n+ outs = tuple([xp.empty((l,) + unit, dtype=dtype) for l in lengths])\n+ for i, x in enumerate(xs):\n+ for p, xi in enumerate(x):\n+ outs[p][i] = xi\n+\n+ else:\n+ offsets1 = numpy.empty(len(xs) + 1, dtype='i')\n+ offsets1[0] = 0\n+ numpy.cumsum([len(x) for x in xs], out=offsets1[1:])\n+\n+ offsets2 = numpy.empty(length + 1, dtype='i')\n+ offsets2[0] = 0\n+ numpy.cumsum(lengths, dtype='i', out=offsets2[1:])\n+\n+ x = xp.concatenate(xs, axis=0)\n+ o = xp.empty_like(x)\n+ unit = xs[0].size // len(xs[0])\n+ size = length * len(xs) * unit\n+ cuda.elementwise(\n+ 'int32 len, int32 unit, raw int32 off1, raw int32 off2, raw T vs',\n+ 'raw T hs',\n+ '''\n+ int ind = i / unit;\n+ int off = i - ind * unit;\n+ int y = ind / len;\n+ int x = ind - y * len;\n+ if (off2[x] + y < off2[x + 1]) {\n+ hs[(off2[x] + y) * unit + off] = vs[(off1[y] + x) * unit + off];\n+ }\n+ ''',\n+ 'transpose_sequence'\n+ )(length, unit, cuda.to_gpu(offsets1), cuda.to_gpu(offsets2), x, o,\n+ size=size)\n+ outs = tuple(xp.split(o, offsets2[1:-1]))\n \n return outs\n", "issue": "GPU implementation of transpose_sequence is too slow\nAs @jnory mentioned in #1174, `transpose_sequence` is too slow. 
We need to make a single kernel to make a transposed matrix.\n\n", "before_files": [{"content": "import numpy\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\ndef _transpose(xs, length):\n xp = cuda.get_array_module(*xs)\n lengths = numpy.zeros(length, dtype='i')\n for i, x in enumerate(xs):\n lengths[0:len(x)] = i + 1\n dtype = xs[0].dtype\n unit = xs[0].shape[1:]\n outs = tuple([xp.empty((l,) + unit, dtype=dtype) for l in lengths])\n\n for i, x in enumerate(xs):\n for p, xi in enumerate(x):\n outs[p][i] = xi\n\n return outs\n\n\nclass TransposeSequence(function.Function):\n\n \"\"\"Function that transposes a list of Variables.\"\"\"\n\n def check_type_forward(self, xs_type):\n for p, n in zip(xs_type, xs_type[1:]):\n type_check.expect(\n p.shape[0] >= n.shape[0],\n p.shape[1:] == n.shape[1:],\n )\n\n def forward(self, xs):\n if len(xs) == 0:\n return ()\n return _transpose(xs, len(xs[0]))\n\n def backward(self, xs, gs):\n return _transpose(gs, len(xs))\n\n\ndef transpose_sequence(xs):\n \"\"\"Transpose a list of Variables.\n\n This function transposes a list of :class:`~chainer.Variable` s and returns\n a list of :class:`Variable` s.\n For exampe a user gives ``[(0, 1, 2, 3), (4, 5), (6)]``, the function\n returns ``[(0, 4, 6), (1, 5), (2), (3)]``.\n Note that a given list needs to be sorted by each length of\n :class:`~chainer.Variable`.\n\n Args:\n xs (list of ~chainer.Variable): Variables to transpose.\n\n Returns:\n tuple or Variable: Transposed list.\n \"\"\"\n ys = TransposeSequence()(*xs)\n if not isinstance(ys, tuple):\n ys = (ys,)\n return ys\n", "path": "chainer/functions/array/transpose_sequence.py"}]}
1,174
695
gh_patches_debug_35027
rasdani/github-patches
git_diff
PokemonGoF__PokemonGo-Bot-4564
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [Feature Request] Differentiate Between Unlimited and "Purchased" Incubator use. Blue incubators are difficult to get so I use it only for 10km eggs. Is there any configuration to prevent bot from using Blue Incubator? </issue> <code> [start of pokemongo_bot/cell_workers/incubate_eggs.py] 1 from datetime import datetime, timedelta 2 3 from pokemongo_bot.human_behaviour import sleep 4 from pokemongo_bot.base_task import BaseTask 5 6 7 class IncubateEggs(BaseTask): 8 SUPPORTED_TASK_API_VERSION = 1 9 10 last_km_walked = 0 11 12 def initialize(self): 13 self.next_update = None 14 self.ready_incubators = [] 15 self.used_incubators = [] 16 self.eggs = [] 17 self.km_walked = 0 18 self.hatching_animation_delay = 4.20 19 self.max_iv = 45.0 20 21 self._process_config() 22 23 def _process_config(self): 24 self.longer_eggs_first = self.config.get("longer_eggs_first", True) 25 self.min_interval = self.config.get('min_interval', 120) 26 27 def work(self): 28 try: 29 self._check_inventory() 30 except: 31 return 32 33 if self.used_incubators and IncubateEggs.last_km_walked != self.km_walked: 34 self.used_incubators.sort(key=lambda x: x.get("km")) 35 km_left = self.used_incubators[0]['km']-self.km_walked 36 if km_left <= 0: 37 self._hatch_eggs() 38 else: 39 self.bot.metrics.next_hatching_km(km_left) 40 41 if self._should_print(): 42 self._print_eggs() 43 self._compute_next_update() 44 45 IncubateEggs.last_km_walked = self.km_walked 46 47 sorting = self.longer_eggs_first 48 self.eggs.sort(key=lambda x: x.get("km"), reverse=sorting) 49 50 if self.ready_incubators: 51 self._apply_incubators() 52 53 def _apply_incubators(self): 54 for incubator in self.ready_incubators: 55 if incubator.get('used', False): 56 continue 57 for egg in self.eggs: 58 if egg["used"] or egg["km"] == -1: 59 continue 60 self.emit_event( 61 'incubate_try', 62 level='debug', 63 formatted="Attempting to apply incubator {incubator_id} to egg {egg_id}", 64 data={ 65 'incubator_id': incubator['id'], 66 'egg_id': egg['id'] 67 } 68 ) 69 ret = self.bot.api.use_item_egg_incubator( 70 item_id=incubator["id"], 71 pokemon_id=egg["id"] 72 ) 73 if ret: 74 code = ret.get("responses", {}).get("USE_ITEM_EGG_INCUBATOR", {}).get("result", 0) 75 if code == 1: 76 self.emit_event( 77 'incubate', 78 formatted='Incubating a {distance_in_km} egg.', 79 data={ 80 'distance_in_km': str(egg['km']) 81 } 82 ) 83 egg["used"] = True 84 incubator["used"] = True 85 break 86 elif code == 5 or code == 7: 87 self.emit_event( 88 'incubator_already_used', 89 level='debug', 90 formatted='Incubator in use.', 91 ) 92 incubator["used"] = True 93 break 94 elif code == 6: 95 self.emit_event( 96 'egg_already_incubating', 97 level='debug', 98 formatted='Egg already incubating', 99 ) 100 egg["used"] = True 101 102 def _check_inventory(self, lookup_ids=[]): 103 inv = {} 104 response_dict = self.bot.api.get_inventory() 105 matched_pokemon = [] 106 temp_eggs = [] 107 temp_used_incubators = [] 108 temp_ready_incubators = [] 109 inv = reduce( 110 dict.__getitem__, 111 ["responses", "GET_INVENTORY", "inventory_delta", "inventory_items"], 112 response_dict 113 ) 114 for inv_data in inv: 115 inv_data = inv_data.get("inventory_item_data", {}) 116 if "egg_incubators" in inv_data: 117 temp_used_incubators = [] 118 temp_ready_incubators = [] 119 incubators = inv_data.get("egg_incubators", {}).get("egg_incubator",[]) 120 if isinstance(incubators, basestring): # checking for old response 121 
incubators = [incubators] 122 for incubator in incubators: 123 if 'pokemon_id' in incubator: 124 start_km = incubator.get('start_km_walked', 9001) 125 km_walked = incubator.get('target_km_walked', 9001) 126 temp_used_incubators.append({ 127 "id": incubator.get('id', -1), 128 "km": km_walked, 129 "km_needed": (km_walked - start_km) 130 }) 131 else: 132 temp_ready_incubators.append({ 133 "id": incubator.get('id', -1) 134 }) 135 continue 136 if "pokemon_data" in inv_data: 137 pokemon = inv_data.get("pokemon_data", {}) 138 if pokemon.get("is_egg", False) and "egg_incubator_id" not in pokemon: 139 temp_eggs.append({ 140 "id": pokemon.get("id", -1), 141 "km": pokemon.get("egg_km_walked_target", -1), 142 "used": False 143 }) 144 elif 'is_egg' not in pokemon and pokemon['id'] in lookup_ids: 145 pokemon.update({ 146 "iv": [ 147 pokemon.get('individual_attack', 0), 148 pokemon.get('individual_defense', 0), 149 pokemon.get('individual_stamina', 0) 150 ]}) 151 matched_pokemon.append(pokemon) 152 continue 153 if "player_stats" in inv_data: 154 self.km_walked = inv_data.get("player_stats", {}).get("km_walked", 0) 155 if temp_used_incubators: 156 self.used_incubators = temp_used_incubators 157 if temp_ready_incubators: 158 self.ready_incubators = temp_ready_incubators 159 if temp_eggs: 160 self.eggs = temp_eggs 161 return matched_pokemon 162 163 def _hatch_eggs(self): 164 response_dict = self.bot.api.get_hatched_eggs() 165 log_color = 'green' 166 try: 167 result = reduce(dict.__getitem__, ["responses", "GET_HATCHED_EGGS"], response_dict) 168 except KeyError: 169 return 170 pokemon_ids = [] 171 if 'pokemon_id' in result: 172 pokemon_ids = [id for id in result['pokemon_id']] 173 stardust = result.get('stardust_awarded', "error") 174 candy = result.get('candy_awarded', "error") 175 xp = result.get('experience_awarded', "error") 176 sleep(self.hatching_animation_delay) 177 self.bot.latest_inventory = None 178 try: 179 pokemon_data = self._check_inventory(pokemon_ids) 180 for pokemon in pokemon_data: 181 # pokemon ids seem to be offset by one 182 if pokemon['pokemon_id']!=-1: 183 pokemon['name'] = self.bot.pokemon_list[(pokemon.get('pokemon_id')-1)]['Name'] 184 else: 185 pokemon['name'] = "error" 186 except: 187 pokemon_data = [{"name":"error","cp":"error","iv":"error"}] 188 if not pokemon_ids or pokemon_data[0]['name'] == "error": 189 self.emit_event( 190 'egg_hatched', 191 data={ 192 'pokemon': 'error', 193 'cp': 'error', 194 'iv': 'error', 195 'exp': 'error', 196 'stardust': 'error', 197 'candy': 'error', 198 } 199 ) 200 return 201 for i in range(len(pokemon_data)): 202 msg = "Egg hatched with a {pokemon} (CP {cp} - IV {iv}), {exp} exp, {stardust} stardust and {candy} candies." 
203 self.bot.metrics.hatched_eggs(1) 204 self.emit_event( 205 'egg_hatched', 206 formatted=msg, 207 data={ 208 'pokemon': pokemon_data[i]['name'], 209 'cp': pokemon_data[i]['cp'], 210 'iv': "{} {}".format( 211 "/".join(map(str, pokemon_data[i]['iv'])), 212 round(sum(pokemon_data[i]['iv'])/self.max_iv, 2) 213 ), 214 'exp': xp[i], 215 'stardust': stardust[i], 216 'candy': candy[i], 217 } 218 ) 219 220 def _print_eggs(self): 221 if not self.used_incubators: 222 return 223 224 self.used_incubators.sort(key=lambda x: x.get("km")) 225 226 eggs = ['{:.2f}/{} km'.format(e['km_needed']-e['km']+self.km_walked, e['km_needed']) for e in self.used_incubators] 227 228 self.emit_event( 229 'next_egg_incubates', 230 formatted='Eggs incubating: [{eggs}] (Eggs left: {eggs_left}, Incubating: {eggs_inc})', 231 data={ 232 'eggs_left': len(self.eggs), 233 'eggs_inc': len(self.used_incubators), 234 'eggs': ', '.join(eggs) 235 } 236 ) 237 238 def _should_print(self): 239 """ 240 Returns a value indicating whether the eggs should be displayed. 241 :return: True if the stats should be displayed; otherwise, False. 242 :rtype: bool 243 """ 244 return self.next_update is None or datetime.now() >= self.next_update 245 246 def _compute_next_update(self): 247 """ 248 Computes the next update datetime based on the minimum update interval. 249 :return: Nothing. 250 :rtype: None 251 """ 252 self.next_update = datetime.now() + timedelta(seconds=self.min_interval) [end of pokemongo_bot/cell_workers/incubate_eggs.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pokemongo_bot/cell_workers/incubate_eggs.py b/pokemongo_bot/cell_workers/incubate_eggs.py --- a/pokemongo_bot/cell_workers/incubate_eggs.py +++ b/pokemongo_bot/cell_workers/incubate_eggs.py @@ -23,7 +23,10 @@ def _process_config(self): self.longer_eggs_first = self.config.get("longer_eggs_first", True) self.min_interval = self.config.get('min_interval', 120) - + + self.breakable_incubator = self.config.get("breakable", []) + self.infinite_incubator = self.config.get("infinite", []) + def work(self): try: self._check_inventory() @@ -57,6 +60,19 @@ for egg in self.eggs: if egg["used"] or egg["km"] == -1: continue + + if self.breakable_incubator: + # test if the incubator is of type breakable + if incubator.get('uses_remaining') is not None: + if egg["km"] not in self.breakable_incubator: + continue + + if self.infinite_incubator: + # test if the incubator is of type infinite + if incubator.get('uses_remaining') is None: + if egg["km"] not in self.infinite_incubator: + continue + self.emit_event( 'incubate_try', level='debug', @@ -119,7 +135,7 @@ incubators = inv_data.get("egg_incubators", {}).get("egg_incubator",[]) if isinstance(incubators, basestring): # checking for old response incubators = [incubators] - for incubator in incubators: + for incubator in incubators: if 'pokemon_id' in incubator: start_km = incubator.get('start_km_walked', 9001) km_walked = incubator.get('target_km_walked', 9001)
{"golden_diff": "diff --git a/pokemongo_bot/cell_workers/incubate_eggs.py b/pokemongo_bot/cell_workers/incubate_eggs.py\n--- a/pokemongo_bot/cell_workers/incubate_eggs.py\n+++ b/pokemongo_bot/cell_workers/incubate_eggs.py\n@@ -23,7 +23,10 @@\n def _process_config(self):\n self.longer_eggs_first = self.config.get(\"longer_eggs_first\", True)\n self.min_interval = self.config.get('min_interval', 120)\n-\n+ \n+ self.breakable_incubator = self.config.get(\"breakable\", [])\n+ self.infinite_incubator = self.config.get(\"infinite\", [])\n+ \n def work(self):\n try:\n self._check_inventory()\n@@ -57,6 +60,19 @@\n for egg in self.eggs:\n if egg[\"used\"] or egg[\"km\"] == -1:\n continue\n+ \n+ if self.breakable_incubator:\n+ # test if the incubator is of type breakable\n+ if incubator.get('uses_remaining') is not None:\n+ if egg[\"km\"] not in self.breakable_incubator:\n+ continue\n+ \n+ if self.infinite_incubator:\n+ # test if the incubator is of type infinite\n+ if incubator.get('uses_remaining') is None:\n+ if egg[\"km\"] not in self.infinite_incubator:\n+ continue\n+ \n self.emit_event(\n 'incubate_try',\n level='debug',\n@@ -119,7 +135,7 @@\n incubators = inv_data.get(\"egg_incubators\", {}).get(\"egg_incubator\",[])\n if isinstance(incubators, basestring): # checking for old response\n incubators = [incubators]\n- for incubator in incubators:\n+ for incubator in incubators: \n if 'pokemon_id' in incubator:\n start_km = incubator.get('start_km_walked', 9001)\n km_walked = incubator.get('target_km_walked', 9001)\n", "issue": "[Feature Request] Differentiate Between Unlimited and \"Purchased\" Incubator use.\nBlue incubators are difficult to get so I use it only for 10km eggs. Is there any configuration to prevent bot from using Blue Incubator?\n\n", "before_files": [{"content": "from datetime import datetime, timedelta\n\nfrom pokemongo_bot.human_behaviour import sleep\nfrom pokemongo_bot.base_task import BaseTask\n\n\nclass IncubateEggs(BaseTask):\n SUPPORTED_TASK_API_VERSION = 1\n\n last_km_walked = 0\n\n def initialize(self):\n self.next_update = None\n self.ready_incubators = []\n self.used_incubators = []\n self.eggs = []\n self.km_walked = 0\n self.hatching_animation_delay = 4.20\n self.max_iv = 45.0\n\n self._process_config()\n\n def _process_config(self):\n self.longer_eggs_first = self.config.get(\"longer_eggs_first\", True)\n self.min_interval = self.config.get('min_interval', 120)\n\n def work(self):\n try:\n self._check_inventory()\n except:\n return\n\n if self.used_incubators and IncubateEggs.last_km_walked != self.km_walked:\n self.used_incubators.sort(key=lambda x: x.get(\"km\"))\n km_left = self.used_incubators[0]['km']-self.km_walked\n if km_left <= 0:\n self._hatch_eggs()\n else:\n self.bot.metrics.next_hatching_km(km_left)\n\n if self._should_print():\n self._print_eggs()\n self._compute_next_update()\n\n IncubateEggs.last_km_walked = self.km_walked\n\n sorting = self.longer_eggs_first\n self.eggs.sort(key=lambda x: x.get(\"km\"), reverse=sorting)\n\n if self.ready_incubators:\n self._apply_incubators()\n\n def _apply_incubators(self):\n for incubator in self.ready_incubators:\n if incubator.get('used', False):\n continue\n for egg in self.eggs:\n if egg[\"used\"] or egg[\"km\"] == -1:\n continue\n self.emit_event(\n 'incubate_try',\n level='debug',\n formatted=\"Attempting to apply incubator {incubator_id} to egg {egg_id}\",\n data={\n 'incubator_id': incubator['id'],\n 'egg_id': egg['id']\n }\n )\n ret = self.bot.api.use_item_egg_incubator(\n item_id=incubator[\"id\"],\n 
pokemon_id=egg[\"id\"]\n )\n if ret:\n code = ret.get(\"responses\", {}).get(\"USE_ITEM_EGG_INCUBATOR\", {}).get(\"result\", 0)\n if code == 1:\n self.emit_event(\n 'incubate',\n formatted='Incubating a {distance_in_km} egg.',\n data={\n 'distance_in_km': str(egg['km'])\n }\n )\n egg[\"used\"] = True\n incubator[\"used\"] = True\n break\n elif code == 5 or code == 7:\n self.emit_event(\n 'incubator_already_used',\n level='debug',\n formatted='Incubator in use.',\n )\n incubator[\"used\"] = True\n break\n elif code == 6:\n self.emit_event(\n 'egg_already_incubating',\n level='debug',\n formatted='Egg already incubating',\n )\n egg[\"used\"] = True\n\n def _check_inventory(self, lookup_ids=[]):\n inv = {}\n response_dict = self.bot.api.get_inventory()\n matched_pokemon = []\n temp_eggs = []\n temp_used_incubators = []\n temp_ready_incubators = []\n inv = reduce(\n dict.__getitem__,\n [\"responses\", \"GET_INVENTORY\", \"inventory_delta\", \"inventory_items\"],\n response_dict\n )\n for inv_data in inv:\n inv_data = inv_data.get(\"inventory_item_data\", {})\n if \"egg_incubators\" in inv_data:\n temp_used_incubators = []\n temp_ready_incubators = []\n incubators = inv_data.get(\"egg_incubators\", {}).get(\"egg_incubator\",[])\n if isinstance(incubators, basestring): # checking for old response\n incubators = [incubators]\n for incubator in incubators:\n if 'pokemon_id' in incubator:\n start_km = incubator.get('start_km_walked', 9001)\n km_walked = incubator.get('target_km_walked', 9001)\n temp_used_incubators.append({\n \"id\": incubator.get('id', -1),\n \"km\": km_walked,\n \"km_needed\": (km_walked - start_km)\n })\n else:\n temp_ready_incubators.append({\n \"id\": incubator.get('id', -1)\n })\n continue\n if \"pokemon_data\" in inv_data:\n pokemon = inv_data.get(\"pokemon_data\", {})\n if pokemon.get(\"is_egg\", False) and \"egg_incubator_id\" not in pokemon:\n temp_eggs.append({\n \"id\": pokemon.get(\"id\", -1),\n \"km\": pokemon.get(\"egg_km_walked_target\", -1),\n \"used\": False\n })\n elif 'is_egg' not in pokemon and pokemon['id'] in lookup_ids:\n pokemon.update({\n \"iv\": [\n pokemon.get('individual_attack', 0),\n pokemon.get('individual_defense', 0),\n pokemon.get('individual_stamina', 0)\n ]})\n matched_pokemon.append(pokemon)\n continue\n if \"player_stats\" in inv_data:\n self.km_walked = inv_data.get(\"player_stats\", {}).get(\"km_walked\", 0)\n if temp_used_incubators:\n self.used_incubators = temp_used_incubators\n if temp_ready_incubators:\n self.ready_incubators = temp_ready_incubators\n if temp_eggs:\n self.eggs = temp_eggs\n return matched_pokemon\n\n def _hatch_eggs(self):\n response_dict = self.bot.api.get_hatched_eggs()\n log_color = 'green'\n try:\n result = reduce(dict.__getitem__, [\"responses\", \"GET_HATCHED_EGGS\"], response_dict)\n except KeyError:\n return\n pokemon_ids = []\n if 'pokemon_id' in result:\n pokemon_ids = [id for id in result['pokemon_id']]\n stardust = result.get('stardust_awarded', \"error\")\n candy = result.get('candy_awarded', \"error\")\n xp = result.get('experience_awarded', \"error\")\n sleep(self.hatching_animation_delay)\n self.bot.latest_inventory = None\n try:\n pokemon_data = self._check_inventory(pokemon_ids)\n for pokemon in pokemon_data:\n # pokemon ids seem to be offset by one\n if pokemon['pokemon_id']!=-1:\n pokemon['name'] = self.bot.pokemon_list[(pokemon.get('pokemon_id')-1)]['Name']\n else:\n pokemon['name'] = \"error\"\n except:\n pokemon_data = [{\"name\":\"error\",\"cp\":\"error\",\"iv\":\"error\"}]\n if not pokemon_ids 
or pokemon_data[0]['name'] == \"error\":\n self.emit_event(\n 'egg_hatched',\n data={\n 'pokemon': 'error',\n 'cp': 'error',\n 'iv': 'error',\n 'exp': 'error',\n 'stardust': 'error',\n 'candy': 'error',\n }\n )\n return\n for i in range(len(pokemon_data)):\n msg = \"Egg hatched with a {pokemon} (CP {cp} - IV {iv}), {exp} exp, {stardust} stardust and {candy} candies.\"\n self.bot.metrics.hatched_eggs(1)\n self.emit_event(\n 'egg_hatched',\n formatted=msg,\n data={\n 'pokemon': pokemon_data[i]['name'],\n 'cp': pokemon_data[i]['cp'],\n 'iv': \"{} {}\".format(\n \"/\".join(map(str, pokemon_data[i]['iv'])),\n round(sum(pokemon_data[i]['iv'])/self.max_iv, 2)\n ),\n 'exp': xp[i],\n 'stardust': stardust[i],\n 'candy': candy[i],\n }\n )\n\n def _print_eggs(self):\n if not self.used_incubators:\n return\n\n self.used_incubators.sort(key=lambda x: x.get(\"km\"))\n \n eggs = ['{:.2f}/{} km'.format(e['km_needed']-e['km']+self.km_walked, e['km_needed']) for e in self.used_incubators]\n\n self.emit_event(\n 'next_egg_incubates',\n formatted='Eggs incubating: [{eggs}] (Eggs left: {eggs_left}, Incubating: {eggs_inc})',\n data={\n 'eggs_left': len(self.eggs),\n 'eggs_inc': len(self.used_incubators),\n 'eggs': ', '.join(eggs)\n }\n )\n \n def _should_print(self):\n \"\"\"\n Returns a value indicating whether the eggs should be displayed.\n :return: True if the stats should be displayed; otherwise, False.\n :rtype: bool\n \"\"\"\n return self.next_update is None or datetime.now() >= self.next_update\n\n def _compute_next_update(self):\n \"\"\"\n Computes the next update datetime based on the minimum update interval.\n :return: Nothing.\n :rtype: None\n \"\"\"\n self.next_update = datetime.now() + timedelta(seconds=self.min_interval)", "path": "pokemongo_bot/cell_workers/incubate_eggs.py"}]}
3,366
487
gh_patches_debug_29272
rasdani/github-patches
git_diff
acl-org__acl-anthology-1868
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Correction to Anthology ID 2021.paclic-1 In PACLIC 2021 proceedings (https://aclanthology.org/volumes/2021.paclic-1/), edit the indexed name for "Joseph Marvin R. Imperial" of paper 26 (https://aclanthology.org/2021.paclic-1.26/) to "Joseph Marvin Imperial" (no R.) to combine previously indexed papers with one author below: https://aclanthology.org/people/j/joseph-marvin-imperial/ </issue> <code> [start of bin/retract_paper.py] 1 #! /usr/bin/env python3 2 # -*- coding: utf-8 -*- 3 # 4 # Copyright 2020 Matt Post <[email protected]> 5 # 6 # Licensed under the Apache License, Version 2.0 (the "License"); 7 # you may not use this file except in compliance with the License. 8 # You may obtain a copy of the License at 9 # 10 # http://www.apache.org/licenses/LICENSE-2.0 11 # 12 # Unless required by applicable law or agreed to in writing, software 13 # distributed under the License is distributed on an "AS IS" BASIS, 14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 # See the License for the specific language governing permissions and 16 # limitations under the License. 17 18 """ 19 Takes an Anthology ID, downloads the PDF, and produces a revision PDF 20 with a "RETRACTED" watermark, as well as a note at the top pointing 21 to the paper page. Also revises the XML. 22 """ 23 24 import argparse 25 import os 26 import shutil 27 import subprocess 28 import sys 29 import tempfile 30 31 from string import Template 32 33 from anthology.utils import ( 34 retrieve_url, 35 deconstruct_anthology_id, 36 make_simple_element, 37 get_xml_file, 38 indent, 39 ) 40 from anthology.data import CANONICAL_URL_TEMPLATE, PDF_LOCATION_TEMPLATE 41 from add_revision import add_revision 42 43 from datetime import datetime 44 45 import lxml.etree as ET 46 47 template = Template( 48 r"""\documentclass{article} 49 \usepackage[printwatermark]{xwatermark} 50 \usepackage{xcolor} 51 \usepackage{graphicx} 52 \usepackage{pdfpages} 53 \usepackage{hyperref} 54 \hypersetup{plainpages=false, 55 pdfpagemode=none, 56 colorlinks=true, 57 unicode=true 58 } 59 60 % "allpages" didn't work 61 \newwatermark[pages=1-1000,color=red!80,angle=45,scale=3,xpos=-6,ypos=0]{RETRACTED} 62 63 % set A4 64 \setlength{\paperwidth}{21cm} 65 \setlength{\paperheight}{29.7cm} 66 67 \special{papersize=21cm,29.7cm} 68 \pdfpageheight\paperheight 69 \pdfpagewidth\paperwidth 70 \pagestyle{plain} 71 72 \begin{document} 73 74 \AddToShipoutPicture{% 75 \setlength{\unitlength}{1mm} 76 % center box at (x, y) millimeters from bottom-left corner 77 \put(105,290){\makebox(0,0){This paper was retracted. For more information, see \url{$url}.}} 78 } 79 80 \includepdf[pages=-]{$file} 81 82 \end{document}""" 83 ) 84 85 86 def add_watermark(anth_id, workdir="."): 87 """ 88 Downloads an Anthology paper and adds a RETRACTED watermark. 89 """ 90 page = CANONICAL_URL_TEMPLATE.format(anth_id) 91 url = PDF_LOCATION_TEMPLATE.format(anth_id) 92 orig_pdf = os.path.join(workdir, "tmp.pdf") 93 94 retrieve_url(url, orig_pdf) 95 96 tex_file = os.path.join(workdir, f"{anth_id}.tex") 97 print("TEX_FILE", tex_file) 98 with open(tex_file, "w") as f: 99 print(template.substitute(file=orig_pdf, url=page), file=f) 100 101 command = f"pdflatex {tex_file}" 102 try: 103 subprocess.call( 104 command, shell=True, cwd=workdir, stdout=subprocess.DEVNULL, timeout=60 105 ) 106 except TimeoutExpired: 107 print( 108 "pdflatex didn't finish within 60 seconds. 
Do you have the CTAN watermark package installed?", 109 file=sys.stderr, 110 ) 111 sys.exit(1) 112 113 new_pdf = f"{tex_file}".replace(".tex", ".pdf") 114 115 return new_pdf 116 117 118 def main(args): 119 """ 120 Downloads an Anthology paper and adds a RETRACTED watermark, then updates the XML 121 with an appropriate <revision> and <retracted> tag. 122 """ 123 124 with tempfile.TemporaryDirectory() as tempdir: 125 126 new_pdf = add_watermark(args.anthology_id, workdir=tempdir) 127 128 add_revision( 129 args.anthology_id, 130 new_pdf, 131 explanation="Retracted.", 132 change_type="revision", 133 dry_run=False, 134 ) 135 136 xml_file = get_xml_file(args.anthology_id) 137 collection_id, volume_id, paper_id = deconstruct_anthology_id(args.anthology_id) 138 tree = ET.parse(xml_file) 139 if paper_id == "0": 140 paper = tree.getroot().find(f"./volume[@id='{volume_id}']/frontmatter") 141 else: 142 paper = tree.getroot().find( 143 f"./volume[@id='{volume_id}']/paper[@id='{paper_id}']" 144 ) 145 146 if paper is None: 147 print(f"Couldn't find paper {args.anthology_id}!", file=sys.stderr) 148 sys.exit(2) 149 150 print("Modifying the XML", file=sys.stderr) 151 now = datetime.now() 152 date = f"{now.year}-{now.month:02d}-{now.day:02d}" 153 retracted_node = make_simple_element( 154 "retracted", args.explanation, attrib={"date": date}, parent=paper 155 ) 156 indent(tree.getroot()) 157 tree.write(xml_file, encoding="UTF-8", xml_declaration=True) 158 159 160 if __name__ == "__main__": 161 parser = argparse.ArgumentParser() 162 parser.add_argument("anthology_id") 163 parser.add_argument("explanation", help="Brief description of the changes.") 164 args = parser.parse_args() 165 166 main(args) 167 [end of bin/retract_paper.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bin/retract_paper.py b/bin/retract_paper.py --- a/bin/retract_paper.py +++ b/bin/retract_paper.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python3 +#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # Copyright 2020 Matt Post <[email protected]> @@ -46,20 +46,17 @@ template = Template( r"""\documentclass{article} -\usepackage[printwatermark]{xwatermark} +\usepackage[text=RETRACTED,scale=3,color=red]{draftwatermark} \usepackage{xcolor} \usepackage{graphicx} \usepackage{pdfpages} \usepackage{hyperref} \hypersetup{plainpages=false, - pdfpagemode=none, + pdfpagemode=UseNone, colorlinks=true, unicode=true } -% "allpages" didn't work -\newwatermark[pages=1-1000,color=red!80,angle=45,scale=3,xpos=-6,ypos=0]{RETRACTED} - % set A4 \setlength{\paperwidth}{21cm} \setlength{\paperheight}{29.7cm} @@ -101,11 +98,11 @@ command = f"pdflatex {tex_file}" try: subprocess.call( - command, shell=True, cwd=workdir, stdout=subprocess.DEVNULL, timeout=60 + command, shell=True, cwd=workdir, stdout=subprocess.DEVNULL, timeout=30 ) - except TimeoutExpired: + except subprocess.TimeoutExpired: print( - "pdflatex didn't finish within 60 seconds. Do you have the CTAN watermark package installed?", + "pdflatex didn't finish within 30 seconds. Do you have the CTAN watermark package installed?", file=sys.stderr, ) sys.exit(1)
{"golden_diff": "diff --git a/bin/retract_paper.py b/bin/retract_paper.py\n--- a/bin/retract_paper.py\n+++ b/bin/retract_paper.py\n@@ -1,4 +1,4 @@\n-#! /usr/bin/env python3\n+#!/usr/bin/env python3\n # -*- coding: utf-8 -*-\n #\n # Copyright 2020 Matt Post <[email protected]>\n@@ -46,20 +46,17 @@\n \n template = Template(\n r\"\"\"\\documentclass{article}\n-\\usepackage[printwatermark]{xwatermark}\n+\\usepackage[text=RETRACTED,scale=3,color=red]{draftwatermark}\n \\usepackage{xcolor}\n \\usepackage{graphicx}\n \\usepackage{pdfpages}\n \\usepackage{hyperref}\n \\hypersetup{plainpages=false,\n- pdfpagemode=none,\n+ pdfpagemode=UseNone,\n colorlinks=true,\n unicode=true\n }\n \n-% \"allpages\" didn't work\n-\\newwatermark[pages=1-1000,color=red!80,angle=45,scale=3,xpos=-6,ypos=0]{RETRACTED}\n-\n % set A4\n \\setlength{\\paperwidth}{21cm}\n \\setlength{\\paperheight}{29.7cm}\n@@ -101,11 +98,11 @@\n command = f\"pdflatex {tex_file}\"\n try:\n subprocess.call(\n- command, shell=True, cwd=workdir, stdout=subprocess.DEVNULL, timeout=60\n+ command, shell=True, cwd=workdir, stdout=subprocess.DEVNULL, timeout=30\n )\n- except TimeoutExpired:\n+ except subprocess.TimeoutExpired:\n print(\n- \"pdflatex didn't finish within 60 seconds. Do you have the CTAN watermark package installed?\",\n+ \"pdflatex didn't finish within 30 seconds. Do you have the CTAN watermark package installed?\",\n file=sys.stderr,\n )\n sys.exit(1)\n", "issue": "Correction to Anthology ID 2021.paclic-1\nIn PACLIC 2021 proceedings (https://aclanthology.org/volumes/2021.paclic-1/), edit the indexed name for \"Joseph Marvin R. Imperial\" of paper 26 (https://aclanthology.org/2021.paclic-1.26/) to \"Joseph Marvin Imperial\" (no R.) to combine previously indexed papers with one author below:\r\n\r\nhttps://aclanthology.org/people/j/joseph-marvin-imperial/\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Copyright 2020 Matt Post <[email protected]>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nTakes an Anthology ID, downloads the PDF, and produces a revision PDF\nwith a \"RETRACTED\" watermark, as well as a note at the top pointing\nto the paper page. 
Also revises the XML.\n\"\"\"\n\nimport argparse\nimport os\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\n\nfrom string import Template\n\nfrom anthology.utils import (\n retrieve_url,\n deconstruct_anthology_id,\n make_simple_element,\n get_xml_file,\n indent,\n)\nfrom anthology.data import CANONICAL_URL_TEMPLATE, PDF_LOCATION_TEMPLATE\nfrom add_revision import add_revision\n\nfrom datetime import datetime\n\nimport lxml.etree as ET\n\ntemplate = Template(\n r\"\"\"\\documentclass{article}\n\\usepackage[printwatermark]{xwatermark}\n\\usepackage{xcolor}\n\\usepackage{graphicx}\n\\usepackage{pdfpages}\n\\usepackage{hyperref}\n\\hypersetup{plainpages=false,\n pdfpagemode=none,\n colorlinks=true,\n unicode=true\n}\n\n% \"allpages\" didn't work\n\\newwatermark[pages=1-1000,color=red!80,angle=45,scale=3,xpos=-6,ypos=0]{RETRACTED}\n\n% set A4\n\\setlength{\\paperwidth}{21cm}\n\\setlength{\\paperheight}{29.7cm}\n\n\\special{papersize=21cm,29.7cm}\n\\pdfpageheight\\paperheight\n\\pdfpagewidth\\paperwidth\n\\pagestyle{plain}\n\n\\begin{document}\n\n\\AddToShipoutPicture{%\n \\setlength{\\unitlength}{1mm}\n % center box at (x, y) millimeters from bottom-left corner\n \\put(105,290){\\makebox(0,0){This paper was retracted. For more information, see \\url{$url}.}}\n}\n\n\\includepdf[pages=-]{$file}\n\n\\end{document}\"\"\"\n)\n\n\ndef add_watermark(anth_id, workdir=\".\"):\n \"\"\"\n Downloads an Anthology paper and adds a RETRACTED watermark.\n \"\"\"\n page = CANONICAL_URL_TEMPLATE.format(anth_id)\n url = PDF_LOCATION_TEMPLATE.format(anth_id)\n orig_pdf = os.path.join(workdir, \"tmp.pdf\")\n\n retrieve_url(url, orig_pdf)\n\n tex_file = os.path.join(workdir, f\"{anth_id}.tex\")\n print(\"TEX_FILE\", tex_file)\n with open(tex_file, \"w\") as f:\n print(template.substitute(file=orig_pdf, url=page), file=f)\n\n command = f\"pdflatex {tex_file}\"\n try:\n subprocess.call(\n command, shell=True, cwd=workdir, stdout=subprocess.DEVNULL, timeout=60\n )\n except TimeoutExpired:\n print(\n \"pdflatex didn't finish within 60 seconds. 
Do you have the CTAN watermark package installed?\",\n file=sys.stderr,\n )\n sys.exit(1)\n\n new_pdf = f\"{tex_file}\".replace(\".tex\", \".pdf\")\n\n return new_pdf\n\n\ndef main(args):\n \"\"\"\n Downloads an Anthology paper and adds a RETRACTED watermark, then updates the XML\n with an appropriate <revision> and <retracted> tag.\n \"\"\"\n\n with tempfile.TemporaryDirectory() as tempdir:\n\n new_pdf = add_watermark(args.anthology_id, workdir=tempdir)\n\n add_revision(\n args.anthology_id,\n new_pdf,\n explanation=\"Retracted.\",\n change_type=\"revision\",\n dry_run=False,\n )\n\n xml_file = get_xml_file(args.anthology_id)\n collection_id, volume_id, paper_id = deconstruct_anthology_id(args.anthology_id)\n tree = ET.parse(xml_file)\n if paper_id == \"0\":\n paper = tree.getroot().find(f\"./volume[@id='{volume_id}']/frontmatter\")\n else:\n paper = tree.getroot().find(\n f\"./volume[@id='{volume_id}']/paper[@id='{paper_id}']\"\n )\n\n if paper is None:\n print(f\"Couldn't find paper {args.anthology_id}!\", file=sys.stderr)\n sys.exit(2)\n\n print(\"Modifying the XML\", file=sys.stderr)\n now = datetime.now()\n date = f\"{now.year}-{now.month:02d}-{now.day:02d}\"\n retracted_node = make_simple_element(\n \"retracted\", args.explanation, attrib={\"date\": date}, parent=paper\n )\n indent(tree.getroot())\n tree.write(xml_file, encoding=\"UTF-8\", xml_declaration=True)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"anthology_id\")\n parser.add_argument(\"explanation\", help=\"Brief description of the changes.\")\n args = parser.parse_args()\n\n main(args)\n", "path": "bin/retract_paper.py"}]}
2,291
452
gh_patches_debug_622
rasdani/github-patches
git_diff
pex-tool__pex-1859
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Release 2.1.100 On the docket: + [x] Using --target-system linux --target-system mac can still lead to failed attempts to lock Windows requirements. #1856 </issue> <code> [start of pex/version.py] 1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 __version__ = "2.1.99" 5 [end of pex/version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pex/version.py b/pex/version.py --- a/pex/version.py +++ b/pex/version.py @@ -1,4 +1,4 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = "2.1.99" +__version__ = "2.1.100"
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.99\"\n+__version__ = \"2.1.100\"\n", "issue": "Release 2.1.100\nOn the docket:\r\n+ [x] Using --target-system linux --target-system mac can still lead to failed attempts to lock Windows requirements. #1856\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.99\"\n", "path": "pex/version.py"}]}
627
97
gh_patches_debug_16027
rasdani/github-patches
git_diff
SigmaHQ__sigma-1278
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Update sigma2attack to the latest navigator version the data generated for navigator is not up to date. ![Clipboard - 5 novembre 2020 15_38](https://user-images.githubusercontent.com/1626464/98255016-3675e080-1f7d-11eb-8490-321a7a053c04.png) specs on [att&ck navigator github](https://github.com/mitre-attack/attack-navigator/blob/master/layers/LAYERFORMATv4.md) </issue> <code> [start of tools/sigma/sigma2attack.py] 1 #!/usr/bin/env python3 2 3 import argparse 4 import glob 5 import json 6 import os 7 import sys 8 9 import yaml 10 11 def main(): 12 parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) 13 parser.add_argument("--rules-directory", "-d", dest="rules_dir", default="rules", help="Directory to read rules from") 14 parser.add_argument("--out-file", "-o", dest="out_file", default="heatmap.json", help="File to write the JSON layer to") 15 parser.add_argument("--no-comment", dest="no_comment", action="store_true", help="Don't store rule names in comments") 16 args = parser.parse_args() 17 18 rule_files = glob.glob(os.path.join(args.rules_dir, "**/*.yml"), recursive=True) 19 techniques_to_rules = {} 20 curr_max_technique_count = 0 21 num_rules_used = 0 22 for rule_file in rule_files: 23 try: 24 rule = yaml.safe_load(open(rule_file).read()) 25 except yaml.YAMLError: 26 sys.stderr.write("Ignoring rule " + rule_file + " (parsing failed)\n") 27 continue 28 if "tags" not in rule: 29 sys.stderr.write("Ignoring rule " + rule_file + " (no tags)\n") 30 continue 31 tags = rule["tags"] 32 for tag in tags: 33 if tag.lower().startswith("attack.t"): 34 technique_id = tag[len("attack."):].upper() 35 num_rules_used += 1 36 if technique_id not in techniques_to_rules: 37 techniques_to_rules[technique_id] = [] 38 techniques_to_rules[technique_id].append(os.path.basename(rule_file)) 39 curr_max_technique_count = max(curr_max_technique_count, len(techniques_to_rules[technique_id])) 40 41 42 scores = [] 43 for technique in techniques_to_rules: 44 entry = { 45 "techniqueID": technique, 46 "score": len(techniques_to_rules[technique]), 47 } 48 if not args.no_comment: 49 entry["comment"] = "\n".join(techniques_to_rules[technique]) 50 51 scores.append(entry) 52 53 output = { 54 "domain": "mitre-enterprise", 55 "name": "Sigma rules heatmap", 56 "gradient": { 57 "colors": [ 58 "#ffffff", 59 "#ff6666" 60 ], 61 "maxValue": curr_max_technique_count, 62 "minValue": 0 63 }, 64 "version": "2.2", 65 "techniques": scores, 66 } 67 68 with open(args.out_file, "w") as f: 69 f.write(json.dumps(output)) 70 print("[*] Layer file written in " + args.out_file + " (" + str(num_rules_used) + " rules)") 71 72 if __name__ == "__main__": 73 main() 74 [end of tools/sigma/sigma2attack.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/tools/sigma/sigma2attack.py b/tools/sigma/sigma2attack.py --- a/tools/sigma/sigma2attack.py +++ b/tools/sigma/sigma2attack.py @@ -21,7 +21,7 @@ num_rules_used = 0 for rule_file in rule_files: try: - rule = yaml.safe_load(open(rule_file).read()) + rule = yaml.safe_load(open(rule_file, encoding="utf-8").read()) except yaml.YAMLError: sys.stderr.write("Ignoring rule " + rule_file + " (parsing failed)\n") continue @@ -61,7 +61,10 @@ "maxValue": curr_max_technique_count, "minValue": 0 }, - "version": "2.2", + "versions": { + "navigator": "4.0", + "layer": "4.0" + }, "techniques": scores, }
{"golden_diff": "diff --git a/tools/sigma/sigma2attack.py b/tools/sigma/sigma2attack.py\n--- a/tools/sigma/sigma2attack.py\n+++ b/tools/sigma/sigma2attack.py\n@@ -21,7 +21,7 @@\n num_rules_used = 0\n for rule_file in rule_files:\n try:\n- rule = yaml.safe_load(open(rule_file).read())\n+ rule = yaml.safe_load(open(rule_file, encoding=\"utf-8\").read())\n except yaml.YAMLError:\n sys.stderr.write(\"Ignoring rule \" + rule_file + \" (parsing failed)\\n\")\n continue\n@@ -61,7 +61,10 @@\n \"maxValue\": curr_max_technique_count,\n \"minValue\": 0\n },\n- \"version\": \"2.2\", \n+ \"versions\": {\n+ \"navigator\": \"4.0\",\n+ \"layer\": \"4.0\"\n+ },\n \"techniques\": scores,\n }\n", "issue": "Update sigma2attack to the latest navigator version\nthe data generated for navigator is not up to date.\r\n\r\n![Clipboard - 5 novembre 2020 15_38](https://user-images.githubusercontent.com/1626464/98255016-3675e080-1f7d-11eb-8490-321a7a053c04.png)\r\n\r\nspecs on [att&ck navigator github](https://github.com/mitre-attack/attack-navigator/blob/master/layers/LAYERFORMATv4.md)\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport argparse\nimport glob\nimport json\nimport os\nimport sys\n\nimport yaml\n\ndef main():\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"--rules-directory\", \"-d\", dest=\"rules_dir\", default=\"rules\", help=\"Directory to read rules from\")\n parser.add_argument(\"--out-file\", \"-o\", dest=\"out_file\", default=\"heatmap.json\", help=\"File to write the JSON layer to\")\n parser.add_argument(\"--no-comment\", dest=\"no_comment\", action=\"store_true\", help=\"Don't store rule names in comments\")\n args = parser.parse_args()\n\n rule_files = glob.glob(os.path.join(args.rules_dir, \"**/*.yml\"), recursive=True)\n techniques_to_rules = {}\n curr_max_technique_count = 0\n num_rules_used = 0\n for rule_file in rule_files:\n try:\n rule = yaml.safe_load(open(rule_file).read())\n except yaml.YAMLError:\n sys.stderr.write(\"Ignoring rule \" + rule_file + \" (parsing failed)\\n\")\n continue\n if \"tags\" not in rule:\n sys.stderr.write(\"Ignoring rule \" + rule_file + \" (no tags)\\n\")\n continue\n tags = rule[\"tags\"]\n for tag in tags:\n if tag.lower().startswith(\"attack.t\"):\n technique_id = tag[len(\"attack.\"):].upper()\n num_rules_used += 1\n if technique_id not in techniques_to_rules:\n techniques_to_rules[technique_id] = []\n techniques_to_rules[technique_id].append(os.path.basename(rule_file))\n curr_max_technique_count = max(curr_max_technique_count, len(techniques_to_rules[technique_id]))\n\n\n scores = []\n for technique in techniques_to_rules:\n entry = {\n \"techniqueID\": technique, \n \"score\": len(techniques_to_rules[technique]), \n }\n if not args.no_comment:\n entry[\"comment\"] = \"\\n\".join(techniques_to_rules[technique])\n\n scores.append(entry)\n\n output = {\n \"domain\": \"mitre-enterprise\",\n \"name\": \"Sigma rules heatmap\",\n \"gradient\": {\n \"colors\": [\n \"#ffffff\",\n \"#ff6666\"\n ],\n \"maxValue\": curr_max_technique_count,\n \"minValue\": 0\n },\n \"version\": \"2.2\", \n \"techniques\": scores,\n }\n\n with open(args.out_file, \"w\") as f:\n f.write(json.dumps(output))\n print(\"[*] Layer file written in \" + args.out_file + \" (\" + str(num_rules_used) + \" rules)\")\n\nif __name__ == \"__main__\":\n main()\n", "path": "tools/sigma/sigma2attack.py"}]}
1,419
221
gh_patches_debug_17072
rasdani/github-patches
git_diff
Pycord-Development__pycord-1453
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [ext.bridge] Ephemeral kwarg causing errors in defer() and respond() ### Summary Using the ephemeral= keyword argument causes errors when it is passed into the prefix-command version of the bridge command. ### Reproduction Steps Using any `ephemeral` keyword arguments ### Minimal Reproducible Code ```python @bridge_command(name="commands", description="This page lists all commands") async def _commands(self, context: ApplicationContext) -> None: await context.defer(ephemeral=True) await context.respond("Commands List", ephemeral=True) ``` ### Expected Results The ephemeral keyword argument should instead be ignored. This could be as simple as a: ```py if kwargs.get("ephemeral") is not None: del kwargs["ephemeral"] ``` This is in fact exactly what I added in both `discord/message.py` line 1625 and `discord/ext/bridge/context.py` line 153 to get it to work temporarily. ### Actual Results ```py Ignoring exception in command commands: Traceback (most recent call last): File "/home/krishnan/Coding/Hobby/ChessCord/ChessCordv2.0/venv/lib/python3.10/site-packages/discord/ext/commands/core.py", line 181, in wrapped ret = await coro(*args, **kwargs) File "/home/krishnan/Coding/Hobby/ChessCord/ChessCordv2.0/slash/help.py", line 38, in _commands await context.defer(ephemeral=True) File "/home/krishnan/Coding/Hobby/ChessCord/ChessCordv2.0/venv/lib/python3.10/site-packages/discord/ext/bridge/context.py", line 101, in defer return await self._defer(*args, **kwargs) File "/home/krishnan/Coding/Hobby/ChessCord/ChessCordv2.0/venv/lib/python3.10/site-packages/discord/ext/bridge/context.py", line 153, in _defer return await self._get_super("trigger_typing")(*args, **kwargs) TypeError: Messageable.trigger_typing() got an unexpected keyword argument 'ephemeral' ``` and ```py Ignoring exception in command commands: Traceback (most recent call last): File "/home/krishnan/Coding/Hobby/ChessCord/ChessCordv2.0/venv/lib/python3.10/site-packages/discord/ext/commands/core.py", line 181, in wrapped ret = await coro(*args, **kwargs) File "/home/krishnan/Coding/Hobby/ChessCord/ChessCordv2.0/slash/help.py", line 40, in _commands await context.respond( File "/home/krishnan/Coding/Hobby/ChessCord/ChessCordv2.0/venv/lib/python3.10/site-packages/discord/ext/bridge/context.py", line 81, in respond return await self._respond(*args, **kwargs) File "/home/krishnan/Coding/Hobby/ChessCord/ChessCordv2.0/venv/lib/python3.10/site-packages/discord/ext/bridge/context.py", line 147, in _respond message = await self._get_super("reply")(*args, **kwargs) File "/home/krishnan/Coding/Hobby/ChessCord/ChessCordv2.0/venv/lib/python3.10/site-packages/discord/ext/commands/context.py", line 399, in reply return await self.message.reply(content, **kwargs) File "/home/krishnan/Coding/Hobby/ChessCord/ChessCordv2.0/venv/lib/python3.10/site-packages/discord/message.py", line 1625, in reply return await self.channel.send(content, reference=self, **kwargs) TypeError: Messageable.send() got an unexpected keyword argument 'ephemeral' ``` ### Intents discord.Intents.all ### System Information - Python v3.10.5-final - py-cord v2.0.0-candidate - py-cord pkg_resources: v2.0.0rc1 - aiohttp v3.8.1 - system info: Linux 5.15.48-1-MANJARO #1 SMP PREEMPT Thu Jun 16 12:33:56 UTC 2022 ### Checklist - [X] I have searched the open issues for duplicates. - [X] I have shown the entire traceback, if possible. - [X] I have removed my token from display, if visible. 
### Additional Context This can also apply to other slash-command-specific kwargs, which I can't specifically think of off the top of my head. </issue> <code> [start of discord/ext/bridge/context.py] 1 """ 2 The MIT License (MIT) 3 4 Copyright (c) 2015-2021 Rapptz 5 Copyright (c) 2021-present Pycord Development 6 7 Permission is hereby granted, free of charge, to any person obtaining a 8 copy of this software and associated documentation files (the "Software"), 9 to deal in the Software without restriction, including without limitation 10 the rights to use, copy, modify, merge, publish, distribute, sublicense, 11 and/or sell copies of the Software, and to permit persons to whom the 12 Software is furnished to do so, subject to the following conditions: 13 14 The above copyright notice and this permission notice shall be included in 15 all copies or substantial portions of the Software. 16 17 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 18 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 20 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 21 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 22 FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 23 DEALINGS IN THE SOFTWARE. 24 """ 25 from abc import ABC, abstractmethod 26 from typing import TYPE_CHECKING, Any, Optional, Union 27 28 from discord.commands import ApplicationContext 29 from discord.interactions import Interaction, InteractionMessage 30 from discord.message import Message 31 from discord.webhook import WebhookMessage 32 33 from ..commands import Context 34 35 __all__ = ("BridgeContext", "BridgeExtContext", "BridgeApplicationContext") 36 37 38 class BridgeContext(ABC): 39 """ 40 The base context class for compatibility commands. This class is an :class:`ABC` (abstract base class), which is 41 subclassed by :class:`BridgeExtContext` and :class:`BridgeApplicationContext`. The methods in this class are meant 42 to give parity between the two contexts, while still allowing for all of their functionality. 43 44 When this is passed to a command, it will either be passed as :class:`BridgeExtContext`, or 45 :class:`BridgeApplicationContext`. Since they are two separate classes, it is quite simple to use :meth:`isinstance` 46 to make different functionality for each context. For example, if you want to respond to a command with the command 47 type that it was invoked with, you can do the following: 48 49 .. code-block:: python3 50 51 @bot.bridge_command() 52 async def example(ctx: BridgeContext): 53 if isinstance(ctx, BridgeExtContext): 54 command_type = "Traditional (prefix-based) command" 55 elif isinstance(ctx, BridgeApplicationContext): 56 command_type = "Application command" 57 await ctx.send(f"This command was invoked with a(n) {command_type}.") 58 59 .. versionadded:: 2.0 60 """ 61 62 @abstractmethod 63 async def _respond(self, *args, **kwargs) -> Union[Union[Interaction, WebhookMessage], Message]: 64 ... 65 66 @abstractmethod 67 async def _defer(self, *args, **kwargs) -> None: 68 ... 69 70 @abstractmethod 71 async def _edit(self, *args, **kwargs) -> Union[InteractionMessage, Message]: 72 ... 73 74 async def respond(self, *args, **kwargs) -> Union[Union[Interaction, WebhookMessage], Message]: 75 """|coro| 76 77 Responds to the command with the respective response type to the current context. 
In :class:`BridgeExtContext`, 78 this will be :meth:`~.ExtContext.reply` while in :class:`BridgeApplicationContext`, this will be 79 :meth:`~.ApplicationContext.respond`. 80 """ 81 return await self._respond(*args, **kwargs) 82 83 async def reply(self, *args, **kwargs) -> Union[Union[Interaction, WebhookMessage], Message]: 84 """|coro| 85 86 Alias for :meth:`~.BridgeContext.respond`. 87 """ 88 return await self.respond(*args, **kwargs) 89 90 async def defer(self, *args, **kwargs) -> None: 91 """|coro| 92 93 Defers the command with the respective approach to the current context. In :class:`BridgeExtContext`, this will 94 be :meth:`~.ExtContext.trigger_typing` while in :class:`BridgeApplicationContext`, this will be 95 :meth:`~.ApplicationContext.defer`. 96 97 .. note:: 98 There is no ``trigger_typing`` alias for this method. ``trigger_typing`` will always provide the same 99 functionality across contexts. 100 """ 101 return await self._defer(*args, **kwargs) 102 103 async def edit(self, *args, **kwargs) -> Union[InteractionMessage, Message]: 104 """|coro| 105 106 Edits the original response message with the respective approach to the current context. In 107 :class:`BridgeExtContext`, this will have a custom approach where :meth:`.respond` caches the message to be 108 edited here. In :class:`BridgeApplicationContext`, this will be :meth:`~.ApplicationContext.edit`. 109 """ 110 return await self._edit(*args, **kwargs) 111 112 def _get_super(self, attr: str) -> Optional[Any]: 113 return getattr(super(), attr) 114 115 116 class BridgeApplicationContext(BridgeContext, ApplicationContext): 117 """ 118 The application context class for compatibility commands. This class is a subclass of :class:`BridgeContext` and 119 :class:`ApplicationContext`. This class is meant to be used with :class:`BridgeCommand`. 120 121 .. versionadded:: 2.0 122 """ 123 124 async def _respond(self, *args, **kwargs) -> Union[Interaction, WebhookMessage]: 125 return await self._get_super("respond")(*args, **kwargs) 126 127 async def _defer(self, *args, **kwargs) -> None: 128 return await self._get_super("defer")(*args, **kwargs) 129 130 async def _edit(self, *args, **kwargs) -> InteractionMessage: 131 return await self._get_super("edit")(*args, **kwargs) 132 133 134 class BridgeExtContext(BridgeContext, Context): 135 """ 136 The ext.commands context class for compatibility commands. This class is a subclass of :class:`BridgeContext` and 137 :class:`Context`. This class is meant to be used with :class:`BridgeCommand`. 138 139 .. versionadded:: 2.0 140 """ 141 142 def __init__(self, *args, **kwargs): 143 super().__init__(*args, **kwargs) 144 self._original_response_message: Optional[Message] = None 145 146 async def _respond(self, *args, **kwargs) -> Message: 147 message = await self._get_super("reply")(*args, **kwargs) 148 if self._original_response_message is None: 149 self._original_response_message = message 150 return message 151 152 async def _defer(self, *args, **kwargs) -> None: 153 return await self._get_super("trigger_typing")(*args, **kwargs) 154 155 async def _edit(self, *args, **kwargs) -> Message: 156 return await self._original_response_message.edit(*args, **kwargs) 157 158 async def delete(self, *, delay: Optional[float] = None, reason: Optional[str] = None) -> None: 159 """|coro| 160 161 Deletes the original response message, if it exists. 162 163 Parameters 164 ----------- 165 delay: Optional[:class:`float`] 166 If provided, the number of seconds to wait before deleting the message. 
167 reason: Optional[:class:`str`] 168 The reason for deleting the message. Shows up on the audit log. 169 """ 170 if self._original_response_message: 171 await self._original_response_message.delete(delay=delay, reason=reason) 172 173 174 if TYPE_CHECKING: 175 # This is a workaround for mypy not being able to resolve the type of BridgeCommand. 176 class BridgeContext(ApplicationContext, Context): 177 ... 178 [end of discord/ext/bridge/context.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/discord/ext/bridge/context.py b/discord/ext/bridge/context.py --- a/discord/ext/bridge/context.py +++ b/discord/ext/bridge/context.py @@ -144,12 +144,14 @@ self._original_response_message: Optional[Message] = None async def _respond(self, *args, **kwargs) -> Message: + kwargs.pop("ephemeral", None) message = await self._get_super("reply")(*args, **kwargs) if self._original_response_message is None: self._original_response_message = message return message async def _defer(self, *args, **kwargs) -> None: + kwargs.pop("ephemeral", None) return await self._get_super("trigger_typing")(*args, **kwargs) async def _edit(self, *args, **kwargs) -> Message:
{"golden_diff": "diff --git a/discord/ext/bridge/context.py b/discord/ext/bridge/context.py\n--- a/discord/ext/bridge/context.py\n+++ b/discord/ext/bridge/context.py\n@@ -144,12 +144,14 @@\n self._original_response_message: Optional[Message] = None\n \n async def _respond(self, *args, **kwargs) -> Message:\n+ kwargs.pop(\"ephemeral\", None)\n message = await self._get_super(\"reply\")(*args, **kwargs)\n if self._original_response_message is None:\n self._original_response_message = message\n return message\n \n async def _defer(self, *args, **kwargs) -> None:\n+ kwargs.pop(\"ephemeral\", None)\n return await self._get_super(\"trigger_typing\")(*args, **kwargs)\n \n async def _edit(self, *args, **kwargs) -> Message:\n", "issue": "[ext.bridge] Ephemeral kwarg causing errors in defer() and respond()\n### Summary\r\n\r\nUsing the ephemeral= keyword argument causes errors when it is passed into the prefix-command version of the bridge command.\r\n\r\n### Reproduction Steps\r\n\r\nUsing any `ephemeral` keyword arguments\r\n\r\n### Minimal Reproducible Code\r\n\r\n```python\r\n @bridge_command(name=\"commands\", description=\"This page lists all commands\")\r\n async def _commands(self, context: ApplicationContext) -> None:\r\n await context.defer(ephemeral=True)\r\n await context.respond(\"Commands List\", ephemeral=True)\r\n```\r\n\r\n\r\n### Expected Results\r\n\r\nThe ephemeral keyword argument should instead be ignored. This could be as simple as a:\r\n```py\r\nif kwargs.get(\"ephemeral\") is not None:\r\n del kwargs[\"ephemeral\"]\r\n```\r\nThis is in fact exactly what I added in both `discord/message.py` line 1625 and `discord/ext/bridge/context.py` line 153 to get it to work temporarily.\r\n\r\n### Actual Results\r\n\r\n```py\r\nIgnoring exception in command commands:\r\nTraceback (most recent call last):\r\n File \"/home/krishnan/Coding/Hobby/ChessCord/ChessCordv2.0/venv/lib/python3.10/site-packages/discord/ext/commands/core.py\", line 181, in wrapped\r\n ret = await coro(*args, **kwargs)\r\n File \"/home/krishnan/Coding/Hobby/ChessCord/ChessCordv2.0/slash/help.py\", line 38, in _commands\r\n await context.defer(ephemeral=True)\r\n File \"/home/krishnan/Coding/Hobby/ChessCord/ChessCordv2.0/venv/lib/python3.10/site-packages/discord/ext/bridge/context.py\", line 101, in defer\r\n return await self._defer(*args, **kwargs)\r\n File \"/home/krishnan/Coding/Hobby/ChessCord/ChessCordv2.0/venv/lib/python3.10/site-packages/discord/ext/bridge/context.py\", line 153, in _defer\r\n return await self._get_super(\"trigger_typing\")(*args, **kwargs)\r\nTypeError: Messageable.trigger_typing() got an unexpected keyword argument 'ephemeral'\r\n```\r\nand\r\n```py\r\nIgnoring exception in command commands:\r\nTraceback (most recent call last):\r\n File \"/home/krishnan/Coding/Hobby/ChessCord/ChessCordv2.0/venv/lib/python3.10/site-packages/discord/ext/commands/core.py\", line 181, in wrapped\r\n ret = await coro(*args, **kwargs)\r\n File \"/home/krishnan/Coding/Hobby/ChessCord/ChessCordv2.0/slash/help.py\", line 40, in _commands\r\n await context.respond(\r\n File \"/home/krishnan/Coding/Hobby/ChessCord/ChessCordv2.0/venv/lib/python3.10/site-packages/discord/ext/bridge/context.py\", line 81, in respond\r\n return await self._respond(*args, **kwargs)\r\n File \"/home/krishnan/Coding/Hobby/ChessCord/ChessCordv2.0/venv/lib/python3.10/site-packages/discord/ext/bridge/context.py\", line 147, in _respond\r\n message = await self._get_super(\"reply\")(*args, **kwargs)\r\n File 
\"/home/krishnan/Coding/Hobby/ChessCord/ChessCordv2.0/venv/lib/python3.10/site-packages/discord/ext/commands/context.py\", line 399, in reply\r\n return await self.message.reply(content, **kwargs)\r\n File \"/home/krishnan/Coding/Hobby/ChessCord/ChessCordv2.0/venv/lib/python3.10/site-packages/discord/message.py\", line 1625, in reply\r\n return await self.channel.send(content, reference=self, **kwargs)\r\nTypeError: Messageable.send() got an unexpected keyword argument 'ephemeral'\r\n```\r\n\r\n### Intents\r\n\r\ndiscord.Intents.all\r\n\r\n### System Information\r\n\r\n- Python v3.10.5-final\r\n- py-cord v2.0.0-candidate\r\n - py-cord pkg_resources: v2.0.0rc1\r\n- aiohttp v3.8.1\r\n- system info: Linux 5.15.48-1-MANJARO #1 SMP PREEMPT Thu Jun 16 12:33:56 UTC 2022\r\n\r\n### Checklist\r\n\r\n- [X] I have searched the open issues for duplicates.\r\n- [X] I have shown the entire traceback, if possible.\r\n- [X] I have removed my token from display, if visible.\r\n\r\n### Additional Context\r\n\r\nThis can also apply to other slash-command-specific kwargs, which I can't specifically think of off the top of my head.\n", "before_files": [{"content": "\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2015-2021 Rapptz\nCopyright (c) 2021-present Pycord Development\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the \"Software\"),\nto deal in the Software without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense,\nand/or sell copies of the Software, and to permit persons to whom the\nSoftware is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\nOR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\n\"\"\"\nfrom abc import ABC, abstractmethod\nfrom typing import TYPE_CHECKING, Any, Optional, Union\n\nfrom discord.commands import ApplicationContext\nfrom discord.interactions import Interaction, InteractionMessage\nfrom discord.message import Message\nfrom discord.webhook import WebhookMessage\n\nfrom ..commands import Context\n\n__all__ = (\"BridgeContext\", \"BridgeExtContext\", \"BridgeApplicationContext\")\n\n\nclass BridgeContext(ABC):\n \"\"\"\n The base context class for compatibility commands. This class is an :class:`ABC` (abstract base class), which is\n subclassed by :class:`BridgeExtContext` and :class:`BridgeApplicationContext`. The methods in this class are meant\n to give parity between the two contexts, while still allowing for all of their functionality.\n\n When this is passed to a command, it will either be passed as :class:`BridgeExtContext`, or\n :class:`BridgeApplicationContext`. Since they are two separate classes, it is quite simple to use :meth:`isinstance`\n to make different functionality for each context. For example, if you want to respond to a command with the command\n type that it was invoked with, you can do the following:\n\n .. 
code-block:: python3\n\n @bot.bridge_command()\n async def example(ctx: BridgeContext):\n if isinstance(ctx, BridgeExtContext):\n command_type = \"Traditional (prefix-based) command\"\n elif isinstance(ctx, BridgeApplicationContext):\n command_type = \"Application command\"\n await ctx.send(f\"This command was invoked with a(n) {command_type}.\")\n\n .. versionadded:: 2.0\n \"\"\"\n\n @abstractmethod\n async def _respond(self, *args, **kwargs) -> Union[Union[Interaction, WebhookMessage], Message]:\n ...\n\n @abstractmethod\n async def _defer(self, *args, **kwargs) -> None:\n ...\n\n @abstractmethod\n async def _edit(self, *args, **kwargs) -> Union[InteractionMessage, Message]:\n ...\n\n async def respond(self, *args, **kwargs) -> Union[Union[Interaction, WebhookMessage], Message]:\n \"\"\"|coro|\n\n Responds to the command with the respective response type to the current context. In :class:`BridgeExtContext`,\n this will be :meth:`~.ExtContext.reply` while in :class:`BridgeApplicationContext`, this will be\n :meth:`~.ApplicationContext.respond`.\n \"\"\"\n return await self._respond(*args, **kwargs)\n\n async def reply(self, *args, **kwargs) -> Union[Union[Interaction, WebhookMessage], Message]:\n \"\"\"|coro|\n\n Alias for :meth:`~.BridgeContext.respond`.\n \"\"\"\n return await self.respond(*args, **kwargs)\n\n async def defer(self, *args, **kwargs) -> None:\n \"\"\"|coro|\n\n Defers the command with the respective approach to the current context. In :class:`BridgeExtContext`, this will\n be :meth:`~.ExtContext.trigger_typing` while in :class:`BridgeApplicationContext`, this will be\n :meth:`~.ApplicationContext.defer`.\n\n .. note::\n There is no ``trigger_typing`` alias for this method. ``trigger_typing`` will always provide the same\n functionality across contexts.\n \"\"\"\n return await self._defer(*args, **kwargs)\n\n async def edit(self, *args, **kwargs) -> Union[InteractionMessage, Message]:\n \"\"\"|coro|\n\n Edits the original response message with the respective approach to the current context. In\n :class:`BridgeExtContext`, this will have a custom approach where :meth:`.respond` caches the message to be\n edited here. In :class:`BridgeApplicationContext`, this will be :meth:`~.ApplicationContext.edit`.\n \"\"\"\n return await self._edit(*args, **kwargs)\n\n def _get_super(self, attr: str) -> Optional[Any]:\n return getattr(super(), attr)\n\n\nclass BridgeApplicationContext(BridgeContext, ApplicationContext):\n \"\"\"\n The application context class for compatibility commands. This class is a subclass of :class:`BridgeContext` and\n :class:`ApplicationContext`. This class is meant to be used with :class:`BridgeCommand`.\n\n .. versionadded:: 2.0\n \"\"\"\n\n async def _respond(self, *args, **kwargs) -> Union[Interaction, WebhookMessage]:\n return await self._get_super(\"respond\")(*args, **kwargs)\n\n async def _defer(self, *args, **kwargs) -> None:\n return await self._get_super(\"defer\")(*args, **kwargs)\n\n async def _edit(self, *args, **kwargs) -> InteractionMessage:\n return await self._get_super(\"edit\")(*args, **kwargs)\n\n\nclass BridgeExtContext(BridgeContext, Context):\n \"\"\"\n The ext.commands context class for compatibility commands. This class is a subclass of :class:`BridgeContext` and\n :class:`Context`. This class is meant to be used with :class:`BridgeCommand`.\n\n .. 
versionadded:: 2.0\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._original_response_message: Optional[Message] = None\n\n async def _respond(self, *args, **kwargs) -> Message:\n message = await self._get_super(\"reply\")(*args, **kwargs)\n if self._original_response_message is None:\n self._original_response_message = message\n return message\n\n async def _defer(self, *args, **kwargs) -> None:\n return await self._get_super(\"trigger_typing\")(*args, **kwargs)\n\n async def _edit(self, *args, **kwargs) -> Message:\n return await self._original_response_message.edit(*args, **kwargs)\n\n async def delete(self, *, delay: Optional[float] = None, reason: Optional[str] = None) -> None:\n \"\"\"|coro|\n\n Deletes the original response message, if it exists.\n\n Parameters\n -----------\n delay: Optional[:class:`float`]\n If provided, the number of seconds to wait before deleting the message.\n reason: Optional[:class:`str`]\n The reason for deleting the message. Shows up on the audit log.\n \"\"\"\n if self._original_response_message:\n await self._original_response_message.delete(delay=delay, reason=reason)\n\n\nif TYPE_CHECKING:\n # This is a workaround for mypy not being able to resolve the type of BridgeCommand.\n class BridgeContext(ApplicationContext, Context):\n ...\n", "path": "discord/ext/bridge/context.py"}]}
3,716
199
gh_patches_debug_31559
rasdani/github-patches
git_diff
uccser__cs-unplugged-195
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Invalid filename for blank resources When downloading a resource that is blank, the filename is `Sorting Network (0 to -1).pdf`. The filename should be something like `Sorting Network (blank).pdf`. </issue> <code> [start of csunplugged/resources/views/sorting_network.py] 1 from PIL import Image, ImageDraw, ImageFont 2 from random import sample 3 4 5 def resource_image(get_request, resource): 6 """Creates a image for Sorting Network resource. 7 8 Returns: 9 A Pillow image object. 10 """ 11 image_path = 'static/img/resource-sorting-network-colour.png' 12 image = Image.open(image_path) 13 draw = ImageDraw.Draw(image) 14 15 (range_min, range_max, font_size) = number_range(get_request) 16 17 font_path = 'static/fonts/PatrickHand-Regular.ttf' 18 19 # Add numbers to text if needed 20 if get_request['prefilled_values'] != 'blank': 21 font = ImageFont.truetype(font_path, font_size) 22 numbers = sample(range(range_min, range_max), 6) 23 base_coord_x = 70 24 base_coord_y = 2560 25 coord_x_increment = 204 26 for number in numbers: 27 text = str(number) 28 text_width, text_height = draw.textsize(text, font=font) 29 coord_x = base_coord_x - (text_width / 2) 30 coord_y = base_coord_y - (text_height / 2) 31 draw.text( 32 (coord_x, coord_y), 33 text, 34 font=font, 35 fill='#000' 36 ) 37 base_coord_x += coord_x_increment 38 39 return image 40 41 42 def subtitle(get_request, resource): 43 """Returns the subtitle string of the resource. 44 45 Used after the resource name in the filename, and 46 also on the resource image. 47 """ 48 SUBTITLE_TEMPLATE = '{} to {}' 49 range_min, range_max, font_size = number_range(get_request) 50 text = SUBTITLE_TEMPLATE.format(range_min, range_max - 1) 51 return text 52 53 54 def number_range(get_request): 55 """Returns a tuple of (range_min, range_max, font_size) 56 for the requested resource. 57 """ 58 prefilled_values = get_request['prefilled_values'] 59 range_min = 0 60 range_max = 0 61 font_size = 150 62 if prefilled_values == 'easy': 63 range_min = 1 64 range_max = 10 65 elif prefilled_values == 'medium': 66 range_min = 10 67 range_max = 100 68 font_size = 120 69 elif prefilled_values == 'hard': 70 range_min = 100 71 range_max = 1000 72 font_size = 90 73 return (range_min, range_max, font_size) 74 [end of csunplugged/resources/views/sorting_network.py] [start of csunplugged/resources/views/treasure_hunt.py] 1 from PIL import Image, ImageDraw, ImageFont 2 from random import sample 3 4 5 def resource_image(get_request, resource): 6 """Creates a image for Treasure Hunt resource. 7 8 Returns: 9 A Pillow image object. 
10 """ 11 image_path = 'static/img/resource-treasure-hunt.png' 12 font_path = 'static/fonts/PatrickHand-Regular.ttf' 13 image = Image.open(image_path) 14 draw = ImageDraw.Draw(image) 15 16 (range_min, range_max, font_size) = number_range(get_request) 17 18 # Add numbers to image if required 19 if get_request['prefilled_values'] != 'blank': 20 font = ImageFont.truetype(font_path, font_size) 21 22 total_numbers = 26 23 numbers = sample(range(range_min, range_max), total_numbers) 24 if get_request['number_order'] == 'sorted': 25 numbers.sort() 26 27 starting_coord_y = 494 28 base_coord_y = starting_coord_y 29 coord_y_increment = 286 30 base_coords_x = [257, 692] 31 for i in range(0, total_numbers): 32 text = str(numbers[i]) 33 text_width, text_height = draw.textsize(text, font=font) 34 35 coord_x = base_coords_x[i % 2] - (text_width / 2) 36 coord_y = base_coord_y - (text_height / 2) 37 if i % 2 == 1: 38 coord_y -= 10 39 base_coord_y += coord_y_increment 40 draw.text( 41 (coord_x, coord_y), 42 text, 43 font=font, 44 fill='#000' 45 ) 46 47 # Add number order and range text 48 text = subtitle(get_request, resource) 49 font = ImageFont.truetype(font_path, 110) 50 text_width, text_height = draw.textsize(text, font=font) 51 coord_x = 1472 - (text_width / 2) 52 coord_y = 35 - (text_height / 2) 53 draw.text( 54 (coord_x, coord_y), 55 text, 56 font=font, 57 fill='#000' 58 ) 59 60 return image 61 62 63 def subtitle(get_request, resource): 64 """Returns the subtitle string of the resource. 65 66 Used after the resource name in the filename, and 67 also on the resource image. 68 """ 69 SUBTITLE_TEMPLATE = '{} - {} to {}' 70 number_order_text = get_request['number_order'].title() 71 range_min, range_max, font_size = number_range(get_request) 72 text = SUBTITLE_TEMPLATE.format(number_order_text, range_min, range_max - 1) 73 return text 74 75 76 def number_range(get_request): 77 """Returns a tuple of (range_min, range_max, font_size) 78 for the requested resource. 79 """ 80 prefilled_values = get_request['prefilled_values'] 81 range_min = 0 82 if prefilled_values == 'easy': 83 range_max = 100 84 font_size = 97 85 elif prefilled_values == 'medium': 86 range_max = 1000 87 font_size = 80 88 elif prefilled_values == 'hard': 89 range_max = 10000 90 font_size = 70 91 return (range_min, range_max, font_size) 92 [end of csunplugged/resources/views/treasure_hunt.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/csunplugged/resources/views/sorting_network.py b/csunplugged/resources/views/sorting_network.py --- a/csunplugged/resources/views/sorting_network.py +++ b/csunplugged/resources/views/sorting_network.py @@ -45,9 +45,12 @@ Used after the resource name in the filename, and also on the resource image. """ - SUBTITLE_TEMPLATE = '{} to {}' - range_min, range_max, font_size = number_range(get_request) - text = SUBTITLE_TEMPLATE.format(range_min, range_max - 1) + if get_request['prefilled_values'] == 'blank': + text = 'blank' + else: + SUBTITLE_TEMPLATE = '{} to {}' + range_min, range_max, font_size = number_range(get_request) + text = SUBTITLE_TEMPLATE.format(range_min, range_max - 1) return text diff --git a/csunplugged/resources/views/treasure_hunt.py b/csunplugged/resources/views/treasure_hunt.py --- a/csunplugged/resources/views/treasure_hunt.py +++ b/csunplugged/resources/views/treasure_hunt.py @@ -13,10 +13,9 @@ image = Image.open(image_path) draw = ImageDraw.Draw(image) - (range_min, range_max, font_size) = number_range(get_request) - # Add numbers to image if required if get_request['prefilled_values'] != 'blank': + (range_min, range_max, font_size) = number_range(get_request) font = ImageFont.truetype(font_path, font_size) total_numbers = 26 @@ -66,10 +65,13 @@ Used after the resource name in the filename, and also on the resource image. """ - SUBTITLE_TEMPLATE = '{} - {} to {}' - number_order_text = get_request['number_order'].title() - range_min, range_max, font_size = number_range(get_request) - text = SUBTITLE_TEMPLATE.format(number_order_text, range_min, range_max - 1) + if get_request['prefilled_values'] == 'blank': + text = 'blank' + else: + SUBTITLE_TEMPLATE = '{} - {} to {}' + number_order_text = get_request['number_order'].title() + range_min, range_max, font_size = number_range(get_request) + text = SUBTITLE_TEMPLATE.format(number_order_text, range_min, range_max - 1) return text
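A note on the fix above, added for readability and not part of the dataset row: with the blank defaults in `number_range()` (`range_min = range_max = 0`), the old `'{} to {}'` template rendered `0 to -1`, which is exactly the broken filename in the issue. Below is a minimal, self-contained sketch of the patched `subtitle` behaviour; the easy/medium/hard ranges mirror `sorting_network.py`, but the standalone function form and the bare `prefilled_values` argument are simplifications for illustration only.

```python
# Sketch of the patched subtitle logic for the Sorting Network resource.
# The easy/medium/hard ranges mirror number_range() in sorting_network.py.
def subtitle(prefilled_values):
    if prefilled_values == "blank":
        return "blank"  # patched branch: no longer renders "0 to -1"
    ranges = {"easy": (1, 10), "medium": (10, 100), "hard": (100, 1000)}
    range_min, range_max = ranges[prefilled_values]
    return "{} to {}".format(range_min, range_max - 1)


if __name__ == "__main__":
    print("Sorting Network ({}).pdf".format(subtitle("blank")))  # Sorting Network (blank).pdf
    print("Sorting Network ({}).pdf".format(subtitle("easy")))   # Sorting Network (1 to 9).pdf
```

The companion change in `treasure_hunt.py` applies the same guard and also moves the `number_range()` call inside the non-blank branch, since that module's `number_range()` never assigns `range_max` or `font_size` when the prefilled values are blank.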
{"golden_diff": "diff --git a/csunplugged/resources/views/sorting_network.py b/csunplugged/resources/views/sorting_network.py\n--- a/csunplugged/resources/views/sorting_network.py\n+++ b/csunplugged/resources/views/sorting_network.py\n@@ -45,9 +45,12 @@\n Used after the resource name in the filename, and\n also on the resource image.\n \"\"\"\n- SUBTITLE_TEMPLATE = '{} to {}'\n- range_min, range_max, font_size = number_range(get_request)\n- text = SUBTITLE_TEMPLATE.format(range_min, range_max - 1)\n+ if get_request['prefilled_values'] == 'blank':\n+ text = 'blank'\n+ else:\n+ SUBTITLE_TEMPLATE = '{} to {}'\n+ range_min, range_max, font_size = number_range(get_request)\n+ text = SUBTITLE_TEMPLATE.format(range_min, range_max - 1)\n return text\n \n \ndiff --git a/csunplugged/resources/views/treasure_hunt.py b/csunplugged/resources/views/treasure_hunt.py\n--- a/csunplugged/resources/views/treasure_hunt.py\n+++ b/csunplugged/resources/views/treasure_hunt.py\n@@ -13,10 +13,9 @@\n image = Image.open(image_path)\n draw = ImageDraw.Draw(image)\n \n- (range_min, range_max, font_size) = number_range(get_request)\n-\n # Add numbers to image if required\n if get_request['prefilled_values'] != 'blank':\n+ (range_min, range_max, font_size) = number_range(get_request)\n font = ImageFont.truetype(font_path, font_size)\n \n total_numbers = 26\n@@ -66,10 +65,13 @@\n Used after the resource name in the filename, and\n also on the resource image.\n \"\"\"\n- SUBTITLE_TEMPLATE = '{} - {} to {}'\n- number_order_text = get_request['number_order'].title()\n- range_min, range_max, font_size = number_range(get_request)\n- text = SUBTITLE_TEMPLATE.format(number_order_text, range_min, range_max - 1)\n+ if get_request['prefilled_values'] == 'blank':\n+ text = 'blank'\n+ else:\n+ SUBTITLE_TEMPLATE = '{} - {} to {}'\n+ number_order_text = get_request['number_order'].title()\n+ range_min, range_max, font_size = number_range(get_request)\n+ text = SUBTITLE_TEMPLATE.format(number_order_text, range_min, range_max - 1)\n return text\n", "issue": "Invalid filename for blank resources\nWhen downloading a resource that is blank, the filename is `Sorting Network (0 to -1).pdf`.\r\n\r\nThe filename should be something like `Sorting Network (blank).pdf`.\r\n\n", "before_files": [{"content": "from PIL import Image, ImageDraw, ImageFont\nfrom random import sample\n\n\ndef resource_image(get_request, resource):\n \"\"\"Creates a image for Sorting Network resource.\n\n Returns:\n A Pillow image object.\n \"\"\"\n image_path = 'static/img/resource-sorting-network-colour.png'\n image = Image.open(image_path)\n draw = ImageDraw.Draw(image)\n\n (range_min, range_max, font_size) = number_range(get_request)\n\n font_path = 'static/fonts/PatrickHand-Regular.ttf'\n\n # Add numbers to text if needed\n if get_request['prefilled_values'] != 'blank':\n font = ImageFont.truetype(font_path, font_size)\n numbers = sample(range(range_min, range_max), 6)\n base_coord_x = 70\n base_coord_y = 2560\n coord_x_increment = 204\n for number in numbers:\n text = str(number)\n text_width, text_height = draw.textsize(text, font=font)\n coord_x = base_coord_x - (text_width / 2)\n coord_y = base_coord_y - (text_height / 2)\n draw.text(\n (coord_x, coord_y),\n text,\n font=font,\n fill='#000'\n )\n base_coord_x += coord_x_increment\n\n return image\n\n\ndef subtitle(get_request, resource):\n \"\"\"Returns the subtitle string of the resource.\n\n Used after the resource name in the filename, and\n also on the resource image.\n \"\"\"\n SUBTITLE_TEMPLATE = '{} to 
{}'\n range_min, range_max, font_size = number_range(get_request)\n text = SUBTITLE_TEMPLATE.format(range_min, range_max - 1)\n return text\n\n\ndef number_range(get_request):\n \"\"\"Returns a tuple of (range_min, range_max, font_size)\n for the requested resource.\n \"\"\"\n prefilled_values = get_request['prefilled_values']\n range_min = 0\n range_max = 0\n font_size = 150\n if prefilled_values == 'easy':\n range_min = 1\n range_max = 10\n elif prefilled_values == 'medium':\n range_min = 10\n range_max = 100\n font_size = 120\n elif prefilled_values == 'hard':\n range_min = 100\n range_max = 1000\n font_size = 90\n return (range_min, range_max, font_size)\n", "path": "csunplugged/resources/views/sorting_network.py"}, {"content": "from PIL import Image, ImageDraw, ImageFont\nfrom random import sample\n\n\ndef resource_image(get_request, resource):\n \"\"\"Creates a image for Treasure Hunt resource.\n\n Returns:\n A Pillow image object.\n \"\"\"\n image_path = 'static/img/resource-treasure-hunt.png'\n font_path = 'static/fonts/PatrickHand-Regular.ttf'\n image = Image.open(image_path)\n draw = ImageDraw.Draw(image)\n\n (range_min, range_max, font_size) = number_range(get_request)\n\n # Add numbers to image if required\n if get_request['prefilled_values'] != 'blank':\n font = ImageFont.truetype(font_path, font_size)\n\n total_numbers = 26\n numbers = sample(range(range_min, range_max), total_numbers)\n if get_request['number_order'] == 'sorted':\n numbers.sort()\n\n starting_coord_y = 494\n base_coord_y = starting_coord_y\n coord_y_increment = 286\n base_coords_x = [257, 692]\n for i in range(0, total_numbers):\n text = str(numbers[i])\n text_width, text_height = draw.textsize(text, font=font)\n\n coord_x = base_coords_x[i % 2] - (text_width / 2)\n coord_y = base_coord_y - (text_height / 2)\n if i % 2 == 1:\n coord_y -= 10\n base_coord_y += coord_y_increment\n draw.text(\n (coord_x, coord_y),\n text,\n font=font,\n fill='#000'\n )\n\n # Add number order and range text\n text = subtitle(get_request, resource)\n font = ImageFont.truetype(font_path, 110)\n text_width, text_height = draw.textsize(text, font=font)\n coord_x = 1472 - (text_width / 2)\n coord_y = 35 - (text_height / 2)\n draw.text(\n (coord_x, coord_y),\n text,\n font=font,\n fill='#000'\n )\n\n return image\n\n\ndef subtitle(get_request, resource):\n \"\"\"Returns the subtitle string of the resource.\n\n Used after the resource name in the filename, and\n also on the resource image.\n \"\"\"\n SUBTITLE_TEMPLATE = '{} - {} to {}'\n number_order_text = get_request['number_order'].title()\n range_min, range_max, font_size = number_range(get_request)\n text = SUBTITLE_TEMPLATE.format(number_order_text, range_min, range_max - 1)\n return text\n\n\ndef number_range(get_request):\n \"\"\"Returns a tuple of (range_min, range_max, font_size)\n for the requested resource.\n \"\"\"\n prefilled_values = get_request['prefilled_values']\n range_min = 0\n if prefilled_values == 'easy':\n range_max = 100\n font_size = 97\n elif prefilled_values == 'medium':\n range_max = 1000\n font_size = 80\n elif prefilled_values == 'hard':\n range_max = 10000\n font_size = 70\n return (range_min, range_max, font_size)\n", "path": "csunplugged/resources/views/treasure_hunt.py"}]}
2,208
563
gh_patches_debug_21522
rasdani/github-patches
git_diff
svthalia__concrexit-3070
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Expand/improve document part of event endpoint <!-- Please add the appropriate label for what change should be made: docs: changes to the documentation) refactor: refactoring production code, eg. renaming a variable or rewriting a function test: adding missing tests, refactoring tests; no production code change chore: updating poetry etc; no production code change --> ### Describe the change Change the current URL or add a URL to a link to the actual document (that is, one that ends with `/[name].pdf`). ### Motivation It's very nice for the app to be able to just fetch files directly. Also, I don't think the current URL is even necessary because you also have the PK with which you can create the URL. ### Current implementation There is a URL that leads to the file, but only after a redirect to `cdn`. </issue> <code> [start of website/documents/api/v2/serializers/document.py] 1 from rest_framework.fields import SerializerMethodField 2 from rest_framework.reverse import reverse 3 4 from documents.models import Document 5 from thaliawebsite.api.v2.serializers.cleaned_model_serializer import ( 6 CleanedModelSerializer, 7 ) 8 9 10 class DocumentSerializer(CleanedModelSerializer): 11 class Meta: 12 model = Document 13 fields = ("pk", "name", "url", "category", "members_only") 14 15 url = SerializerMethodField("_url") 16 17 def _url(self, instance): 18 return self.context["request"].build_absolute_uri( 19 reverse("documents:document", kwargs={"pk": instance.pk}) 20 ) 21 [end of website/documents/api/v2/serializers/document.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/website/documents/api/v2/serializers/document.py b/website/documents/api/v2/serializers/document.py --- a/website/documents/api/v2/serializers/document.py +++ b/website/documents/api/v2/serializers/document.py @@ -1,10 +1,10 @@ from rest_framework.fields import SerializerMethodField -from rest_framework.reverse import reverse from documents.models import Document from thaliawebsite.api.v2.serializers.cleaned_model_serializer import ( CleanedModelSerializer, ) +from utils.media.services import get_media_url class DocumentSerializer(CleanedModelSerializer): @@ -15,6 +15,12 @@ url = SerializerMethodField("_url") def _url(self, instance): - return self.context["request"].build_absolute_uri( - reverse("documents:document", kwargs={"pk": instance.pk}) - ) + if instance.members_only and ( + not self.context["request"].user.is_authenticated + or not self.context["request"].member.has_active_membership() + ): + return self.context["request"].build_absolute_uri( + instance.get_absolute_url() + ) + + return get_media_url(instance.file, absolute_url=True)
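For readability (this paragraph and sketch are editorial, not part of the dataset row): the patched `_url` now branches on access. Members-only documents served to callers without an active membership keep the permission-checking view URL, while everything else gets the direct file URL from `get_media_url(instance.file, absolute_url=True)`, i.e. a link ending in `/[name].pdf` as the issue requests. The stand-alone sketch below captures that decision with plain dataclasses standing in for the Django request, member, and `Document` model; all class and field names here are illustrative stand-ins, not concrexit APIs.

```python
from dataclasses import dataclass


@dataclass
class FakeDocument:      # stand-in for documents.models.Document
    pk: int
    members_only: bool
    file_url: str        # what get_media_url(...) would return for the stored file


@dataclass
class FakeRequester:     # stand-in for request.user / request.member
    is_authenticated: bool
    has_active_membership: bool


def document_url(doc: FakeDocument, user: FakeRequester) -> str:
    restricted = doc.members_only and (
        not user.is_authenticated or not user.has_active_membership
    )
    if restricted:
        # Keep the redirecting view URL so the permission check still runs.
        return f"/documents/document/{doc.pk}"
    # Otherwise expose the direct file URL, e.g. ".../annual-report.pdf".
    return doc.file_url


print(document_url(FakeDocument(1, False, "/media/documents/annual-report.pdf"),
                   FakeRequester(is_authenticated=False, has_active_membership=False)))
print(document_url(FakeDocument(2, True, "/media/documents/board-minutes.pdf"),
                   FakeRequester(is_authenticated=False, has_active_membership=False)))
```

Serving the media URL only to permitted callers keeps the members-only restriction intact while still letting the app fetch public files without the extra redirect.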
{"golden_diff": "diff --git a/website/documents/api/v2/serializers/document.py b/website/documents/api/v2/serializers/document.py\n--- a/website/documents/api/v2/serializers/document.py\n+++ b/website/documents/api/v2/serializers/document.py\n@@ -1,10 +1,10 @@\n from rest_framework.fields import SerializerMethodField\n-from rest_framework.reverse import reverse\n \n from documents.models import Document\n from thaliawebsite.api.v2.serializers.cleaned_model_serializer import (\n CleanedModelSerializer,\n )\n+from utils.media.services import get_media_url\n \n \n class DocumentSerializer(CleanedModelSerializer):\n@@ -15,6 +15,12 @@\n url = SerializerMethodField(\"_url\")\n \n def _url(self, instance):\n- return self.context[\"request\"].build_absolute_uri(\n- reverse(\"documents:document\", kwargs={\"pk\": instance.pk})\n- )\n+ if instance.members_only and (\n+ not self.context[\"request\"].user.is_authenticated\n+ or not self.context[\"request\"].member.has_active_membership()\n+ ):\n+ return self.context[\"request\"].build_absolute_uri(\n+ instance.get_absolute_url()\n+ )\n+\n+ return get_media_url(instance.file, absolute_url=True)\n", "issue": "Expand/improve document part of event endpoint\n<!--\r\n\r\nPlease add the appropriate label for what change should be made:\r\ndocs: changes to the documentation)\r\nrefactor: refactoring production code, eg. renaming a variable or rewriting a function\r\ntest: adding missing tests, refactoring tests; no production code change\r\nchore: updating poetry etc; no production code change\r\n\r\n-->\r\n\r\n### Describe the change\r\nChange the current URL or add a URL to a link to the actual document (that is, one that ends with `/[name].pdf`).\r\n\r\n### Motivation\r\nIt's very nice for the app to be able to just fetch files directly.\r\nAlso, I don't think the current URL is even necessary because you also have the PK with which you can create the URL.\r\n\r\n### Current implementation\r\nThere is a URL that leads to the file, but only after a redirect to `cdn`.\n", "before_files": [{"content": "from rest_framework.fields import SerializerMethodField\nfrom rest_framework.reverse import reverse\n\nfrom documents.models import Document\nfrom thaliawebsite.api.v2.serializers.cleaned_model_serializer import (\n CleanedModelSerializer,\n)\n\n\nclass DocumentSerializer(CleanedModelSerializer):\n class Meta:\n model = Document\n fields = (\"pk\", \"name\", \"url\", \"category\", \"members_only\")\n\n url = SerializerMethodField(\"_url\")\n\n def _url(self, instance):\n return self.context[\"request\"].build_absolute_uri(\n reverse(\"documents:document\", kwargs={\"pk\": instance.pk})\n )\n", "path": "website/documents/api/v2/serializers/document.py"}]}
889
266
gh_patches_debug_17667
rasdani/github-patches
git_diff
mdn__kuma-6528
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> AttributeError: brand https://sentry.prod.mozaws.net/operations/mdn-prod/issues/7302494/ ``` Resolver404: {'tried': [[<RegexURLPattern None ^media/(?:redesign/)?css/(?P<doc>.*)-min.css$>], [<RegexURLPattern None ^media/(?:redesign/)?js/(?P<doc>.*)-min.js$>], [<RegexURLPattern None ^media/(?:redesign/)?img(?P<suffix>.*)$>], [<RegexURLPattern None ^media/(?:redesign/)?css(?P<suffix>.*)$>], [<RegexURLPattern None ^media/(?:redesign/)?js(?P<suffix>.*)$>], [<RegexURLPattern None ^media/(?:redesign/)?fonts(?P<suffix>.*)$>], [<RegexURLPattern None ^media/uploads/demos/(?:.*)$>], [<RegexURLPattern None (?i)^(?P<one>.*)//(?P<two>.*)//(?P<three>.*)$>], [<RegexURLPattern None (?i)^(?P<one>.*)//(?P<two>.*)$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_1_canvas_rect.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_2_canvas_moveto.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_3_canvas_lineto.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_4_canvas_arc.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_5_canvas_quadraticcurveto.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_6_canvas_beziercurveto.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/3_1_canvas_drawimage.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/3_2_canvas_drawimage.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/3_3_canvas_drawimage.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/3_4_canvas_gallery.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_1_canvas_fillstyle.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_2_canvas_strokestyle.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_3_canvas_globalalpha.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_4_canvas_rgba.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_5_canvas_linewidth.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_6_canvas_linecap.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_7_canvas_linejoin.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_8_canvas_miterlimit.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_9_canvas_lineargradient.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_10_canvas_radialgradient.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_11_canvas_createpattern.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/5_1_canvas_savestate.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/5_2_canvas_translate.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/5_3_canvas_rotate.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/5_4_canvas_scale.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/6_1_canvas_composite.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/6_2_canvas_clipping.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/globalCompositeOperation.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/backdrop.png$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/bg_gallery.png$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_1.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_2.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_3.jpg$>], [<RegexURLPattern None 
(?i)^samples/canvas-tutorial/images/gallery_4.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_5.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_6.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_7.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_8.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/picture_frame.png$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/rhino.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/wallpaper.png$>], [<RegexURLPattern None (?i)^samples/domref/mozGetAsFile.html$>], [<RegexURLPattern None (?i)^samples/raycaster/input.js$>], [<RegexURLPattern None (?i)^samples/raycaster/Level.js$>], [<RegexURL... File "redirect_urls/middleware.py", line 14, in __call__ resolver_match = self.resolver.resolve(request.path_info) File "newrelic/hooks/framework_django.py", line 600, in wrapper return _wrapped(*args, **kwargs) File "newrelic/hooks/framework_django.py", line 588, in _wrapped result = wrapped(path) File "newrelic/hooks/framework_django.py", line 575, in wrapper return wrapped(*args, **kwargs) File "django/urls/resolvers.py", line 394, in resolve raise Resolver404({'tried': tried, 'path': new_path}) KeyError: 'brand' File "stripe/stripe_object.py", line 90, in __getattr__ return self[k] File "stripe/stripe_object.py", line 131, in __getitem__ raise err File "stripe/stripe_object.py", line 119, in __getitem__ return super(StripeObject, self).__getitem__(k) AttributeError: brand (4 additional frame(s) were not displayed) ... File "django/views/decorators/cache.py", line 57, in _wrapped_view_func response = view_func(request, *args, **kwargs) File "kuma/core/decorators.py", line 210, in wrapped return func(request, *args, **kwargs) File "kuma/users/views.py", line 472, in user_edit "subscription_info": retrieve_stripe_subscription_info(edit_user,), File "kuma/users/utils.py", line 53, in retrieve_stripe_subscription_info "brand": source.brand, File "stripe/stripe_object.py", line 92, in __getattr__ raise AttributeError(*err.args) AttributeError: brand ``` </issue> <code> [start of kuma/users/utils.py] 1 from datetime import datetime 2 3 import stripe 4 from django.conf import settings 5 6 7 def retrieve_stripe_subscription(customer): 8 for subscription in customer.subscriptions.list().auto_paging_iter(): 9 # We have to use array indexing syntax, as stripe uses dicts to 10 # represent its objects (dicts come with an .items method) 11 for item in subscription["items"].auto_paging_iter(): 12 if item.plan.id == settings.STRIPE_PLAN_ID: 13 return subscription 14 15 return None 16 17 18 def create_stripe_customer_and_subscription_for_user(user, email, stripe_token): 19 customer = ( 20 stripe.Customer.retrieve(user.stripe_customer_id) 21 if user.stripe_customer_id 22 else None 23 ) 24 if not customer or customer.email != email: 25 customer = stripe.Customer.create(email=email, source=stripe_token,) 26 user.stripe_customer_id = customer.id 27 user.save() 28 29 if retrieve_stripe_subscription(customer) is None: 30 stripe.Subscription.create( 31 customer=customer.id, items=[{"plan": settings.STRIPE_PLAN_ID}], 32 ) 33 34 35 def retrieve_stripe_subscription_info(user): 36 stripe_customer = ( 37 stripe.Customer.retrieve(user.stripe_customer_id, expand=["default_source"],) 38 if settings.STRIPE_PLAN_ID and user.stripe_customer_id 39 else None 40 ) 41 42 stripe_subscription = ( 43 retrieve_stripe_subscription(stripe_customer) 44 if 
stripe_customer and stripe_customer.email == user.email 45 else None 46 ) 47 if stripe_subscription: 48 source = stripe_customer.default_source 49 return { 50 "next_payment_at": datetime.fromtimestamp( 51 stripe_subscription.current_period_end 52 ), 53 "brand": source.brand, 54 "expires_at": f"{source.exp_month}/{source.exp_year}", 55 "last4": source.last4, 56 "zip": source.address_zip, 57 } 58 59 return None 60 [end of kuma/users/utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kuma/users/utils.py b/kuma/users/utils.py --- a/kuma/users/utils.py +++ b/kuma/users/utils.py @@ -46,14 +46,23 @@ ) if stripe_subscription: source = stripe_customer.default_source + if source.object == "card": + card = source + elif source.object == "source": + card = source.card + else: + raise ValueError( + f"unexpected stripe customer default_source of type {source.object!r}" + ) + return { "next_payment_at": datetime.fromtimestamp( stripe_subscription.current_period_end ), - "brand": source.brand, - "expires_at": f"{source.exp_month}/{source.exp_year}", - "last4": source.last4, - "zip": source.address_zip, + "brand": card.brand, + "expires_at": f"{card.exp_month}/{card.exp_year}", + "last4": card.last4, + "zip": card.address_zip, } return None
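Editorial note on the patch above (not part of the dataset row): the `AttributeError: brand` occurs because a Stripe customer's `default_source` can be either a legacy `Card`, which exposes `brand`, `exp_month`, `last4`, and `address_zip` at the top level, or a `Source` whose card details are nested one level down under `.card`. The patch normalises both shapes before reading the fields and raises for anything else. The toy reproduction below uses plain dicts in place of the Stripe SDK objects (the real code reads the same fields as attributes); the error message text is taken from the patch.

```python
# Dicts stand in for stripe.Card / stripe.Source objects in this sketch.
def extract_card(default_source):
    if default_source["object"] == "card":
        return default_source          # legacy card: fields live at the top level
    if default_source["object"] == "source":
        return default_source["card"]  # source: card fields are nested under "card"
    raise ValueError(
        f"unexpected stripe customer default_source of type {default_source['object']!r}"
    )


legacy_card = {"object": "card", "brand": "Visa", "exp_month": 1, "exp_year": 2030,
               "last4": "4242", "address_zip": "12345"}
wrapped_card = {"object": "source",
                "card": {"brand": "Visa", "exp_month": 1, "exp_year": 2030,
                         "last4": "4242", "address_zip": "12345"}}

for source in (legacy_card, wrapped_card):
    card = extract_card(source)
    print(card["brand"], "{}/{}".format(card["exp_month"], card["exp_year"]), card["last4"])
```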
{"golden_diff": "diff --git a/kuma/users/utils.py b/kuma/users/utils.py\n--- a/kuma/users/utils.py\n+++ b/kuma/users/utils.py\n@@ -46,14 +46,23 @@\n )\n if stripe_subscription:\n source = stripe_customer.default_source\n+ if source.object == \"card\":\n+ card = source\n+ elif source.object == \"source\":\n+ card = source.card\n+ else:\n+ raise ValueError(\n+ f\"unexpected stripe customer default_source of type {source.object!r}\"\n+ )\n+\n return {\n \"next_payment_at\": datetime.fromtimestamp(\n stripe_subscription.current_period_end\n ),\n- \"brand\": source.brand,\n- \"expires_at\": f\"{source.exp_month}/{source.exp_year}\",\n- \"last4\": source.last4,\n- \"zip\": source.address_zip,\n+ \"brand\": card.brand,\n+ \"expires_at\": f\"{card.exp_month}/{card.exp_year}\",\n+ \"last4\": card.last4,\n+ \"zip\": card.address_zip,\n }\n \n return None\n", "issue": "AttributeError: brand\nhttps://sentry.prod.mozaws.net/operations/mdn-prod/issues/7302494/\n\n```\nResolver404: {'tried': [[<RegexURLPattern None ^media/(?:redesign/)?css/(?P<doc>.*)-min.css$>], [<RegexURLPattern None ^media/(?:redesign/)?js/(?P<doc>.*)-min.js$>], [<RegexURLPattern None ^media/(?:redesign/)?img(?P<suffix>.*)$>], [<RegexURLPattern None ^media/(?:redesign/)?css(?P<suffix>.*)$>], [<RegexURLPattern None ^media/(?:redesign/)?js(?P<suffix>.*)$>], [<RegexURLPattern None ^media/(?:redesign/)?fonts(?P<suffix>.*)$>], [<RegexURLPattern None ^media/uploads/demos/(?:.*)$>], [<RegexURLPattern None (?i)^(?P<one>.*)//(?P<two>.*)//(?P<three>.*)$>], [<RegexURLPattern None (?i)^(?P<one>.*)//(?P<two>.*)$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_1_canvas_rect.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_2_canvas_moveto.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_3_canvas_lineto.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_4_canvas_arc.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_5_canvas_quadraticcurveto.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_6_canvas_beziercurveto.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/3_1_canvas_drawimage.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/3_2_canvas_drawimage.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/3_3_canvas_drawimage.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/3_4_canvas_gallery.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_1_canvas_fillstyle.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_2_canvas_strokestyle.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_3_canvas_globalalpha.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_4_canvas_rgba.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_5_canvas_linewidth.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_6_canvas_linecap.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_7_canvas_linejoin.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_8_canvas_miterlimit.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_9_canvas_lineargradient.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_10_canvas_radialgradient.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_11_canvas_createpattern.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/5_1_canvas_savestate.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/5_2_canvas_translate.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/5_3_canvas_rotate.html$>], 
[<RegexURLPattern None (?i)^samples/canvas-tutorial/5_4_canvas_scale.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/6_1_canvas_composite.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/6_2_canvas_clipping.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/globalCompositeOperation.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/backdrop.png$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/bg_gallery.png$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_1.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_2.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_3.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_4.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_5.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_6.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_7.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_8.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/picture_frame.png$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/rhino.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/wallpaper.png$>], [<RegexURLPattern None (?i)^samples/domref/mozGetAsFile.html$>], [<RegexURLPattern None (?i)^samples/raycaster/input.js$>], [<RegexURLPattern None (?i)^samples/raycaster/Level.js$>], [<RegexURL...\n File \"redirect_urls/middleware.py\", line 14, in __call__\n resolver_match = self.resolver.resolve(request.path_info)\n File \"newrelic/hooks/framework_django.py\", line 600, in wrapper\n return _wrapped(*args, **kwargs)\n File \"newrelic/hooks/framework_django.py\", line 588, in _wrapped\n result = wrapped(path)\n File \"newrelic/hooks/framework_django.py\", line 575, in wrapper\n return wrapped(*args, **kwargs)\n File \"django/urls/resolvers.py\", line 394, in resolve\n raise Resolver404({'tried': tried, 'path': new_path})\n\nKeyError: 'brand'\n File \"stripe/stripe_object.py\", line 90, in __getattr__\n return self[k]\n File \"stripe/stripe_object.py\", line 131, in __getitem__\n raise err\n File \"stripe/stripe_object.py\", line 119, in __getitem__\n return super(StripeObject, self).__getitem__(k)\n\nAttributeError: brand\n(4 additional frame(s) were not displayed)\n...\n File \"django/views/decorators/cache.py\", line 57, in _wrapped_view_func\n response = view_func(request, *args, **kwargs)\n File \"kuma/core/decorators.py\", line 210, in wrapped\n return func(request, *args, **kwargs)\n File \"kuma/users/views.py\", line 472, in user_edit\n \"subscription_info\": retrieve_stripe_subscription_info(edit_user,),\n File \"kuma/users/utils.py\", line 53, in retrieve_stripe_subscription_info\n \"brand\": source.brand,\n File \"stripe/stripe_object.py\", line 92, in __getattr__\n raise AttributeError(*err.args)\n\nAttributeError: brand\n```\n", "before_files": [{"content": "from datetime import datetime\n\nimport stripe\nfrom django.conf import settings\n\n\ndef retrieve_stripe_subscription(customer):\n for subscription in customer.subscriptions.list().auto_paging_iter():\n # We have to use array indexing syntax, as stripe uses dicts to\n # represent its objects (dicts come with an .items method)\n for item in subscription[\"items\"].auto_paging_iter():\n if item.plan.id == settings.STRIPE_PLAN_ID:\n return subscription\n\n return None\n\n\ndef 
create_stripe_customer_and_subscription_for_user(user, email, stripe_token):\n customer = (\n stripe.Customer.retrieve(user.stripe_customer_id)\n if user.stripe_customer_id\n else None\n )\n if not customer or customer.email != email:\n customer = stripe.Customer.create(email=email, source=stripe_token,)\n user.stripe_customer_id = customer.id\n user.save()\n\n if retrieve_stripe_subscription(customer) is None:\n stripe.Subscription.create(\n customer=customer.id, items=[{\"plan\": settings.STRIPE_PLAN_ID}],\n )\n\n\ndef retrieve_stripe_subscription_info(user):\n stripe_customer = (\n stripe.Customer.retrieve(user.stripe_customer_id, expand=[\"default_source\"],)\n if settings.STRIPE_PLAN_ID and user.stripe_customer_id\n else None\n )\n\n stripe_subscription = (\n retrieve_stripe_subscription(stripe_customer)\n if stripe_customer and stripe_customer.email == user.email\n else None\n )\n if stripe_subscription:\n source = stripe_customer.default_source\n return {\n \"next_payment_at\": datetime.fromtimestamp(\n stripe_subscription.current_period_end\n ),\n \"brand\": source.brand,\n \"expires_at\": f\"{source.exp_month}/{source.exp_year}\",\n \"last4\": source.last4,\n \"zip\": source.address_zip,\n }\n\n return None\n", "path": "kuma/users/utils.py"}]}
2,714
237
gh_patches_debug_2468
rasdani/github-patches
git_diff
learningequality__kolibri-4935
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> users should not be able to get 1000% on an exam, unfortunately ### Observed behavior reported by @jtamiace re: @radinamatic's apparent good luck: ![image](https://user-images.githubusercontent.com/2367265/52095361-812c2280-2577-11e9-86d6-8f6cff4e0f5b.png) ### Expected behavior exams are scored between 0 and 100 ### User-facing consequences ???? ### Errors and logs unknown ### Steps to reproduce see http://kolibribeta.learningequality.org/coach/#/fa4cbfeda32c0c0fbf1832fc1ddd10c3/reports/learners ### Context k 0.12.0 alpha 7 </issue> <code> [start of kolibri/plugins/coach/class_summary_api.py] 1 from django.db.models import Max 2 from django.db.models import Sum 3 from django.shortcuts import get_object_or_404 4 from rest_framework import serializers 5 from rest_framework import viewsets 6 from rest_framework.response import Response 7 8 from kolibri.core.auth import models as auth_models 9 from kolibri.core.content.models import ContentNode 10 from kolibri.core.exams.models import Exam 11 from kolibri.core.lessons.models import Lesson 12 from kolibri.core.logger import models as logger_models 13 from kolibri.core.notifications.models import LearnerProgressNotification 14 from kolibri.core.notifications.models import NotificationEventType 15 16 17 # Intended to match NotificationEventType 18 NOT_STARTED = "NotStarted" 19 STARTED = "Started" 20 HELP_NEEDED = "HelpNeeded" 21 COMPLETED = "Completed" 22 23 24 def content_status_serializer(lesson_data, learners_data, classroom): 25 26 # First generate a unique set of content node ids from all the lessons 27 lesson_node_ids = set() 28 for lesson in lesson_data: 29 lesson_node_ids |= set(lesson.get("node_ids")) 30 31 # Now create a map of content_id to node_id so that we can map between lessons, and notifications 32 # which use the node id, and summary logs, which use content_id 33 content_map = {n[0]: n[1] for n in ContentNode.objects.filter(id__in=lesson_node_ids).values_list("content_id", "id")} 34 35 # Get all the values we need from the summary logs to be able to summarize current status on the 36 # relevant content items. 37 content_log_values = logger_models.ContentSummaryLog.objects.filter( 38 content_id__in=set(content_map.keys()), user__in=[learner["id"] for learner in learners_data] 39 ).values("user_id", "content_id", "end_timestamp", "time_spent", "progress") 40 41 # In order to make the lookup speedy, generate a unique key for each user/node that we find 42 # listed in the needs help notifications that are relevant. We can then just check 43 # existence of this key in the set in order to see whether this user has been flagged as needing 44 # help. 
45 lookup_key = "{user_id}-{node_id}" 46 needs_help = { 47 lookup_key.format(user_id=n[0], node_id=n[1]): n[2] for n in LearnerProgressNotification.objects.filter( 48 classroom_id=classroom.id, 49 notification_event=NotificationEventType.Help, 50 lesson_id__in=[lesson["id"] for lesson in lesson_data], 51 ).values_list("user_id", "contentnode_id", "timestamp") 52 } 53 54 # In case a previously flagged learner has since completed an exercise, check all the completed 55 # notifications also 56 completed = { 57 lookup_key.format(user_id=n[0], node_id=n[1]): n[2] for n in LearnerProgressNotification.objects.filter( 58 classroom_id=classroom.id, 59 notification_event=NotificationEventType.Completed, 60 lesson_id__in=[lesson["id"] for lesson in lesson_data], 61 ).values_list("user_id", "contentnode_id", "timestamp") 62 } 63 64 def get_status(log): 65 """ 66 Read the dict from a content summary log values query and return the status 67 In the case that we have found a needs help notification for the user and content node 68 in question, return that they need help, otherwise return status based on their 69 current progress. 70 """ 71 content_id = log["content_id"] 72 if content_id in content_map: 73 # Don't try to lookup anything if we don't know the content_id 74 # node_id mapping - might happen if a channel has since been deleted 75 key = lookup_key.format(user_id=log["user_id"], node_id=content_map[content_id]) 76 if key in needs_help: 77 # Now check if we have not already registered completion of the content node 78 # or if we have and the timestamp is earlier than that on the needs_help event 79 if key not in completed or completed[key] < needs_help[key]: 80 return HELP_NEEDED 81 if log["progress"] == 1: 82 return COMPLETED 83 elif log["progress"] == 0: 84 return NOT_STARTED 85 return STARTED 86 87 def map_content_logs(log): 88 """ 89 Parse the content logs to return objects in the expected format. 
90 """ 91 return { 92 "learner_id": log["user_id"], 93 "content_id": log["content_id"], 94 "status": get_status(log), 95 "last_activity": log["end_timestamp"], 96 "time_spent": log["time_spent"], 97 } 98 99 return map(map_content_logs, content_log_values) 100 101 102 class ExamStatusSerializer(serializers.ModelSerializer): 103 status = serializers.SerializerMethodField() 104 exam_id = serializers.PrimaryKeyRelatedField(source="exam", read_only=True) 105 learner_id = serializers.PrimaryKeyRelatedField(source="user", read_only=True) 106 last_activity = serializers.CharField() 107 num_correct = serializers.SerializerMethodField() 108 109 def get_status(self, exam_log): 110 if exam_log.closed: 111 return COMPLETED 112 else: 113 return STARTED 114 115 def get_num_correct(self, exam_log): 116 return ( 117 exam_log.attemptlogs.values_list('item') 118 .order_by('completion_timestamp') 119 .distinct() 120 .aggregate(Sum('correct')) 121 .get('correct__sum') 122 ) 123 124 class Meta: 125 model = logger_models.ExamLog 126 fields = ("exam_id", "learner_id", "status", "last_activity", "num_correct") 127 128 129 class GroupSerializer(serializers.ModelSerializer): 130 member_ids = serializers.SerializerMethodField() 131 132 def get_member_ids(self, group): 133 return group.get_members().values_list("id", flat=True) 134 135 class Meta: 136 model = auth_models.LearnerGroup 137 fields = ("id", "name", "member_ids") 138 139 140 class UserSerializer(serializers.ModelSerializer): 141 name = serializers.CharField(source="full_name") 142 143 class Meta: 144 model = auth_models.FacilityUser 145 fields = ("id", "name", "username") 146 147 148 class LessonNodeIdsField(serializers.Field): 149 def to_representation(self, values): 150 return [value["contentnode_id"] for value in values] 151 152 153 class LessonAssignmentsField(serializers.RelatedField): 154 def to_representation(self, assignment): 155 return assignment.collection.id 156 157 158 class LessonSerializer(serializers.ModelSerializer): 159 active = serializers.BooleanField(source="is_active") 160 node_ids = LessonNodeIdsField(default=[], source="resources") 161 162 # classrooms are in here, and filtered out later 163 groups = LessonAssignmentsField( 164 many=True, read_only=True, source="lesson_assignments" 165 ) 166 167 class Meta: 168 model = Lesson 169 fields = ("id", "title", "active", "node_ids", "groups") 170 171 172 class ExamQuestionSourcesField(serializers.Field): 173 def to_representation(self, values): 174 return values 175 176 177 class ExamAssignmentsField(serializers.RelatedField): 178 def to_representation(self, assignment): 179 return assignment.collection.id 180 181 182 class ExamSerializer(serializers.ModelSerializer): 183 184 question_sources = ExamQuestionSourcesField(default=[]) 185 186 # classes are in here, and filtered out later 187 groups = ExamAssignmentsField(many=True, read_only=True, source="assignments") 188 189 class Meta: 190 model = Exam 191 fields = ("id", "title", "active", "question_sources", "groups", "data_model_version") 192 193 194 class ContentSerializer(serializers.ModelSerializer): 195 node_id = serializers.CharField(source="id") 196 197 class Meta: 198 model = ContentNode 199 fields = ("node_id", "content_id", "title", "kind") 200 201 202 def data(Serializer, queryset): 203 return Serializer(queryset, many=True).data 204 205 206 class ClassSummaryViewSet(viewsets.ViewSet): 207 def retrieve(self, request, pk): 208 classroom = get_object_or_404(auth_models.Classroom, id=pk) 209 query_learners = 
classroom.get_members() 210 query_lesson = Lesson.objects.filter(collection=pk) 211 query_exams = Exam.objects.filter(collection=pk) 212 query_exam_logs = logger_models.ExamLog.objects.filter( 213 exam__in=query_exams 214 ).annotate(last_activity=Max("attemptlogs__end_timestamp")) 215 216 lesson_data = data(LessonSerializer, query_lesson) 217 exam_data = data(ExamSerializer, query_exams) 218 219 # filter classes out of exam assignments 220 for exam in exam_data: 221 exam["groups"] = [g for g in exam["groups"] if g != pk] 222 223 # filter classes out of lesson assignments 224 for lesson in lesson_data: 225 lesson["groups"] = [g for g in lesson["groups"] if g != pk] 226 227 all_node_ids = set() 228 for lesson in lesson_data: 229 all_node_ids |= set(lesson.get("node_ids")) 230 for exam in exam_data: 231 exam_node_ids = [question['exercise_id'] for question in exam.get("question_sources")] 232 all_node_ids |= set(exam_node_ids) 233 234 query_content = ContentNode.objects.filter(id__in=all_node_ids) 235 236 learners_data = data(UserSerializer, query_learners) 237 238 output = { 239 "id": pk, 240 "name": classroom.name, 241 "coaches": data(UserSerializer, classroom.get_coaches()), 242 "learners": learners_data, 243 "groups": data(GroupSerializer, classroom.get_learner_groups()), 244 "exams": exam_data, 245 "exam_learner_status": data(ExamStatusSerializer, query_exam_logs), 246 "content": data(ContentSerializer, query_content), 247 "content_learner_status": content_status_serializer(lesson_data, learners_data, classroom), 248 "lessons": lesson_data, 249 } 250 251 return Response(output) 252 [end of kolibri/plugins/coach/class_summary_api.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kolibri/plugins/coach/class_summary_api.py b/kolibri/plugins/coach/class_summary_api.py --- a/kolibri/plugins/coach/class_summary_api.py +++ b/kolibri/plugins/coach/class_summary_api.py @@ -188,7 +188,7 @@ class Meta: model = Exam - fields = ("id", "title", "active", "question_sources", "groups", "data_model_version") + fields = ("id", "title", "active", "question_sources", "groups", "data_model_version", "question_count") class ContentSerializer(serializers.ModelSerializer):
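Editorial note (not part of the dataset row): the one-line fix only exposes the exam's `question_count` through `ExamSerializer`. The reported 1000% appears when the client divides the number of correct answers by a denominator smaller than the real number of questions, for example the length of `question_sources`, which does not necessarily equal the number of questions for every `data_model_version`. The exact client-side computation is not shown in this entry, so the sketch below is only a toy illustration of that arithmetic with made-up numbers.

```python
# Made-up numbers, purely to illustrate how a percentage above 100 can appear.
def percent(num_correct, denominator):
    return 100 * num_correct / denominator

num_correct = 10
len_question_sources = 1   # e.g. a single entry describing a whole exercise
question_count = 10        # the value the serializer now exposes

print(percent(num_correct, len_question_sources))  # 1000.0, the score the coach page showed
print(percent(num_correct, question_count))        # 100.0, capped correctly at 100
```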
{"golden_diff": "diff --git a/kolibri/plugins/coach/class_summary_api.py b/kolibri/plugins/coach/class_summary_api.py\n--- a/kolibri/plugins/coach/class_summary_api.py\n+++ b/kolibri/plugins/coach/class_summary_api.py\n@@ -188,7 +188,7 @@\n \n class Meta:\n model = Exam\n- fields = (\"id\", \"title\", \"active\", \"question_sources\", \"groups\", \"data_model_version\")\n+ fields = (\"id\", \"title\", \"active\", \"question_sources\", \"groups\", \"data_model_version\", \"question_count\")\n \n \n class ContentSerializer(serializers.ModelSerializer):\n", "issue": "users should not be able to get 1000% on an exam, unfortunately\n\r\n### Observed behavior\r\n\r\nreported by @jtamiace re: @radinamatic's apparent good luck:\r\n\r\n![image](https://user-images.githubusercontent.com/2367265/52095361-812c2280-2577-11e9-86d6-8f6cff4e0f5b.png)\r\n\r\n### Expected behavior\r\n\r\nexams are scored between 0 and 100\r\n\r\n### User-facing consequences\r\n\r\n????\r\n\r\n### Errors and logs\r\n\r\nunknown\r\n\r\n### Steps to reproduce\r\n\r\nsee http://kolibribeta.learningequality.org/coach/#/fa4cbfeda32c0c0fbf1832fc1ddd10c3/reports/learners\r\n\r\n### Context\r\n\r\nk 0.12.0 alpha 7\r\n\n", "before_files": [{"content": "from django.db.models import Max\nfrom django.db.models import Sum\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework import serializers\nfrom rest_framework import viewsets\nfrom rest_framework.response import Response\n\nfrom kolibri.core.auth import models as auth_models\nfrom kolibri.core.content.models import ContentNode\nfrom kolibri.core.exams.models import Exam\nfrom kolibri.core.lessons.models import Lesson\nfrom kolibri.core.logger import models as logger_models\nfrom kolibri.core.notifications.models import LearnerProgressNotification\nfrom kolibri.core.notifications.models import NotificationEventType\n\n\n# Intended to match NotificationEventType\nNOT_STARTED = \"NotStarted\"\nSTARTED = \"Started\"\nHELP_NEEDED = \"HelpNeeded\"\nCOMPLETED = \"Completed\"\n\n\ndef content_status_serializer(lesson_data, learners_data, classroom):\n\n # First generate a unique set of content node ids from all the lessons\n lesson_node_ids = set()\n for lesson in lesson_data:\n lesson_node_ids |= set(lesson.get(\"node_ids\"))\n\n # Now create a map of content_id to node_id so that we can map between lessons, and notifications\n # which use the node id, and summary logs, which use content_id\n content_map = {n[0]: n[1] for n in ContentNode.objects.filter(id__in=lesson_node_ids).values_list(\"content_id\", \"id\")}\n\n # Get all the values we need from the summary logs to be able to summarize current status on the\n # relevant content items.\n content_log_values = logger_models.ContentSummaryLog.objects.filter(\n content_id__in=set(content_map.keys()), user__in=[learner[\"id\"] for learner in learners_data]\n ).values(\"user_id\", \"content_id\", \"end_timestamp\", \"time_spent\", \"progress\")\n\n # In order to make the lookup speedy, generate a unique key for each user/node that we find\n # listed in the needs help notifications that are relevant. 
We can then just check\n # existence of this key in the set in order to see whether this user has been flagged as needing\n # help.\n lookup_key = \"{user_id}-{node_id}\"\n needs_help = {\n lookup_key.format(user_id=n[0], node_id=n[1]): n[2] for n in LearnerProgressNotification.objects.filter(\n classroom_id=classroom.id,\n notification_event=NotificationEventType.Help,\n lesson_id__in=[lesson[\"id\"] for lesson in lesson_data],\n ).values_list(\"user_id\", \"contentnode_id\", \"timestamp\")\n }\n\n # In case a previously flagged learner has since completed an exercise, check all the completed\n # notifications also\n completed = {\n lookup_key.format(user_id=n[0], node_id=n[1]): n[2] for n in LearnerProgressNotification.objects.filter(\n classroom_id=classroom.id,\n notification_event=NotificationEventType.Completed,\n lesson_id__in=[lesson[\"id\"] for lesson in lesson_data],\n ).values_list(\"user_id\", \"contentnode_id\", \"timestamp\")\n }\n\n def get_status(log):\n \"\"\"\n Read the dict from a content summary log values query and return the status\n In the case that we have found a needs help notification for the user and content node\n in question, return that they need help, otherwise return status based on their\n current progress.\n \"\"\"\n content_id = log[\"content_id\"]\n if content_id in content_map:\n # Don't try to lookup anything if we don't know the content_id\n # node_id mapping - might happen if a channel has since been deleted\n key = lookup_key.format(user_id=log[\"user_id\"], node_id=content_map[content_id])\n if key in needs_help:\n # Now check if we have not already registered completion of the content node\n # or if we have and the timestamp is earlier than that on the needs_help event\n if key not in completed or completed[key] < needs_help[key]:\n return HELP_NEEDED\n if log[\"progress\"] == 1:\n return COMPLETED\n elif log[\"progress\"] == 0:\n return NOT_STARTED\n return STARTED\n\n def map_content_logs(log):\n \"\"\"\n Parse the content logs to return objects in the expected format.\n \"\"\"\n return {\n \"learner_id\": log[\"user_id\"],\n \"content_id\": log[\"content_id\"],\n \"status\": get_status(log),\n \"last_activity\": log[\"end_timestamp\"],\n \"time_spent\": log[\"time_spent\"],\n }\n\n return map(map_content_logs, content_log_values)\n\n\nclass ExamStatusSerializer(serializers.ModelSerializer):\n status = serializers.SerializerMethodField()\n exam_id = serializers.PrimaryKeyRelatedField(source=\"exam\", read_only=True)\n learner_id = serializers.PrimaryKeyRelatedField(source=\"user\", read_only=True)\n last_activity = serializers.CharField()\n num_correct = serializers.SerializerMethodField()\n\n def get_status(self, exam_log):\n if exam_log.closed:\n return COMPLETED\n else:\n return STARTED\n\n def get_num_correct(self, exam_log):\n return (\n exam_log.attemptlogs.values_list('item')\n .order_by('completion_timestamp')\n .distinct()\n .aggregate(Sum('correct'))\n .get('correct__sum')\n )\n\n class Meta:\n model = logger_models.ExamLog\n fields = (\"exam_id\", \"learner_id\", \"status\", \"last_activity\", \"num_correct\")\n\n\nclass GroupSerializer(serializers.ModelSerializer):\n member_ids = serializers.SerializerMethodField()\n\n def get_member_ids(self, group):\n return group.get_members().values_list(\"id\", flat=True)\n\n class Meta:\n model = auth_models.LearnerGroup\n fields = (\"id\", \"name\", \"member_ids\")\n\n\nclass UserSerializer(serializers.ModelSerializer):\n name = serializers.CharField(source=\"full_name\")\n\n class Meta:\n 
model = auth_models.FacilityUser\n fields = (\"id\", \"name\", \"username\")\n\n\nclass LessonNodeIdsField(serializers.Field):\n def to_representation(self, values):\n return [value[\"contentnode_id\"] for value in values]\n\n\nclass LessonAssignmentsField(serializers.RelatedField):\n def to_representation(self, assignment):\n return assignment.collection.id\n\n\nclass LessonSerializer(serializers.ModelSerializer):\n active = serializers.BooleanField(source=\"is_active\")\n node_ids = LessonNodeIdsField(default=[], source=\"resources\")\n\n # classrooms are in here, and filtered out later\n groups = LessonAssignmentsField(\n many=True, read_only=True, source=\"lesson_assignments\"\n )\n\n class Meta:\n model = Lesson\n fields = (\"id\", \"title\", \"active\", \"node_ids\", \"groups\")\n\n\nclass ExamQuestionSourcesField(serializers.Field):\n def to_representation(self, values):\n return values\n\n\nclass ExamAssignmentsField(serializers.RelatedField):\n def to_representation(self, assignment):\n return assignment.collection.id\n\n\nclass ExamSerializer(serializers.ModelSerializer):\n\n question_sources = ExamQuestionSourcesField(default=[])\n\n # classes are in here, and filtered out later\n groups = ExamAssignmentsField(many=True, read_only=True, source=\"assignments\")\n\n class Meta:\n model = Exam\n fields = (\"id\", \"title\", \"active\", \"question_sources\", \"groups\", \"data_model_version\")\n\n\nclass ContentSerializer(serializers.ModelSerializer):\n node_id = serializers.CharField(source=\"id\")\n\n class Meta:\n model = ContentNode\n fields = (\"node_id\", \"content_id\", \"title\", \"kind\")\n\n\ndef data(Serializer, queryset):\n return Serializer(queryset, many=True).data\n\n\nclass ClassSummaryViewSet(viewsets.ViewSet):\n def retrieve(self, request, pk):\n classroom = get_object_or_404(auth_models.Classroom, id=pk)\n query_learners = classroom.get_members()\n query_lesson = Lesson.objects.filter(collection=pk)\n query_exams = Exam.objects.filter(collection=pk)\n query_exam_logs = logger_models.ExamLog.objects.filter(\n exam__in=query_exams\n ).annotate(last_activity=Max(\"attemptlogs__end_timestamp\"))\n\n lesson_data = data(LessonSerializer, query_lesson)\n exam_data = data(ExamSerializer, query_exams)\n\n # filter classes out of exam assignments\n for exam in exam_data:\n exam[\"groups\"] = [g for g in exam[\"groups\"] if g != pk]\n\n # filter classes out of lesson assignments\n for lesson in lesson_data:\n lesson[\"groups\"] = [g for g in lesson[\"groups\"] if g != pk]\n\n all_node_ids = set()\n for lesson in lesson_data:\n all_node_ids |= set(lesson.get(\"node_ids\"))\n for exam in exam_data:\n exam_node_ids = [question['exercise_id'] for question in exam.get(\"question_sources\")]\n all_node_ids |= set(exam_node_ids)\n\n query_content = ContentNode.objects.filter(id__in=all_node_ids)\n\n learners_data = data(UserSerializer, query_learners)\n\n output = {\n \"id\": pk,\n \"name\": classroom.name,\n \"coaches\": data(UserSerializer, classroom.get_coaches()),\n \"learners\": learners_data,\n \"groups\": data(GroupSerializer, classroom.get_learner_groups()),\n \"exams\": exam_data,\n \"exam_learner_status\": data(ExamStatusSerializer, query_exam_logs),\n \"content\": data(ContentSerializer, query_content),\n \"content_learner_status\": content_status_serializer(lesson_data, learners_data, classroom),\n \"lessons\": lesson_data,\n }\n\n return Response(output)\n", "path": "kolibri/plugins/coach/class_summary_api.py"}]}
3,460
137
gh_patches_debug_22672
rasdani/github-patches
git_diff
Lightning-AI__torchmetrics-2252
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Half precision doesn't work with top_k > 1 ## 🐛 Bug When using predictions at half-precision, MulticlassAccuracy throws an error when using top_k > 1. Please see the code below: ### To Reproduce Steps to reproduce the behavior... ```py import torch from torchmetrics.classification import MulticlassAccuracy preds = torch.tensor([[1, 0, 0]]).half() targets = torch.tensor([1]) metric = MulticlassAccuracy(num_classes=3, top_k=2) metric(preds, targets) metric.compute() ``` ### Environment - TorchMetrics version: 1.0.1 - Python & PyTorch Version (e.g., 1.0): 3.10 and 2.0.1 </issue> <code> [start of src/torchmetrics/utilities/data.py] 1 # Copyright The Lightning team. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 import sys 15 from typing import Any, Dict, List, Optional, Sequence, Tuple, Union 16 17 import torch 18 from lightning_utilities import apply_to_collection 19 from torch import Tensor 20 21 from torchmetrics.utilities.exceptions import TorchMetricsUserWarning 22 from torchmetrics.utilities.imports import _TORCH_GREATER_EQUAL_1_12, _XLA_AVAILABLE 23 from torchmetrics.utilities.prints import rank_zero_warn 24 25 METRIC_EPS = 1e-6 26 27 28 def dim_zero_cat(x: Union[Tensor, List[Tensor]]) -> Tensor: 29 """Concatenation along the zero dimension.""" 30 if isinstance(x, torch.Tensor): 31 return x 32 x = [y.unsqueeze(0) if y.numel() == 1 and y.ndim == 0 else y for y in x] 33 if not x: # empty list 34 raise ValueError("No samples to concatenate") 35 return torch.cat(x, dim=0) 36 37 38 def dim_zero_sum(x: Tensor) -> Tensor: 39 """Summation along the zero dimension.""" 40 return torch.sum(x, dim=0) 41 42 43 def dim_zero_mean(x: Tensor) -> Tensor: 44 """Average along the zero dimension.""" 45 return torch.mean(x, dim=0) 46 47 48 def dim_zero_max(x: Tensor) -> Tensor: 49 """Max along the zero dimension.""" 50 return torch.max(x, dim=0).values 51 52 53 def dim_zero_min(x: Tensor) -> Tensor: 54 """Min along the zero dimension.""" 55 return torch.min(x, dim=0).values 56 57 58 def _flatten(x: Sequence) -> list: 59 """Flatten list of list into single list.""" 60 return [item for sublist in x for item in sublist] 61 62 63 def _flatten_dict(x: Dict) -> Tuple[Dict, bool]: 64 """Flatten dict of dicts into single dict and checking for duplicates in keys along the way.""" 65 new_dict = {} 66 duplicates = False 67 for key, value in x.items(): 68 if isinstance(value, dict): 69 for k, v in value.items(): 70 if k in new_dict: 71 duplicates = True 72 new_dict[k] = v 73 else: 74 if key in new_dict: 75 duplicates = True 76 new_dict[key] = value 77 return new_dict, duplicates 78 79 80 def to_onehot( 81 label_tensor: Tensor, 82 num_classes: Optional[int] = None, 83 ) -> Tensor: 84 """Convert a dense label tensor to one-hot format. 85 86 Args: 87 label_tensor: dense label tensor, with shape [N, d1, d2, ...] 
88 num_classes: number of classes C 89 90 Returns: 91 A sparse label tensor with shape [N, C, d1, d2, ...] 92 93 Example: 94 >>> x = torch.tensor([1, 2, 3]) 95 >>> to_onehot(x) 96 tensor([[0, 1, 0, 0], 97 [0, 0, 1, 0], 98 [0, 0, 0, 1]]) 99 100 """ 101 if num_classes is None: 102 num_classes = int(label_tensor.max().detach().item() + 1) 103 104 tensor_onehot = torch.zeros( 105 label_tensor.shape[0], 106 num_classes, 107 *label_tensor.shape[1:], 108 dtype=label_tensor.dtype, 109 device=label_tensor.device, 110 ) 111 index = label_tensor.long().unsqueeze(1).expand_as(tensor_onehot) 112 return tensor_onehot.scatter_(1, index, 1.0) 113 114 115 def select_topk(prob_tensor: Tensor, topk: int = 1, dim: int = 1) -> Tensor: 116 """Convert a probability tensor to binary by selecting top-k the highest entries. 117 118 Args: 119 prob_tensor: dense tensor of shape ``[..., C, ...]``, where ``C`` is in the 120 position defined by the ``dim`` argument 121 topk: number of the highest entries to turn into 1s 122 dim: dimension on which to compare entries 123 124 Returns: 125 A binary tensor of the same shape as the input tensor of type ``torch.int32`` 126 127 Example: 128 >>> x = torch.tensor([[1.1, 2.0, 3.0], [2.0, 1.0, 0.5]]) 129 >>> select_topk(x, topk=2) 130 tensor([[0, 1, 1], 131 [1, 1, 0]], dtype=torch.int32) 132 133 """ 134 zeros = torch.zeros_like(prob_tensor) 135 if topk == 1: # argmax has better performance than topk 136 topk_tensor = zeros.scatter(dim, prob_tensor.argmax(dim=dim, keepdim=True), 1.0) 137 else: 138 topk_tensor = zeros.scatter(dim, prob_tensor.topk(k=topk, dim=dim).indices, 1.0) 139 return topk_tensor.int() 140 141 142 def to_categorical(x: Tensor, argmax_dim: int = 1) -> Tensor: 143 """Convert a tensor of probabilities to a dense label tensor. 144 145 Args: 146 x: probabilities to get the categorical label [N, d1, d2, ...] 147 argmax_dim: dimension to apply 148 149 Return: 150 A tensor with categorical labels [N, d2, ...] 151 152 Example: 153 >>> x = torch.tensor([[0.2, 0.5], [0.9, 0.1]]) 154 >>> to_categorical(x) 155 tensor([1, 0]) 156 157 """ 158 return torch.argmax(x, dim=argmax_dim) 159 160 161 def _squeeze_scalar_element_tensor(x: Tensor) -> Tensor: 162 return x.squeeze() if x.numel() == 1 else x 163 164 165 def _squeeze_if_scalar(data: Any) -> Any: 166 return apply_to_collection(data, Tensor, _squeeze_scalar_element_tensor) 167 168 169 def _bincount(x: Tensor, minlength: Optional[int] = None) -> Tensor: 170 """Implement custom bincount. 171 172 PyTorch currently does not support ``torch.bincount`` when running in deterministic mode on GPU or when running 173 MPS devices or when running on XLA device. This implementation therefore falls back to using a combination of 174 `torch.arange` and `torch.eq` in these scenarios. A small performance hit can expected and higher memory consumption 175 as `[batch_size, mincount]` tensor needs to be initialized compared to native ``torch.bincount``. 
176 177 Args: 178 x: tensor to count 179 minlength: minimum length to count 180 181 Returns: 182 Number of occurrences for each unique element in x 183 184 Example: 185 >>> x = torch.tensor([0,0,0,1,1,2,2,2,2]) 186 >>> _bincount(x, minlength=3) 187 tensor([3, 2, 4]) 188 189 """ 190 if minlength is None: 191 minlength = len(torch.unique(x)) 192 193 if torch.are_deterministic_algorithms_enabled() or _XLA_AVAILABLE or _TORCH_GREATER_EQUAL_1_12 and x.is_mps: 194 mesh = torch.arange(minlength, device=x.device).repeat(len(x), 1) 195 return torch.eq(x.reshape(-1, 1), mesh).sum(dim=0) 196 197 return torch.bincount(x, minlength=minlength) 198 199 200 def _cumsum(x: Tensor, dim: Optional[int] = 0, dtype: Optional[torch.dtype] = None) -> Tensor: 201 if torch.are_deterministic_algorithms_enabled() and x.is_cuda and x.is_floating_point() and sys.platform != "win32": 202 rank_zero_warn( 203 "You are trying to use a metric in deterministic mode on GPU that uses `torch.cumsum`, which is currently " 204 "not supported. The tensor will be copied to the CPU memory to compute it and then copied back to GPU. " 205 "Expect some slowdowns.", 206 TorchMetricsUserWarning, 207 ) 208 return x.cpu().cumsum(dim=dim, dtype=dtype).cuda() 209 return torch.cumsum(x, dim=dim, dtype=dtype) 210 211 212 def _flexible_bincount(x: Tensor) -> Tensor: 213 """Similar to `_bincount`, but works also with tensor that do not contain continuous values. 214 215 Args: 216 x: tensor to count 217 218 Returns: 219 Number of occurrences for each unique element in x 220 221 """ 222 # make sure elements in x start from 0 223 x = x - x.min() 224 unique_x = torch.unique(x) 225 226 output = _bincount(x, minlength=torch.max(unique_x) + 1) # type: ignore[arg-type] 227 # remove zeros from output tensor 228 return output[unique_x] 229 230 231 def allclose(tensor1: Tensor, tensor2: Tensor) -> bool: 232 """Wrap torch.allclose to be robust towards dtype difference.""" 233 if tensor1.dtype != tensor2.dtype: 234 tensor2 = tensor2.to(dtype=tensor1.dtype) 235 return torch.allclose(tensor1, tensor2) 236 [end of src/torchmetrics/utilities/data.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/torchmetrics/utilities/data.py b/src/torchmetrics/utilities/data.py --- a/src/torchmetrics/utilities/data.py +++ b/src/torchmetrics/utilities/data.py @@ -112,6 +112,14 @@ return tensor_onehot.scatter_(1, index, 1.0) +def _top_k_with_half_precision_support(x: Tensor, k: int = 1, dim: int = 1) -> Tensor: + """torch.top_k does not support half precision on CPU.""" + if x.dtype == torch.half and not x.is_cuda: + idx = torch.argsort(x, dim=dim, descending=True) + return idx.narrow(dim, 0, k) + return x.topk(k=k, dim=dim).indices + + def select_topk(prob_tensor: Tensor, topk: int = 1, dim: int = 1) -> Tensor: """Convert a probability tensor to binary by selecting top-k the highest entries. @@ -135,7 +143,7 @@ if topk == 1: # argmax has better performance than topk topk_tensor = zeros.scatter(dim, prob_tensor.argmax(dim=dim, keepdim=True), 1.0) else: - topk_tensor = zeros.scatter(dim, prob_tensor.topk(k=topk, dim=dim).indices, 1.0) + topk_tensor = zeros.scatter(dim, _top_k_with_half_precision_support(prob_tensor, k=topk, dim=dim), 1.0) return topk_tensor.int()
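A minimal sketch of the behaviour expected once the patch above is applied, reusing the reproduction from the issue verbatim; the only assumption is that `torch.argsort`, which the new `_top_k_with_half_precision_support` helper falls back to, supports half precision on CPU:

```py
import torch
from torchmetrics.classification import MulticlassAccuracy

# Reproduction from the issue: half-precision predictions with top_k > 1.
preds = torch.tensor([[1, 0, 0]]).half()
targets = torch.tensor([1])

metric = MulticlassAccuracy(num_classes=3, top_k=2)

# With the argsort fallback in select_topk, the half-precision top-k
# selection no longer fails on CPU.
print(metric(preds, targets))
print(metric.compute())
```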
{"golden_diff": "diff --git a/src/torchmetrics/utilities/data.py b/src/torchmetrics/utilities/data.py\n--- a/src/torchmetrics/utilities/data.py\n+++ b/src/torchmetrics/utilities/data.py\n@@ -112,6 +112,14 @@\n return tensor_onehot.scatter_(1, index, 1.0)\n \n \n+def _top_k_with_half_precision_support(x: Tensor, k: int = 1, dim: int = 1) -> Tensor:\n+ \"\"\"torch.top_k does not support half precision on CPU.\"\"\"\n+ if x.dtype == torch.half and not x.is_cuda:\n+ idx = torch.argsort(x, dim=dim, descending=True)\n+ return idx.narrow(dim, 0, k)\n+ return x.topk(k=k, dim=dim).indices\n+\n+\n def select_topk(prob_tensor: Tensor, topk: int = 1, dim: int = 1) -> Tensor:\n \"\"\"Convert a probability tensor to binary by selecting top-k the highest entries.\n \n@@ -135,7 +143,7 @@\n if topk == 1: # argmax has better performance than topk\n topk_tensor = zeros.scatter(dim, prob_tensor.argmax(dim=dim, keepdim=True), 1.0)\n else:\n- topk_tensor = zeros.scatter(dim, prob_tensor.topk(k=topk, dim=dim).indices, 1.0)\n+ topk_tensor = zeros.scatter(dim, _top_k_with_half_precision_support(prob_tensor, k=topk, dim=dim), 1.0)\n return topk_tensor.int()\n", "issue": "Half precision doesn't work with top_k > 1\n## \ud83d\udc1b Bug\r\n\r\nWhen using predictions at half-precision, MulticlassAccuracy throws an error when using top_k > 1. Please see the code below:\r\n\r\n### To Reproduce\r\n\r\nSteps to reproduce the behavior...\r\n\r\n```py\r\nimport torch\r\nfrom torchmetrics.classification import MulticlassAccuracy\r\n\r\npreds = torch.tensor([[1, 0, 0]]).half()\r\ntargets = torch.tensor([1])\r\n\r\nmetric = MulticlassAccuracy(num_classes=3, top_k=2)\r\nmetric(preds, targets)\r\nmetric.compute()\r\n```\r\n\r\n\r\n### Environment\r\n\r\n- TorchMetrics version: 1.0.1\r\n- Python & PyTorch Version (e.g., 1.0): 3.10 and 2.0.1\r\n\r\n\n", "before_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport sys\nfrom typing import Any, Dict, List, Optional, Sequence, Tuple, Union\n\nimport torch\nfrom lightning_utilities import apply_to_collection\nfrom torch import Tensor\n\nfrom torchmetrics.utilities.exceptions import TorchMetricsUserWarning\nfrom torchmetrics.utilities.imports import _TORCH_GREATER_EQUAL_1_12, _XLA_AVAILABLE\nfrom torchmetrics.utilities.prints import rank_zero_warn\n\nMETRIC_EPS = 1e-6\n\n\ndef dim_zero_cat(x: Union[Tensor, List[Tensor]]) -> Tensor:\n \"\"\"Concatenation along the zero dimension.\"\"\"\n if isinstance(x, torch.Tensor):\n return x\n x = [y.unsqueeze(0) if y.numel() == 1 and y.ndim == 0 else y for y in x]\n if not x: # empty list\n raise ValueError(\"No samples to concatenate\")\n return torch.cat(x, dim=0)\n\n\ndef dim_zero_sum(x: Tensor) -> Tensor:\n \"\"\"Summation along the zero dimension.\"\"\"\n return torch.sum(x, dim=0)\n\n\ndef dim_zero_mean(x: Tensor) -> Tensor:\n \"\"\"Average along the zero dimension.\"\"\"\n return torch.mean(x, dim=0)\n\n\ndef dim_zero_max(x: Tensor) -> Tensor:\n \"\"\"Max along the zero 
dimension.\"\"\"\n return torch.max(x, dim=0).values\n\n\ndef dim_zero_min(x: Tensor) -> Tensor:\n \"\"\"Min along the zero dimension.\"\"\"\n return torch.min(x, dim=0).values\n\n\ndef _flatten(x: Sequence) -> list:\n \"\"\"Flatten list of list into single list.\"\"\"\n return [item for sublist in x for item in sublist]\n\n\ndef _flatten_dict(x: Dict) -> Tuple[Dict, bool]:\n \"\"\"Flatten dict of dicts into single dict and checking for duplicates in keys along the way.\"\"\"\n new_dict = {}\n duplicates = False\n for key, value in x.items():\n if isinstance(value, dict):\n for k, v in value.items():\n if k in new_dict:\n duplicates = True\n new_dict[k] = v\n else:\n if key in new_dict:\n duplicates = True\n new_dict[key] = value\n return new_dict, duplicates\n\n\ndef to_onehot(\n label_tensor: Tensor,\n num_classes: Optional[int] = None,\n) -> Tensor:\n \"\"\"Convert a dense label tensor to one-hot format.\n\n Args:\n label_tensor: dense label tensor, with shape [N, d1, d2, ...]\n num_classes: number of classes C\n\n Returns:\n A sparse label tensor with shape [N, C, d1, d2, ...]\n\n Example:\n >>> x = torch.tensor([1, 2, 3])\n >>> to_onehot(x)\n tensor([[0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]])\n\n \"\"\"\n if num_classes is None:\n num_classes = int(label_tensor.max().detach().item() + 1)\n\n tensor_onehot = torch.zeros(\n label_tensor.shape[0],\n num_classes,\n *label_tensor.shape[1:],\n dtype=label_tensor.dtype,\n device=label_tensor.device,\n )\n index = label_tensor.long().unsqueeze(1).expand_as(tensor_onehot)\n return tensor_onehot.scatter_(1, index, 1.0)\n\n\ndef select_topk(prob_tensor: Tensor, topk: int = 1, dim: int = 1) -> Tensor:\n \"\"\"Convert a probability tensor to binary by selecting top-k the highest entries.\n\n Args:\n prob_tensor: dense tensor of shape ``[..., C, ...]``, where ``C`` is in the\n position defined by the ``dim`` argument\n topk: number of the highest entries to turn into 1s\n dim: dimension on which to compare entries\n\n Returns:\n A binary tensor of the same shape as the input tensor of type ``torch.int32``\n\n Example:\n >>> x = torch.tensor([[1.1, 2.0, 3.0], [2.0, 1.0, 0.5]])\n >>> select_topk(x, topk=2)\n tensor([[0, 1, 1],\n [1, 1, 0]], dtype=torch.int32)\n\n \"\"\"\n zeros = torch.zeros_like(prob_tensor)\n if topk == 1: # argmax has better performance than topk\n topk_tensor = zeros.scatter(dim, prob_tensor.argmax(dim=dim, keepdim=True), 1.0)\n else:\n topk_tensor = zeros.scatter(dim, prob_tensor.topk(k=topk, dim=dim).indices, 1.0)\n return topk_tensor.int()\n\n\ndef to_categorical(x: Tensor, argmax_dim: int = 1) -> Tensor:\n \"\"\"Convert a tensor of probabilities to a dense label tensor.\n\n Args:\n x: probabilities to get the categorical label [N, d1, d2, ...]\n argmax_dim: dimension to apply\n\n Return:\n A tensor with categorical labels [N, d2, ...]\n\n Example:\n >>> x = torch.tensor([[0.2, 0.5], [0.9, 0.1]])\n >>> to_categorical(x)\n tensor([1, 0])\n\n \"\"\"\n return torch.argmax(x, dim=argmax_dim)\n\n\ndef _squeeze_scalar_element_tensor(x: Tensor) -> Tensor:\n return x.squeeze() if x.numel() == 1 else x\n\n\ndef _squeeze_if_scalar(data: Any) -> Any:\n return apply_to_collection(data, Tensor, _squeeze_scalar_element_tensor)\n\n\ndef _bincount(x: Tensor, minlength: Optional[int] = None) -> Tensor:\n \"\"\"Implement custom bincount.\n\n PyTorch currently does not support ``torch.bincount`` when running in deterministic mode on GPU or when running\n MPS devices or when running on XLA device. 
This implementation therefore falls back to using a combination of\n `torch.arange` and `torch.eq` in these scenarios. A small performance hit can expected and higher memory consumption\n as `[batch_size, mincount]` tensor needs to be initialized compared to native ``torch.bincount``.\n\n Args:\n x: tensor to count\n minlength: minimum length to count\n\n Returns:\n Number of occurrences for each unique element in x\n\n Example:\n >>> x = torch.tensor([0,0,0,1,1,2,2,2,2])\n >>> _bincount(x, minlength=3)\n tensor([3, 2, 4])\n\n \"\"\"\n if minlength is None:\n minlength = len(torch.unique(x))\n\n if torch.are_deterministic_algorithms_enabled() or _XLA_AVAILABLE or _TORCH_GREATER_EQUAL_1_12 and x.is_mps:\n mesh = torch.arange(minlength, device=x.device).repeat(len(x), 1)\n return torch.eq(x.reshape(-1, 1), mesh).sum(dim=0)\n\n return torch.bincount(x, minlength=minlength)\n\n\ndef _cumsum(x: Tensor, dim: Optional[int] = 0, dtype: Optional[torch.dtype] = None) -> Tensor:\n if torch.are_deterministic_algorithms_enabled() and x.is_cuda and x.is_floating_point() and sys.platform != \"win32\":\n rank_zero_warn(\n \"You are trying to use a metric in deterministic mode on GPU that uses `torch.cumsum`, which is currently \"\n \"not supported. The tensor will be copied to the CPU memory to compute it and then copied back to GPU. \"\n \"Expect some slowdowns.\",\n TorchMetricsUserWarning,\n )\n return x.cpu().cumsum(dim=dim, dtype=dtype).cuda()\n return torch.cumsum(x, dim=dim, dtype=dtype)\n\n\ndef _flexible_bincount(x: Tensor) -> Tensor:\n \"\"\"Similar to `_bincount`, but works also with tensor that do not contain continuous values.\n\n Args:\n x: tensor to count\n\n Returns:\n Number of occurrences for each unique element in x\n\n \"\"\"\n # make sure elements in x start from 0\n x = x - x.min()\n unique_x = torch.unique(x)\n\n output = _bincount(x, minlength=torch.max(unique_x) + 1) # type: ignore[arg-type]\n # remove zeros from output tensor\n return output[unique_x]\n\n\ndef allclose(tensor1: Tensor, tensor2: Tensor) -> bool:\n \"\"\"Wrap torch.allclose to be robust towards dtype difference.\"\"\"\n if tensor1.dtype != tensor2.dtype:\n tensor2 = tensor2.to(dtype=tensor1.dtype)\n return torch.allclose(tensor1, tensor2)\n", "path": "src/torchmetrics/utilities/data.py"}]}
3,372
354
gh_patches_debug_8032
rasdani/github-patches
git_diff
elastic__ecs-1115
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [Bug][ECS 1.7] Strip `index` attributes on `wildcard` mappings **Description of the issue:** The 1.7 experimental `wildcard` mappings need to have the `index` attribute stripped from them. The one that was causing problems for me is here: https://github.com/elastic/ecs/blob/3edb6f2cf2c657f88875011625cd1801709c5d5b/experimental/generated/beats/fields.ecs.yml#L1154-L1163 Here's the error I'm getting from Elasticsearch: ``` Failed to parse mapping: unknown parameter [index] on mapper [stack_trace] of type [wildcard] ``` </issue> <code> [start of scripts/schema/cleaner.py] 1 import copy 2 3 from generators import ecs_helpers 4 from schema import visitor 5 6 # This script performs a few cleanup functions in place, within the deeply nested 7 # 'fields' structure passed to `clean(fields)`. 8 # 9 # What happens here: 10 # 11 # - check that mandatory attributes are present, without which we can't do much. 12 # - cleans things up, like stripping spaces, sorting arrays 13 # - makes lots of defaults explicit 14 # - pre-calculate a few additional helpful fields 15 # - converts shorthands into full representation (e.g. reuse locations) 16 # 17 # This script only deals with field sets themselves and the fields defined 18 # inside them. It doesn't perform field reuse, and therefore doesn't 19 # deal with final field names either. 20 21 22 def clean(fields, strict=False): 23 global strict_mode 24 strict_mode = strict 25 visitor.visit_fields(fields, fieldset_func=schema_cleanup, field_func=field_cleanup) 26 27 28 # Schema level cleanup 29 30 31 def schema_cleanup(schema): 32 # Sanity check first 33 schema_mandatory_attributes(schema) 34 # trailing space cleanup 35 ecs_helpers.dict_clean_string_values(schema['schema_details']) 36 ecs_helpers.dict_clean_string_values(schema['field_details']) 37 # Some defaults 38 schema['schema_details'].setdefault('group', 2) 39 schema['schema_details'].setdefault('root', False) 40 schema['field_details'].setdefault('type', 'group') 41 schema['field_details'].setdefault('short', schema['field_details']['description']) 42 if 'reusable' in schema['schema_details']: 43 # order to perform chained reuses. Set to 1 if it needs to happen earlier. 44 schema['schema_details']['reusable'].setdefault('order', 2) 45 # Precalculate stuff. Those can't be set in the YAML. 46 if schema['schema_details']['root']: 47 schema['schema_details']['prefix'] = '' 48 else: 49 schema['schema_details']['prefix'] = schema['field_details']['name'] + '.' 
50 normalize_reuse_notation(schema) 51 # Final validity check if in strict mode 52 schema_assertions_and_warnings(schema) 53 54 55 SCHEMA_MANDATORY_ATTRIBUTES = ['name', 'title', 'description'] 56 57 58 def schema_mandatory_attributes(schema): 59 '''Ensures for the presence of the mandatory schema attributes and raises if any are missing''' 60 current_schema_attributes = sorted(list(schema['field_details'].keys()) + 61 list(schema['schema_details'].keys())) 62 missing_attributes = ecs_helpers.list_subtract(SCHEMA_MANDATORY_ATTRIBUTES, current_schema_attributes) 63 if len(missing_attributes) > 0: 64 msg = "Schema {} is missing the following mandatory attributes: {}.\nFound these: {}".format( 65 schema['field_details']['name'], ', '.join(missing_attributes), current_schema_attributes) 66 raise ValueError(msg) 67 if 'reusable' in schema['schema_details']: 68 reuse_attributes = sorted(schema['schema_details']['reusable'].keys()) 69 missing_reuse_attributes = ecs_helpers.list_subtract(['expected', 'top_level'], reuse_attributes) 70 if len(missing_reuse_attributes) > 0: 71 msg = "Reusable schema {} is missing the following reuse attributes: {}.\nFound these: {}".format( 72 schema['field_details']['name'], ', '.join(missing_reuse_attributes), reuse_attributes) 73 raise ValueError(msg) 74 75 76 def schema_assertions_and_warnings(schema): 77 '''Additional checks on a fleshed out schema''' 78 single_line_short_description(schema, strict=strict_mode) 79 80 81 def normalize_reuse_notation(schema): 82 """ 83 Replace single word reuse shorthands from the schema YAMLs with the explicit {at: , as:} notation. 84 85 When marking "user" as reusable under "destination" with the shorthand entry 86 `- destination`, this is expanded to the complete entry 87 `- { "at": "destination", "as": "user" }`. 88 The field set is thus nested at `destination.user.*`, with fields such as `destination.user.name`. 89 90 The dictionary notation enables nesting a field set as a different name. 91 An example is nesting "process" fields to capture parent process details 92 at `process.parent.*`. 93 The dictionary notation `- { "at": "process", "as": "parent" }` will yield 94 fields such as `process.parent.pid`. 95 """ 96 if 'reusable' not in schema['schema_details']: 97 return 98 schema_name = schema['field_details']['name'] 99 reuse_entries = [] 100 for reuse_entry in schema['schema_details']['reusable']['expected']: 101 if type(reuse_entry) is dict: # Already explicit 102 if 'at' in reuse_entry and 'as' in reuse_entry: 103 explicit_entry = reuse_entry 104 else: 105 raise ValueError("When specifying reusable expected locations for {} " + 106 "with the dictionary notation, keys 'as' and 'at' are required. " + 107 "Got {}.".format(schema_name, reuse_entry)) 108 else: # Make it explicit 109 explicit_entry = {'at': reuse_entry, 'as': schema_name} 110 explicit_entry['full'] = explicit_entry['at'] + '.' 
+ explicit_entry['as'] 111 reuse_entries.append(explicit_entry) 112 schema['schema_details']['reusable']['expected'] = reuse_entries 113 114 115 # Field level cleanup 116 117 118 def field_cleanup(field): 119 field_mandatory_attributes(field) 120 if ecs_helpers.is_intermediate(field): 121 return 122 ecs_helpers.dict_clean_string_values(field['field_details']) 123 if 'allowed_values' in field['field_details']: 124 for allowed_value in field['field_details']['allowed_values']: 125 ecs_helpers.dict_clean_string_values(allowed_value) 126 field_defaults(field) 127 field_assertions_and_warnings(field) 128 129 130 def field_defaults(field): 131 field['field_details'].setdefault('short', field['field_details']['description']) 132 field['field_details'].setdefault('normalize', []) 133 field_or_multi_field_datatype_defaults(field['field_details']) 134 if 'multi_fields' in field['field_details']: 135 for mf in field['field_details']['multi_fields']: 136 field_or_multi_field_datatype_defaults(mf) 137 if 'name' not in mf: 138 mf['name'] = mf['type'] 139 140 141 def field_or_multi_field_datatype_defaults(field_details): 142 '''Sets datatype-related defaults on a canonical field or multi-field entries.''' 143 if field_details['type'] == 'keyword': 144 field_details.setdefault('ignore_above', 1024) 145 if field_details['type'] == 'text': 146 field_details.setdefault('norms', False) 147 if 'index' in field_details and not field_details['index']: 148 field_details.setdefault('doc_values', False) 149 150 151 FIELD_MANDATORY_ATTRIBUTES = ['name', 'description', 'type', 'level'] 152 ACCEPTABLE_FIELD_LEVELS = ['core', 'extended', 'custom'] 153 154 155 def field_mandatory_attributes(field): 156 '''Ensures for the presence of the mandatory field attributes and raises if any are missing''' 157 if ecs_helpers.is_intermediate(field): 158 return 159 current_field_attributes = sorted(field['field_details'].keys()) 160 missing_attributes = ecs_helpers.list_subtract(FIELD_MANDATORY_ATTRIBUTES, current_field_attributes) 161 162 # `alias` fields require a target `path` attribute. 163 if field['field_details'].get('type') == 'alias' and 'path' not in current_field_attributes: 164 missing_attributes.append('path') 165 # `scaled_float` fields require a `scaling_factor` attribute. 
166 if field['field_details'].get('type') == 'scaled_float' and 'scaling_factor' not in current_field_attributes: 167 missing_attributes.append('scaling_factor') 168 169 if len(missing_attributes) > 0: 170 msg = "Field is missing the following mandatory attributes: {}.\nFound these: {}.\nField details: {}" 171 raise ValueError(msg.format(', '.join(missing_attributes), 172 current_field_attributes, field)) 173 174 175 def field_assertions_and_warnings(field): 176 '''Additional checks on a fleshed out field''' 177 if not ecs_helpers.is_intermediate(field): 178 # check short description length if in strict mode 179 single_line_short_description(field, strict=strict_mode) 180 check_example_value(field, strict=strict_mode) 181 if field['field_details']['level'] not in ACCEPTABLE_FIELD_LEVELS: 182 msg = "Invalid level for field '{}'.\nValue: {}\nAcceptable values: {}".format( 183 field['field_details']['name'], field['field_details']['level'], 184 ACCEPTABLE_FIELD_LEVELS) 185 raise ValueError(msg) 186 187 # Common 188 189 190 SHORT_LIMIT = 120 191 192 193 def single_line_short_description(schema_or_field, strict=True): 194 short_length = len(schema_or_field['field_details']['short']) 195 if "\n" in schema_or_field['field_details']['short'] or short_length > SHORT_LIMIT: 196 msg = "Short descriptions must be single line, and under {} characters (current length: {}).\n".format( 197 SHORT_LIMIT, short_length) 198 msg += "Offending field or field set: {}\nShort description:\n {}".format( 199 schema_or_field['field_details']['name'], 200 schema_or_field['field_details']['short']) 201 if strict: 202 raise ValueError(msg) 203 else: 204 ecs_helpers.strict_warning(msg) 205 206 207 def check_example_value(field, strict=True): 208 """ 209 Checks if value of the example field is of type list or dict. 210 Fails or warns (depending on strict mode) if so. 211 """ 212 example_value = field['field_details'].get('example', None) 213 if isinstance(example_value, (list, dict)): 214 name = field['field_details']['name'] 215 msg = f"Example value for field `{name}` contains an object or array which must be quoted to avoid YAML interpretation." 216 if strict: 217 raise ValueError(msg) 218 else: 219 ecs_helpers.strict_warning(msg) 220 [end of scripts/schema/cleaner.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scripts/schema/cleaner.py b/scripts/schema/cleaner.py --- a/scripts/schema/cleaner.py +++ b/scripts/schema/cleaner.py @@ -144,6 +144,9 @@ field_details.setdefault('ignore_above', 1024) if field_details['type'] == 'text': field_details.setdefault('norms', False) + # wildcard needs the index param stripped + if field_details['type'] == 'wildcard': + field_details.pop('index', None) if 'index' in field_details and not field_details['index']: field_details.setdefault('doc_values', False)
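A short sketch of what the patched `field_or_multi_field_datatype_defaults` now does for `wildcard` fields; the field entry below is a made-up minimal example, and the import assumes `scripts/` is on the Python path as the repository's generators expect:

```py
from schema.cleaner import field_or_multi_field_datatype_defaults

# Hypothetical minimal field entry: a wildcard field carrying an index attribute.
field_details = {'type': 'wildcard', 'index': False}
field_or_multi_field_datatype_defaults(field_details)

# The index parameter is stripped, so generated artifacts no longer pass the
# unsupported [index] option to Elasticsearch wildcard mappers.
print(field_details)  # {'type': 'wildcard'}
```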
{"golden_diff": "diff --git a/scripts/schema/cleaner.py b/scripts/schema/cleaner.py\n--- a/scripts/schema/cleaner.py\n+++ b/scripts/schema/cleaner.py\n@@ -144,6 +144,9 @@\n field_details.setdefault('ignore_above', 1024)\n if field_details['type'] == 'text':\n field_details.setdefault('norms', False)\n+ # wildcard needs the index param stripped\n+ if field_details['type'] == 'wildcard':\n+ field_details.pop('index', None)\n if 'index' in field_details and not field_details['index']:\n field_details.setdefault('doc_values', False)\n", "issue": "[Bug][ECS 1.7] Strip `index` attributes on `wildcard` mappings\n**Description of the issue:**\r\n\r\nThe 1.7 experimental `wildcard` mappings need to have the `index` attribute stripped from them. The one that was causing problems for me is here:\r\n\r\nhttps://github.com/elastic/ecs/blob/3edb6f2cf2c657f88875011625cd1801709c5d5b/experimental/generated/beats/fields.ecs.yml#L1154-L1163\r\n\r\nHere's the error I'm getting from Elasticsearch:\r\n\r\n```\r\nFailed to parse mapping: unknown parameter [index] on mapper [stack_trace] of type [wildcard]\r\n```\r\n\n", "before_files": [{"content": "import copy\n\nfrom generators import ecs_helpers\nfrom schema import visitor\n\n# This script performs a few cleanup functions in place, within the deeply nested\n# 'fields' structure passed to `clean(fields)`.\n#\n# What happens here:\n#\n# - check that mandatory attributes are present, without which we can't do much.\n# - cleans things up, like stripping spaces, sorting arrays\n# - makes lots of defaults explicit\n# - pre-calculate a few additional helpful fields\n# - converts shorthands into full representation (e.g. reuse locations)\n#\n# This script only deals with field sets themselves and the fields defined\n# inside them. It doesn't perform field reuse, and therefore doesn't\n# deal with final field names either.\n\n\ndef clean(fields, strict=False):\n global strict_mode\n strict_mode = strict\n visitor.visit_fields(fields, fieldset_func=schema_cleanup, field_func=field_cleanup)\n\n\n# Schema level cleanup\n\n\ndef schema_cleanup(schema):\n # Sanity check first\n schema_mandatory_attributes(schema)\n # trailing space cleanup\n ecs_helpers.dict_clean_string_values(schema['schema_details'])\n ecs_helpers.dict_clean_string_values(schema['field_details'])\n # Some defaults\n schema['schema_details'].setdefault('group', 2)\n schema['schema_details'].setdefault('root', False)\n schema['field_details'].setdefault('type', 'group')\n schema['field_details'].setdefault('short', schema['field_details']['description'])\n if 'reusable' in schema['schema_details']:\n # order to perform chained reuses. Set to 1 if it needs to happen earlier.\n schema['schema_details']['reusable'].setdefault('order', 2)\n # Precalculate stuff. 
Those can't be set in the YAML.\n if schema['schema_details']['root']:\n schema['schema_details']['prefix'] = ''\n else:\n schema['schema_details']['prefix'] = schema['field_details']['name'] + '.'\n normalize_reuse_notation(schema)\n # Final validity check if in strict mode\n schema_assertions_and_warnings(schema)\n\n\nSCHEMA_MANDATORY_ATTRIBUTES = ['name', 'title', 'description']\n\n\ndef schema_mandatory_attributes(schema):\n '''Ensures for the presence of the mandatory schema attributes and raises if any are missing'''\n current_schema_attributes = sorted(list(schema['field_details'].keys()) +\n list(schema['schema_details'].keys()))\n missing_attributes = ecs_helpers.list_subtract(SCHEMA_MANDATORY_ATTRIBUTES, current_schema_attributes)\n if len(missing_attributes) > 0:\n msg = \"Schema {} is missing the following mandatory attributes: {}.\\nFound these: {}\".format(\n schema['field_details']['name'], ', '.join(missing_attributes), current_schema_attributes)\n raise ValueError(msg)\n if 'reusable' in schema['schema_details']:\n reuse_attributes = sorted(schema['schema_details']['reusable'].keys())\n missing_reuse_attributes = ecs_helpers.list_subtract(['expected', 'top_level'], reuse_attributes)\n if len(missing_reuse_attributes) > 0:\n msg = \"Reusable schema {} is missing the following reuse attributes: {}.\\nFound these: {}\".format(\n schema['field_details']['name'], ', '.join(missing_reuse_attributes), reuse_attributes)\n raise ValueError(msg)\n\n\ndef schema_assertions_and_warnings(schema):\n '''Additional checks on a fleshed out schema'''\n single_line_short_description(schema, strict=strict_mode)\n\n\ndef normalize_reuse_notation(schema):\n \"\"\"\n Replace single word reuse shorthands from the schema YAMLs with the explicit {at: , as:} notation.\n\n When marking \"user\" as reusable under \"destination\" with the shorthand entry\n `- destination`, this is expanded to the complete entry\n `- { \"at\": \"destination\", \"as\": \"user\" }`.\n The field set is thus nested at `destination.user.*`, with fields such as `destination.user.name`.\n\n The dictionary notation enables nesting a field set as a different name.\n An example is nesting \"process\" fields to capture parent process details\n at `process.parent.*`.\n The dictionary notation `- { \"at\": \"process\", \"as\": \"parent\" }` will yield\n fields such as `process.parent.pid`.\n \"\"\"\n if 'reusable' not in schema['schema_details']:\n return\n schema_name = schema['field_details']['name']\n reuse_entries = []\n for reuse_entry in schema['schema_details']['reusable']['expected']:\n if type(reuse_entry) is dict: # Already explicit\n if 'at' in reuse_entry and 'as' in reuse_entry:\n explicit_entry = reuse_entry\n else:\n raise ValueError(\"When specifying reusable expected locations for {} \" +\n \"with the dictionary notation, keys 'as' and 'at' are required. \" +\n \"Got {}.\".format(schema_name, reuse_entry))\n else: # Make it explicit\n explicit_entry = {'at': reuse_entry, 'as': schema_name}\n explicit_entry['full'] = explicit_entry['at'] + '.' 
+ explicit_entry['as']\n reuse_entries.append(explicit_entry)\n schema['schema_details']['reusable']['expected'] = reuse_entries\n\n\n# Field level cleanup\n\n\ndef field_cleanup(field):\n field_mandatory_attributes(field)\n if ecs_helpers.is_intermediate(field):\n return\n ecs_helpers.dict_clean_string_values(field['field_details'])\n if 'allowed_values' in field['field_details']:\n for allowed_value in field['field_details']['allowed_values']:\n ecs_helpers.dict_clean_string_values(allowed_value)\n field_defaults(field)\n field_assertions_and_warnings(field)\n\n\ndef field_defaults(field):\n field['field_details'].setdefault('short', field['field_details']['description'])\n field['field_details'].setdefault('normalize', [])\n field_or_multi_field_datatype_defaults(field['field_details'])\n if 'multi_fields' in field['field_details']:\n for mf in field['field_details']['multi_fields']:\n field_or_multi_field_datatype_defaults(mf)\n if 'name' not in mf:\n mf['name'] = mf['type']\n\n\ndef field_or_multi_field_datatype_defaults(field_details):\n '''Sets datatype-related defaults on a canonical field or multi-field entries.'''\n if field_details['type'] == 'keyword':\n field_details.setdefault('ignore_above', 1024)\n if field_details['type'] == 'text':\n field_details.setdefault('norms', False)\n if 'index' in field_details and not field_details['index']:\n field_details.setdefault('doc_values', False)\n\n\nFIELD_MANDATORY_ATTRIBUTES = ['name', 'description', 'type', 'level']\nACCEPTABLE_FIELD_LEVELS = ['core', 'extended', 'custom']\n\n\ndef field_mandatory_attributes(field):\n '''Ensures for the presence of the mandatory field attributes and raises if any are missing'''\n if ecs_helpers.is_intermediate(field):\n return\n current_field_attributes = sorted(field['field_details'].keys())\n missing_attributes = ecs_helpers.list_subtract(FIELD_MANDATORY_ATTRIBUTES, current_field_attributes)\n\n # `alias` fields require a target `path` attribute.\n if field['field_details'].get('type') == 'alias' and 'path' not in current_field_attributes:\n missing_attributes.append('path')\n # `scaled_float` fields require a `scaling_factor` attribute.\n if field['field_details'].get('type') == 'scaled_float' and 'scaling_factor' not in current_field_attributes:\n missing_attributes.append('scaling_factor')\n\n if len(missing_attributes) > 0:\n msg = \"Field is missing the following mandatory attributes: {}.\\nFound these: {}.\\nField details: {}\"\n raise ValueError(msg.format(', '.join(missing_attributes),\n current_field_attributes, field))\n\n\ndef field_assertions_and_warnings(field):\n '''Additional checks on a fleshed out field'''\n if not ecs_helpers.is_intermediate(field):\n # check short description length if in strict mode\n single_line_short_description(field, strict=strict_mode)\n check_example_value(field, strict=strict_mode)\n if field['field_details']['level'] not in ACCEPTABLE_FIELD_LEVELS:\n msg = \"Invalid level for field '{}'.\\nValue: {}\\nAcceptable values: {}\".format(\n field['field_details']['name'], field['field_details']['level'],\n ACCEPTABLE_FIELD_LEVELS)\n raise ValueError(msg)\n\n# Common\n\n\nSHORT_LIMIT = 120\n\n\ndef single_line_short_description(schema_or_field, strict=True):\n short_length = len(schema_or_field['field_details']['short'])\n if \"\\n\" in schema_or_field['field_details']['short'] or short_length > SHORT_LIMIT:\n msg = \"Short descriptions must be single line, and under {} characters (current length: {}).\\n\".format(\n SHORT_LIMIT, short_length)\n msg += 
\"Offending field or field set: {}\\nShort description:\\n {}\".format(\n schema_or_field['field_details']['name'],\n schema_or_field['field_details']['short'])\n if strict:\n raise ValueError(msg)\n else:\n ecs_helpers.strict_warning(msg)\n\n\ndef check_example_value(field, strict=True):\n \"\"\"\n Checks if value of the example field is of type list or dict.\n Fails or warns (depending on strict mode) if so.\n \"\"\"\n example_value = field['field_details'].get('example', None)\n if isinstance(example_value, (list, dict)):\n name = field['field_details']['name']\n msg = f\"Example value for field `{name}` contains an object or array which must be quoted to avoid YAML interpretation.\"\n if strict:\n raise ValueError(msg)\n else:\n ecs_helpers.strict_warning(msg)\n", "path": "scripts/schema/cleaner.py"}]}
3,342
143
gh_patches_debug_33132
rasdani/github-patches
git_diff
huggingface__transformers-2065
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [CamemBert] Tokenizer function add_tokens doesn't work ## ❓ Questions & Help Hi, I am trying to add new tokens to the CamemBert tokenizer, but when I run the function tokenizer.add_tokens, it doesn't seem to add any token at all : `from transformers import CamembertTokenizer` `tokenizer = CamembertTokenizer.from_pretrained('camembert-base')` `tokenizer.add_tokens(['notfrenchword'])` `Out[12]: 0` Whereas with Bert model it works perfectly. Is this a bug or am I doing something wrong ? Thanks </issue> <code> [start of transformers/tokenization_camembert.py] 1 # coding=utf-8 2 # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team. 3 # 4 # Licensed under the Apache License, Version 2.0 (the "License"); 5 # you may not use this file except in compliance with the License. 6 # You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License 15 """ Tokenization classes for Camembert model.""" 16 from __future__ import (absolute_import, division, print_function, 17 unicode_literals) 18 19 import logging 20 import os 21 from shutil import copyfile 22 23 import sentencepiece as spm 24 from transformers.tokenization_utils import PreTrainedTokenizer 25 26 logger = logging.getLogger(__name__) 27 28 VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'} 29 30 PRETRAINED_VOCAB_FILES_MAP = { 31 'vocab_file': 32 { 33 'camembert-base': "https://s3.amazonaws.com/models.huggingface.co/bert/camembert-base-sentencepiece.bpe.model", 34 } 35 } 36 37 PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { 38 'camembert-base': None, 39 } 40 41 class CamembertTokenizer(PreTrainedTokenizer): 42 """ 43 Adapted from RobertaTokenizer and XLNetTokenizer 44 SentencePiece based tokenizer. 
Peculiarities: 45 46 - requires `SentencePiece <https://github.com/google/sentencepiece>`_ 47 """ 48 vocab_files_names = VOCAB_FILES_NAMES 49 pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP 50 max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES 51 52 def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", 53 cls_token="<s>", unk_token="<unk>", pad_token='<pad>', mask_token='<mask>', 54 additional_special_tokens=['<s>NOTUSED', '<s>NOTUSED'], **kwargs): 55 super(CamembertTokenizer, self).__init__(max_len=512, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, 56 sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, 57 mask_token=mask_token, additional_special_tokens=additional_special_tokens, 58 **kwargs) 59 self.max_len_single_sentence = self.max_len - 2 # take into account special tokens 60 self.max_len_sentences_pair = self.max_len - 4 # take into account special tokens 61 self.sp_model = spm.SentencePieceProcessor() 62 self.sp_model.Load(str(vocab_file)) 63 self.vocab_file = vocab_file 64 # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual 65 # sentencepiece vocabulary (this is the case for <s> and </s> 66 self.fairseq_tokens_to_ids = {'<s>NOTUSED': 0, '<pad>': 1, '</s>NOTUSED': 2, '<unk>': 3} 67 self.fairseq_offset = len(self.fairseq_tokens_to_ids) 68 self.fairseq_tokens_to_ids['<mask>'] = len(self.sp_model) + len(self.fairseq_tokens_to_ids) 69 self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()} 70 71 def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): 72 """ 73 Build model inputs from a sequence or a pair of sequence for sequence classification tasks 74 by concatenating and adding special tokens. 75 A RoBERTa sequence has the following format: 76 single sequence: <s> X </s> 77 pair of sequences: <s> A </s></s> B </s> 78 """ 79 if token_ids_1 is None: 80 return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] 81 cls = [self.cls_token_id] 82 sep = [self.sep_token_id] 83 return cls + token_ids_0 + sep + sep + token_ids_1 + sep 84 85 def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False): 86 """ 87 Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding 88 special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods. 89 90 Args: 91 token_ids_0: list of ids (must not contain special tokens) 92 token_ids_1: Optional list of ids (must not contain special tokens), necessary when fetching sequence ids 93 for sequence pairs 94 already_has_special_tokens: (default False) Set to True if the token list is already formated with 95 special tokens for the model 96 97 Returns: 98 A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. 
99 """ 100 if already_has_special_tokens: 101 if token_ids_1 is not None: 102 raise ValueError("You should not supply a second sequence if the provided sequence of " 103 "ids is already formated with special tokens for the model.") 104 return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0)) 105 106 if token_ids_1 is None: 107 return [1] + ([0] * len(token_ids_0)) + [1] 108 return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1] 109 110 def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None): 111 """ 112 Creates a mask from the two sequences passed to be used in a sequence-pair classification task. 113 A RoBERTa sequence pair mask has the following format: 114 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 115 | first sequence | second sequence 116 117 if token_ids_1 is None, only returns the first portion of the mask (0's). 118 """ 119 sep = [self.sep_token_id] 120 cls = [self.cls_token_id] 121 122 if token_ids_1 is None: 123 return len(cls + token_ids_0 + sep) * [0] 124 return len(cls + token_ids_0 + sep + sep) * [0] + len(token_ids_1 + sep) * [1] 125 126 @property 127 def vocab_size(self): 128 return self.fairseq_offset + len(self.sp_model) 129 130 def _tokenize(self, text): 131 return self.sp_model.EncodeAsPieces(text) 132 133 def _convert_token_to_id(self, token): 134 """ Converts a token (str/unicode) in an id using the vocab. """ 135 if token in self.fairseq_tokens_to_ids: 136 return self.fairseq_tokens_to_ids[token] 137 return self.fairseq_offset + self.sp_model.PieceToId(token) 138 139 def _convert_id_to_token(self, index): 140 """Converts an index (integer) in a token (string/unicode) using the vocab.""" 141 if index in self.fairseq_ids_to_tokens: 142 return self.fairseq_ids_to_tokens[index] 143 return self.sp_model.IdToPiece(index - self.fairseq_offset) 144 145 def save_vocabulary(self, save_directory): 146 """ Save the sentencepiece vocabulary (copy original file) and special tokens file 147 to a directory. 148 """ 149 if not os.path.isdir(save_directory): 150 logger.error("Vocabulary path ({}) should be a directory".format(save_directory)) 151 return 152 out_vocab_file = os.path.join(save_directory, VOCAB_FILES_NAMES['vocab_file']) 153 154 if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): 155 copyfile(self.vocab_file, out_vocab_file) 156 157 return (out_vocab_file,) 158 [end of transformers/tokenization_camembert.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/transformers/tokenization_camembert.py b/transformers/tokenization_camembert.py --- a/transformers/tokenization_camembert.py +++ b/transformers/tokenization_camembert.py @@ -51,7 +51,7 @@ def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token='<pad>', mask_token='<mask>', - additional_special_tokens=['<s>NOTUSED', '<s>NOTUSED'], **kwargs): + additional_special_tokens=['<s>NOTUSED', '</s>NOTUSED'], **kwargs): super(CamembertTokenizer, self).__init__(max_len=512, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, @@ -125,7 +125,7 @@ @property def vocab_size(self): - return self.fairseq_offset + len(self.sp_model) + return len(self.fairseq_tokens_to_ids) + len(self.sp_model) def _tokenize(self, text): return self.sp_model.EncodeAsPieces(text) @@ -134,6 +134,9 @@ """ Converts a token (str/unicode) in an id using the vocab. """ if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] + elif self.sp_model.PieceToId(token) == 0: + # Convert sentence piece unk token to fairseq unk token index + return self.unk_token_id return self.fairseq_offset + self.sp_model.PieceToId(token) def _convert_id_to_token(self, index):
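With the patch above, out-of-vocabulary pieces resolve to the unk token id instead of a shifted SentencePiece id, so the reproduction from the issue should now register the token; the return value of 1 (the count of newly added tokens) is an expectation, not something stated in the record:

```py
from transformers import CamembertTokenizer

tokenizer = CamembertTokenizer.from_pretrained('camembert-base')

# 'notfrenchword' now converts to the unk id, so add_tokens treats it as new.
print(tokenizer.add_tokens(['notfrenchword']))  # expected: 1
```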
{"golden_diff": "diff --git a/transformers/tokenization_camembert.py b/transformers/tokenization_camembert.py\n--- a/transformers/tokenization_camembert.py\n+++ b/transformers/tokenization_camembert.py\n@@ -51,7 +51,7 @@\n \n def __init__(self, vocab_file, bos_token=\"<s>\", eos_token=\"</s>\", sep_token=\"</s>\",\n cls_token=\"<s>\", unk_token=\"<unk>\", pad_token='<pad>', mask_token='<mask>',\n- additional_special_tokens=['<s>NOTUSED', '<s>NOTUSED'], **kwargs):\n+ additional_special_tokens=['<s>NOTUSED', '</s>NOTUSED'], **kwargs):\n super(CamembertTokenizer, self).__init__(max_len=512, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token,\n sep_token=sep_token, cls_token=cls_token, pad_token=pad_token,\n mask_token=mask_token, additional_special_tokens=additional_special_tokens,\n@@ -125,7 +125,7 @@\n \n @property\n def vocab_size(self):\n- return self.fairseq_offset + len(self.sp_model)\n+ return len(self.fairseq_tokens_to_ids) + len(self.sp_model)\n \n def _tokenize(self, text):\n return self.sp_model.EncodeAsPieces(text)\n@@ -134,6 +134,9 @@\n \"\"\" Converts a token (str/unicode) in an id using the vocab. \"\"\"\n if token in self.fairseq_tokens_to_ids:\n return self.fairseq_tokens_to_ids[token]\n+ elif self.sp_model.PieceToId(token) == 0:\n+ # Convert sentence piece unk token to fairseq unk token index\n+ return self.unk_token_id\n return self.fairseq_offset + self.sp_model.PieceToId(token)\n \n def _convert_id_to_token(self, index):\n", "issue": "[CamemBert] Tokenizer function add_tokens doesn't work\n## \u2753 Questions & Help\r\n\r\nHi, \r\n\r\nI am trying to add new tokens to the CamemBert tokenizer, but when I run the function tokenizer.add_tokens, it doesn't seem to add any token at all :\r\n\r\n`from transformers import CamembertTokenizer`\r\n`tokenizer = CamembertTokenizer.from_pretrained('camembert-base')`\r\n`tokenizer.add_tokens(['notfrenchword'])`\r\n\r\n`Out[12]: 0`\r\n\r\nWhereas with Bert model it works perfectly. Is this a bug or am I doing something wrong ?\r\n\r\nThanks\r\n\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. 
team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License\n\"\"\" Tokenization classes for Camembert model.\"\"\"\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport logging\nimport os\nfrom shutil import copyfile\n\nimport sentencepiece as spm\nfrom transformers.tokenization_utils import PreTrainedTokenizer\n\nlogger = logging.getLogger(__name__)\n\nVOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}\n\nPRETRAINED_VOCAB_FILES_MAP = {\n 'vocab_file':\n {\n 'camembert-base': \"https://s3.amazonaws.com/models.huggingface.co/bert/camembert-base-sentencepiece.bpe.model\",\n }\n}\n\nPRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {\n 'camembert-base': None,\n}\n\nclass CamembertTokenizer(PreTrainedTokenizer):\n \"\"\"\n Adapted from RobertaTokenizer and XLNetTokenizer\n SentencePiece based tokenizer. Peculiarities:\n\n - requires `SentencePiece <https://github.com/google/sentencepiece>`_\n \"\"\"\n vocab_files_names = VOCAB_FILES_NAMES\n pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP\n max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES\n\n def __init__(self, vocab_file, bos_token=\"<s>\", eos_token=\"</s>\", sep_token=\"</s>\",\n cls_token=\"<s>\", unk_token=\"<unk>\", pad_token='<pad>', mask_token='<mask>',\n additional_special_tokens=['<s>NOTUSED', '<s>NOTUSED'], **kwargs):\n super(CamembertTokenizer, self).__init__(max_len=512, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token,\n sep_token=sep_token, cls_token=cls_token, pad_token=pad_token,\n mask_token=mask_token, additional_special_tokens=additional_special_tokens,\n **kwargs)\n self.max_len_single_sentence = self.max_len - 2 # take into account special tokens\n self.max_len_sentences_pair = self.max_len - 4 # take into account special tokens\n self.sp_model = spm.SentencePieceProcessor()\n self.sp_model.Load(str(vocab_file))\n self.vocab_file = vocab_file\n # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual\n # sentencepiece vocabulary (this is the case for <s> and </s>\n self.fairseq_tokens_to_ids = {'<s>NOTUSED': 0, '<pad>': 1, '</s>NOTUSED': 2, '<unk>': 3}\n self.fairseq_offset = len(self.fairseq_tokens_to_ids)\n self.fairseq_tokens_to_ids['<mask>'] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)\n self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}\n\n def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):\n \"\"\"\n Build model inputs from a sequence or a pair of sequence for sequence classification tasks\n by concatenating and adding special tokens.\n A RoBERTa sequence has the following format:\n single sequence: <s> X </s>\n pair of sequences: <s> A </s></s> B </s>\n \"\"\"\n if token_ids_1 is None:\n return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]\n cls = [self.cls_token_id]\n sep = [self.sep_token_id]\n return cls + token_ids_0 + sep + sep + token_ids_1 + sep\n\n def get_special_tokens_mask(self, token_ids_0, 
token_ids_1=None, already_has_special_tokens=False):\n \"\"\"\n Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding\n special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods.\n\n Args:\n token_ids_0: list of ids (must not contain special tokens)\n token_ids_1: Optional list of ids (must not contain special tokens), necessary when fetching sequence ids\n for sequence pairs\n already_has_special_tokens: (default False) Set to True if the token list is already formated with\n special tokens for the model\n\n Returns:\n A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.\n \"\"\"\n if already_has_special_tokens:\n if token_ids_1 is not None:\n raise ValueError(\"You should not supply a second sequence if the provided sequence of \"\n \"ids is already formated with special tokens for the model.\")\n return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0))\n\n if token_ids_1 is None:\n return [1] + ([0] * len(token_ids_0)) + [1]\n return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]\n\n def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):\n \"\"\"\n Creates a mask from the two sequences passed to be used in a sequence-pair classification task.\n A RoBERTa sequence pair mask has the following format:\n 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1\n | first sequence | second sequence\n\n if token_ids_1 is None, only returns the first portion of the mask (0's).\n \"\"\"\n sep = [self.sep_token_id]\n cls = [self.cls_token_id]\n\n if token_ids_1 is None:\n return len(cls + token_ids_0 + sep) * [0]\n return len(cls + token_ids_0 + sep + sep) * [0] + len(token_ids_1 + sep) * [1]\n\n @property\n def vocab_size(self):\n return self.fairseq_offset + len(self.sp_model)\n\n def _tokenize(self, text):\n return self.sp_model.EncodeAsPieces(text)\n\n def _convert_token_to_id(self, token):\n \"\"\" Converts a token (str/unicode) in an id using the vocab. \"\"\"\n if token in self.fairseq_tokens_to_ids:\n return self.fairseq_tokens_to_ids[token]\n return self.fairseq_offset + self.sp_model.PieceToId(token)\n\n def _convert_id_to_token(self, index):\n \"\"\"Converts an index (integer) in a token (string/unicode) using the vocab.\"\"\"\n if index in self.fairseq_ids_to_tokens:\n return self.fairseq_ids_to_tokens[index]\n return self.sp_model.IdToPiece(index - self.fairseq_offset)\n\n def save_vocabulary(self, save_directory):\n \"\"\" Save the sentencepiece vocabulary (copy original file) and special tokens file\n to a directory.\n \"\"\"\n if not os.path.isdir(save_directory):\n logger.error(\"Vocabulary path ({}) should be a directory\".format(save_directory))\n return\n out_vocab_file = os.path.join(save_directory, VOCAB_FILES_NAMES['vocab_file'])\n\n if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):\n copyfile(self.vocab_file, out_vocab_file)\n\n return (out_vocab_file,)\n", "path": "transformers/tokenization_camembert.py"}]}
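For readers skimming the CamemBERT record above, here is a standalone sketch of the token-to-id mapping that fix settles on; the helper names and the toy vocabulary are illustrative stand-ins, not the transformers API.

```python
# Illustrative stand-in for the mapping fixed in the record above: reserved
# fairseq ids come first, the SentencePiece unk piece (id 0) is redirected to
# fairseq <unk>, and every other piece is shifted by the fairseq offset.
fairseq_tokens_to_ids = {'<s>NOTUSED': 0, '<pad>': 1, '</s>NOTUSED': 2, '<unk>': 3}
fairseq_offset = len(fairseq_tokens_to_ids)

def piece_to_id(piece, sp_vocab):
    # Stand-in for sp_model.PieceToId(): unknown pieces come back as 0.
    return sp_vocab.get(piece, 0)

def convert_token_to_id(token, sp_vocab):
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]
    sp_id = piece_to_id(token, sp_vocab)
    if sp_id == 0:
        # SentencePiece unk -> fairseq unk index: the case the patch adds.
        return fairseq_tokens_to_ids['<unk>']
    return fairseq_offset + sp_id

toy_vocab = {'▁le': 16, '▁chat': 1839}          # made-up pieces and ids
print(convert_token_to_id('▁chat', toy_vocab))          # 1843
print(convert_token_to_id('notfrenchword', toy_vocab))  # 3, i.e. <unk>
```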
num_tokens_prompt: 2,829
num_tokens_diff: 423
problem_id: gh_patches_debug_3907
source: rasdani/github-patches
task_type: git_diff
in_source_id: AUTOMATIC1111__stable-diffusion-webui-10041
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [Feature Request]: img2img batch should ignore non-image files ### Is there an existing issue for this? - [X] I have searched the existing issues and checked the recent builds/commits ### What would your feature do ? I may place some parameter files along the image files in the same folder. Currently an exception is throw for non-image files when do "Image.open()". ### Proposed workflow 1. Place a txt file along with the png file in folder A 2. Set batch input folder to A 3. Press generate ### Additional information _No response_ </issue> <code> [start of modules/img2img.py] 1 import math 2 import os 3 import sys 4 import traceback 5 6 import numpy as np 7 from PIL import Image, ImageOps, ImageFilter, ImageEnhance, ImageChops, UnidentifiedImageError 8 9 from modules import devices, sd_samplers 10 from modules.generation_parameters_copypaste import create_override_settings_dict 11 from modules.processing import Processed, StableDiffusionProcessingImg2Img, process_images 12 from modules.shared import opts, state 13 import modules.shared as shared 14 import modules.processing as processing 15 from modules.ui import plaintext_to_html 16 import modules.images as images 17 import modules.scripts 18 19 20 def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args): 21 processing.fix_seed(p) 22 23 images = shared.listfiles(input_dir) 24 25 is_inpaint_batch = False 26 if inpaint_mask_dir: 27 inpaint_masks = shared.listfiles(inpaint_mask_dir) 28 is_inpaint_batch = len(inpaint_masks) > 0 29 if is_inpaint_batch: 30 print(f"\nInpaint batch is enabled. {len(inpaint_masks)} masks found.") 31 32 print(f"Will process {len(images)} images, creating {p.n_iter * p.batch_size} new images for each.") 33 34 save_normally = output_dir == '' 35 36 p.do_not_save_grid = True 37 p.do_not_save_samples = not save_normally 38 39 state.job_count = len(images) * p.n_iter 40 41 for i, image in enumerate(images): 42 state.job = f"{i+1} out of {len(images)}" 43 if state.skipped: 44 state.skipped = False 45 46 if state.interrupted: 47 break 48 49 try: 50 img = Image.open(image) 51 except UnidentifiedImageError: 52 continue 53 # Use the EXIF orientation of photos taken by smartphones. 
54 img = ImageOps.exif_transpose(img) 55 p.init_images = [img] * p.batch_size 56 57 if is_inpaint_batch: 58 # try to find corresponding mask for an image using simple filename matching 59 mask_image_path = os.path.join(inpaint_mask_dir, os.path.basename(image)) 60 # if not found use first one ("same mask for all images" use-case) 61 if not mask_image_path in inpaint_masks: 62 mask_image_path = inpaint_masks[0] 63 mask_image = Image.open(mask_image_path) 64 p.image_mask = mask_image 65 66 proc = modules.scripts.scripts_img2img.run(p, *args) 67 if proc is None: 68 proc = process_images(p) 69 70 for n, processed_image in enumerate(proc.images): 71 filename = os.path.basename(image) 72 73 if n > 0: 74 left, right = os.path.splitext(filename) 75 filename = f"{left}-{n}{right}" 76 77 if not save_normally: 78 os.makedirs(output_dir, exist_ok=True) 79 if processed_image.mode == 'RGBA': 80 processed_image = processed_image.convert("RGB") 81 processed_image.save(os.path.join(output_dir, filename)) 82 83 84 def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, *args): 85 override_settings = create_override_settings_dict(override_settings_texts) 86 87 is_batch = mode == 5 88 89 if mode == 0: # img2img 90 image = init_img.convert("RGB") 91 mask = None 92 elif mode == 1: # img2img sketch 93 image = sketch.convert("RGB") 94 mask = None 95 elif mode == 2: # inpaint 96 image, mask = init_img_with_mask["image"], init_img_with_mask["mask"] 97 alpha_mask = ImageOps.invert(image.split()[-1]).convert('L').point(lambda x: 255 if x > 0 else 0, mode='1') 98 mask = ImageChops.lighter(alpha_mask, mask.convert('L')).convert('L') 99 image = image.convert("RGB") 100 elif mode == 3: # inpaint sketch 101 image = inpaint_color_sketch 102 orig = inpaint_color_sketch_orig or inpaint_color_sketch 103 pred = np.any(np.array(image) != np.array(orig), axis=-1) 104 mask = Image.fromarray(pred.astype(np.uint8) * 255, "L") 105 mask = ImageEnhance.Brightness(mask).enhance(1 - mask_alpha / 100) 106 blur = ImageFilter.GaussianBlur(mask_blur) 107 image = Image.composite(image.filter(blur), orig, mask.filter(blur)) 108 image = image.convert("RGB") 109 elif mode == 4: # inpaint upload mask 110 image = init_img_inpaint 111 mask = init_mask_inpaint 112 else: 113 image = None 114 mask = None 115 116 # Use the EXIF orientation of photos taken by smartphones. 117 if image is not None: 118 image = ImageOps.exif_transpose(image) 119 120 if selected_scale_tab == 1: 121 assert image, "Can't scale by because no image is selected" 122 123 width = int(image.width * scale_by) 124 height = int(image.height * scale_by) 125 126 assert 0. 
<= denoising_strength <= 1., 'can only work with strength in [0.0, 1.0]' 127 128 p = StableDiffusionProcessingImg2Img( 129 sd_model=shared.sd_model, 130 outpath_samples=opts.outdir_samples or opts.outdir_img2img_samples, 131 outpath_grids=opts.outdir_grids or opts.outdir_img2img_grids, 132 prompt=prompt, 133 negative_prompt=negative_prompt, 134 styles=prompt_styles, 135 seed=seed, 136 subseed=subseed, 137 subseed_strength=subseed_strength, 138 seed_resize_from_h=seed_resize_from_h, 139 seed_resize_from_w=seed_resize_from_w, 140 seed_enable_extras=seed_enable_extras, 141 sampler_name=sd_samplers.samplers_for_img2img[sampler_index].name, 142 batch_size=batch_size, 143 n_iter=n_iter, 144 steps=steps, 145 cfg_scale=cfg_scale, 146 width=width, 147 height=height, 148 restore_faces=restore_faces, 149 tiling=tiling, 150 init_images=[image], 151 mask=mask, 152 mask_blur=mask_blur, 153 inpainting_fill=inpainting_fill, 154 resize_mode=resize_mode, 155 denoising_strength=denoising_strength, 156 image_cfg_scale=image_cfg_scale, 157 inpaint_full_res=inpaint_full_res, 158 inpaint_full_res_padding=inpaint_full_res_padding, 159 inpainting_mask_invert=inpainting_mask_invert, 160 override_settings=override_settings, 161 ) 162 163 p.scripts = modules.scripts.scripts_img2img 164 p.script_args = args 165 166 if shared.cmd_opts.enable_console_prompts: 167 print(f"\nimg2img: {prompt}", file=shared.progress_print_out) 168 169 if mask: 170 p.extra_generation_params["Mask blur"] = mask_blur 171 172 if is_batch: 173 assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled" 174 175 process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, img2img_batch_inpaint_mask_dir, args) 176 177 processed = Processed(p, [], p.seed, "") 178 else: 179 processed = modules.scripts.scripts_img2img.run(p, *args) 180 if processed is None: 181 processed = process_images(p) 182 183 p.close() 184 185 shared.total_tqdm.clear() 186 187 generation_info_js = processed.js() 188 if opts.samples_log_stdout: 189 print(generation_info_js) 190 191 if opts.do_not_show_images: 192 processed.images = [] 193 194 return processed.images, generation_info_js, plaintext_to_html(processed.info), plaintext_to_html(processed.comments) 195 [end of modules/img2img.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/modules/img2img.py b/modules/img2img.py --- a/modules/img2img.py +++ b/modules/img2img.py @@ -48,7 +48,8 @@ try: img = Image.open(image) - except UnidentifiedImageError: + except UnidentifiedImageError as e: + print(e) continue # Use the EXIF orientation of photos taken by smartphones. img = ImageOps.exif_transpose(img)
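A minimal, self-contained version of the skip-on-error pattern the diff above applies, assuming only Pillow; the directory walk and function names are placeholders rather than webui code.

```python
# Sketch of tolerating non-image files in a batch folder, as in the patch above.
# Requires Pillow; everything else here is a hypothetical placeholder.
import os
from PIL import Image, ImageOps, UnidentifiedImageError

def iter_images(input_dir):
    for name in sorted(os.listdir(input_dir)):
        path = os.path.join(input_dir, name)
        if not os.path.isfile(path):
            continue
        try:
            img = Image.open(path)
        except UnidentifiedImageError as e:
            print(e)          # log the stray .txt/.json file and keep going
            continue
        yield path, ImageOps.exif_transpose(img)

# usage sketch:
# for path, img in iter_images("batch_inputs"):
#     run_img2img(img)  # hypothetical downstream call
```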
{"golden_diff": "diff --git a/modules/img2img.py b/modules/img2img.py\n--- a/modules/img2img.py\n+++ b/modules/img2img.py\n@@ -48,7 +48,8 @@\n \r\n try:\r\n img = Image.open(image)\r\n- except UnidentifiedImageError:\r\n+ except UnidentifiedImageError as e:\r\n+ print(e)\r\n continue\r\n # Use the EXIF orientation of photos taken by smartphones.\r\n img = ImageOps.exif_transpose(img)\n", "issue": "[Feature Request]: img2img batch should ignore non-image files\n### Is there an existing issue for this?\n\n- [X] I have searched the existing issues and checked the recent builds/commits\n\n### What would your feature do ?\n\nI may place some parameter files along the image files in the same folder.\r\nCurrently an exception is throw for non-image files when do \"Image.open()\".\n\n### Proposed workflow\n\n1. Place a txt file along with the png file in folder A\r\n2. Set batch input folder to A\r\n3. Press generate\r\n\n\n### Additional information\n\n_No response_\n", "before_files": [{"content": "import math\r\nimport os\r\nimport sys\r\nimport traceback\r\n\r\nimport numpy as np\r\nfrom PIL import Image, ImageOps, ImageFilter, ImageEnhance, ImageChops, UnidentifiedImageError\r\n\r\nfrom modules import devices, sd_samplers\r\nfrom modules.generation_parameters_copypaste import create_override_settings_dict\r\nfrom modules.processing import Processed, StableDiffusionProcessingImg2Img, process_images\r\nfrom modules.shared import opts, state\r\nimport modules.shared as shared\r\nimport modules.processing as processing\r\nfrom modules.ui import plaintext_to_html\r\nimport modules.images as images\r\nimport modules.scripts\r\n\r\n\r\ndef process_batch(p, input_dir, output_dir, inpaint_mask_dir, args):\r\n processing.fix_seed(p)\r\n\r\n images = shared.listfiles(input_dir)\r\n\r\n is_inpaint_batch = False\r\n if inpaint_mask_dir:\r\n inpaint_masks = shared.listfiles(inpaint_mask_dir)\r\n is_inpaint_batch = len(inpaint_masks) > 0\r\n if is_inpaint_batch:\r\n print(f\"\\nInpaint batch is enabled. 
{len(inpaint_masks)} masks found.\")\r\n\r\n print(f\"Will process {len(images)} images, creating {p.n_iter * p.batch_size} new images for each.\")\r\n\r\n save_normally = output_dir == ''\r\n\r\n p.do_not_save_grid = True\r\n p.do_not_save_samples = not save_normally\r\n\r\n state.job_count = len(images) * p.n_iter\r\n\r\n for i, image in enumerate(images):\r\n state.job = f\"{i+1} out of {len(images)}\"\r\n if state.skipped:\r\n state.skipped = False\r\n\r\n if state.interrupted:\r\n break\r\n\r\n try:\r\n img = Image.open(image)\r\n except UnidentifiedImageError:\r\n continue\r\n # Use the EXIF orientation of photos taken by smartphones.\r\n img = ImageOps.exif_transpose(img)\r\n p.init_images = [img] * p.batch_size\r\n\r\n if is_inpaint_batch:\r\n # try to find corresponding mask for an image using simple filename matching\r\n mask_image_path = os.path.join(inpaint_mask_dir, os.path.basename(image))\r\n # if not found use first one (\"same mask for all images\" use-case)\r\n if not mask_image_path in inpaint_masks:\r\n mask_image_path = inpaint_masks[0]\r\n mask_image = Image.open(mask_image_path)\r\n p.image_mask = mask_image\r\n\r\n proc = modules.scripts.scripts_img2img.run(p, *args)\r\n if proc is None:\r\n proc = process_images(p)\r\n\r\n for n, processed_image in enumerate(proc.images):\r\n filename = os.path.basename(image)\r\n\r\n if n > 0:\r\n left, right = os.path.splitext(filename)\r\n filename = f\"{left}-{n}{right}\"\r\n\r\n if not save_normally:\r\n os.makedirs(output_dir, exist_ok=True)\r\n if processed_image.mode == 'RGBA':\r\n processed_image = processed_image.convert(\"RGB\")\r\n processed_image.save(os.path.join(output_dir, filename))\r\n\r\n\r\ndef img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, *args):\r\n override_settings = create_override_settings_dict(override_settings_texts)\r\n\r\n is_batch = mode == 5\r\n\r\n if mode == 0: # img2img\r\n image = init_img.convert(\"RGB\")\r\n mask = None\r\n elif mode == 1: # img2img sketch\r\n image = sketch.convert(\"RGB\")\r\n mask = None\r\n elif mode == 2: # inpaint\r\n image, mask = init_img_with_mask[\"image\"], init_img_with_mask[\"mask\"]\r\n alpha_mask = ImageOps.invert(image.split()[-1]).convert('L').point(lambda x: 255 if x > 0 else 0, mode='1')\r\n mask = ImageChops.lighter(alpha_mask, mask.convert('L')).convert('L')\r\n image = image.convert(\"RGB\")\r\n elif mode == 3: # inpaint sketch\r\n image = inpaint_color_sketch\r\n orig = inpaint_color_sketch_orig or inpaint_color_sketch\r\n pred = np.any(np.array(image) != np.array(orig), axis=-1)\r\n mask = Image.fromarray(pred.astype(np.uint8) * 255, \"L\")\r\n mask = ImageEnhance.Brightness(mask).enhance(1 - mask_alpha / 100)\r\n blur = ImageFilter.GaussianBlur(mask_blur)\r\n image = 
Image.composite(image.filter(blur), orig, mask.filter(blur))\r\n image = image.convert(\"RGB\")\r\n elif mode == 4: # inpaint upload mask\r\n image = init_img_inpaint\r\n mask = init_mask_inpaint\r\n else:\r\n image = None\r\n mask = None\r\n\r\n # Use the EXIF orientation of photos taken by smartphones.\r\n if image is not None:\r\n image = ImageOps.exif_transpose(image)\r\n\r\n if selected_scale_tab == 1:\r\n assert image, \"Can't scale by because no image is selected\"\r\n\r\n width = int(image.width * scale_by)\r\n height = int(image.height * scale_by)\r\n\r\n assert 0. <= denoising_strength <= 1., 'can only work with strength in [0.0, 1.0]'\r\n\r\n p = StableDiffusionProcessingImg2Img(\r\n sd_model=shared.sd_model,\r\n outpath_samples=opts.outdir_samples or opts.outdir_img2img_samples,\r\n outpath_grids=opts.outdir_grids or opts.outdir_img2img_grids,\r\n prompt=prompt,\r\n negative_prompt=negative_prompt,\r\n styles=prompt_styles,\r\n seed=seed,\r\n subseed=subseed,\r\n subseed_strength=subseed_strength,\r\n seed_resize_from_h=seed_resize_from_h,\r\n seed_resize_from_w=seed_resize_from_w,\r\n seed_enable_extras=seed_enable_extras,\r\n sampler_name=sd_samplers.samplers_for_img2img[sampler_index].name,\r\n batch_size=batch_size,\r\n n_iter=n_iter,\r\n steps=steps,\r\n cfg_scale=cfg_scale,\r\n width=width,\r\n height=height,\r\n restore_faces=restore_faces,\r\n tiling=tiling,\r\n init_images=[image],\r\n mask=mask,\r\n mask_blur=mask_blur,\r\n inpainting_fill=inpainting_fill,\r\n resize_mode=resize_mode,\r\n denoising_strength=denoising_strength,\r\n image_cfg_scale=image_cfg_scale,\r\n inpaint_full_res=inpaint_full_res,\r\n inpaint_full_res_padding=inpaint_full_res_padding,\r\n inpainting_mask_invert=inpainting_mask_invert,\r\n override_settings=override_settings,\r\n )\r\n\r\n p.scripts = modules.scripts.scripts_img2img\r\n p.script_args = args\r\n\r\n if shared.cmd_opts.enable_console_prompts:\r\n print(f\"\\nimg2img: {prompt}\", file=shared.progress_print_out)\r\n\r\n if mask:\r\n p.extra_generation_params[\"Mask blur\"] = mask_blur\r\n\r\n if is_batch:\r\n assert not shared.cmd_opts.hide_ui_dir_config, \"Launched with --hide-ui-dir-config, batch img2img disabled\"\r\n\r\n process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, img2img_batch_inpaint_mask_dir, args)\r\n\r\n processed = Processed(p, [], p.seed, \"\")\r\n else:\r\n processed = modules.scripts.scripts_img2img.run(p, *args)\r\n if processed is None:\r\n processed = process_images(p)\r\n\r\n p.close()\r\n\r\n shared.total_tqdm.clear()\r\n\r\n generation_info_js = processed.js()\r\n if opts.samples_log_stdout:\r\n print(generation_info_js)\r\n\r\n if opts.do_not_show_images:\r\n processed.images = []\r\n\r\n return processed.images, generation_info_js, plaintext_to_html(processed.info), plaintext_to_html(processed.comments)\r\n", "path": "modules/img2img.py"}]}
num_tokens_prompt: 3,041
num_tokens_diff: 105
problem_id: gh_patches_debug_28907
source: rasdani/github-patches
task_type: git_diff
in_source_id: ansible__ansible-43525
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Missing all_content param in ovirt_hosts_facts ##### SUMMARY ovirt_hosts_facts misses the all_content param and so it's not possible to get back the whole host details. ovirt_vms_facts list, for instance, has it. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME ovirt_hosts_facts ##### ANSIBLE VERSION ``` ansible 2.6.1 config file = /etc/ansible/ansible.cfg configured module search path = [u'/home/stirabos/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules'] ansible python module location = /usr/lib/python2.7/site-packages/ansible executable location = /usr/bin/ansible python version = 2.7.15 (default, May 16 2018, 17:50:09) [GCC 8.1.1 20180502 (Red Hat 8.1.1-1)] ``` ##### CONFIGURATION ##### OS / ENVIRONMENT N/A ##### STEPS TO REPRODUCE <!--- For bugs, show exactly how to reproduce the problem, using a minimal test-case. For new features, show how the feature would be used. --> <!--- Paste example playbooks or commands between quotes below --> ```yaml - ovirt_vms_facts: auth: "{{ ovirt_auth }}" all_content: true - debug: var=ovirt_vms - ovirt_hosts_facts: auth: "{{ ovirt_auth }}" all_content: true - debug: var=ovirt_hosts ``` ##### EXPECTED RESULTS a list of hosts with full detail for each of them ##### ACTUAL RESULTS ``` TASK [ovirt_hosts_facts] ****************************************************************************************************************************************************************************************** fatal: [localhost]: FAILED! => {"changed": false, "msg": "Unsupported parameters for (ovirt_hosts_facts) module: all_content Supported parameters include: auth, fetch_nested, nested_attributes, pattern"} to retry, use: --limit @/root/test.retry ``` </issue> <code> [start of lib/ansible/modules/cloud/ovirt/ovirt_hosts_facts.py] 1 #!/usr/bin/python 2 # -*- coding: utf-8 -*- 3 # 4 # Copyright (c) 2016 Red Hat, Inc. 5 # 6 # This file is part of Ansible 7 # 8 # Ansible is free software: you can redistribute it and/or modify 9 # it under the terms of the GNU General Public License as published by 10 # the Free Software Foundation, either version 3 of the License, or 11 # (at your option) any later version. 12 # 13 # Ansible is distributed in the hope that it will be useful, 14 # but WITHOUT ANY WARRANTY; without even the implied warranty of 15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 # GNU General Public License for more details. 17 # 18 # You should have received a copy of the GNU General Public License 19 # along with Ansible. If not, see <http://www.gnu.org/licenses/>. 20 # 21 22 ANSIBLE_METADATA = {'metadata_version': '1.1', 23 'status': ['preview'], 24 'supported_by': 'community'} 25 26 27 DOCUMENTATION = ''' 28 --- 29 module: ovirt_hosts_facts 30 short_description: Retrieve facts about one or more oVirt/RHV hosts 31 author: "Ondra Machacek (@machacekondra)" 32 version_added: "2.3" 33 description: 34 - "Retrieve facts about one or more oVirt/RHV hosts." 35 notes: 36 - "This module creates a new top-level C(ovirt_hosts) fact, which 37 contains a list of hosts." 38 options: 39 pattern: 40 description: 41 - "Search term which is accepted by oVirt/RHV search backend." 
42 - "For example to search host X from datacenter Y use following pattern: 43 name=X and datacenter=Y" 44 extends_documentation_fragment: ovirt_facts 45 ''' 46 47 EXAMPLES = ''' 48 # Examples don't contain auth parameter for simplicity, 49 # look at ovirt_auth module to see how to reuse authentication: 50 51 # Gather facts about all hosts which names start with C(host) and 52 # belong to data center C(west): 53 - ovirt_hosts_facts: 54 pattern: name=host* and datacenter=west 55 - debug: 56 var: ovirt_hosts 57 ''' 58 59 RETURN = ''' 60 ovirt_hosts: 61 description: "List of dictionaries describing the hosts. Host attribues are mapped to dictionary keys, 62 all hosts attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/host." 63 returned: On success. 64 type: list 65 ''' 66 67 import traceback 68 69 from ansible.module_utils.basic import AnsibleModule 70 from ansible.module_utils.ovirt import ( 71 check_sdk, 72 create_connection, 73 get_dict_of_struct, 74 ovirt_facts_full_argument_spec, 75 ) 76 77 78 def main(): 79 argument_spec = ovirt_facts_full_argument_spec( 80 pattern=dict(default='', required=False), 81 ) 82 module = AnsibleModule(argument_spec) 83 check_sdk(module) 84 85 try: 86 auth = module.params.pop('auth') 87 connection = create_connection(auth) 88 hosts_service = connection.system_service().hosts_service() 89 hosts = hosts_service.list(search=module.params['pattern']) 90 module.exit_json( 91 changed=False, 92 ansible_facts=dict( 93 ovirt_hosts=[ 94 get_dict_of_struct( 95 struct=c, 96 connection=connection, 97 fetch_nested=module.params.get('fetch_nested'), 98 attributes=module.params.get('nested_attributes'), 99 ) for c in hosts 100 ], 101 ), 102 ) 103 except Exception as e: 104 module.fail_json(msg=str(e), exception=traceback.format_exc()) 105 finally: 106 connection.close(logout=auth.get('token') is None) 107 108 109 if __name__ == '__main__': 110 main() 111 [end of lib/ansible/modules/cloud/ovirt/ovirt_hosts_facts.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lib/ansible/modules/cloud/ovirt/ovirt_hosts_facts.py b/lib/ansible/modules/cloud/ovirt/ovirt_hosts_facts.py --- a/lib/ansible/modules/cloud/ovirt/ovirt_hosts_facts.py +++ b/lib/ansible/modules/cloud/ovirt/ovirt_hosts_facts.py @@ -41,6 +41,12 @@ - "Search term which is accepted by oVirt/RHV search backend." - "For example to search host X from datacenter Y use following pattern: name=X and datacenter=Y" + all_content: + description: + - "If I(true) all the attributes of the hosts should be + included in the response." + default: False + version_added: "2.7" extends_documentation_fragment: ovirt_facts ''' @@ -78,6 +84,7 @@ def main(): argument_spec = ovirt_facts_full_argument_spec( pattern=dict(default='', required=False), + all_content=dict(default=False, type='bool'), ) module = AnsibleModule(argument_spec) check_sdk(module) @@ -86,7 +93,10 @@ auth = module.params.pop('auth') connection = create_connection(auth) hosts_service = connection.system_service().hosts_service() - hosts = hosts_service.list(search=module.params['pattern']) + hosts = hosts_service.list( + search=module.params['pattern'], + all_content=module.params['all_content'], + ) module.exit_json( changed=False, ansible_facts=dict(
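The same idea as the ansible diff above, reduced to a hedged sketch: declare an optional boolean module parameter and forward it to the SDK's list call. The helper names below are made up for illustration and are not part of the collection.

```python
# Sketch only: an optional bool parameter passed through to the oVirt SDK search,
# mirroring the all_content change above. build_spec/collect_hosts are made-up names.
def build_spec(base_spec):
    spec = dict(base_spec)
    spec['all_content'] = dict(default=False, type='bool')
    return spec

def collect_hosts(hosts_service, module_params):
    # HostsService.list() accepts both search= and all_content=, as used in the patch.
    return hosts_service.list(
        search=module_params.get('pattern', ''),
        all_content=module_params.get('all_content', False),
    )
```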
{"golden_diff": "diff --git a/lib/ansible/modules/cloud/ovirt/ovirt_hosts_facts.py b/lib/ansible/modules/cloud/ovirt/ovirt_hosts_facts.py\n--- a/lib/ansible/modules/cloud/ovirt/ovirt_hosts_facts.py\n+++ b/lib/ansible/modules/cloud/ovirt/ovirt_hosts_facts.py\n@@ -41,6 +41,12 @@\n - \"Search term which is accepted by oVirt/RHV search backend.\"\n - \"For example to search host X from datacenter Y use following pattern:\n name=X and datacenter=Y\"\n+ all_content:\n+ description:\n+ - \"If I(true) all the attributes of the hosts should be\n+ included in the response.\"\n+ default: False\n+ version_added: \"2.7\"\n extends_documentation_fragment: ovirt_facts\n '''\n \n@@ -78,6 +84,7 @@\n def main():\n argument_spec = ovirt_facts_full_argument_spec(\n pattern=dict(default='', required=False),\n+ all_content=dict(default=False, type='bool'),\n )\n module = AnsibleModule(argument_spec)\n check_sdk(module)\n@@ -86,7 +93,10 @@\n auth = module.params.pop('auth')\n connection = create_connection(auth)\n hosts_service = connection.system_service().hosts_service()\n- hosts = hosts_service.list(search=module.params['pattern'])\n+ hosts = hosts_service.list(\n+ search=module.params['pattern'],\n+ all_content=module.params['all_content'],\n+ )\n module.exit_json(\n changed=False,\n ansible_facts=dict(\n", "issue": "Missing all_content param in ovirt_hosts_facts\n##### SUMMARY\r\novirt_hosts_facts misses the all_content param and so it's not possible to get back the whole host details. ovirt_vms_facts list, for instance, has it.\r\n\r\n##### ISSUE TYPE\r\n - Bug Report\r\n\r\n##### COMPONENT NAME\r\novirt_hosts_facts\r\n\r\n##### ANSIBLE VERSION\r\n```\r\nansible 2.6.1\r\n config file = /etc/ansible/ansible.cfg\r\n configured module search path = [u'/home/stirabos/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules']\r\n ansible python module location = /usr/lib/python2.7/site-packages/ansible\r\n executable location = /usr/bin/ansible\r\n python version = 2.7.15 (default, May 16 2018, 17:50:09) [GCC 8.1.1 20180502 (Red Hat 8.1.1-1)]\r\n```\r\n\r\n##### CONFIGURATION\r\n\r\n\r\n##### OS / ENVIRONMENT\r\nN/A\r\n\r\n##### STEPS TO REPRODUCE\r\n<!--- For bugs, show exactly how to reproduce the problem, using a minimal test-case.\r\nFor new features, show how the feature would be used. -->\r\n\r\n<!--- Paste example playbooks or commands between quotes below -->\r\n```yaml\r\n - ovirt_vms_facts:\r\n auth: \"{{ ovirt_auth }}\"\r\n all_content: true\r\n - debug: var=ovirt_vms\r\n - ovirt_hosts_facts:\r\n auth: \"{{ ovirt_auth }}\"\r\n all_content: true\r\n - debug: var=ovirt_hosts\r\n```\r\n\r\n##### EXPECTED RESULTS\r\na list of hosts with full detail for each of them\r\n\r\n##### ACTUAL RESULTS\r\n```\r\nTASK [ovirt_hosts_facts] ******************************************************************************************************************************************************************************************\r\nfatal: [localhost]: FAILED! 
=> {\"changed\": false, \"msg\": \"Unsupported parameters for (ovirt_hosts_facts) module: all_content Supported parameters include: auth, fetch_nested, nested_attributes, pattern\"}\r\n\tto retry, use: --limit @/root/test.retry\r\n```\r\n\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2016 Red Hat, Inc.\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n#\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'}\n\n\nDOCUMENTATION = '''\n---\nmodule: ovirt_hosts_facts\nshort_description: Retrieve facts about one or more oVirt/RHV hosts\nauthor: \"Ondra Machacek (@machacekondra)\"\nversion_added: \"2.3\"\ndescription:\n - \"Retrieve facts about one or more oVirt/RHV hosts.\"\nnotes:\n - \"This module creates a new top-level C(ovirt_hosts) fact, which\n contains a list of hosts.\"\noptions:\n pattern:\n description:\n - \"Search term which is accepted by oVirt/RHV search backend.\"\n - \"For example to search host X from datacenter Y use following pattern:\n name=X and datacenter=Y\"\nextends_documentation_fragment: ovirt_facts\n'''\n\nEXAMPLES = '''\n# Examples don't contain auth parameter for simplicity,\n# look at ovirt_auth module to see how to reuse authentication:\n\n# Gather facts about all hosts which names start with C(host) and\n# belong to data center C(west):\n- ovirt_hosts_facts:\n pattern: name=host* and datacenter=west\n- debug:\n var: ovirt_hosts\n'''\n\nRETURN = '''\novirt_hosts:\n description: \"List of dictionaries describing the hosts. Host attribues are mapped to dictionary keys,\n all hosts attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/host.\"\n returned: On success.\n type: list\n'''\n\nimport traceback\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils.ovirt import (\n check_sdk,\n create_connection,\n get_dict_of_struct,\n ovirt_facts_full_argument_spec,\n)\n\n\ndef main():\n argument_spec = ovirt_facts_full_argument_spec(\n pattern=dict(default='', required=False),\n )\n module = AnsibleModule(argument_spec)\n check_sdk(module)\n\n try:\n auth = module.params.pop('auth')\n connection = create_connection(auth)\n hosts_service = connection.system_service().hosts_service()\n hosts = hosts_service.list(search=module.params['pattern'])\n module.exit_json(\n changed=False,\n ansible_facts=dict(\n ovirt_hosts=[\n get_dict_of_struct(\n struct=c,\n connection=connection,\n fetch_nested=module.params.get('fetch_nested'),\n attributes=module.params.get('nested_attributes'),\n ) for c in hosts\n ],\n ),\n )\n except Exception as e:\n module.fail_json(msg=str(e), exception=traceback.format_exc())\n finally:\n connection.close(logout=auth.get('token') is None)\n\n\nif __name__ == '__main__':\n main()\n", "path": "lib/ansible/modules/cloud/ovirt/ovirt_hosts_facts.py"}]}
num_tokens_prompt: 2,016
num_tokens_diff: 351
problem_id: gh_patches_debug_5892
source: rasdani/github-patches
task_type: git_diff
in_source_id: buildbot__buildbot-7318
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Address PEP 706 - Filter for tarfile.extractall Given proposal improves security of tarfile extraction to help avoid CVE-2007-4559. - In Python 3.12-3.13, a DeprecationWarning is emitted and extraction uses `fully_trusted` filter. - In Python 3.14+, it will use the `data` filter. It seems given proposal was backported also to older version of Python. Reference: https://peps.python.org/pep-0706/ </issue> <code> [start of master/buildbot/process/remotetransfer.py] 1 # This file is part of Buildbot. Buildbot is free software: you can 2 # redistribute it and/or modify it under the terms of the GNU General Public 3 # License as published by the Free Software Foundation, version 2. 4 # 5 # This program is distributed in the hope that it will be useful, but WITHOUT 6 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS 7 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more 8 # details. 9 # 10 # You should have received a copy of the GNU General Public License along with 11 # this program; if not, write to the Free Software Foundation, Inc., 51 12 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 13 # 14 # Copyright Buildbot Team Members 15 16 """ 17 module for regrouping all FileWriterImpl and FileReaderImpl away from steps 18 """ 19 20 import os 21 import tarfile 22 import tempfile 23 from io import BytesIO 24 25 from buildbot.util import bytes2unicode 26 from buildbot.util import unicode2bytes 27 from buildbot.worker.protocols import base 28 29 30 class FileWriter(base.FileWriterImpl): 31 32 """ 33 Helper class that acts as a file-object with write access 34 """ 35 36 def __init__(self, destfile, maxsize, mode): 37 # Create missing directories. 
38 destfile = os.path.abspath(destfile) 39 dirname = os.path.dirname(destfile) 40 if not os.path.exists(dirname): 41 os.makedirs(dirname) 42 43 self.destfile = destfile 44 self.mode = mode 45 fd, self.tmpname = tempfile.mkstemp(dir=dirname, prefix='buildbot-transfer-') 46 self.fp = os.fdopen(fd, 'wb') 47 self.remaining = maxsize 48 49 def remote_write(self, data): 50 """ 51 Called from remote worker to write L{data} to L{fp} within boundaries 52 of L{maxsize} 53 54 @type data: C{string} 55 @param data: String of data to write 56 """ 57 data = unicode2bytes(data) 58 if self.remaining is not None: 59 if len(data) > self.remaining: 60 data = data[:self.remaining] 61 self.fp.write(data) 62 self.remaining = self.remaining - len(data) 63 else: 64 self.fp.write(data) 65 66 def remote_utime(self, accessed_modified): 67 os.utime(self.destfile, accessed_modified) 68 69 def remote_close(self): 70 """ 71 Called by remote worker to state that no more data will be transferred 72 """ 73 self.fp.close() 74 self.fp = None 75 # on windows, os.rename does not automatically unlink, so do it 76 # manually 77 if os.path.exists(self.destfile): 78 os.unlink(self.destfile) 79 os.rename(self.tmpname, self.destfile) 80 self.tmpname = None 81 if self.mode is not None: 82 os.chmod(self.destfile, self.mode) 83 84 def cancel(self): 85 # unclean shutdown, the file is probably truncated, so delete it 86 # altogether rather than deliver a corrupted file 87 fp = getattr(self, "fp", None) 88 if fp: 89 fp.close() 90 if self.destfile and os.path.exists(self.destfile): 91 os.unlink(self.destfile) 92 if self.tmpname and os.path.exists(self.tmpname): 93 os.unlink(self.tmpname) 94 95 96 class DirectoryWriter(FileWriter): 97 98 """ 99 A DirectoryWriter is implemented as a FileWriter, with an added post-processing 100 step to unpack the archive, once the transfer has completed. 
101 """ 102 103 def __init__(self, destroot, maxsize, compress, mode): 104 self.destroot = destroot 105 self.compress = compress 106 107 self.fd, self.tarname = tempfile.mkstemp(prefix='buildbot-transfer-') 108 os.close(self.fd) 109 110 super().__init__(self.tarname, maxsize, mode) 111 112 def remote_unpack(self): 113 """ 114 Called by remote worker to state that no more data will be transferred 115 """ 116 # Make sure remote_close is called, otherwise atomic rename won't happen 117 self.remote_close() 118 119 # Map configured compression to a TarFile setting 120 if self.compress == 'bz2': 121 mode = 'r|bz2' 122 elif self.compress == 'gz': 123 mode = 'r|gz' 124 else: 125 mode = 'r' 126 127 # Unpack archive and clean up after self 128 with tarfile.open(name=self.tarname, mode=mode) as archive: 129 archive.extractall(path=self.destroot) 130 os.remove(self.tarname) 131 132 133 class FileReader(base.FileReaderImpl): 134 135 """ 136 Helper class that acts as a file-object with read access 137 """ 138 139 def __init__(self, fp): 140 self.fp = fp 141 142 def remote_read(self, maxlength): 143 """ 144 Called from remote worker to read at most L{maxlength} bytes of data 145 146 @type maxlength: C{integer} 147 @param maxlength: Maximum number of data bytes that can be returned 148 149 @return: Data read from L{fp} 150 @rtype: C{string} of bytes read from file 151 """ 152 if self.fp is None: 153 return '' 154 155 data = self.fp.read(maxlength) 156 return data 157 158 def remote_close(self): 159 """ 160 Called by remote worker to state that no more data will be transferred 161 """ 162 if self.fp is not None: 163 self.fp.close() 164 self.fp = None 165 166 167 class StringFileWriter(base.FileWriterImpl): 168 169 """ 170 FileWriter class that just puts received data into a buffer. 171 172 Used to upload a file from worker for inline processing rather than 173 writing into a file on master. 174 """ 175 176 def __init__(self): 177 self.buffer = "" 178 179 def remote_write(self, data): 180 self.buffer += bytes2unicode(data) 181 182 def remote_close(self): 183 pass 184 185 186 class StringFileReader(FileReader): 187 188 """ 189 FileWriter class that just buid send data from a string. 190 191 Used to download a file to worker from local string rather than first 192 writing into a file on master. 193 """ 194 195 def __init__(self, s): 196 s = unicode2bytes(s) 197 super().__init__(BytesIO(s)) 198 [end of master/buildbot/process/remotetransfer.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/master/buildbot/process/remotetransfer.py b/master/buildbot/process/remotetransfer.py --- a/master/buildbot/process/remotetransfer.py +++ b/master/buildbot/process/remotetransfer.py @@ -126,7 +126,10 @@ # Unpack archive and clean up after self with tarfile.open(name=self.tarname, mode=mode) as archive: - archive.extractall(path=self.destroot) + if hasattr(tarfile, 'data_filter'): + archive.extractall(path=self.destroot, filter='data') + else: + archive.extractall(path=self.destroot) os.remove(self.tarname)
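A standalone sketch of the PEP 706-aware extraction the buildbot patch above adopts; the helper name and arguments are illustrative.

```python
# Version-tolerant tar extraction: use the 'data' filter when the running
# Python exposes it (3.12+, plus the backports PEP 706 mentions), otherwise
# fall back to the old behaviour, exactly as the patch above does.
import tarfile

def safe_extractall(archive_path, dest, compress=None):
    mode = {'bz2': 'r|bz2', 'gz': 'r|gz'}.get(compress, 'r')
    with tarfile.open(name=archive_path, mode=mode) as archive:
        if hasattr(tarfile, 'data_filter'):
            # 'data' rejects absolute paths, links escaping dest, device nodes, etc.
            archive.extractall(path=dest, filter='data')
        else:
            archive.extractall(path=dest)
```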
{"golden_diff": "diff --git a/master/buildbot/process/remotetransfer.py b/master/buildbot/process/remotetransfer.py\n--- a/master/buildbot/process/remotetransfer.py\n+++ b/master/buildbot/process/remotetransfer.py\n@@ -126,7 +126,10 @@\n \n # Unpack archive and clean up after self\n with tarfile.open(name=self.tarname, mode=mode) as archive:\n- archive.extractall(path=self.destroot)\n+ if hasattr(tarfile, 'data_filter'):\n+ archive.extractall(path=self.destroot, filter='data')\n+ else:\n+ archive.extractall(path=self.destroot)\n os.remove(self.tarname)\n", "issue": "Address PEP 706 - Filter for tarfile.extractall\nGiven proposal improves security of tarfile extraction to help avoid CVE-2007-4559.\r\n\r\n- In Python 3.12-3.13, a DeprecationWarning is emitted and extraction uses `fully_trusted` filter.\r\n- In Python 3.14+, it will use the `data` filter.\r\n\r\nIt seems given proposal was backported also to older version of Python.\r\n\r\nReference: https://peps.python.org/pep-0706/\r\n\n", "before_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\n\"\"\"\nmodule for regrouping all FileWriterImpl and FileReaderImpl away from steps\n\"\"\"\n\nimport os\nimport tarfile\nimport tempfile\nfrom io import BytesIO\n\nfrom buildbot.util import bytes2unicode\nfrom buildbot.util import unicode2bytes\nfrom buildbot.worker.protocols import base\n\n\nclass FileWriter(base.FileWriterImpl):\n\n \"\"\"\n Helper class that acts as a file-object with write access\n \"\"\"\n\n def __init__(self, destfile, maxsize, mode):\n # Create missing directories.\n destfile = os.path.abspath(destfile)\n dirname = os.path.dirname(destfile)\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n\n self.destfile = destfile\n self.mode = mode\n fd, self.tmpname = tempfile.mkstemp(dir=dirname, prefix='buildbot-transfer-')\n self.fp = os.fdopen(fd, 'wb')\n self.remaining = maxsize\n\n def remote_write(self, data):\n \"\"\"\n Called from remote worker to write L{data} to L{fp} within boundaries\n of L{maxsize}\n\n @type data: C{string}\n @param data: String of data to write\n \"\"\"\n data = unicode2bytes(data)\n if self.remaining is not None:\n if len(data) > self.remaining:\n data = data[:self.remaining]\n self.fp.write(data)\n self.remaining = self.remaining - len(data)\n else:\n self.fp.write(data)\n\n def remote_utime(self, accessed_modified):\n os.utime(self.destfile, accessed_modified)\n\n def remote_close(self):\n \"\"\"\n Called by remote worker to state that no more data will be transferred\n \"\"\"\n self.fp.close()\n self.fp = None\n # on windows, os.rename does not automatically unlink, so do it\n # manually\n if os.path.exists(self.destfile):\n os.unlink(self.destfile)\n os.rename(self.tmpname, self.destfile)\n self.tmpname = None\n if self.mode is not None:\n os.chmod(self.destfile, self.mode)\n\n def cancel(self):\n # unclean 
shutdown, the file is probably truncated, so delete it\n # altogether rather than deliver a corrupted file\n fp = getattr(self, \"fp\", None)\n if fp:\n fp.close()\n if self.destfile and os.path.exists(self.destfile):\n os.unlink(self.destfile)\n if self.tmpname and os.path.exists(self.tmpname):\n os.unlink(self.tmpname)\n\n\nclass DirectoryWriter(FileWriter):\n\n \"\"\"\n A DirectoryWriter is implemented as a FileWriter, with an added post-processing\n step to unpack the archive, once the transfer has completed.\n \"\"\"\n\n def __init__(self, destroot, maxsize, compress, mode):\n self.destroot = destroot\n self.compress = compress\n\n self.fd, self.tarname = tempfile.mkstemp(prefix='buildbot-transfer-')\n os.close(self.fd)\n\n super().__init__(self.tarname, maxsize, mode)\n\n def remote_unpack(self):\n \"\"\"\n Called by remote worker to state that no more data will be transferred\n \"\"\"\n # Make sure remote_close is called, otherwise atomic rename won't happen\n self.remote_close()\n\n # Map configured compression to a TarFile setting\n if self.compress == 'bz2':\n mode = 'r|bz2'\n elif self.compress == 'gz':\n mode = 'r|gz'\n else:\n mode = 'r'\n\n # Unpack archive and clean up after self\n with tarfile.open(name=self.tarname, mode=mode) as archive:\n archive.extractall(path=self.destroot)\n os.remove(self.tarname)\n\n\nclass FileReader(base.FileReaderImpl):\n\n \"\"\"\n Helper class that acts as a file-object with read access\n \"\"\"\n\n def __init__(self, fp):\n self.fp = fp\n\n def remote_read(self, maxlength):\n \"\"\"\n Called from remote worker to read at most L{maxlength} bytes of data\n\n @type maxlength: C{integer}\n @param maxlength: Maximum number of data bytes that can be returned\n\n @return: Data read from L{fp}\n @rtype: C{string} of bytes read from file\n \"\"\"\n if self.fp is None:\n return ''\n\n data = self.fp.read(maxlength)\n return data\n\n def remote_close(self):\n \"\"\"\n Called by remote worker to state that no more data will be transferred\n \"\"\"\n if self.fp is not None:\n self.fp.close()\n self.fp = None\n\n\nclass StringFileWriter(base.FileWriterImpl):\n\n \"\"\"\n FileWriter class that just puts received data into a buffer.\n\n Used to upload a file from worker for inline processing rather than\n writing into a file on master.\n \"\"\"\n\n def __init__(self):\n self.buffer = \"\"\n\n def remote_write(self, data):\n self.buffer += bytes2unicode(data)\n\n def remote_close(self):\n pass\n\n\nclass StringFileReader(FileReader):\n\n \"\"\"\n FileWriter class that just buid send data from a string.\n\n Used to download a file to worker from local string rather than first\n writing into a file on master.\n \"\"\"\n\n def __init__(self, s):\n s = unicode2bytes(s)\n super().__init__(BytesIO(s))\n", "path": "master/buildbot/process/remotetransfer.py"}]}
num_tokens_prompt: 2,484
num_tokens_diff: 153
problem_id: gh_patches_debug_24340
source: rasdani/github-patches
task_type: git_diff
in_source_id: apache__airflow-17850
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ExasolHook get_pandas_df does not return pandas dataframe but None When calling the exasol hooks get_pandas_df function (https://github.com/apache/airflow/blob/main/airflow/providers/exasol/hooks/exasol.py) I noticed that it does not return a pandas dataframe. It returns None. In fact the function definition type hint explicitly states that None is returned. But the name of the function suggests otherwise. The name get_pandas_df implies that it should return a dataframe and not None. I think that it would make more sense if get_pandas_df would indeed return a dataframe as the name is alluring to. So the code should be like this: `def get_pandas_df(self, sql: Union[str, list], parameters: Optional[dict] = None, **kwargs) -> pd.DataFrame: ... some code ... with closing(self.get_conn()) as conn: df=conn.export_to_pandas(sql, query_params=parameters, **kwargs) return df` INSTEAD OF: `def get_pandas_df(self, sql: Union[str, list], parameters: Optional[dict] = None, **kwargs) -> None: ... some code ... with closing(self.get_conn()) as conn: conn.export_to_pandas(sql, query_params=parameters, **kwargs)` **Apache Airflow version**: 2.1.0 **Kubernetes version (if you are using kubernetes)** (use `kubectl version`): Not using Kubernetes **Environment**:Official Airflow-Docker Image - **Cloud provider or hardware configuration**: no cloud - docker host (DELL Server with 48 Cores, 512GB RAM and many TB storage) - **OS** (e.g. from /etc/os-release):Official Airflow-Docker Image on CentOS 7 Host - **Kernel** (e.g. `uname -a`): Linux cad18b35be00 3.10.0-1160.21.1.el7.x86_64 #1 SMP Tue Mar 16 18:28:22 UTC 2021 x86_64 GNU/Linux - **Install tools**: only docker - **Others**: **What happened**: You can replicate the findings with following dag file: import datetime from airflow import DAG from airflow.operators.python_operator import PythonOperator from airflow.providers.exasol.operators.exasol import ExasolHook import pandas as pd default_args = {"owner": "airflow"} def call_exasol_hook(**kwargs): #Make connection to Exasol hook = ExasolHook(exasol_conn_id='Exasol QA') sql = 'select 42;' df = hook.get_pandas_df(sql = sql) return df with DAG( dag_id="exasol_hook_problem", start_date=datetime.datetime(2021, 5, 5), schedule_interval="@once", default_args=default_args, catchup=False, ) as dag: set_variable = PythonOperator( task_id='call_exasol_hook', python_callable=call_exasol_hook ) Sorry for the strange code formatting. I do not know how to fix this in the github UI form. Sorry also in case I missed something. When testing or executing the task via CLI: ` airflow tasks test exasol_hook_problem call_exasol_hook 2021-07-20` the logs show: `[2021-07-21 12:53:19,775] {python.py:151} INFO - Done. Returned value was: None` None was returned - although get_pandas_df was called. A pandas df should have been returned instead. </issue> <code> [start of airflow/providers/exasol/hooks/exasol.py] 1 # 2 # Licensed to the Apache Software Foundation (ASF) under one 3 # or more contributor license agreements. See the NOTICE file 4 # distributed with this work for additional information 5 # regarding copyright ownership. The ASF licenses this file 6 # to you under the Apache License, Version 2.0 (the 7 # "License"); you may not use this file except in compliance 8 # with the License. 
You may obtain a copy of the License at 9 # 10 # http://www.apache.org/licenses/LICENSE-2.0 11 # 12 # Unless required by applicable law or agreed to in writing, 13 # software distributed under the License is distributed on an 14 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 15 # KIND, either express or implied. See the License for the 16 # specific language governing permissions and limitations 17 # under the License. 18 19 from contextlib import closing 20 from typing import Any, Dict, List, Optional, Tuple, Union 21 22 import pyexasol 23 from pyexasol import ExaConnection 24 25 from airflow.hooks.dbapi import DbApiHook 26 27 28 class ExasolHook(DbApiHook): 29 """ 30 Interact with Exasol. 31 You can specify the pyexasol ``compression``, ``encryption``, ``json_lib`` 32 and ``client_name`` parameters in the extra field of your connection 33 as ``{"compression": True, "json_lib": "rapidjson", etc}``. 34 See `pyexasol reference 35 <https://github.com/badoo/pyexasol/blob/master/docs/REFERENCE.md#connect>`_ 36 for more details. 37 """ 38 39 conn_name_attr = 'exasol_conn_id' 40 default_conn_name = 'exasol_default' 41 conn_type = 'exasol' 42 hook_name = 'Exasol' 43 supports_autocommit = True 44 45 def __init__(self, *args, **kwargs) -> None: 46 super().__init__(*args, **kwargs) 47 self.schema = kwargs.pop("schema", None) 48 49 def get_conn(self) -> ExaConnection: 50 conn_id = getattr(self, self.conn_name_attr) 51 conn = self.get_connection(conn_id) 52 conn_args = dict( 53 dsn=f'{conn.host}:{conn.port}', 54 user=conn.login, 55 password=conn.password, 56 schema=self.schema or conn.schema, 57 ) 58 # check for parameters in conn.extra 59 for arg_name, arg_val in conn.extra_dejson.items(): 60 if arg_name in ['compression', 'encryption', 'json_lib', 'client_name']: 61 conn_args[arg_name] = arg_val 62 63 conn = pyexasol.connect(**conn_args) 64 return conn 65 66 def get_pandas_df(self, sql: Union[str, list], parameters: Optional[dict] = None, **kwargs) -> None: 67 """ 68 Executes the sql and returns a pandas dataframe 69 70 :param sql: the sql statement to be executed (str) or a list of 71 sql statements to execute 72 :type sql: str or list 73 :param parameters: The parameters to render the SQL query with. 74 :type parameters: dict or iterable 75 :param kwargs: (optional) passed into pyexasol.ExaConnection.export_to_pandas method 76 :type kwargs: dict 77 """ 78 with closing(self.get_conn()) as conn: 79 conn.export_to_pandas(sql, query_params=parameters, **kwargs) 80 81 def get_records( 82 self, sql: Union[str, list], parameters: Optional[dict] = None 83 ) -> List[Union[dict, Tuple[Any, ...]]]: 84 """ 85 Executes the sql and returns a set of records. 86 87 :param sql: the sql statement to be executed (str) or a list of 88 sql statements to execute 89 :type sql: str or list 90 :param parameters: The parameters to render the SQL query with. 91 :type parameters: dict or iterable 92 """ 93 with closing(self.get_conn()) as conn: 94 with closing(conn.execute(sql, parameters)) as cur: 95 return cur.fetchall() 96 97 def get_first(self, sql: Union[str, list], parameters: Optional[dict] = None) -> Optional[Any]: 98 """ 99 Executes the sql and returns the first resulting row. 100 101 :param sql: the sql statement to be executed (str) or a list of 102 sql statements to execute 103 :type sql: str or list 104 :param parameters: The parameters to render the SQL query with. 
105 :type parameters: dict or iterable 106 """ 107 with closing(self.get_conn()) as conn: 108 with closing(conn.execute(sql, parameters)) as cur: 109 return cur.fetchone() 110 111 def export_to_file( 112 self, 113 filename: str, 114 query_or_table: str, 115 query_params: Optional[Dict] = None, 116 export_params: Optional[Dict] = None, 117 ) -> None: 118 """ 119 Exports data to a file. 120 121 :param filename: Path to the file to which the data has to be exported 122 :type filename: str 123 :param query_or_table: the sql statement to be executed or table name to export 124 :type query_or_table: str 125 :param query_params: Query parameters passed to underlying ``export_to_file`` 126 method of :class:`~pyexasol.connection.ExaConnection`. 127 :type query_params: dict 128 :param export_params: Extra parameters passed to underlying ``export_to_file`` 129 method of :class:`~pyexasol.connection.ExaConnection`. 130 :type export_params: dict 131 """ 132 self.log.info("Getting data from exasol") 133 with closing(self.get_conn()) as conn: 134 conn.export_to_file( 135 dst=filename, 136 query_or_table=query_or_table, 137 query_params=query_params, 138 export_params=export_params, 139 ) 140 self.log.info("Data saved to %s", filename) 141 142 def run(self, sql: Union[str, list], autocommit: bool = False, parameters: Optional[dict] = None) -> None: 143 """ 144 Runs a command or a list of commands. Pass a list of sql 145 statements to the sql parameter to get them to execute 146 sequentially 147 148 :param sql: the sql statement to be executed (str) or a list of 149 sql statements to execute 150 :type sql: str or list 151 :param autocommit: What to set the connection's autocommit setting to 152 before executing the query. 153 :type autocommit: bool 154 :param parameters: The parameters to render the SQL query with. 155 :type parameters: dict or iterable 156 """ 157 if isinstance(sql, str): 158 sql = [sql] 159 160 with closing(self.get_conn()) as conn: 161 if self.supports_autocommit: 162 self.set_autocommit(conn, autocommit) 163 164 for query in sql: 165 self.log.info(query) 166 with closing(conn.execute(query, parameters)) as cur: 167 self.log.info(cur.row_count) 168 # If autocommit was set to False for db that supports autocommit, 169 # or if db does not supports autocommit, we do a manual commit. 170 if not self.get_autocommit(conn): 171 conn.commit() 172 173 def set_autocommit(self, conn, autocommit: bool) -> None: 174 """ 175 Sets the autocommit flag on the connection 176 177 :param conn: Connection to set autocommit setting to. 178 :type conn: connection object 179 :param autocommit: The autocommit setting to set. 180 :type autocommit: bool 181 """ 182 if not self.supports_autocommit and autocommit: 183 self.log.warning( 184 "%s connection doesn't support autocommit but autocommit activated.", 185 getattr(self, self.conn_name_attr), 186 ) 187 conn.set_autocommit(autocommit) 188 189 def get_autocommit(self, conn) -> bool: 190 """ 191 Get autocommit setting for the provided connection. 192 Return True if autocommit is set. 193 Return False if autocommit is not set or set to False or conn 194 does not support autocommit. 195 196 :param conn: Connection to get autocommit setting from. 197 :type conn: connection object 198 :return: connection autocommit setting. 
199 :rtype: bool 200 """ 201 autocommit = conn.attr.get('autocommit') 202 if autocommit is None: 203 autocommit = super().get_autocommit(conn) 204 return autocommit 205 206 @staticmethod 207 def _serialize_cell(cell, conn=None) -> object: 208 """ 209 Exasol will adapt all arguments to the execute() method internally, 210 hence we return cell without any conversion. 211 212 :param cell: The cell to insert into the table 213 :type cell: object 214 :param conn: The database connection 215 :type conn: connection object 216 :return: The cell 217 :rtype: object 218 """ 219 return cell 220 [end of airflow/providers/exasol/hooks/exasol.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/airflow/providers/exasol/hooks/exasol.py b/airflow/providers/exasol/hooks/exasol.py --- a/airflow/providers/exasol/hooks/exasol.py +++ b/airflow/providers/exasol/hooks/exasol.py @@ -19,6 +19,7 @@ from contextlib import closing from typing import Any, Dict, List, Optional, Tuple, Union +import pandas as pd import pyexasol from pyexasol import ExaConnection @@ -63,7 +64,9 @@ conn = pyexasol.connect(**conn_args) return conn - def get_pandas_df(self, sql: Union[str, list], parameters: Optional[dict] = None, **kwargs) -> None: + def get_pandas_df( + self, sql: Union[str, list], parameters: Optional[dict] = None, **kwargs + ) -> pd.DataFrame: """ Executes the sql and returns a pandas dataframe @@ -76,7 +79,8 @@ :type kwargs: dict """ with closing(self.get_conn()) as conn: - conn.export_to_pandas(sql, query_params=parameters, **kwargs) + df = conn.export_to_pandas(sql, query_params=parameters, **kwargs) + return df def get_records( self, sql: Union[str, list], parameters: Optional[dict] = None
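The golden patch above changes `get_pandas_df` so it returns the `pd.DataFrame` produced by `export_to_pandas` instead of discarding it. Below is a hedged usage sketch of the patched hook, not part of the dataset row: it assumes an Airflow deployment with a working Exasol connection registered under the provider's default id `exasol_default`, and the SQL text and column alias are illustrative only.

```python
from airflow.providers.exasol.hooks.exasol import ExasolHook

# Assumes a configured "exasol_default" connection; the query is illustrative.
hook = ExasolHook(exasol_conn_id="exasol_default")
df = hook.get_pandas_df(sql="SELECT 42 AS answer")

# Before the patch this was None; after the patch it is a pandas DataFrame.
print(type(df))
print(df.head())
```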
{"golden_diff": "diff --git a/airflow/providers/exasol/hooks/exasol.py b/airflow/providers/exasol/hooks/exasol.py\n--- a/airflow/providers/exasol/hooks/exasol.py\n+++ b/airflow/providers/exasol/hooks/exasol.py\n@@ -19,6 +19,7 @@\n from contextlib import closing\n from typing import Any, Dict, List, Optional, Tuple, Union\n \n+import pandas as pd\n import pyexasol\n from pyexasol import ExaConnection\n \n@@ -63,7 +64,9 @@\n conn = pyexasol.connect(**conn_args)\n return conn\n \n- def get_pandas_df(self, sql: Union[str, list], parameters: Optional[dict] = None, **kwargs) -> None:\n+ def get_pandas_df(\n+ self, sql: Union[str, list], parameters: Optional[dict] = None, **kwargs\n+ ) -> pd.DataFrame:\n \"\"\"\n Executes the sql and returns a pandas dataframe\n \n@@ -76,7 +79,8 @@\n :type kwargs: dict\n \"\"\"\n with closing(self.get_conn()) as conn:\n- conn.export_to_pandas(sql, query_params=parameters, **kwargs)\n+ df = conn.export_to_pandas(sql, query_params=parameters, **kwargs)\n+ return df\n \n def get_records(\n self, sql: Union[str, list], parameters: Optional[dict] = None\n", "issue": "ExasolHook get_pandas_df does not return pandas dataframe but None\n\r\nWhen calling the exasol hooks get_pandas_df function (https://github.com/apache/airflow/blob/main/airflow/providers/exasol/hooks/exasol.py) I noticed that it does not return a pandas dataframe. It returns None. In fact the function definition type hint explicitly states that None is returned. But the name of the function suggests otherwise. The name get_pandas_df implies that it should return a dataframe and not None.\r\n\r\nI think that it would make more sense if get_pandas_df would indeed return a dataframe as the name is alluring to. So the code should be like this:\r\n\r\n`def get_pandas_df(self, sql: Union[str, list], parameters: Optional[dict] = None, **kwargs) -> pd.DataFrame:\r\n... some code ...\r\nwith closing(self.get_conn()) as conn:\r\ndf=conn.export_to_pandas(sql, query_params=parameters, **kwargs)\r\nreturn df`\r\n\r\nINSTEAD OF:\r\n\r\n`def get_pandas_df(self, sql: Union[str, list], parameters: Optional[dict] = None, **kwargs) -> None:\r\n... some code ...\r\nwith closing(self.get_conn()) as conn:\r\nconn.export_to_pandas(sql, query_params=parameters, **kwargs)`\r\n\r\n**Apache Airflow version**: 2.1.0\r\n\r\n\r\n**Kubernetes version (if you are using kubernetes)** (use `kubectl version`): Not using Kubernetes\r\n\r\n**Environment**:Official Airflow-Docker Image\r\n\r\n- **Cloud provider or hardware configuration**: no cloud - docker host (DELL Server with 48 Cores, 512GB RAM and many TB storage)\r\n- **OS** (e.g. from /etc/os-release):Official Airflow-Docker Image on CentOS 7 Host\r\n- **Kernel** (e.g. 
`uname -a`): Linux cad18b35be00 3.10.0-1160.21.1.el7.x86_64 #1 SMP Tue Mar 16 18:28:22 UTC 2021 x86_64 GNU/Linux\r\n- **Install tools**: only docker\r\n- **Others**:\r\n\r\n**What happened**:\r\nYou can replicate the findings with following dag file:\r\n\r\nimport datetime\r\n\r\nfrom airflow import DAG\r\nfrom airflow.operators.python_operator import PythonOperator\r\nfrom airflow.providers.exasol.operators.exasol import ExasolHook\r\nimport pandas as pd\r\n\r\n\r\ndefault_args = {\"owner\": \"airflow\"}\r\n\r\n\r\ndef call_exasol_hook(**kwargs):\r\n #Make connection to Exasol\r\n hook = ExasolHook(exasol_conn_id='Exasol QA')\r\n sql = 'select 42;' \r\n df = hook.get_pandas_df(sql = sql) \r\n return df\r\n \r\nwith DAG(\r\n dag_id=\"exasol_hook_problem\",\r\n start_date=datetime.datetime(2021, 5, 5),\r\n schedule_interval=\"@once\",\r\n default_args=default_args,\r\n catchup=False,\r\n) as dag:\r\n \r\n set_variable = PythonOperator(\r\n task_id='call_exasol_hook',\r\n python_callable=call_exasol_hook\r\n )\r\n\r\nSorry for the strange code formatting. I do not know how to fix this in the github UI form. \r\nSorry also in case I missed something.\r\n \r\nWhen testing or executing the task via CLI:\r\n` airflow tasks test exasol_hook_problem call_exasol_hook 2021-07-20`\r\n\r\nthe logs show:\r\n`[2021-07-21 12:53:19,775] {python.py:151} INFO - Done. Returned value was: None`\r\n\r\nNone was returned - although get_pandas_df was called. A pandas df should have been returned instead.\r\n\r\n\n", "before_files": [{"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom contextlib import closing\nfrom typing import Any, Dict, List, Optional, Tuple, Union\n\nimport pyexasol\nfrom pyexasol import ExaConnection\n\nfrom airflow.hooks.dbapi import DbApiHook\n\n\nclass ExasolHook(DbApiHook):\n \"\"\"\n Interact with Exasol.\n You can specify the pyexasol ``compression``, ``encryption``, ``json_lib``\n and ``client_name`` parameters in the extra field of your connection\n as ``{\"compression\": True, \"json_lib\": \"rapidjson\", etc}``.\n See `pyexasol reference\n <https://github.com/badoo/pyexasol/blob/master/docs/REFERENCE.md#connect>`_\n for more details.\n \"\"\"\n\n conn_name_attr = 'exasol_conn_id'\n default_conn_name = 'exasol_default'\n conn_type = 'exasol'\n hook_name = 'Exasol'\n supports_autocommit = True\n\n def __init__(self, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n self.schema = kwargs.pop(\"schema\", None)\n\n def get_conn(self) -> ExaConnection:\n conn_id = getattr(self, self.conn_name_attr)\n conn = self.get_connection(conn_id)\n conn_args = dict(\n dsn=f'{conn.host}:{conn.port}',\n user=conn.login,\n password=conn.password,\n schema=self.schema or conn.schema,\n )\n # check for parameters in conn.extra\n for arg_name, arg_val in conn.extra_dejson.items():\n if arg_name in ['compression', 'encryption', 'json_lib', 'client_name']:\n conn_args[arg_name] = arg_val\n\n conn = pyexasol.connect(**conn_args)\n return conn\n\n def get_pandas_df(self, sql: Union[str, list], parameters: Optional[dict] = None, **kwargs) -> None:\n \"\"\"\n Executes the sql and returns a pandas dataframe\n\n :param sql: the sql statement to be executed (str) or a list of\n sql statements to execute\n :type sql: str or list\n :param parameters: The parameters to render the SQL query with.\n :type parameters: dict or iterable\n :param kwargs: (optional) passed into pyexasol.ExaConnection.export_to_pandas method\n :type kwargs: dict\n \"\"\"\n with closing(self.get_conn()) as conn:\n conn.export_to_pandas(sql, query_params=parameters, **kwargs)\n\n def get_records(\n self, sql: Union[str, list], parameters: Optional[dict] = None\n ) -> List[Union[dict, Tuple[Any, ...]]]:\n \"\"\"\n Executes the sql and returns a set of records.\n\n :param sql: the sql statement to be executed (str) or a list of\n sql statements to execute\n :type sql: str or list\n :param parameters: The parameters to render the SQL query with.\n :type parameters: dict or iterable\n \"\"\"\n with closing(self.get_conn()) as conn:\n with closing(conn.execute(sql, parameters)) as cur:\n return cur.fetchall()\n\n def get_first(self, sql: Union[str, list], parameters: Optional[dict] = None) -> Optional[Any]:\n \"\"\"\n Executes the sql and returns the first resulting row.\n\n :param sql: the sql statement to be executed (str) or a list of\n sql statements to execute\n :type sql: str or list\n :param parameters: The parameters to render the SQL query with.\n :type parameters: dict or iterable\n \"\"\"\n with closing(self.get_conn()) as conn:\n with closing(conn.execute(sql, parameters)) as cur:\n return cur.fetchone()\n\n def export_to_file(\n self,\n filename: str,\n query_or_table: str,\n query_params: Optional[Dict] = None,\n export_params: Optional[Dict] = None,\n ) -> None:\n \"\"\"\n Exports data to a file.\n\n :param filename: Path to the file to which the data has to be exported\n :type filename: str\n :param query_or_table: the sql statement to be executed or table name to 
export\n :type query_or_table: str\n :param query_params: Query parameters passed to underlying ``export_to_file``\n method of :class:`~pyexasol.connection.ExaConnection`.\n :type query_params: dict\n :param export_params: Extra parameters passed to underlying ``export_to_file``\n method of :class:`~pyexasol.connection.ExaConnection`.\n :type export_params: dict\n \"\"\"\n self.log.info(\"Getting data from exasol\")\n with closing(self.get_conn()) as conn:\n conn.export_to_file(\n dst=filename,\n query_or_table=query_or_table,\n query_params=query_params,\n export_params=export_params,\n )\n self.log.info(\"Data saved to %s\", filename)\n\n def run(self, sql: Union[str, list], autocommit: bool = False, parameters: Optional[dict] = None) -> None:\n \"\"\"\n Runs a command or a list of commands. Pass a list of sql\n statements to the sql parameter to get them to execute\n sequentially\n\n :param sql: the sql statement to be executed (str) or a list of\n sql statements to execute\n :type sql: str or list\n :param autocommit: What to set the connection's autocommit setting to\n before executing the query.\n :type autocommit: bool\n :param parameters: The parameters to render the SQL query with.\n :type parameters: dict or iterable\n \"\"\"\n if isinstance(sql, str):\n sql = [sql]\n\n with closing(self.get_conn()) as conn:\n if self.supports_autocommit:\n self.set_autocommit(conn, autocommit)\n\n for query in sql:\n self.log.info(query)\n with closing(conn.execute(query, parameters)) as cur:\n self.log.info(cur.row_count)\n # If autocommit was set to False for db that supports autocommit,\n # or if db does not supports autocommit, we do a manual commit.\n if not self.get_autocommit(conn):\n conn.commit()\n\n def set_autocommit(self, conn, autocommit: bool) -> None:\n \"\"\"\n Sets the autocommit flag on the connection\n\n :param conn: Connection to set autocommit setting to.\n :type conn: connection object\n :param autocommit: The autocommit setting to set.\n :type autocommit: bool\n \"\"\"\n if not self.supports_autocommit and autocommit:\n self.log.warning(\n \"%s connection doesn't support autocommit but autocommit activated.\",\n getattr(self, self.conn_name_attr),\n )\n conn.set_autocommit(autocommit)\n\n def get_autocommit(self, conn) -> bool:\n \"\"\"\n Get autocommit setting for the provided connection.\n Return True if autocommit is set.\n Return False if autocommit is not set or set to False or conn\n does not support autocommit.\n\n :param conn: Connection to get autocommit setting from.\n :type conn: connection object\n :return: connection autocommit setting.\n :rtype: bool\n \"\"\"\n autocommit = conn.attr.get('autocommit')\n if autocommit is None:\n autocommit = super().get_autocommit(conn)\n return autocommit\n\n @staticmethod\n def _serialize_cell(cell, conn=None) -> object:\n \"\"\"\n Exasol will adapt all arguments to the execute() method internally,\n hence we return cell without any conversion.\n\n :param cell: The cell to insert into the table\n :type cell: object\n :param conn: The database connection\n :type conn: connection object\n :return: The cell\n :rtype: object\n \"\"\"\n return cell\n", "path": "airflow/providers/exasol/hooks/exasol.py"}]}
num_tokens_prompt: 3,832
num_tokens_diff: 317

problem_id: gh_patches_debug_11327
source: rasdani/github-patches
task_type: git_diff
in_source_id: cloud-custodian__cloud-custodian-9504
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add support for backup in timestream backup ### Describe the feature This will use AWS backup service to take time-stream backup. ### Extra information or context _No response_ </issue> <code> [start of c7n/resources/timestream.py] 1 from c7n.manager import resources 2 from c7n.actions import Action 3 from c7n.filters.kms import KmsRelatedFilter 4 from c7n.query import DescribeSource, QueryResourceManager, TypeInfo 5 from c7n.utils import local_session, type_schema 6 from c7n.tags import ( 7 TagDelayedAction, 8 TagActionFilter, 9 Tag as TagAction, 10 RemoveTag as RemoveTagAction 11 ) 12 13 14 class DescribeTimestream(DescribeSource): 15 def augment(self, resources): 16 for r in resources: 17 client = local_session(self.manager.session_factory).client('timestream-write') 18 r['Tags'] = client.list_tags_for_resource(ResourceARN=r['Arn'])['Tags'] 19 return resources 20 21 22 @resources.register('timestream-database') 23 class TimestreamDatabase(QueryResourceManager): 24 class resource_type(TypeInfo): 25 service = 'timestream-write' 26 arn_type = '' 27 name = 'DatabaseName' 28 id = arn = 'Arn' 29 enum_spec = ('list_databases', 'Databases', {}) 30 permission_prefix = 'timestream' 31 permissions = ('timestream:ListDatabases', ) 32 permissions_augment = ("timestream:ListTagsForResource",) 33 source_mapping = { 34 'describe': DescribeTimestream, 35 } 36 37 38 @resources.register('timestream-table') 39 class TimestreamTable(QueryResourceManager): 40 class resource_type(TypeInfo): 41 service = 'timestream-write' 42 arn_type = '' 43 name = 'TableName' 44 id = arn = 'Arn' 45 enum_spec = ('list_tables', 'Tables', {}) 46 permission_prefix = 'timestream' 47 permissions = ('timestream:ListTables', ) 48 49 source_mapping = { 50 'describe': DescribeTimestream, 51 } 52 53 54 @TimestreamDatabase.action_registry.register('tag') 55 @TimestreamTable.action_registry.register('tag') 56 class TimestreamTag(TagAction): 57 58 permissions = ('timestream:TagResource', ) 59 60 def process_resource_set(self, client, resource_set, tags): 61 for r in resource_set: 62 client.tag_resource(ResourceARN=r['Arn'], Tags=tags) 63 64 65 @TimestreamDatabase.action_registry.register('remove-tag') 66 @TimestreamTable.action_registry.register('remove-tag') 67 class TimestreamRemoveTag(RemoveTagAction): 68 69 permissions = ('timestream:UntagResource', ) 70 71 def process_resource_set(self, client, resource_set, tag_keys): 72 for r in resource_set: 73 client.untag_resource(ResourceARN=r['Arn'], TagKeys=tag_keys) 74 75 76 TimestreamDatabase.action_registry.register('mark-for-op', TagDelayedAction) 77 TimestreamTable.action_registry.register('mark-for-op', TagDelayedAction) 78 79 TimestreamDatabase.filter_registry.register('marked-for-op', TagActionFilter) 80 TimestreamTable.filter_registry.register('marked-for-op', TagActionFilter) 81 82 83 @TimestreamTable.action_registry.register('delete') 84 class TimestreamTableDelete(Action): 85 """ 86 Deletes a timestream table 87 """ 88 89 schema = type_schema('delete') 90 permissions = ('timestream:DeleteTable', ) 91 92 def process(self, resources): 93 client = local_session(self.manager.session_factory).client('timestream-write') 94 for r in resources: 95 try: 96 client.delete_table( 97 DatabaseName=r['DatabaseName'], 98 TableName=r['TableName'] 99 ) 100 except client.exceptions.ResourceNotFoundException: 101 continue 102 103 104 @TimestreamDatabase.action_registry.register('delete') 105 
class TimestreamDatabaseDelete(Action): 106 """ 107 Deletes a timestream database 108 """ 109 110 schema = type_schema('delete', force={'type': 'boolean', 'default': False}) 111 permissions = ( 112 'timestream:DeleteDatabase', 113 'timestream:ListTables', 'timestream:DeleteTable', ) 114 115 def process(self, resources): 116 client = local_session(self.manager.session_factory).client('timestream-write') 117 for r in resources: 118 try: 119 client.delete_database( 120 DatabaseName=r['DatabaseName'], 121 ) 122 except client.exceptions.ResourceNotFoundException: 123 continue 124 except client.exceptions.ValidationException: 125 if not self.data.get('force', False): 126 self.log.error( 127 f'Unable to delete database:{r["DatabaseName"]}, ' 128 'tables must be deleted first') 129 continue 130 tables = client.list_tables(DatabaseName=r['DatabaseName'])['Tables'] 131 TimestreamTableDelete( 132 data={'type': 'delete'}, 133 manager=self.manager, 134 log_dir=self.log_dir 135 ).process(tables) 136 client.delete_database( 137 DatabaseName=r['DatabaseName'], 138 ) 139 140 141 @TimestreamDatabase.filter_registry.register('kms-key') 142 class KmsFilter(KmsRelatedFilter): 143 RelatedIdsExpression = 'KmsKeyId' 144 [end of c7n/resources/timestream.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/c7n/resources/timestream.py b/c7n/resources/timestream.py --- a/c7n/resources/timestream.py +++ b/c7n/resources/timestream.py @@ -9,6 +9,7 @@ Tag as TagAction, RemoveTag as RemoveTagAction ) +from c7n.filters.backup import ConsecutiveAwsBackupsFilter class DescribeTimestream(DescribeSource): @@ -138,6 +139,9 @@ ) +TimestreamTable.filter_registry.register('consecutive-aws-backups', ConsecutiveAwsBackupsFilter) + + @TimestreamDatabase.filter_registry.register('kms-key') class KmsFilter(KmsRelatedFilter): RelatedIdsExpression = 'KmsKeyId'
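The golden patch above only registers the `ConsecutiveAwsBackupsFilter` imported from `c7n.filters.backup` on the `timestream-table` resource. As a rough illustration of how such a filter is driven, the sketch below shows the Python dict a loaded Cloud Custodian policy would correspond to (one entry under `policies:` in a YAML file); the `count`, `period`, and `status` attributes are assumptions based on how the shared filter is configured for other resources, not something taken from this patch.

```python
# Hypothetical policy data; everything beyond the "type" key is an assumption.
policy = {
    "name": "timestream-tables-missing-daily-backups",
    "resource": "timestream-table",
    "filters": [
        {
            "type": "consecutive-aws-backups",
            "count": 7,             # assumed: 7 consecutive periods
            "period": "days",       # assumed period unit
            "status": "COMPLETED",  # assumed backup job status to require
        }
    ],
}
```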
{"golden_diff": "diff --git a/c7n/resources/timestream.py b/c7n/resources/timestream.py\n--- a/c7n/resources/timestream.py\n+++ b/c7n/resources/timestream.py\n@@ -9,6 +9,7 @@\n Tag as TagAction,\n RemoveTag as RemoveTagAction\n )\n+from c7n.filters.backup import ConsecutiveAwsBackupsFilter\n \n \n class DescribeTimestream(DescribeSource):\n@@ -138,6 +139,9 @@\n )\n \n \n+TimestreamTable.filter_registry.register('consecutive-aws-backups', ConsecutiveAwsBackupsFilter)\n+\n+\n @TimestreamDatabase.filter_registry.register('kms-key')\n class KmsFilter(KmsRelatedFilter):\n RelatedIdsExpression = 'KmsKeyId'\n", "issue": "Add support for backup in timestream backup\n### Describe the feature\n\nThis will use AWS backup service to take time-stream backup.\n\n### Extra information or context\n\n_No response_\n", "before_files": [{"content": "from c7n.manager import resources\nfrom c7n.actions import Action\nfrom c7n.filters.kms import KmsRelatedFilter\nfrom c7n.query import DescribeSource, QueryResourceManager, TypeInfo\nfrom c7n.utils import local_session, type_schema\nfrom c7n.tags import (\n TagDelayedAction,\n TagActionFilter,\n Tag as TagAction,\n RemoveTag as RemoveTagAction\n)\n\n\nclass DescribeTimestream(DescribeSource):\n def augment(self, resources):\n for r in resources:\n client = local_session(self.manager.session_factory).client('timestream-write')\n r['Tags'] = client.list_tags_for_resource(ResourceARN=r['Arn'])['Tags']\n return resources\n\n\[email protected]('timestream-database')\nclass TimestreamDatabase(QueryResourceManager):\n class resource_type(TypeInfo):\n service = 'timestream-write'\n arn_type = ''\n name = 'DatabaseName'\n id = arn = 'Arn'\n enum_spec = ('list_databases', 'Databases', {})\n permission_prefix = 'timestream'\n permissions = ('timestream:ListDatabases', )\n permissions_augment = (\"timestream:ListTagsForResource\",)\n source_mapping = {\n 'describe': DescribeTimestream,\n }\n\n\[email protected]('timestream-table')\nclass TimestreamTable(QueryResourceManager):\n class resource_type(TypeInfo):\n service = 'timestream-write'\n arn_type = ''\n name = 'TableName'\n id = arn = 'Arn'\n enum_spec = ('list_tables', 'Tables', {})\n permission_prefix = 'timestream'\n permissions = ('timestream:ListTables', )\n\n source_mapping = {\n 'describe': DescribeTimestream,\n }\n\n\[email protected]_registry.register('tag')\[email protected]_registry.register('tag')\nclass TimestreamTag(TagAction):\n\n permissions = ('timestream:TagResource', )\n\n def process_resource_set(self, client, resource_set, tags):\n for r in resource_set:\n client.tag_resource(ResourceARN=r['Arn'], Tags=tags)\n\n\[email protected]_registry.register('remove-tag')\[email protected]_registry.register('remove-tag')\nclass TimestreamRemoveTag(RemoveTagAction):\n\n permissions = ('timestream:UntagResource', )\n\n def process_resource_set(self, client, resource_set, tag_keys):\n for r in resource_set:\n client.untag_resource(ResourceARN=r['Arn'], TagKeys=tag_keys)\n\n\nTimestreamDatabase.action_registry.register('mark-for-op', TagDelayedAction)\nTimestreamTable.action_registry.register('mark-for-op', TagDelayedAction)\n\nTimestreamDatabase.filter_registry.register('marked-for-op', TagActionFilter)\nTimestreamTable.filter_registry.register('marked-for-op', TagActionFilter)\n\n\[email protected]_registry.register('delete')\nclass TimestreamTableDelete(Action):\n \"\"\"\n Deletes a timestream table\n \"\"\"\n\n schema = type_schema('delete')\n permissions = ('timestream:DeleteTable', )\n\n def process(self, 
resources):\n client = local_session(self.manager.session_factory).client('timestream-write')\n for r in resources:\n try:\n client.delete_table(\n DatabaseName=r['DatabaseName'],\n TableName=r['TableName']\n )\n except client.exceptions.ResourceNotFoundException:\n continue\n\n\[email protected]_registry.register('delete')\nclass TimestreamDatabaseDelete(Action):\n \"\"\"\n Deletes a timestream database\n \"\"\"\n\n schema = type_schema('delete', force={'type': 'boolean', 'default': False})\n permissions = (\n 'timestream:DeleteDatabase',\n 'timestream:ListTables', 'timestream:DeleteTable', )\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('timestream-write')\n for r in resources:\n try:\n client.delete_database(\n DatabaseName=r['DatabaseName'],\n )\n except client.exceptions.ResourceNotFoundException:\n continue\n except client.exceptions.ValidationException:\n if not self.data.get('force', False):\n self.log.error(\n f'Unable to delete database:{r[\"DatabaseName\"]}, '\n 'tables must be deleted first')\n continue\n tables = client.list_tables(DatabaseName=r['DatabaseName'])['Tables']\n TimestreamTableDelete(\n data={'type': 'delete'},\n manager=self.manager,\n log_dir=self.log_dir\n ).process(tables)\n client.delete_database(\n DatabaseName=r['DatabaseName'],\n )\n\n\[email protected]_registry.register('kms-key')\nclass KmsFilter(KmsRelatedFilter):\n RelatedIdsExpression = 'KmsKeyId'\n", "path": "c7n/resources/timestream.py"}]}
num_tokens_prompt: 1,935
num_tokens_diff: 168

problem_id: gh_patches_debug_8455
source: rasdani/github-patches
task_type: git_diff
in_source_id: gratipay__gratipay.com-1769
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Integrate SASS compilation into development environment This started in [IRC](https://botbot.me/freenode/gittip/msg/8682409/). The goal here is to use the same SASS compiler that we use in production, in development. We currently use the standard Ruby version, but that won't quite work for development since we shouldn't require all of Ruby just to run a single tool. So it seems the best course of action is to move to a Python-based SASS compiler. </issue> <code> [start of gittip/wireup.py] 1 """Wireup 2 """ 3 from __future__ import absolute_import, division, print_function, unicode_literals 4 import os 5 import sys 6 import threading 7 import time 8 9 import aspen 10 import balanced 11 import gittip 12 import raven 13 import psycopg2 14 import stripe 15 import gittip.utils.mixpanel 16 from gittip.models.community import Community 17 from gittip.models.participant import Participant 18 from postgres import Postgres 19 20 21 def canonical(): 22 gittip.canonical_scheme = os.environ['CANONICAL_SCHEME'] 23 gittip.canonical_host = os.environ['CANONICAL_HOST'] 24 25 26 # wireup.db() should only ever be called once by the application 27 def db(): 28 dburl = os.environ['DATABASE_URL'] 29 maxconn = int(os.environ['DATABASE_MAXCONN']) 30 db = Postgres(dburl, maxconn=maxconn) 31 32 # register hstore type 33 with db.get_cursor() as cursor: 34 psycopg2.extras.register_hstore(cursor, globally=True, unicode=True) 35 36 db.register_model(Community) 37 db.register_model(Participant) 38 39 return db 40 41 42 def billing(): 43 stripe.api_key= os.environ['STRIPE_SECRET_API_KEY'] 44 stripe.publishable_api_key= os.environ['STRIPE_PUBLISHABLE_API_KEY'] 45 balanced.configure(os.environ['BALANCED_API_SECRET']) 46 47 48 def username_restrictions(website): 49 gittip.RESTRICTED_USERNAMES = os.listdir(website.www_root) 50 51 52 def request_metrics(website): 53 def add_start_timestamp(request): 54 request.x_start = time.time() 55 def log_request_count_and_response_time(response): 56 print("count#requests=1") 57 response_time = time.time() - response.request.x_start 58 print("measure#response_time={}ms".format(response_time * 1000)) 59 website.hooks.inbound_early.insert(0, add_start_timestamp) 60 website.hooks.outbound += [log_request_count_and_response_time] 61 62 63 def sentry(website): 64 if not website.sentry_dsn: 65 aspen.log_dammit("Won't log to Sentry (SENTRY_DSN is empty).") 66 return 67 68 sentry = raven.Client(website.sentry_dsn) 69 70 def tell_sentry(request): 71 cls, response = sys.exc_info()[:2] 72 73 74 # Decide if we care. 75 # ================== 76 77 if cls is aspen.Response: 78 79 if response.code < 500: 80 81 # Only log server errors to Sentry. For responses < 500 we use 82 # stream-/line-based access logging. See discussion on: 83 84 # https://github.com/gittip/www.gittip.com/pull/1560. 85 86 return 87 88 89 # Find a user. 90 # ============ 91 # | is disallowed in usernames, so we can use it here to indicate 92 # situations in which we can't get a username. 
93 94 request_context = getattr(request, 'context', None) 95 user = {} 96 user_id = 'n/a' 97 if request_context is None: 98 username = '| no context' 99 else: 100 user = request.context.get('user', None) 101 if user is None: 102 username = '| no user' 103 else: 104 is_anon = getattr(user, 'ANON', None) 105 if is_anon is None: 106 username = '| no ANON' 107 elif is_anon: 108 username = '| anonymous' 109 else: 110 participant = getattr(user, 'participant', None) 111 if participant is None: 112 username = '| no participant' 113 else: 114 username = getattr(user.participant, 'username', None) 115 if username is None: 116 username = '| no username' 117 else: 118 user_id = user.participant.id 119 username = username.encode('utf8') 120 user = { 'id': user_id 121 , 'is_admin': user.participant.is_admin 122 , 'is_suspicious': user.participant.is_suspicious 123 , 'claimed_time': user.participant.claimed_time.isoformat() 124 , 'url': 'https://www.gittip.com/{}/'.format(username) 125 } 126 127 128 # Fire off a Sentry call. 129 # ======================= 130 131 tags = { 'username': username 132 , 'user_id': user_id 133 } 134 extra = { 'filepath': getattr(request, 'fs', None) 135 , 'request': str(request).splitlines() 136 , 'user': user 137 } 138 result = sentry.captureException(tags=tags, extra=extra) 139 140 141 # Emit a reference string to stdout. 142 # ================================== 143 144 ident = sentry.get_ident(result) 145 aspen.log_dammit('Exception reference: ' + ident) 146 147 148 website.hooks.error_early += [tell_sentry] 149 return tell_sentry 150 151 152 def mixpanel(website): 153 website.mixpanel_token = os.environ['MIXPANEL_TOKEN'] 154 gittip.utils.mixpanel.MIXPANEL_TOKEN = os.environ['MIXPANEL_TOKEN'] 155 156 def nanswers(): 157 from gittip.models import participant 158 participant.NANSWERS_THRESHOLD = int(os.environ['NANSWERS_THRESHOLD']) 159 160 def nmembers(website): 161 from gittip.models import community 162 community.NMEMBERS_THRESHOLD = int(os.environ['NMEMBERS_THRESHOLD']) 163 website.NMEMBERS_THRESHOLD = community.NMEMBERS_THRESHOLD 164 165 def envvars(website): 166 167 missing_keys = [] 168 malformed_values = [] 169 170 def envvar(key, cast=None): 171 if key not in os.environ: 172 missing_keys.append(key) 173 return "" 174 value = os.environ[key].decode('ASCII') 175 if cast is not None: 176 try: 177 value = cast(value) 178 except: 179 err = str(sys.exc_info()[1]) 180 malformed_values.append((key, err)) 181 return "" 182 return value 183 184 def is_yesish(val): 185 return val.lower() in ('1', 'true', 'yes') 186 187 website.bitbucket_consumer_key = envvar('BITBUCKET_CONSUMER_KEY') 188 website.bitbucket_consumer_secret = envvar('BITBUCKET_CONSUMER_SECRET') 189 website.bitbucket_callback = envvar('BITBUCKET_CALLBACK') 190 191 website.github_client_id = envvar('GITHUB_CLIENT_ID') 192 website.github_client_secret = envvar('GITHUB_CLIENT_SECRET') 193 website.github_callback = envvar('GITHUB_CALLBACK') 194 195 website.twitter_consumer_key = envvar('TWITTER_CONSUMER_KEY') 196 website.twitter_consumer_secret = envvar('TWITTER_CONSUMER_SECRET') 197 website.twitter_access_token = envvar('TWITTER_ACCESS_TOKEN') 198 website.twitter_access_token_secret = envvar('TWITTER_ACCESS_TOKEN_SECRET') 199 website.twitter_callback = envvar('TWITTER_CALLBACK') 200 201 website.bountysource_www_host = envvar('BOUNTYSOURCE_WWW_HOST') 202 website.bountysource_api_host = envvar('BOUNTYSOURCE_API_HOST') 203 website.bountysource_api_secret = envvar('BOUNTYSOURCE_API_SECRET') 204 
website.bountysource_callback = envvar('BOUNTYSOURCE_CALLBACK') 205 206 website.css_href = envvar('GITTIP_CSS_HREF') \ 207 .replace('%version', website.version) 208 website.js_src = envvar('GITTIP_JS_SRC') \ 209 .replace('%version', website.version) 210 website.cache_static = is_yesish(envvar('GITTIP_CACHE_STATIC')) 211 212 website.google_analytics_id = envvar('GOOGLE_ANALYTICS_ID') 213 website.gauges_id = envvar('GAUGES_ID') 214 website.sentry_dsn = envvar('SENTRY_DSN') 215 216 website.min_threads = envvar('MIN_THREADS', int) 217 website.log_busy_threads_every = envvar('LOG_BUSY_THREADS_EVERY', int) 218 website.log_metrics = is_yesish(envvar('LOG_METRICS')) 219 220 if malformed_values: 221 malformed_values.sort() 222 these = len(malformed_values) != 1 and 'these' or 'this' 223 plural = len(malformed_values) != 1 and 's' or '' 224 aspen.log_dammit("=" * 42) 225 aspen.log_dammit( "Oh no! Gittip.com couldn't understand %s " % these 226 , "environment variable%s:" % plural 227 ) 228 aspen.log_dammit(" ") 229 for key, err in malformed_values: 230 aspen.log_dammit(" {} ({})".format(key, err)) 231 aspen.log_dammit(" ") 232 aspen.log_dammit("See ./default_local.env for hints.") 233 234 aspen.log_dammit("=" * 42) 235 raise SystemExit 236 237 if missing_keys: 238 missing_keys.sort() 239 these = len(missing_keys) != 1 and 'these' or 'this' 240 plural = len(missing_keys) != 1 and 's' or '' 241 aspen.log_dammit("=" * 42) 242 aspen.log_dammit( "Oh no! Gittip.com needs %s missing " % these 243 , "environment variable%s:" % plural 244 ) 245 aspen.log_dammit(" ") 246 for key in missing_keys: 247 aspen.log_dammit(" " + key) 248 aspen.log_dammit(" ") 249 aspen.log_dammit( "(Sorry, we must've started looking for " 250 , "%s since you last updated Gittip!)" % these 251 ) 252 aspen.log_dammit(" ") 253 aspen.log_dammit("Running Gittip locally? Edit ./local.env.") 254 aspen.log_dammit("Running the test suite? Edit ./tests/env.") 255 aspen.log_dammit(" ") 256 aspen.log_dammit("See ./default_local.env for hints.") 257 258 aspen.log_dammit("=" * 42) 259 raise SystemExit 260 [end of gittip/wireup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/gittip/wireup.py b/gittip/wireup.py --- a/gittip/wireup.py +++ b/gittip/wireup.py @@ -208,6 +208,7 @@ website.js_src = envvar('GITTIP_JS_SRC') \ .replace('%version', website.version) website.cache_static = is_yesish(envvar('GITTIP_CACHE_STATIC')) + website.compress_assets = is_yesish(envvar('GITTIP_COMPRESS_ASSETS')) website.google_analytics_id = envvar('GOOGLE_ANALYTICS_ID') website.gauges_id = envvar('GAUGES_ID')
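The golden patch above only wires up a `GITTIP_COMPRESS_ASSETS` environment variable, while the issue itself asks for a Python-based SASS compiler in development. As a hedged illustration of that idea, the sketch below uses the `libsass` package (`pip install libsass`), one Python-based compiler that could fill this role; the file paths are placeholders and this is not necessarily the tool the project adopted.

```python
import sass  # provided by the "libsass" package

# Compile a SCSS entry point to CSS; the paths are placeholders for illustration.
css = sass.compile(filename="scss/gittip.scss", output_style="compressed")

with open("www/assets/gittip.css", "w") as out:
    out.write(css)
```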
{"golden_diff": "diff --git a/gittip/wireup.py b/gittip/wireup.py\n--- a/gittip/wireup.py\n+++ b/gittip/wireup.py\n@@ -208,6 +208,7 @@\n website.js_src = envvar('GITTIP_JS_SRC') \\\n .replace('%version', website.version)\n website.cache_static = is_yesish(envvar('GITTIP_CACHE_STATIC'))\n+ website.compress_assets = is_yesish(envvar('GITTIP_COMPRESS_ASSETS'))\n \n website.google_analytics_id = envvar('GOOGLE_ANALYTICS_ID')\n website.gauges_id = envvar('GAUGES_ID')\n", "issue": "Integrate SASS compilation into development environment\nThis started in [IRC](https://botbot.me/freenode/gittip/msg/8682409/).\n\nThe goal here is to use the same SASS compiler that we use in production, in development. We currently use the standard Ruby version, but that won't quite work for development since we shouldn't require all of Ruby just to run a single tool. So it seems the best course of action is to move to a Python-based SASS compiler.\n\n", "before_files": [{"content": "\"\"\"Wireup\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport os\nimport sys\nimport threading\nimport time\n\nimport aspen\nimport balanced\nimport gittip\nimport raven\nimport psycopg2\nimport stripe\nimport gittip.utils.mixpanel\nfrom gittip.models.community import Community\nfrom gittip.models.participant import Participant\nfrom postgres import Postgres\n\n\ndef canonical():\n gittip.canonical_scheme = os.environ['CANONICAL_SCHEME']\n gittip.canonical_host = os.environ['CANONICAL_HOST']\n\n\n# wireup.db() should only ever be called once by the application\ndef db():\n dburl = os.environ['DATABASE_URL']\n maxconn = int(os.environ['DATABASE_MAXCONN'])\n db = Postgres(dburl, maxconn=maxconn)\n\n # register hstore type\n with db.get_cursor() as cursor:\n psycopg2.extras.register_hstore(cursor, globally=True, unicode=True)\n\n db.register_model(Community)\n db.register_model(Participant)\n\n return db\n\n\ndef billing():\n stripe.api_key= os.environ['STRIPE_SECRET_API_KEY']\n stripe.publishable_api_key= os.environ['STRIPE_PUBLISHABLE_API_KEY']\n balanced.configure(os.environ['BALANCED_API_SECRET'])\n\n\ndef username_restrictions(website):\n gittip.RESTRICTED_USERNAMES = os.listdir(website.www_root)\n\n\ndef request_metrics(website):\n def add_start_timestamp(request):\n request.x_start = time.time()\n def log_request_count_and_response_time(response):\n print(\"count#requests=1\")\n response_time = time.time() - response.request.x_start\n print(\"measure#response_time={}ms\".format(response_time * 1000))\n website.hooks.inbound_early.insert(0, add_start_timestamp)\n website.hooks.outbound += [log_request_count_and_response_time]\n\n\ndef sentry(website):\n if not website.sentry_dsn:\n aspen.log_dammit(\"Won't log to Sentry (SENTRY_DSN is empty).\")\n return\n\n sentry = raven.Client(website.sentry_dsn)\n\n def tell_sentry(request):\n cls, response = sys.exc_info()[:2]\n\n\n # Decide if we care.\n # ==================\n\n if cls is aspen.Response:\n\n if response.code < 500:\n\n # Only log server errors to Sentry. For responses < 500 we use\n # stream-/line-based access logging. 
See discussion on:\n\n # https://github.com/gittip/www.gittip.com/pull/1560.\n\n return\n\n\n # Find a user.\n # ============\n # | is disallowed in usernames, so we can use it here to indicate\n # situations in which we can't get a username.\n\n request_context = getattr(request, 'context', None)\n user = {}\n user_id = 'n/a'\n if request_context is None:\n username = '| no context'\n else:\n user = request.context.get('user', None)\n if user is None:\n username = '| no user'\n else:\n is_anon = getattr(user, 'ANON', None)\n if is_anon is None:\n username = '| no ANON'\n elif is_anon:\n username = '| anonymous'\n else:\n participant = getattr(user, 'participant', None)\n if participant is None:\n username = '| no participant'\n else:\n username = getattr(user.participant, 'username', None)\n if username is None:\n username = '| no username'\n else:\n user_id = user.participant.id\n username = username.encode('utf8')\n user = { 'id': user_id\n , 'is_admin': user.participant.is_admin\n , 'is_suspicious': user.participant.is_suspicious\n , 'claimed_time': user.participant.claimed_time.isoformat()\n , 'url': 'https://www.gittip.com/{}/'.format(username)\n }\n\n\n # Fire off a Sentry call.\n # =======================\n\n tags = { 'username': username\n , 'user_id': user_id\n }\n extra = { 'filepath': getattr(request, 'fs', None)\n , 'request': str(request).splitlines()\n , 'user': user\n }\n result = sentry.captureException(tags=tags, extra=extra)\n\n\n # Emit a reference string to stdout.\n # ==================================\n\n ident = sentry.get_ident(result)\n aspen.log_dammit('Exception reference: ' + ident)\n\n\n website.hooks.error_early += [tell_sentry]\n return tell_sentry\n\n\ndef mixpanel(website):\n website.mixpanel_token = os.environ['MIXPANEL_TOKEN']\n gittip.utils.mixpanel.MIXPANEL_TOKEN = os.environ['MIXPANEL_TOKEN']\n\ndef nanswers():\n from gittip.models import participant\n participant.NANSWERS_THRESHOLD = int(os.environ['NANSWERS_THRESHOLD'])\n\ndef nmembers(website):\n from gittip.models import community\n community.NMEMBERS_THRESHOLD = int(os.environ['NMEMBERS_THRESHOLD'])\n website.NMEMBERS_THRESHOLD = community.NMEMBERS_THRESHOLD\n\ndef envvars(website):\n\n missing_keys = []\n malformed_values = []\n\n def envvar(key, cast=None):\n if key not in os.environ:\n missing_keys.append(key)\n return \"\"\n value = os.environ[key].decode('ASCII')\n if cast is not None:\n try:\n value = cast(value)\n except:\n err = str(sys.exc_info()[1])\n malformed_values.append((key, err))\n return \"\"\n return value\n\n def is_yesish(val):\n return val.lower() in ('1', 'true', 'yes')\n\n website.bitbucket_consumer_key = envvar('BITBUCKET_CONSUMER_KEY')\n website.bitbucket_consumer_secret = envvar('BITBUCKET_CONSUMER_SECRET')\n website.bitbucket_callback = envvar('BITBUCKET_CALLBACK')\n\n website.github_client_id = envvar('GITHUB_CLIENT_ID')\n website.github_client_secret = envvar('GITHUB_CLIENT_SECRET')\n website.github_callback = envvar('GITHUB_CALLBACK')\n\n website.twitter_consumer_key = envvar('TWITTER_CONSUMER_KEY')\n website.twitter_consumer_secret = envvar('TWITTER_CONSUMER_SECRET')\n website.twitter_access_token = envvar('TWITTER_ACCESS_TOKEN')\n website.twitter_access_token_secret = envvar('TWITTER_ACCESS_TOKEN_SECRET')\n website.twitter_callback = envvar('TWITTER_CALLBACK')\n\n website.bountysource_www_host = envvar('BOUNTYSOURCE_WWW_HOST')\n website.bountysource_api_host = envvar('BOUNTYSOURCE_API_HOST')\n website.bountysource_api_secret = envvar('BOUNTYSOURCE_API_SECRET')\n 
website.bountysource_callback = envvar('BOUNTYSOURCE_CALLBACK')\n\n website.css_href = envvar('GITTIP_CSS_HREF') \\\n .replace('%version', website.version)\n website.js_src = envvar('GITTIP_JS_SRC') \\\n .replace('%version', website.version)\n website.cache_static = is_yesish(envvar('GITTIP_CACHE_STATIC'))\n\n website.google_analytics_id = envvar('GOOGLE_ANALYTICS_ID')\n website.gauges_id = envvar('GAUGES_ID')\n website.sentry_dsn = envvar('SENTRY_DSN')\n\n website.min_threads = envvar('MIN_THREADS', int)\n website.log_busy_threads_every = envvar('LOG_BUSY_THREADS_EVERY', int)\n website.log_metrics = is_yesish(envvar('LOG_METRICS'))\n\n if malformed_values:\n malformed_values.sort()\n these = len(malformed_values) != 1 and 'these' or 'this'\n plural = len(malformed_values) != 1 and 's' or ''\n aspen.log_dammit(\"=\" * 42)\n aspen.log_dammit( \"Oh no! Gittip.com couldn't understand %s \" % these\n , \"environment variable%s:\" % plural\n )\n aspen.log_dammit(\" \")\n for key, err in malformed_values:\n aspen.log_dammit(\" {} ({})\".format(key, err))\n aspen.log_dammit(\" \")\n aspen.log_dammit(\"See ./default_local.env for hints.\")\n\n aspen.log_dammit(\"=\" * 42)\n raise SystemExit\n\n if missing_keys:\n missing_keys.sort()\n these = len(missing_keys) != 1 and 'these' or 'this'\n plural = len(missing_keys) != 1 and 's' or ''\n aspen.log_dammit(\"=\" * 42)\n aspen.log_dammit( \"Oh no! Gittip.com needs %s missing \" % these\n , \"environment variable%s:\" % plural\n )\n aspen.log_dammit(\" \")\n for key in missing_keys:\n aspen.log_dammit(\" \" + key)\n aspen.log_dammit(\" \")\n aspen.log_dammit( \"(Sorry, we must've started looking for \"\n , \"%s since you last updated Gittip!)\" % these\n )\n aspen.log_dammit(\" \")\n aspen.log_dammit(\"Running Gittip locally? Edit ./local.env.\")\n aspen.log_dammit(\"Running the test suite? Edit ./tests/env.\")\n aspen.log_dammit(\" \")\n aspen.log_dammit(\"See ./default_local.env for hints.\")\n\n aspen.log_dammit(\"=\" * 42)\n raise SystemExit\n", "path": "gittip/wireup.py"}]}
num_tokens_prompt: 3,446
num_tokens_diff: 148

problem_id: gh_patches_debug_13691
source: rasdani/github-patches
task_type: git_diff
in_source_id: pre-commit__pre-commit-2187
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> doesn't work in visual studio 2019 on Windows ### describe your issue When I try to commit from visual studio 2019, I get the following message: ``` /usr/bin/env: ‘bash’: No such file or directory ``` I have confirmed that it works from vscode and command prompt. I rewrote the `.git/hooks/pre-commit` to below and it worked. ``` #!/usr/bin/env sh ``` As far as I can see, visual studio git hooks only support `sh`. Are you aware of this problem? thank you. ### pre-commit --version pre-commit 2.16.0 ### .pre-commit-config.yaml ```yaml repos: - repo: https://github.com/pre-commit/mirrors-clang-format rev: v13.0.0 hooks: - id: clang-format ``` ### ~/.cache/pre-commit/pre-commit.log (if present) _No response_ </issue> <code> [start of pre_commit/commands/install_uninstall.py] 1 import logging 2 import os.path 3 import shlex 4 import shutil 5 import sys 6 from typing import Optional 7 from typing import Sequence 8 from typing import Tuple 9 10 from pre_commit import git 11 from pre_commit import output 12 from pre_commit.clientlib import load_config 13 from pre_commit.repository import all_hooks 14 from pre_commit.repository import install_hook_envs 15 from pre_commit.store import Store 16 from pre_commit.util import make_executable 17 from pre_commit.util import resource_text 18 19 20 logger = logging.getLogger(__name__) 21 22 # This is used to identify the hook file we install 23 PRIOR_HASHES = ( 24 b'4d9958c90bc262f47553e2c073f14cfe', 25 b'd8ee923c46731b42cd95cc869add4062', 26 b'49fd668cb42069aa1b6048464be5d395', 27 b'79f09a650522a87b0da915d0d983b2de', 28 b'e358c9dae00eac5d06b38dfdb1e33a8c', 29 ) 30 CURRENT_HASH = b'138fd403232d2ddd5efb44317e38bf03' 31 TEMPLATE_START = '# start templated\n' 32 TEMPLATE_END = '# end templated\n' 33 34 35 def _hook_paths( 36 hook_type: str, 37 git_dir: Optional[str] = None, 38 ) -> Tuple[str, str]: 39 git_dir = git_dir if git_dir is not None else git.get_git_dir() 40 pth = os.path.join(git_dir, 'hooks', hook_type) 41 return pth, f'{pth}.legacy' 42 43 44 def is_our_script(filename: str) -> bool: 45 if not os.path.exists(filename): # pragma: win32 no cover (symlink) 46 return False 47 with open(filename, 'rb') as f: 48 contents = f.read() 49 return any(h in contents for h in (CURRENT_HASH,) + PRIOR_HASHES) 50 51 52 def _install_hook_script( 53 config_file: str, 54 hook_type: str, 55 overwrite: bool = False, 56 skip_on_missing_config: bool = False, 57 git_dir: Optional[str] = None, 58 ) -> None: 59 hook_path, legacy_path = _hook_paths(hook_type, git_dir=git_dir) 60 61 os.makedirs(os.path.dirname(hook_path), exist_ok=True) 62 63 # If we have an existing hook, move it to pre-commit.legacy 64 if os.path.lexists(hook_path) and not is_our_script(hook_path): 65 shutil.move(hook_path, legacy_path) 66 67 # If we specify overwrite, we simply delete the legacy file 68 if overwrite and os.path.exists(legacy_path): 69 os.remove(legacy_path) 70 elif os.path.exists(legacy_path): 71 output.write_line( 72 f'Running in migration mode with existing hooks at {legacy_path}\n' 73 f'Use -f to use only pre-commit.', 74 ) 75 76 args = ['hook-impl', f'--config={config_file}', f'--hook-type={hook_type}'] 77 if skip_on_missing_config: 78 args.append('--skip-on-missing-config') 79 80 with open(hook_path, 'w') as hook_file: 81 contents = resource_text('hook-tmpl') 82 before, rest = contents.split(TEMPLATE_START) 83 _, after = rest.split(TEMPLATE_END) 84 85 hook_file.write(before + 
TEMPLATE_START) 86 hook_file.write(f'INSTALL_PYTHON={shlex.quote(sys.executable)}\n') 87 # TODO: python3.8+: shlex.join 88 args_s = ' '.join(shlex.quote(part) for part in args) 89 hook_file.write(f'ARGS=({args_s})\n') 90 hook_file.write(TEMPLATE_END + after) 91 make_executable(hook_path) 92 93 output.write_line(f'pre-commit installed at {hook_path}') 94 95 96 def install( 97 config_file: str, 98 store: Store, 99 hook_types: Sequence[str], 100 overwrite: bool = False, 101 hooks: bool = False, 102 skip_on_missing_config: bool = False, 103 git_dir: Optional[str] = None, 104 ) -> int: 105 if git_dir is None and git.has_core_hookpaths_set(): 106 logger.error( 107 'Cowardly refusing to install hooks with `core.hooksPath` set.\n' 108 'hint: `git config --unset-all core.hooksPath`', 109 ) 110 return 1 111 112 for hook_type in hook_types: 113 _install_hook_script( 114 config_file, hook_type, 115 overwrite=overwrite, 116 skip_on_missing_config=skip_on_missing_config, 117 git_dir=git_dir, 118 ) 119 120 if hooks: 121 install_hooks(config_file, store) 122 123 return 0 124 125 126 def install_hooks(config_file: str, store: Store) -> int: 127 install_hook_envs(all_hooks(load_config(config_file), store), store) 128 return 0 129 130 131 def _uninstall_hook_script(hook_type: str) -> None: 132 hook_path, legacy_path = _hook_paths(hook_type) 133 134 # If our file doesn't exist or it isn't ours, gtfo. 135 if not os.path.exists(hook_path) or not is_our_script(hook_path): 136 return 137 138 os.remove(hook_path) 139 output.write_line(f'{hook_type} uninstalled') 140 141 if os.path.exists(legacy_path): 142 os.replace(legacy_path, hook_path) 143 output.write_line(f'Restored previous hooks to {hook_path}') 144 145 146 def uninstall(hook_types: Sequence[str]) -> int: 147 for hook_type in hook_types: 148 _uninstall_hook_script(hook_type) 149 return 0 150 [end of pre_commit/commands/install_uninstall.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pre_commit/commands/install_uninstall.py b/pre_commit/commands/install_uninstall.py --- a/pre_commit/commands/install_uninstall.py +++ b/pre_commit/commands/install_uninstall.py @@ -82,6 +82,13 @@ before, rest = contents.split(TEMPLATE_START) _, after = rest.split(TEMPLATE_END) + # on windows always use `/bin/sh` since `bash` might not be on PATH + # though we use bash-specific features `sh` on windows is actually + # bash in "POSIXLY_CORRECT" mode which still supports the features we + # use: subshells / arrays + if sys.platform == 'win32': # pragma: win32 cover + hook_file.write('#!/bin/sh\n') + hook_file.write(before + TEMPLATE_START) hook_file.write(f'INSTALL_PYTHON={shlex.quote(sys.executable)}\n') # TODO: python3.8+: shlex.join
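The golden patch above hard-codes `#!/bin/sh` as the hook shebang on Windows, where Git clients such as Visual Studio 2019 provide `sh` but may not have `bash` on `PATH`. The snippet below is a minimal stand-alone restatement of that decision for illustration; the non-Windows shebang is an assumption based on the `/usr/bin/env bash` error quoted in the issue, not pre-commit's actual `hook-tmpl` contents.

```python
import sys

def hook_header() -> str:
    # Git for Windows ships /bin/sh (bash in POSIXLY_CORRECT mode), so the
    # generated hook keeps the subshell/array features it relies on.
    if sys.platform == "win32":
        return "#!/bin/sh\n"
    return "#!/usr/bin/env bash\n"  # assumed default shebang elsewhere

print(hook_header(), end="")
```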
{"golden_diff": "diff --git a/pre_commit/commands/install_uninstall.py b/pre_commit/commands/install_uninstall.py\n--- a/pre_commit/commands/install_uninstall.py\n+++ b/pre_commit/commands/install_uninstall.py\n@@ -82,6 +82,13 @@\n before, rest = contents.split(TEMPLATE_START)\n _, after = rest.split(TEMPLATE_END)\n \n+ # on windows always use `/bin/sh` since `bash` might not be on PATH\n+ # though we use bash-specific features `sh` on windows is actually\n+ # bash in \"POSIXLY_CORRECT\" mode which still supports the features we\n+ # use: subshells / arrays\n+ if sys.platform == 'win32': # pragma: win32 cover\n+ hook_file.write('#!/bin/sh\\n')\n+\n hook_file.write(before + TEMPLATE_START)\n hook_file.write(f'INSTALL_PYTHON={shlex.quote(sys.executable)}\\n')\n # TODO: python3.8+: shlex.join\n", "issue": "doesn't work in visual studio 2019 on Windows\n### describe your issue\n\nWhen I try to commit from visual studio 2019, I get the following message:\r\n```\r\n/usr/bin/env: \u2018bash\u2019: No such file or directory\r\n```\r\n\r\nI have confirmed that it works from vscode and command prompt.\r\nI rewrote the `.git/hooks/pre-commit` to below and it worked.\r\n```\r\n#!/usr/bin/env sh\r\n```\r\n\r\nAs far as I can see, visual studio git hooks only support `sh`.\r\nAre you aware of this problem?\r\n\r\nthank you.\r\n\n\n### pre-commit --version\n\npre-commit 2.16.0\n\n### .pre-commit-config.yaml\n\n```yaml\nrepos:\r\n - repo: https://github.com/pre-commit/mirrors-clang-format\r\n rev: v13.0.0\r\n hooks:\r\n - id: clang-format\n```\n\n\n### ~/.cache/pre-commit/pre-commit.log (if present)\n\n_No response_\n", "before_files": [{"content": "import logging\nimport os.path\nimport shlex\nimport shutil\nimport sys\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Tuple\n\nfrom pre_commit import git\nfrom pre_commit import output\nfrom pre_commit.clientlib import load_config\nfrom pre_commit.repository import all_hooks\nfrom pre_commit.repository import install_hook_envs\nfrom pre_commit.store import Store\nfrom pre_commit.util import make_executable\nfrom pre_commit.util import resource_text\n\n\nlogger = logging.getLogger(__name__)\n\n# This is used to identify the hook file we install\nPRIOR_HASHES = (\n b'4d9958c90bc262f47553e2c073f14cfe',\n b'd8ee923c46731b42cd95cc869add4062',\n b'49fd668cb42069aa1b6048464be5d395',\n b'79f09a650522a87b0da915d0d983b2de',\n b'e358c9dae00eac5d06b38dfdb1e33a8c',\n)\nCURRENT_HASH = b'138fd403232d2ddd5efb44317e38bf03'\nTEMPLATE_START = '# start templated\\n'\nTEMPLATE_END = '# end templated\\n'\n\n\ndef _hook_paths(\n hook_type: str,\n git_dir: Optional[str] = None,\n) -> Tuple[str, str]:\n git_dir = git_dir if git_dir is not None else git.get_git_dir()\n pth = os.path.join(git_dir, 'hooks', hook_type)\n return pth, f'{pth}.legacy'\n\n\ndef is_our_script(filename: str) -> bool:\n if not os.path.exists(filename): # pragma: win32 no cover (symlink)\n return False\n with open(filename, 'rb') as f:\n contents = f.read()\n return any(h in contents for h in (CURRENT_HASH,) + PRIOR_HASHES)\n\n\ndef _install_hook_script(\n config_file: str,\n hook_type: str,\n overwrite: bool = False,\n skip_on_missing_config: bool = False,\n git_dir: Optional[str] = None,\n) -> None:\n hook_path, legacy_path = _hook_paths(hook_type, git_dir=git_dir)\n\n os.makedirs(os.path.dirname(hook_path), exist_ok=True)\n\n # If we have an existing hook, move it to pre-commit.legacy\n if os.path.lexists(hook_path) and not is_our_script(hook_path):\n shutil.move(hook_path, 
legacy_path)\n\n # If we specify overwrite, we simply delete the legacy file\n if overwrite and os.path.exists(legacy_path):\n os.remove(legacy_path)\n elif os.path.exists(legacy_path):\n output.write_line(\n f'Running in migration mode with existing hooks at {legacy_path}\\n'\n f'Use -f to use only pre-commit.',\n )\n\n args = ['hook-impl', f'--config={config_file}', f'--hook-type={hook_type}']\n if skip_on_missing_config:\n args.append('--skip-on-missing-config')\n\n with open(hook_path, 'w') as hook_file:\n contents = resource_text('hook-tmpl')\n before, rest = contents.split(TEMPLATE_START)\n _, after = rest.split(TEMPLATE_END)\n\n hook_file.write(before + TEMPLATE_START)\n hook_file.write(f'INSTALL_PYTHON={shlex.quote(sys.executable)}\\n')\n # TODO: python3.8+: shlex.join\n args_s = ' '.join(shlex.quote(part) for part in args)\n hook_file.write(f'ARGS=({args_s})\\n')\n hook_file.write(TEMPLATE_END + after)\n make_executable(hook_path)\n\n output.write_line(f'pre-commit installed at {hook_path}')\n\n\ndef install(\n config_file: str,\n store: Store,\n hook_types: Sequence[str],\n overwrite: bool = False,\n hooks: bool = False,\n skip_on_missing_config: bool = False,\n git_dir: Optional[str] = None,\n) -> int:\n if git_dir is None and git.has_core_hookpaths_set():\n logger.error(\n 'Cowardly refusing to install hooks with `core.hooksPath` set.\\n'\n 'hint: `git config --unset-all core.hooksPath`',\n )\n return 1\n\n for hook_type in hook_types:\n _install_hook_script(\n config_file, hook_type,\n overwrite=overwrite,\n skip_on_missing_config=skip_on_missing_config,\n git_dir=git_dir,\n )\n\n if hooks:\n install_hooks(config_file, store)\n\n return 0\n\n\ndef install_hooks(config_file: str, store: Store) -> int:\n install_hook_envs(all_hooks(load_config(config_file), store), store)\n return 0\n\n\ndef _uninstall_hook_script(hook_type: str) -> None:\n hook_path, legacy_path = _hook_paths(hook_type)\n\n # If our file doesn't exist or it isn't ours, gtfo.\n if not os.path.exists(hook_path) or not is_our_script(hook_path):\n return\n\n os.remove(hook_path)\n output.write_line(f'{hook_type} uninstalled')\n\n if os.path.exists(legacy_path):\n os.replace(legacy_path, hook_path)\n output.write_line(f'Restored previous hooks to {hook_path}')\n\n\ndef uninstall(hook_types: Sequence[str]) -> int:\n for hook_type in hook_types:\n _uninstall_hook_script(hook_type)\n return 0\n", "path": "pre_commit/commands/install_uninstall.py"}]}
2,376
222
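The record that ends above (see its `golden_diff` and `issue` fields) describes pre-commit hooks failing under Visual Studio 2019 because `bash` is not on PATH, and its fix writes a `#!/bin/sh` line at the top of the installed Git hook on Windows. Below is a minimal standalone sketch of that write path; the `write_hook` helper and the in-memory template handling are simplified stand-ins for pre-commit's real `_install_hook_script`, not the project's code.

```python
import shlex
import sys

TEMPLATE_START = '# start templated\n'
TEMPLATE_END = '# end templated\n'


def write_hook(hook_path, template, args):
    # Split the shipped hook template around the templated block, exactly
    # as the snippet in the record does.
    before, rest = template.split(TEMPLATE_START)
    _, after = rest.split(TEMPLATE_END)

    with open(hook_path, 'w') as hook_file:
        if sys.platform == 'win32':
            # Git for Windows has no `bash` on PATH, but its `sh` is bash in
            # POSIX mode, so the template's arrays/subshells still work.
            hook_file.write('#!/bin/sh\n')
        hook_file.write(before + TEMPLATE_START)
        hook_file.write(f'INSTALL_PYTHON={shlex.quote(sys.executable)}\n')
        hook_file.write('ARGS=({})\n'.format(
            ' '.join(shlex.quote(part) for part in args)))
        hook_file.write(TEMPLATE_END + after)
```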
gh_patches_debug_23069
rasdani/github-patches
git_diff
bokeh__bokeh-6911
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bokeh DateRangeSlider returns tuple of integers instead of dates In 0.12.7 the DateRangeSlider `.value` returns a tuple of integers. Additionally, in order to convert them back to dates you have to divide by 1000 (i.e. `datetime.fromtimestamp(the_tuple[0]/1000)`). Expected behavior: Return a tuple of Date objects. ``` from datetime import datetime from bokeh.models.widgets import DateRangeSlider from bokeh.io import curdoc def date_range_update(attrname, old, new): print('-- range values:', date_slider.value) # Works d1 = datetime.fromtimestamp(date_slider.value[0] / 1000) # Does not Work, gives error d2 = datetime.fromtimestamp(date_slider.value[0]) date_slider = DateRangeSlider(value=(date_start,date_end), start=date_start, end=date_end) date_slider.on_change('value', date_range_update) curdoc().add_root(date_slider) ``` #### Stack traceback and/or browser JavaScript console output #### Screenshots or screencasts of the bug in action </issue> <code> [start of bokeh/models/widgets/sliders.py] 1 """ Various kinds of slider widgets. 2 3 """ 4 from __future__ import absolute_import 5 6 from ...core.has_props import abstract 7 from ...core.properties import Bool, Int, Float, String, Date, Enum, Tuple, Instance, Color, Override 8 from ...core.enums import SliderCallbackPolicy 9 from ..callbacks import Callback 10 from .widget import Widget 11 12 @abstract 13 class AbstractSlider(Widget): 14 """ """ 15 16 title = String(default="", help=""" 17 Slider's label. 18 """) 19 20 show_value = Bool(default=True, help=""" 21 Whether or not show slider's value. 22 """) 23 24 format = String(help=""" 25 """) 26 27 orientation = Enum("horizontal", "vertical", help=""" 28 Orient the slider either horizontally (default) or vertically. 29 """) 30 31 direction = Enum("ltr", "rtl", help=""" 32 """) 33 34 tooltips = Bool(default=True, help=""" 35 """) 36 37 callback = Instance(Callback, help=""" 38 A callback to run in the browser whenever the current Slider value changes. 39 """) 40 41 callback_throttle = Float(default=200, help=""" 42 Number of millseconds to pause between callback calls as the slider is moved. 43 """) 44 45 callback_policy = Enum(SliderCallbackPolicy, default="throttle", help=""" 46 When the callback is initiated. This parameter can take on only one of three options: 47 48 * "continuous": the callback will be executed immediately for each movement of the slider 49 * "throttle": the callback will be executed at most every ``callback_throttle`` milliseconds. 50 * "mouseup": the callback will be executed only once when the slider is released. 51 52 The "mouseup" policy is intended for scenarios in which the callback is expensive in time. 53 """) 54 55 bar_color = Color(default="#e6e6e6", help=""" 56 """) 57 58 class Slider(AbstractSlider): 59 """ Slider-based number selection widget. """ 60 61 start = Float(help=""" 62 The minimum allowable value. 63 """) 64 65 end = Float(help=""" 66 The maximum allowable value. 67 """) 68 69 value = Float(help=""" 70 Initial or selected value. 71 """) 72 73 step = Float(default=1, help=""" 74 The step between consecutive values. 75 """) 76 77 format = Override(default="0[.]00") 78 79 class RangeSlider(AbstractSlider): 80 """ Range-slider based number range selection widget. """ 81 82 value = Tuple(Float, Float, help=""" 83 Initial or selected range. 84 """) 85 86 start = Float(help=""" 87 The minimum allowable value. 
88 """) 89 90 end = Float(help=""" 91 The maximum allowable value. 92 """) 93 94 step = Float(default=1, help=""" 95 The step between consecutive values. 96 """) 97 98 format = Override(default="0[.]00") 99 100 class DateSlider(AbstractSlider): 101 """ Slider-based date selection widget. """ 102 103 value = Date(help=""" 104 Initial or selected value. 105 """) 106 107 start = Date(help=""" 108 The minimum allowable value. 109 """) 110 111 end = Date(help=""" 112 The maximum allowable value. 113 """) 114 115 step = Int(default=1, help=""" 116 The step between consecutive values. 117 """) 118 119 format = Override(default="%d %b %G") 120 121 class DateRangeSlider(AbstractSlider): 122 """ Slider-based date range selection widget. """ 123 124 value = Tuple(Date, Date, help=""" 125 Initial or selected range. 126 """) 127 128 start = Date(help=""" 129 The minimum allowable value. 130 """) 131 132 end = Date(help=""" 133 The maximum allowable value. 134 """) 135 136 step = Int(default=1, help=""" 137 The step between consecutive values. 138 """) 139 140 format = Override(default="%d %b %G") 141 [end of bokeh/models/widgets/sliders.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bokeh/models/widgets/sliders.py b/bokeh/models/widgets/sliders.py --- a/bokeh/models/widgets/sliders.py +++ b/bokeh/models/widgets/sliders.py @@ -3,6 +3,9 @@ """ from __future__ import absolute_import +from datetime import datetime +import numbers + from ...core.has_props import abstract from ...core.properties import Bool, Int, Float, String, Date, Enum, Tuple, Instance, Color, Override from ...core.enums import SliderCallbackPolicy @@ -121,6 +124,25 @@ class DateRangeSlider(AbstractSlider): """ Slider-based date range selection widget. """ + @property + def value_as_datetime(self): + ''' Convenience property to retrieve the value tuple as a tuple of + datetime objects. + + ''' + if self.value is None: + return None + v1, v2 = self.value + if isinstance(v1, numbers.Number): + d1 = datetime.utcfromtimestamp(v1 / 1000) + else: + d1 = v1 + if isinstance(v2, numbers.Number): + d2 = datetime.utcfromtimestamp(v2 / 1000) + else: + d2 = v2 + return d1, d2 + value = Tuple(Date, Date, help=""" Initial or selected range. """)
{"golden_diff": "diff --git a/bokeh/models/widgets/sliders.py b/bokeh/models/widgets/sliders.py\n--- a/bokeh/models/widgets/sliders.py\n+++ b/bokeh/models/widgets/sliders.py\n@@ -3,6 +3,9 @@\n \"\"\"\n from __future__ import absolute_import\n \n+from datetime import datetime\n+import numbers\n+\n from ...core.has_props import abstract\n from ...core.properties import Bool, Int, Float, String, Date, Enum, Tuple, Instance, Color, Override\n from ...core.enums import SliderCallbackPolicy\n@@ -121,6 +124,25 @@\n class DateRangeSlider(AbstractSlider):\n \"\"\" Slider-based date range selection widget. \"\"\"\n \n+ @property\n+ def value_as_datetime(self):\n+ ''' Convenience property to retrieve the value tuple as a tuple of\n+ datetime objects.\n+\n+ '''\n+ if self.value is None:\n+ return None\n+ v1, v2 = self.value\n+ if isinstance(v1, numbers.Number):\n+ d1 = datetime.utcfromtimestamp(v1 / 1000)\n+ else:\n+ d1 = v1\n+ if isinstance(v2, numbers.Number):\n+ d2 = datetime.utcfromtimestamp(v2 / 1000)\n+ else:\n+ d2 = v2\n+ return d1, d2\n+\n value = Tuple(Date, Date, help=\"\"\"\n Initial or selected range.\n \"\"\")\n", "issue": "Bokeh DateRangeSlider returns tuple of integers instead of dates\nIn 0.12.7 the DateRangeSlider `.value` returns a tuple of integers. Additionally, in order to convert them back to dates you have to divide by 1000 (i.e. `datetime.fromtimestamp(the_tuple[0]/1000)`).\r\n\r\nExpected behavior: Return a tuple of Date objects.\r\n\r\n```\r\nfrom datetime import datetime\r\nfrom bokeh.models.widgets import DateRangeSlider\r\nfrom bokeh.io import curdoc\r\n\r\ndef date_range_update(attrname, old, new):\r\n print('-- range values:', date_slider.value)\r\n # Works\r\n d1 = datetime.fromtimestamp(date_slider.value[0] / 1000) \r\n # Does not Work, gives error\r\n d2 = datetime.fromtimestamp(date_slider.value[0])\r\n\r\ndate_slider = DateRangeSlider(value=(date_start,date_end), start=date_start, end=date_end)\r\ndate_slider.on_change('value', date_range_update)\r\n\r\ncurdoc().add_root(date_slider)\r\n\r\n```\r\n\r\n#### Stack traceback and/or browser JavaScript console output\r\n\r\n#### Screenshots or screencasts of the bug in action\r\n\n", "before_files": [{"content": "\"\"\" Various kinds of slider widgets.\n\n\"\"\"\nfrom __future__ import absolute_import\n\nfrom ...core.has_props import abstract\nfrom ...core.properties import Bool, Int, Float, String, Date, Enum, Tuple, Instance, Color, Override\nfrom ...core.enums import SliderCallbackPolicy\nfrom ..callbacks import Callback\nfrom .widget import Widget\n\n@abstract\nclass AbstractSlider(Widget):\n \"\"\" \"\"\"\n\n title = String(default=\"\", help=\"\"\"\n Slider's label.\n \"\"\")\n\n show_value = Bool(default=True, help=\"\"\"\n Whether or not show slider's value.\n \"\"\")\n\n format = String(help=\"\"\"\n \"\"\")\n\n orientation = Enum(\"horizontal\", \"vertical\", help=\"\"\"\n Orient the slider either horizontally (default) or vertically.\n \"\"\")\n\n direction = Enum(\"ltr\", \"rtl\", help=\"\"\"\n \"\"\")\n\n tooltips = Bool(default=True, help=\"\"\"\n \"\"\")\n\n callback = Instance(Callback, help=\"\"\"\n A callback to run in the browser whenever the current Slider value changes.\n \"\"\")\n\n callback_throttle = Float(default=200, help=\"\"\"\n Number of millseconds to pause between callback calls as the slider is moved.\n \"\"\")\n\n callback_policy = Enum(SliderCallbackPolicy, default=\"throttle\", help=\"\"\"\n When the callback is initiated. 
This parameter can take on only one of three options:\n\n * \"continuous\": the callback will be executed immediately for each movement of the slider\n * \"throttle\": the callback will be executed at most every ``callback_throttle`` milliseconds.\n * \"mouseup\": the callback will be executed only once when the slider is released.\n\n The \"mouseup\" policy is intended for scenarios in which the callback is expensive in time.\n \"\"\")\n\n bar_color = Color(default=\"#e6e6e6\", help=\"\"\"\n \"\"\")\n\nclass Slider(AbstractSlider):\n \"\"\" Slider-based number selection widget. \"\"\"\n\n start = Float(help=\"\"\"\n The minimum allowable value.\n \"\"\")\n\n end = Float(help=\"\"\"\n The maximum allowable value.\n \"\"\")\n\n value = Float(help=\"\"\"\n Initial or selected value.\n \"\"\")\n\n step = Float(default=1, help=\"\"\"\n The step between consecutive values.\n \"\"\")\n\n format = Override(default=\"0[.]00\")\n\nclass RangeSlider(AbstractSlider):\n \"\"\" Range-slider based number range selection widget. \"\"\"\n\n value = Tuple(Float, Float, help=\"\"\"\n Initial or selected range.\n \"\"\")\n\n start = Float(help=\"\"\"\n The minimum allowable value.\n \"\"\")\n\n end = Float(help=\"\"\"\n The maximum allowable value.\n \"\"\")\n\n step = Float(default=1, help=\"\"\"\n The step between consecutive values.\n \"\"\")\n\n format = Override(default=\"0[.]00\")\n\nclass DateSlider(AbstractSlider):\n \"\"\" Slider-based date selection widget. \"\"\"\n\n value = Date(help=\"\"\"\n Initial or selected value.\n \"\"\")\n\n start = Date(help=\"\"\"\n The minimum allowable value.\n \"\"\")\n\n end = Date(help=\"\"\"\n The maximum allowable value.\n \"\"\")\n\n step = Int(default=1, help=\"\"\"\n The step between consecutive values.\n \"\"\")\n\n format = Override(default=\"%d %b %G\")\n\nclass DateRangeSlider(AbstractSlider):\n \"\"\" Slider-based date range selection widget. \"\"\"\n\n value = Tuple(Date, Date, help=\"\"\"\n Initial or selected range.\n \"\"\")\n\n start = Date(help=\"\"\"\n The minimum allowable value.\n \"\"\")\n\n end = Date(help=\"\"\"\n The maximum allowable value.\n \"\"\")\n\n step = Int(default=1, help=\"\"\"\n The step between consecutive values.\n \"\"\")\n\n format = Override(default=\"%d %b %G\")\n", "path": "bokeh/models/widgets/sliders.py"}]}
1,917
320
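The patch above adds a `value_as_datetime` convenience property to `DateRangeSlider`, so server callbacks no longer have to divide the raw millisecond timestamps by 1000 themselves. A short usage sketch follows, adapted from the reproduction script in the issue; the concrete start and end dates are made-up placeholders.

```python
from datetime import date

from bokeh.io import curdoc
from bokeh.models.widgets import DateRangeSlider

date_start = date(2017, 1, 1)    # placeholder values for the demo
date_end = date(2017, 12, 31)

date_slider = DateRangeSlider(value=(date_start, date_end),
                              start=date_start, end=date_end)


def date_range_update(attrname, old, new):
    # .value still arrives as a tuple of millisecond timestamps from the
    # browser; the new property converts both ends to datetime objects.
    d1, d2 = date_slider.value_as_datetime
    print('-- range values:', d1, d2)


date_slider.on_change('value', date_range_update)
curdoc().add_root(date_slider)
```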
gh_patches_debug_10102
rasdani/github-patches
git_diff
fossasia__open-event-server-4882
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Session allowed to be created without an associated Track **Describe the bug** <!-- A clear and concise description of what the bug is. --> Sessions can be created without being associated with a Track **Expected behavior** <!-- A clear and concise description of what you expected to happen. --> It shouldn't be possible </issue> <code> [start of app/api/sessions.py] 1 from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship 2 3 from app.api.bootstrap import api 4 from app.api.events import Event 5 from app.api.helpers.db import safe_query, get_count 6 from app.api.helpers.exceptions import ForbiddenException 7 from app.api.helpers.mail import send_email_new_session, send_email_session_accept_reject 8 from app.api.helpers.notification import send_notif_new_session_organizer, send_notif_session_accept_reject 9 from app.api.helpers.permissions import current_identity 10 from app.api.helpers.query import event_query 11 from app.api.helpers.utilities import require_relationship 12 from app.api.schema.sessions import SessionSchema 13 from app.models import db 14 from app.models.microlocation import Microlocation 15 from app.models.session import Session 16 from app.models.session_type import SessionType 17 from app.models.speaker import Speaker 18 from app.models.track import Track 19 from app.models.user import User 20 from app.settings import get_settings 21 22 23 class SessionListPost(ResourceList): 24 """ 25 List Sessions 26 """ 27 def before_post(self, args, kwargs, data): 28 """ 29 before post method to check for required relationship and proper permission 30 :param args: 31 :param kwargs: 32 :param data: 33 :return: 34 """ 35 require_relationship(['event'], data) 36 data['creator_id'] = current_identity.id 37 if get_count(db.session.query(Event).filter_by(id=int(data['event']), is_sessions_speakers_enabled=False)) > 0: 38 raise ForbiddenException({'pointer': ''}, "Sessions are disabled for this Event") 39 40 def after_create_object(self, session, data, view_kwargs): 41 """ 42 method to send email for creation of new session 43 mails session link to the concerned user 44 :param session: 45 :param data: 46 :param view_kwargs: 47 :return: 48 """ 49 if session.event.get_organizer(): 50 event_name = session.event.name 51 organizer = session.event.get_organizer() 52 organizer_email = organizer.email 53 frontend_url = get_settings()['frontend_url'] 54 link = "{}/events/{}/sessions/{}"\ 55 .format(frontend_url, session.event_id, session.id) 56 send_email_new_session(organizer_email, event_name, link) 57 send_notif_new_session_organizer(organizer, event_name, link) 58 59 decorators = (api.has_permission('create_event'),) 60 schema = SessionSchema 61 data_layer = {'session': db.session, 62 'model': Session, 63 'methods': {'after_create_object': after_create_object 64 }} 65 66 67 class SessionList(ResourceList): 68 """ 69 List Sessions 70 """ 71 72 def query(self, view_kwargs): 73 """ 74 query method for SessionList class 75 :param view_kwargs: 76 :return: 77 """ 78 query_ = self.session.query(Session) 79 if view_kwargs.get('track_id') is not None: 80 track = safe_query(self, Track, 'id', view_kwargs['track_id'], 'track_id') 81 query_ = query_.join(Track).filter(Track.id == track.id) 82 if view_kwargs.get('session_type_id') is not None: 83 session_type = safe_query(self, SessionType, 'id', view_kwargs['session_type_id'], 'session_type_id') 84 query_ = 
query_.join(SessionType).filter(SessionType.id == session_type.id) 85 if view_kwargs.get('microlocation_id') is not None: 86 microlocation = safe_query(self, Microlocation, 'id', view_kwargs['microlocation_id'], 'microlocation_id') 87 query_ = query_.join(Microlocation).filter(Microlocation.id == microlocation.id) 88 if view_kwargs.get('user_id') is not None: 89 user = safe_query(self, User, 'id', view_kwargs['user_id'], 'user_id') 90 query_ = query_.join(User).filter(User.id == user.id) 91 query_ = event_query(self, query_, view_kwargs) 92 if view_kwargs.get('speaker_id'): 93 speaker = safe_query(self, Speaker, 'id', view_kwargs['speaker_id'], 'speaker_id') 94 # session-speaker :: many-to-many relationship 95 query_ = Session.query.filter(Session.speakers.any(id=speaker.id)) 96 97 return query_ 98 99 view_kwargs = True 100 methods = ['GET'] 101 schema = SessionSchema 102 data_layer = {'session': db.session, 103 'model': Session, 104 'methods': { 105 'query': query 106 }} 107 108 109 class SessionDetail(ResourceDetail): 110 """ 111 Session detail by id 112 """ 113 def before_get_object(self, view_kwargs): 114 """ 115 before get method to get the resource id for fetching details 116 :param view_kwargs: 117 :return: 118 """ 119 if view_kwargs.get('event_identifier'): 120 event = safe_query(self, Event, 'identifier', view_kwargs['event_identifier'], 'identifier') 121 view_kwargs['event_id'] = event.id 122 123 def after_update_object(self, session, data, view_kwargs): 124 """ Send email if session accepted or rejected """ 125 126 if 'state' in data and data.get('send_email', None) and (session.state == 'accepted' or 127 session.state == 'rejected'): 128 # Email for speaker 129 speakers = session.speakers 130 for speaker in speakers: 131 frontend_url = get_settings()['frontend_url'] 132 link = "{}/events/{}/sessions/{}" \ 133 .format(frontend_url, session.event_id, session.id) 134 send_email_session_accept_reject(speaker.email, session, link) 135 send_notif_session_accept_reject(speaker, session.title, session.state, link) 136 137 # Email for organizer 138 if session.event.get_organizer(): 139 organizer = session.event.get_organizer() 140 organizer_email = organizer.email 141 frontend_url = get_settings()['frontend_url'] 142 link = "{}/events/{}/sessions/{}" \ 143 .format(frontend_url, session.event_id, session.id) 144 send_email_session_accept_reject(organizer_email, session, 145 link) 146 send_notif_session_accept_reject(organizer, session.title, 147 session.state, link) 148 149 decorators = (api.has_permission('is_speaker_for_session', methods="PATCH,DELETE"),) 150 schema = SessionSchema 151 data_layer = {'session': db.session, 152 'model': Session, 153 'methods': {'before_get_object': before_get_object, 154 'after_update_object': after_update_object}} 155 156 157 class SessionRelationshipRequired(ResourceRelationship): 158 """ 159 Session Relationship 160 """ 161 schema = SessionSchema 162 decorators = (api.has_permission('is_speaker_for_session', methods="PATCH,DELETE"),) 163 methods = ['GET', 'PATCH'] 164 data_layer = {'session': db.session, 165 'model': Session} 166 167 168 class SessionRelationshipOptional(ResourceRelationship): 169 """ 170 Session Relationship 171 """ 172 schema = SessionSchema 173 decorators = (api.has_permission('is_speaker_for_session', methods="PATCH,DELETE"),) 174 data_layer = {'session': db.session, 175 'model': Session} 176 [end of app/api/sessions.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this 
repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/app/api/sessions.py b/app/api/sessions.py --- a/app/api/sessions.py +++ b/app/api/sessions.py @@ -32,7 +32,7 @@ :param data: :return: """ - require_relationship(['event'], data) + require_relationship(['event', 'track'], data) data['creator_id'] = current_identity.id if get_count(db.session.query(Event).filter_by(id=int(data['event']), is_sessions_speakers_enabled=False)) > 0: raise ForbiddenException({'pointer': ''}, "Sessions are disabled for this Event")
{"golden_diff": "diff --git a/app/api/sessions.py b/app/api/sessions.py\n--- a/app/api/sessions.py\n+++ b/app/api/sessions.py\n@@ -32,7 +32,7 @@\n :param data:\n :return:\n \"\"\"\n- require_relationship(['event'], data)\n+ require_relationship(['event', 'track'], data)\n data['creator_id'] = current_identity.id\n if get_count(db.session.query(Event).filter_by(id=int(data['event']), is_sessions_speakers_enabled=False)) > 0:\n raise ForbiddenException({'pointer': ''}, \"Sessions are disabled for this Event\")\n", "issue": "Session allowed to be created without an associated Track\n**Describe the bug**\r\n<!-- A clear and concise description of what the bug is. -->\r\nSessions can be created without being associated with a Track \r\n\r\n**Expected behavior**\r\n<!-- A clear and concise description of what you expected to happen. -->\r\nIt shouldn't be possible\n", "before_files": [{"content": "from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\n\nfrom app.api.bootstrap import api\nfrom app.api.events import Event\nfrom app.api.helpers.db import safe_query, get_count\nfrom app.api.helpers.exceptions import ForbiddenException\nfrom app.api.helpers.mail import send_email_new_session, send_email_session_accept_reject\nfrom app.api.helpers.notification import send_notif_new_session_organizer, send_notif_session_accept_reject\nfrom app.api.helpers.permissions import current_identity\nfrom app.api.helpers.query import event_query\nfrom app.api.helpers.utilities import require_relationship\nfrom app.api.schema.sessions import SessionSchema\nfrom app.models import db\nfrom app.models.microlocation import Microlocation\nfrom app.models.session import Session\nfrom app.models.session_type import SessionType\nfrom app.models.speaker import Speaker\nfrom app.models.track import Track\nfrom app.models.user import User\nfrom app.settings import get_settings\n\n\nclass SessionListPost(ResourceList):\n \"\"\"\n List Sessions\n \"\"\"\n def before_post(self, args, kwargs, data):\n \"\"\"\n before post method to check for required relationship and proper permission\n :param args:\n :param kwargs:\n :param data:\n :return:\n \"\"\"\n require_relationship(['event'], data)\n data['creator_id'] = current_identity.id\n if get_count(db.session.query(Event).filter_by(id=int(data['event']), is_sessions_speakers_enabled=False)) > 0:\n raise ForbiddenException({'pointer': ''}, \"Sessions are disabled for this Event\")\n\n def after_create_object(self, session, data, view_kwargs):\n \"\"\"\n method to send email for creation of new session\n mails session link to the concerned user\n :param session:\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if session.event.get_organizer():\n event_name = session.event.name\n organizer = session.event.get_organizer()\n organizer_email = organizer.email\n frontend_url = get_settings()['frontend_url']\n link = \"{}/events/{}/sessions/{}\"\\\n .format(frontend_url, session.event_id, session.id)\n send_email_new_session(organizer_email, event_name, link)\n send_notif_new_session_organizer(organizer, event_name, link)\n\n decorators = (api.has_permission('create_event'),)\n schema = SessionSchema\n data_layer = {'session': db.session,\n 'model': Session,\n 'methods': {'after_create_object': after_create_object\n }}\n\n\nclass SessionList(ResourceList):\n \"\"\"\n List Sessions\n \"\"\"\n\n def query(self, view_kwargs):\n \"\"\"\n query method for SessionList class\n :param view_kwargs:\n :return:\n \"\"\"\n query_ = self.session.query(Session)\n 
if view_kwargs.get('track_id') is not None:\n track = safe_query(self, Track, 'id', view_kwargs['track_id'], 'track_id')\n query_ = query_.join(Track).filter(Track.id == track.id)\n if view_kwargs.get('session_type_id') is not None:\n session_type = safe_query(self, SessionType, 'id', view_kwargs['session_type_id'], 'session_type_id')\n query_ = query_.join(SessionType).filter(SessionType.id == session_type.id)\n if view_kwargs.get('microlocation_id') is not None:\n microlocation = safe_query(self, Microlocation, 'id', view_kwargs['microlocation_id'], 'microlocation_id')\n query_ = query_.join(Microlocation).filter(Microlocation.id == microlocation.id)\n if view_kwargs.get('user_id') is not None:\n user = safe_query(self, User, 'id', view_kwargs['user_id'], 'user_id')\n query_ = query_.join(User).filter(User.id == user.id)\n query_ = event_query(self, query_, view_kwargs)\n if view_kwargs.get('speaker_id'):\n speaker = safe_query(self, Speaker, 'id', view_kwargs['speaker_id'], 'speaker_id')\n # session-speaker :: many-to-many relationship\n query_ = Session.query.filter(Session.speakers.any(id=speaker.id))\n\n return query_\n\n view_kwargs = True\n methods = ['GET']\n schema = SessionSchema\n data_layer = {'session': db.session,\n 'model': Session,\n 'methods': {\n 'query': query\n }}\n\n\nclass SessionDetail(ResourceDetail):\n \"\"\"\n Session detail by id\n \"\"\"\n def before_get_object(self, view_kwargs):\n \"\"\"\n before get method to get the resource id for fetching details\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('event_identifier'):\n event = safe_query(self, Event, 'identifier', view_kwargs['event_identifier'], 'identifier')\n view_kwargs['event_id'] = event.id\n\n def after_update_object(self, session, data, view_kwargs):\n \"\"\" Send email if session accepted or rejected \"\"\"\n\n if 'state' in data and data.get('send_email', None) and (session.state == 'accepted' or\n session.state == 'rejected'):\n # Email for speaker\n speakers = session.speakers\n for speaker in speakers:\n frontend_url = get_settings()['frontend_url']\n link = \"{}/events/{}/sessions/{}\" \\\n .format(frontend_url, session.event_id, session.id)\n send_email_session_accept_reject(speaker.email, session, link)\n send_notif_session_accept_reject(speaker, session.title, session.state, link)\n\n # Email for organizer\n if session.event.get_organizer():\n organizer = session.event.get_organizer()\n organizer_email = organizer.email\n frontend_url = get_settings()['frontend_url']\n link = \"{}/events/{}/sessions/{}\" \\\n .format(frontend_url, session.event_id, session.id)\n send_email_session_accept_reject(organizer_email, session,\n link)\n send_notif_session_accept_reject(organizer, session.title,\n session.state, link)\n\n decorators = (api.has_permission('is_speaker_for_session', methods=\"PATCH,DELETE\"),)\n schema = SessionSchema\n data_layer = {'session': db.session,\n 'model': Session,\n 'methods': {'before_get_object': before_get_object,\n 'after_update_object': after_update_object}}\n\n\nclass SessionRelationshipRequired(ResourceRelationship):\n \"\"\"\n Session Relationship\n \"\"\"\n schema = SessionSchema\n decorators = (api.has_permission('is_speaker_for_session', methods=\"PATCH,DELETE\"),)\n methods = ['GET', 'PATCH']\n data_layer = {'session': db.session,\n 'model': Session}\n\n\nclass SessionRelationshipOptional(ResourceRelationship):\n \"\"\"\n Session Relationship\n \"\"\"\n schema = SessionSchema\n decorators = (api.has_permission('is_speaker_for_session', 
methods=\"PATCH,DELETE\"),)\n data_layer = {'session': db.session,\n 'model': Session}\n", "path": "app/api/sessions.py"}]}
2,500
133
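The one-line patch above makes `track` a required relationship when a session is created. The real `require_relationship` helper lives in `app.api.helpers.utilities` and is not shown in the record, so the stand-in below only assumes its behaviour: reject the payload unless every listed relationship key is present in the incoming data.

```python
class ForbiddenError(Exception):
    """Stand-in for the API's forbidden/unprocessable exception."""


def require_relationship(resources, data):
    # Assumed behaviour of the helper: every named relationship must be
    # present in the incoming JSON:API payload.
    for resource in resources:
        if resource not in data:
            raise ForbiddenError(
                f'A valid relationship with {resource} resource is required'
            )


payload = {'event': 7, 'title': 'Session without a track'}

require_relationship(['event'], payload)              # old check: passes

try:
    require_relationship(['event', 'track'], payload)  # new check
except ForbiddenError as err:
    print(err)   # A valid relationship with track resource is required
```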
gh_patches_debug_13467
rasdani/github-patches
git_diff
python-telegram-bot__python-telegram-bot-155
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> httplib exceptions Running my bot continuously, I sometimes get rare exceptions. This traceback was with Python 2.7 and python-telegram-bot 3.2: ``` File "/home/rahiel/BismillahBot/bismillah.py", line 99, in send_quran bot.sendMessage(chat_id=chat_id, text=text) File "/home/rahiel/BismillahBot/venv/local/lib/python2.7/site-packages/telegram/bot.py", line 127, in decorator result = func(self, *args, **kwargs) File "/home/rahiel/BismillahBot/venv/local/lib/python2.7/site-packages/telegram/bot.py", line 159, in decorator result = request.post(url, data) File "/home/rahiel/BismillahBot/venv/local/lib/python2.7/site-packages/telegram/utils/request.py", line 67, in decorator return func(*args, **kwargs) File "/home/rahiel/BismillahBot/venv/local/lib/python2.7/site-packages/telegram/utils/request.py", line 140, in post result = urlopen(request, timeout=timeout).read() File "/usr/lib/python2.7/urllib2.py", line 127, in urlopen return _opener.open(url, data, timeout) File "/usr/lib/python2.7/urllib2.py", line 404, in open response = self._open(req, data) File "/usr/lib/python2.7/urllib2.py", line 422, in _open '_open', req) File "/usr/lib/python2.7/urllib2.py", line 382, in _call_chain result = func(*args) File "/usr/lib/python2.7/urllib2.py", line 1222, in https_open return self.do_open(httplib.HTTPSConnection, req) File "/usr/lib/python2.7/urllib2.py", line 1187, in do_open r = h.getresponse(buffering=True) File "/usr/lib/python2.7/httplib.py", line 1051, in getresponse response.begin() File "/usr/lib/python2.7/httplib.py", line 415, in begin version, status, reason = self._read_status() File "/usr/lib/python2.7/httplib.py", line 379, in _read_status raise BadStatusLine(line) httplib.BadStatusLine: '' ``` Using version 2.9 of the library I got this traceback in the past: ``` File "/home/rahiel/BismillahBot/bismillah.py", line 122, in upload v = bot.sendAudio(audio=f, **kwargs)["audio"]["file_id"] File "/home/rahiel/BismillahBot/venv/local/lib/python2.7/site-packages/telegram/bot.py", line 126, in decorator result = func(self, *args, **kwargs) File "/home/rahiel/BismillahBot/venv/local/lib/python2.7/site-packages/telegram/bot.py", line 158, in decorator result = request.post(url, data) File "/home/rahiel/BismillahBot/venv/local/lib/python2.7/site-packages/telegram/utils/request.py", line 108, in post message = _parse(error.read()) File "/usr/lib/python2.7/socket.py", line 351, in read data = self._sock.recv(rbufsize) File "/usr/lib/python2.7/httplib.py", line 549, in read return self._read_chunked(amt) File "/usr/lib/python2.7/httplib.py", line 603, in _read_chunked raise IncompleteRead(''.join(value)) httplib.IncompleteRead: IncompleteRead(0 bytes read) ``` Maybe we should catch these exceptions and reraise them as a TelegramError for http errors? I was puzzled by the second traceback, because I was getting it frequently, but that stopped since I updated the library. 
</issue> <code> [start of telegram/utils/request.py] 1 #!/usr/bin/env python 2 # pylint: disable=no-name-in-module,unused-import 3 # 4 # A library that provides a Python interface to the Telegram Bot API 5 # Copyright (C) 2015-2016 6 # Leandro Toledo de Souza <[email protected]> 7 # 8 # This program is free software: you can redistribute it and/or modify 9 # it under the terms of the GNU Lesser Public License as published by 10 # the Free Software Foundation, either version 3 of the License, or 11 # (at your option) any later version. 12 # 13 # This program is distributed in the hope that it will be useful, 14 # but WITHOUT ANY WARRANTY; without even the implied warranty of 15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 # GNU Lesser Public License for more details. 17 # 18 # You should have received a copy of the GNU Lesser Public License 19 # along with this program. If not, see [http://www.gnu.org/licenses/]. 20 21 """This module contains methods to make POST and GET requests""" 22 23 import functools 24 import json 25 import socket 26 from ssl import SSLError 27 28 try: 29 from urllib.request import urlopen, urlretrieve, Request 30 from urllib.error import HTTPError 31 except ImportError: 32 from urllib import urlretrieve 33 from urllib2 import urlopen, Request 34 from urllib2 import HTTPError 35 36 from telegram import (InputFile, TelegramError) 37 38 39 def _parse(json_data): 40 """Try and parse the JSON returned from Telegram and return an empty 41 dictionary if there is any error. 42 43 Args: 44 url: 45 urllib.urlopen object 46 47 Returns: 48 A JSON parsed as Python dict with results. 49 """ 50 decoded_s = json_data.decode('utf-8') 51 try: 52 data = json.loads(decoded_s) 53 except ValueError: 54 raise TelegramError('Invalid server response') 55 56 if not data.get('ok') and data.get('description'): 57 return data['description'] 58 59 return data['result'] 60 61 62 def _try_except_req(func): 63 """Decorator for requests to handle known exceptions""" 64 @functools.wraps(func) 65 def decorator(*args, **kwargs): 66 try: 67 return func(*args, **kwargs) 68 except HTTPError as error: 69 if error.getcode() == 403: 70 raise TelegramError('Unauthorized') 71 if error.getcode() == 502: 72 raise TelegramError('Bad Gateway') 73 74 try: 75 message = _parse(error.read()) 76 except ValueError: 77 message = 'Unknown HTTPError {0}'.format(error.getcode()) 78 79 raise TelegramError(message) 80 except (SSLError, socket.timeout) as error: 81 if "operation timed out" in str(error): 82 raise TelegramError("Timed out") 83 84 raise TelegramError(str(error)) 85 86 return decorator 87 88 89 @_try_except_req 90 def get(url): 91 """Request an URL. 92 Args: 93 url: 94 The web location we want to retrieve. 95 96 Returns: 97 A JSON object. 98 """ 99 result = urlopen(url).read() 100 101 return _parse(result) 102 103 104 @_try_except_req 105 def post(url, 106 data, 107 network_delay=2.): 108 """Request an URL. 109 Args: 110 url: 111 The web location we want to retrieve. 112 data: 113 A dict of (str, unicode) key/value pairs. 114 network_delay: 115 Additional timeout in seconds to allow the response from Telegram to 116 take some time. 117 118 Returns: 119 A JSON object. 120 """ 121 122 # Add time to the timeout of urlopen to allow data to be transferred over 123 # the network. 
124 if 'timeout' in data: 125 timeout = data['timeout'] + network_delay 126 else: 127 timeout = None 128 129 if InputFile.is_inputfile(data): 130 data = InputFile(data) 131 request = Request(url, 132 data=data.to_form(), 133 headers=data.headers) 134 else: 135 data = json.dumps(data) 136 request = Request(url, 137 data=data.encode(), 138 headers={'Content-Type': 'application/json'}) 139 140 result = urlopen(request, timeout=timeout).read() 141 return _parse(result) 142 143 144 @_try_except_req 145 def download(url, 146 filename): 147 """Download a file by its URL. 148 Args: 149 url: 150 The web location we want to retrieve. 151 152 filename: 153 The filename wihtin the path to download the file. 154 """ 155 156 urlretrieve(url, filename) 157 [end of telegram/utils/request.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/telegram/utils/request.py b/telegram/utils/request.py --- a/telegram/utils/request.py +++ b/telegram/utils/request.py @@ -25,6 +25,13 @@ import socket from ssl import SSLError +try: + # python2 + from httplib import HTTPException +except ImportError: + # python3 + from http.client import HTTPException + try: from urllib.request import urlopen, urlretrieve, Request from urllib.error import HTTPError @@ -82,6 +89,8 @@ raise TelegramError("Timed out") raise TelegramError(str(error)) + except HTTPException as error: + raise TelegramError('HTTPException: {0!r}'.format(error)) return decorator
{"golden_diff": "diff --git a/telegram/utils/request.py b/telegram/utils/request.py\n--- a/telegram/utils/request.py\n+++ b/telegram/utils/request.py\n@@ -25,6 +25,13 @@\n import socket\n from ssl import SSLError\n \n+try:\n+ # python2\n+ from httplib import HTTPException\n+except ImportError:\n+ # python3\n+ from http.client import HTTPException\n+\n try:\n from urllib.request import urlopen, urlretrieve, Request\n from urllib.error import HTTPError\n@@ -82,6 +89,8 @@\n raise TelegramError(\"Timed out\")\n \n raise TelegramError(str(error))\n+ except HTTPException as error:\n+ raise TelegramError('HTTPException: {0!r}'.format(error))\n \n return decorator\n", "issue": "httplib exceptions\nRunning my bot continuously, I sometimes get rare exceptions. This traceback was with Python 2.7 and python-telegram-bot 3.2:\n\n```\n File \"/home/rahiel/BismillahBot/bismillah.py\", line 99, in send_quran \n bot.sendMessage(chat_id=chat_id, text=text)\n File \"/home/rahiel/BismillahBot/venv/local/lib/python2.7/site-packages/telegram/bot.py\", line 127, in decorator \n result = func(self, *args, **kwargs) \n File \"/home/rahiel/BismillahBot/venv/local/lib/python2.7/site-packages/telegram/bot.py\", line 159, in decorator \n result = request.post(url, data) \n File \"/home/rahiel/BismillahBot/venv/local/lib/python2.7/site-packages/telegram/utils/request.py\", line 67, in decorator \n return func(*args, **kwargs) \n File \"/home/rahiel/BismillahBot/venv/local/lib/python2.7/site-packages/telegram/utils/request.py\", line 140, in post \n result = urlopen(request, timeout=timeout).read() \n File \"/usr/lib/python2.7/urllib2.py\", line 127, in urlopen \n return _opener.open(url, data, timeout) \n File \"/usr/lib/python2.7/urllib2.py\", line 404, in open \n response = self._open(req, data) \n File \"/usr/lib/python2.7/urllib2.py\", line 422, in _open \n '_open', req) \n File \"/usr/lib/python2.7/urllib2.py\", line 382, in _call_chain \n result = func(*args) \n File \"/usr/lib/python2.7/urllib2.py\", line 1222, in https_open \n return self.do_open(httplib.HTTPSConnection, req) \n File \"/usr/lib/python2.7/urllib2.py\", line 1187, in do_open \n r = h.getresponse(buffering=True) \n File \"/usr/lib/python2.7/httplib.py\", line 1051, in getresponse \n response.begin() \n File \"/usr/lib/python2.7/httplib.py\", line 415, in begin \n version, status, reason = self._read_status() \n File \"/usr/lib/python2.7/httplib.py\", line 379, in _read_status \n raise BadStatusLine(line) \nhttplib.BadStatusLine: '' \n```\n\nUsing version 2.9 of the library I got this traceback in the past:\n\n```\n File \"/home/rahiel/BismillahBot/bismillah.py\", line 122, in upload\n v = bot.sendAudio(audio=f, **kwargs)[\"audio\"][\"file_id\"]\n File \"/home/rahiel/BismillahBot/venv/local/lib/python2.7/site-packages/telegram/bot.py\", line 126, in decorator\n result = func(self, *args, **kwargs)\n File \"/home/rahiel/BismillahBot/venv/local/lib/python2.7/site-packages/telegram/bot.py\", line 158, in decorator\n result = request.post(url, data)\n File \"/home/rahiel/BismillahBot/venv/local/lib/python2.7/site-packages/telegram/utils/request.py\", line 108, in post\n message = _parse(error.read())\n File \"/usr/lib/python2.7/socket.py\", line 351, in read\n data = self._sock.recv(rbufsize)\n File \"/usr/lib/python2.7/httplib.py\", line 549, in read\n return self._read_chunked(amt)\n File \"/usr/lib/python2.7/httplib.py\", line 603, in _read_chunked\n raise IncompleteRead(''.join(value))\nhttplib.IncompleteRead: IncompleteRead(0 bytes 
read)\n```\n\nMaybe we should catch these exceptions and reraise them as a TelegramError for http errors? I was puzzled by the second traceback, because I was getting it frequently, but that stopped since I updated the library.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# pylint: disable=no-name-in-module,unused-import\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2016\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\n\"\"\"This module contains methods to make POST and GET requests\"\"\"\n\nimport functools\nimport json\nimport socket\nfrom ssl import SSLError\n\ntry:\n from urllib.request import urlopen, urlretrieve, Request\n from urllib.error import HTTPError\nexcept ImportError:\n from urllib import urlretrieve\n from urllib2 import urlopen, Request\n from urllib2 import HTTPError\n\nfrom telegram import (InputFile, TelegramError)\n\n\ndef _parse(json_data):\n \"\"\"Try and parse the JSON returned from Telegram and return an empty\n dictionary if there is any error.\n\n Args:\n url:\n urllib.urlopen object\n\n Returns:\n A JSON parsed as Python dict with results.\n \"\"\"\n decoded_s = json_data.decode('utf-8')\n try:\n data = json.loads(decoded_s)\n except ValueError:\n raise TelegramError('Invalid server response')\n\n if not data.get('ok') and data.get('description'):\n return data['description']\n\n return data['result']\n\n\ndef _try_except_req(func):\n \"\"\"Decorator for requests to handle known exceptions\"\"\"\n @functools.wraps(func)\n def decorator(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except HTTPError as error:\n if error.getcode() == 403:\n raise TelegramError('Unauthorized')\n if error.getcode() == 502:\n raise TelegramError('Bad Gateway')\n\n try:\n message = _parse(error.read())\n except ValueError:\n message = 'Unknown HTTPError {0}'.format(error.getcode())\n\n raise TelegramError(message)\n except (SSLError, socket.timeout) as error:\n if \"operation timed out\" in str(error):\n raise TelegramError(\"Timed out\")\n\n raise TelegramError(str(error))\n\n return decorator\n\n\n@_try_except_req\ndef get(url):\n \"\"\"Request an URL.\n Args:\n url:\n The web location we want to retrieve.\n\n Returns:\n A JSON object.\n \"\"\"\n result = urlopen(url).read()\n\n return _parse(result)\n\n\n@_try_except_req\ndef post(url,\n data,\n network_delay=2.):\n \"\"\"Request an URL.\n Args:\n url:\n The web location we want to retrieve.\n data:\n A dict of (str, unicode) key/value pairs.\n network_delay:\n Additional timeout in seconds to allow the response from Telegram to\n take some time.\n\n Returns:\n A JSON object.\n \"\"\"\n\n # Add time to the timeout of urlopen to allow data to be transferred over\n # the network.\n if 'timeout' in data:\n timeout = data['timeout'] + network_delay\n else:\n timeout = None\n\n if InputFile.is_inputfile(data):\n data = 
InputFile(data)\n request = Request(url,\n data=data.to_form(),\n headers=data.headers)\n else:\n data = json.dumps(data)\n request = Request(url,\n data=data.encode(),\n headers={'Content-Type': 'application/json'})\n\n result = urlopen(request, timeout=timeout).read()\n return _parse(result)\n\n\n@_try_except_req\ndef download(url,\n filename):\n \"\"\"Download a file by its URL.\n Args:\n url:\n The web location we want to retrieve.\n\n filename:\n The filename wihtin the path to download the file.\n \"\"\"\n\n urlretrieve(url, filename)\n", "path": "telegram/utils/request.py"}]}
2,824
174
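The patch above catches `HTTPException` inside the request decorator and re-raises it as a `TelegramError`, which covers both `BadStatusLine` and `IncompleteRead` from the tracebacks in the issue, since both inherit from `HTTPException`. Here is a self-contained sketch of the pattern; `TelegramError` is reduced to a bare exception subclass rather than the real class from the `telegram` package.

```python
import functools

try:
    from httplib import HTTPException        # Python 2
except ImportError:
    from http.client import HTTPException    # Python 3


class TelegramError(Exception):
    """Simplified stand-in for telegram.TelegramError."""


def _try_except_req(func):
    """Decorator for requests to handle known exceptions."""
    @functools.wraps(func)
    def decorator(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPException as error:
            # BadStatusLine and IncompleteRead both derive from
            # HTTPException, so the failures from the issue end up here.
            raise TelegramError('HTTPException: {0!r}'.format(error))
    return decorator


@_try_except_req
def flaky_request():
    raise HTTPException('server closed the connection mid-response')


try:
    flaky_request()
except TelegramError as err:
    print(err)
```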
gh_patches_debug_30011
rasdani/github-patches
git_diff
microsoft__botbuilder-python-1889
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Teams Task Module - Deserialization Error on Teams mobile app for iOS ## Version botbuilder-integration-aiohttp 4.14.0 Python 3.8.6 ## Describe the bug Error when loading Task Module on iOS iOS 14.8.1 / MS Teams v3.20.0 ## To Reproduce 1. Deploy [sample bot 54.teams-task-module](https://github.com/microsoft/BotBuilder-Samples/tree/main/samples/python/54.teams-task-module) 2. Say hello and click on _Adaptive Card_ button 3. Deserialization Error when on iOS iOS 14.8.1 / Microsoft Teams v3.20.0 ![image](https://user-images.githubusercontent.com/4013036/146412591-61399a75-d3d3-4eb6-a0ec-36ffa3cac54c.png) ## Traceback _(file locations prefix intentionally removed)_ ``` File "test_teams_task/env/lib/site-packages/msrest/serialization.py", line 1293, in _deserialize found_value = key_extractor(attr, attr_desc, data) File "test_teams_task/env/lib/site-packages/msrest/serialization.py", line 1064, in rest_key_extractor return working_data.get(key) AttributeError: 'str' object has no attribute 'get' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "test_teams_task/env/lib/site-packages/botbuilder/core/bot_adapter.py", line 129, in run_pipeline context, callback File "test_teams_task/env/lib/site-packages/botbuilder/core/middleware_set.py", line 69, in receive_activity_with_status return await self.receive_activity_internal(context, callback) File "test_teams_task/env/lib/site-packages/botbuilder/core/middleware_set.py", line 79, in receive_activity_internal return await callback(context) File "test_teams_task/env/lib/site-packages/botbuilder/core/activity_handler.py", line 78, in on_turn invoke_response = await self.on_invoke_activity(turn_context) File "test_teams_task/env/lib/site-packages/botbuilder/core/teams/teams_activity_handler.py", line 155, in on_invoke_activity TaskModuleRequest, turn_context.activity.value File "test_teams_task/env/lib/site-packages/botbuilder/core/serializer_helper.py", line 28, in deserializer_helper return deserializer(msrest_cls.__name__, dict_to_deserialize) File "test_teams_task/env/lib/site-packages/msrest/serialization.py", line 1233, in __call__ return self._deserialize(target_obj, data) File "test_teams_task/env/lib/site-packages/msrest/serialization.py", line 1299, in _deserialize value = self.deserialize_data(raw_value, attr_desc['type']) File "test_teams_task/env/lib/site-packages/msrest/serialization.py", line 1468, in deserialize_data return self._deserialize(obj_type, data) File "test_teams_task/env/lib/site-packages/msrest/serialization.py", line 1303, in _deserialize raise_with_traceback(DeserializationError, msg, err) File "test_teams_task/env/lib/site-packages/msrest/exceptions.py", line 51, in raise_with_traceback raise error.with_traceback(exc_traceback) File "test_teams_task/env/lib/site-packages/msrest/serialization.py", line 1293, in _deserialize found_value = key_extractor(attr, attr_desc, data) File "test_teams_task/env/lib/site-packages/msrest/serialization.py", line 1064, in rest_key_extractor return working_data.get(key) msrest.exceptions.DeserializationError: Unable to deserialize to object: type, AttributeError: 'str' object has no attribute 'get' ``` ## Expected behavior This sample bot raises no error when interacting on the following platforms: - Windows 10 (Desktop app, Firefox, Chrome) - macOS (Chrome) - Android (Mobile app) 
![image](https://user-images.githubusercontent.com/4013036/146413680-7bc42c4d-9876-4d18-9a61-7b94b4a5cccb.png) It was possible to interact with Task Module on iOS iOS 14.8.1 / Microsoft Teams v3.20.0 when deploying these samples (python not included): https://docs.microsoft.com/en-us/samples/officedev/microsoft-teams-samples/ms-teams-task-sample/ ## Additional context Initially the error was detected on a bot in production currently deployed in Azure. Since the error message is the same when running [bot sample 54.teams-task-module](https://github.com/microsoft/BotBuilder-Samples/tree/main/samples/python/54.teams-task-module), for the sake of repro we can take this example. </issue> <code> [start of libraries/botbuilder-core/botbuilder/core/serializer_helper.py] 1 # Copyright (c) Microsoft Corporation. All rights reserved. 2 # Licensed under the MIT License. 3 4 from inspect import getmembers 5 from typing import Type 6 from enum import Enum 7 8 from msrest.serialization import Model, Deserializer, Serializer 9 10 import botbuilder.schema as schema 11 import botbuilder.schema.teams as teams_schema 12 13 DEPENDICIES = [ 14 schema_cls 15 for key, schema_cls in getmembers(schema) 16 if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum)) 17 ] 18 DEPENDICIES += [ 19 schema_cls 20 for key, schema_cls in getmembers(teams_schema) 21 if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum)) 22 ] 23 DEPENDICIES_DICT = {dependency.__name__: dependency for dependency in DEPENDICIES} 24 25 26 def deserializer_helper(msrest_cls: Type[Model], dict_to_deserialize: dict) -> Model: 27 deserializer = Deserializer(DEPENDICIES_DICT) 28 return deserializer(msrest_cls.__name__, dict_to_deserialize) 29 30 31 def serializer_helper(object_to_serialize: Model) -> dict: 32 if object_to_serialize is None: 33 return None 34 35 serializer = Serializer(DEPENDICIES_DICT) 36 # pylint: disable=protected-access 37 return serializer._serialize(object_to_serialize) 38 [end of libraries/botbuilder-core/botbuilder/core/serializer_helper.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/libraries/botbuilder-core/botbuilder/core/serializer_helper.py b/libraries/botbuilder-core/botbuilder/core/serializer_helper.py --- a/libraries/botbuilder-core/botbuilder/core/serializer_helper.py +++ b/libraries/botbuilder-core/botbuilder/core/serializer_helper.py @@ -1,6 +1,6 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. - +from copy import copy from inspect import getmembers from typing import Type from enum import Enum @@ -25,6 +25,9 @@ def deserializer_helper(msrest_cls: Type[Model], dict_to_deserialize: dict) -> Model: deserializer = Deserializer(DEPENDICIES_DICT) + _clean_data_for_serialization( + deserializer.dependencies[msrest_cls.__name__], dict_to_deserialize + ) return deserializer(msrest_cls.__name__, dict_to_deserialize) @@ -35,3 +38,21 @@ serializer = Serializer(DEPENDICIES_DICT) # pylint: disable=protected-access return serializer._serialize(object_to_serialize) + + +def _clean_data_for_serialization(msrest_cls: Type[Model], dict_to_deserialize: dict): + # pylint: disable=protected-access + # Clean channel response of empty strings for expected objects. + if not isinstance(dict_to_deserialize, dict): + return + serialization_model = copy(msrest_cls._attribute_map) + for key, value in msrest_cls._attribute_map.items(): + if key != value["key"]: + serialization_model[value["key"]] = value + for prop, prop_value in dict_to_deserialize.items(): + if ( + prop in serialization_model + and serialization_model[prop]["type"] in DEPENDICIES_DICT + and not prop_value + ): + dict_to_deserialize[prop] = None
{"golden_diff": "diff --git a/libraries/botbuilder-core/botbuilder/core/serializer_helper.py b/libraries/botbuilder-core/botbuilder/core/serializer_helper.py\n--- a/libraries/botbuilder-core/botbuilder/core/serializer_helper.py\n+++ b/libraries/botbuilder-core/botbuilder/core/serializer_helper.py\n@@ -1,6 +1,6 @@\n # Copyright (c) Microsoft Corporation. All rights reserved.\n # Licensed under the MIT License.\n-\n+from copy import copy\n from inspect import getmembers\n from typing import Type\n from enum import Enum\n@@ -25,6 +25,9 @@\n \n def deserializer_helper(msrest_cls: Type[Model], dict_to_deserialize: dict) -> Model:\n deserializer = Deserializer(DEPENDICIES_DICT)\n+ _clean_data_for_serialization(\n+ deserializer.dependencies[msrest_cls.__name__], dict_to_deserialize\n+ )\n return deserializer(msrest_cls.__name__, dict_to_deserialize)\n \n \n@@ -35,3 +38,21 @@\n serializer = Serializer(DEPENDICIES_DICT)\n # pylint: disable=protected-access\n return serializer._serialize(object_to_serialize)\n+\n+\n+def _clean_data_for_serialization(msrest_cls: Type[Model], dict_to_deserialize: dict):\n+ # pylint: disable=protected-access\n+ # Clean channel response of empty strings for expected objects.\n+ if not isinstance(dict_to_deserialize, dict):\n+ return\n+ serialization_model = copy(msrest_cls._attribute_map)\n+ for key, value in msrest_cls._attribute_map.items():\n+ if key != value[\"key\"]:\n+ serialization_model[value[\"key\"]] = value\n+ for prop, prop_value in dict_to_deserialize.items():\n+ if (\n+ prop in serialization_model\n+ and serialization_model[prop][\"type\"] in DEPENDICIES_DICT\n+ and not prop_value\n+ ):\n+ dict_to_deserialize[prop] = None\n", "issue": "Teams Task Module - Deserialization Error on Teams mobile app for iOS\n## Version\r\nbotbuilder-integration-aiohttp 4.14.0\r\nPython 3.8.6 \r\n\r\n## Describe the bug\r\nError when loading Task Module on iOS iOS 14.8.1 / MS Teams v3.20.0\r\n\r\n## To Reproduce\r\n1. Deploy [sample bot 54.teams-task-module](https://github.com/microsoft/BotBuilder-Samples/tree/main/samples/python/54.teams-task-module) \r\n2. Say hello and click on _Adaptive Card_ button\r\n3. 
Deserialization Error when on iOS iOS 14.8.1 / Microsoft Teams v3.20.0\r\n![image](https://user-images.githubusercontent.com/4013036/146412591-61399a75-d3d3-4eb6-a0ec-36ffa3cac54c.png)\r\n\r\n## Traceback\r\n_(file locations prefix intentionally removed)_\r\n```\r\n File \"test_teams_task/env/lib/site-packages/msrest/serialization.py\", line 1293, in _deserialize\r\n found_value = key_extractor(attr, attr_desc, data)\r\n File \"test_teams_task/env/lib/site-packages/msrest/serialization.py\", line 1064, in rest_key_extractor\r\n return working_data.get(key)\r\nAttributeError: 'str' object has no attribute 'get'\r\nDuring handling of the above exception, another exception occurred:\r\nTraceback (most recent call last):\r\n File \"test_teams_task/env/lib/site-packages/botbuilder/core/bot_adapter.py\", line 129, in run_pipeline\r\n context, callback\r\n File \"test_teams_task/env/lib/site-packages/botbuilder/core/middleware_set.py\", line 69, in receive_activity_with_status\r\n return await self.receive_activity_internal(context, callback)\r\n File \"test_teams_task/env/lib/site-packages/botbuilder/core/middleware_set.py\", line 79, in receive_activity_internal\r\n return await callback(context)\r\n File \"test_teams_task/env/lib/site-packages/botbuilder/core/activity_handler.py\", line 78, in on_turn\r\n invoke_response = await self.on_invoke_activity(turn_context)\r\n File \"test_teams_task/env/lib/site-packages/botbuilder/core/teams/teams_activity_handler.py\", line 155, in on_invoke_activity\r\n TaskModuleRequest, turn_context.activity.value\r\n File \"test_teams_task/env/lib/site-packages/botbuilder/core/serializer_helper.py\", line 28, in deserializer_helper\r\n return deserializer(msrest_cls.__name__, dict_to_deserialize)\r\n File \"test_teams_task/env/lib/site-packages/msrest/serialization.py\", line 1233, in __call__\r\n return self._deserialize(target_obj, data)\r\n File \"test_teams_task/env/lib/site-packages/msrest/serialization.py\", line 1299, in _deserialize\r\n value = self.deserialize_data(raw_value, attr_desc['type'])\r\n File \"test_teams_task/env/lib/site-packages/msrest/serialization.py\", line 1468, in deserialize_data\r\n return self._deserialize(obj_type, data)\r\n File \"test_teams_task/env/lib/site-packages/msrest/serialization.py\", line 1303, in _deserialize\r\n raise_with_traceback(DeserializationError, msg, err)\r\n File \"test_teams_task/env/lib/site-packages/msrest/exceptions.py\", line 51, in raise_with_traceback\r\n raise error.with_traceback(exc_traceback)\r\n File \"test_teams_task/env/lib/site-packages/msrest/serialization.py\", line 1293, in _deserialize\r\n found_value = key_extractor(attr, attr_desc, data)\r\n File \"test_teams_task/env/lib/site-packages/msrest/serialization.py\", line 1064, in rest_key_extractor\r\n return working_data.get(key)\r\nmsrest.exceptions.DeserializationError: Unable to deserialize to object: type, AttributeError: 'str' object has no attribute 'get'\r\n```\r\n\r\n## Expected behavior\r\nThis sample bot raises no error when interacting on the following platforms:\r\n- Windows 10 (Desktop app, Firefox, Chrome)\r\n- macOS (Chrome)\r\n- Android (Mobile app)\r\n![image](https://user-images.githubusercontent.com/4013036/146413680-7bc42c4d-9876-4d18-9a61-7b94b4a5cccb.png)\r\n\r\nIt was possible to interact with Task Module on iOS iOS 14.8.1 / Microsoft Teams v3.20.0 when deploying these samples (python not included):\r\nhttps://docs.microsoft.com/en-us/samples/officedev/microsoft-teams-samples/ms-teams-task-sample/\r\n\r\n## 
Additional context\r\nInitially the error was detected on a bot in production currently deployed in Azure. Since the error message is the same when running [bot sample 54.teams-task-module](https://github.com/microsoft/BotBuilder-Samples/tree/main/samples/python/54.teams-task-module), for the sake of repro we can take this example. \r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nfrom inspect import getmembers\nfrom typing import Type\nfrom enum import Enum\n\nfrom msrest.serialization import Model, Deserializer, Serializer\n\nimport botbuilder.schema as schema\nimport botbuilder.schema.teams as teams_schema\n\nDEPENDICIES = [\n schema_cls\n for key, schema_cls in getmembers(schema)\n if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum))\n]\nDEPENDICIES += [\n schema_cls\n for key, schema_cls in getmembers(teams_schema)\n if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum))\n]\nDEPENDICIES_DICT = {dependency.__name__: dependency for dependency in DEPENDICIES}\n\n\ndef deserializer_helper(msrest_cls: Type[Model], dict_to_deserialize: dict) -> Model:\n deserializer = Deserializer(DEPENDICIES_DICT)\n return deserializer(msrest_cls.__name__, dict_to_deserialize)\n\n\ndef serializer_helper(object_to_serialize: Model) -> dict:\n if object_to_serialize is None:\n return None\n\n serializer = Serializer(DEPENDICIES_DICT)\n # pylint: disable=protected-access\n return serializer._serialize(object_to_serialize)\n", "path": "libraries/botbuilder-core/botbuilder/core/serializer_helper.py"}]}
2,006
428
gh_patches_debug_2288
rasdani/github-patches
git_diff
bookwyrm-social__bookwyrm-1577
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> "Your Books" doesn't display my most recent title **Describe the bug** For the last couple of months, your books is not showing anything I've read recently **To Reproduce** ![image](https://user-images.githubusercontent.com/1735650/138808998-67a648b2-c114-45cc-8de4-5d024ab28451.png) **Expected behavior** Expect to see the mostly recently read book on your books **Screenshots** If applicable, add screenshots to help explain your problem. **Instance** Bookwrym.social **Additional context** Add any other context about the problem here. --- **Desktop (please complete the following information):** - OS: [e.g. iOS] - Browser [e.g. chrome, safari] - Version [e.g. 22] **Smartphone (please complete the following information):** - Device: [e.g. iPhone6] - OS: [e.g. iOS8.1] - Browser [e.g. stock browser, safari] - Version [e.g. 22] "Your Books" doesn't display my most recent title **Describe the bug** For the last couple of months, your books is not showing anything I've read recently **To Reproduce** ![image](https://user-images.githubusercontent.com/1735650/138808998-67a648b2-c114-45cc-8de4-5d024ab28451.png) **Expected behavior** Expect to see the mostly recently read book on your books **Screenshots** If applicable, add screenshots to help explain your problem. **Instance** Bookwrym.social **Additional context** Add any other context about the problem here. --- **Desktop (please complete the following information):** - OS: [e.g. iOS] - Browser [e.g. chrome, safari] - Version [e.g. 22] **Smartphone (please complete the following information):** - Device: [e.g. iPhone6] - OS: [e.g. iOS8.1] - Browser [e.g. stock browser, safari] - Version [e.g. 22] </issue> <code> [start of bookwyrm/views/feed.py] 1 """ non-interactive pages """ 2 from django.contrib.auth.decorators import login_required 3 from django.core.paginator import Paginator 4 from django.db.models import Q 5 from django.http import HttpResponseNotFound, Http404 6 from django.shortcuts import get_object_or_404 7 from django.template.response import TemplateResponse 8 from django.utils import timezone 9 from django.utils.decorators import method_decorator 10 from django.views import View 11 12 from bookwyrm import activitystreams, forms, models 13 from bookwyrm.activitypub import ActivitypubResponse 14 from bookwyrm.settings import PAGE_LENGTH, STREAMS 15 from bookwyrm.suggested_users import suggested_users 16 from .helpers import get_user_from_username 17 from .helpers import is_api_request, is_bookwyrm_request 18 19 20 # pylint: disable= no-self-use 21 @method_decorator(login_required, name="dispatch") 22 class Feed(View): 23 """activity stream""" 24 25 def get(self, request, tab): 26 """user's homepage with activity feed""" 27 tab = [s for s in STREAMS if s["key"] == tab] 28 tab = tab[0] if tab else STREAMS[0] 29 30 activities = activitystreams.streams[tab["key"]].get_activity_stream( 31 request.user 32 ) 33 paginated = Paginator(activities, PAGE_LENGTH) 34 35 suggestions = suggested_users.get_suggestions(request.user) 36 37 data = { 38 **feed_page_data(request.user), 39 **{ 40 "user": request.user, 41 "activities": paginated.get_page(request.GET.get("page")), 42 "suggested_users": suggestions, 43 "tab": tab, 44 "streams": STREAMS, 45 "goal_form": forms.GoalForm(), 46 "path": f"/{tab['key']}", 47 }, 48 } 49 return TemplateResponse(request, "feed/feed.html", data) 50 51 52 @method_decorator(login_required, name="dispatch") 53 class 
DirectMessage(View): 54 """dm view""" 55 56 def get(self, request, username=None): 57 """like a feed but for dms only""" 58 # remove fancy subclasses of status, keep just good ol' notes 59 activities = ( 60 models.Status.privacy_filter(request.user, privacy_levels=["direct"]) 61 .filter( 62 review__isnull=True, 63 comment__isnull=True, 64 quotation__isnull=True, 65 generatednote__isnull=True, 66 ) 67 .order_by("-published_date") 68 ) 69 70 user = None 71 if username: 72 try: 73 user = get_user_from_username(request.user, username) 74 except Http404: 75 pass 76 if user: 77 activities = activities.filter(Q(user=user) | Q(mention_users=user)) 78 79 paginated = Paginator(activities, PAGE_LENGTH) 80 data = { 81 **feed_page_data(request.user), 82 **{ 83 "user": request.user, 84 "partner": user, 85 "activities": paginated.get_page(request.GET.get("page")), 86 "path": "/direct-messages", 87 }, 88 } 89 return TemplateResponse(request, "feed/direct_messages.html", data) 90 91 92 class Status(View): 93 """get posting""" 94 95 def get(self, request, username, status_id): 96 """display a particular status (and replies, etc)""" 97 user = get_user_from_username(request.user, username) 98 status = get_object_or_404( 99 models.Status.objects.select_subclasses(), 100 user=user, 101 id=status_id, 102 deleted=False, 103 ) 104 # make sure the user is authorized to see the status 105 status.raise_visible_to_user(request.user) 106 107 if is_api_request(request): 108 return ActivitypubResponse( 109 status.to_activity(pure=not is_bookwyrm_request(request)) 110 ) 111 112 visible_thread = ( 113 models.Status.privacy_filter(request.user) 114 .filter(thread_id=status.thread_id) 115 .values_list("id", flat=True) 116 ) 117 visible_thread = list(visible_thread) 118 119 ancestors = models.Status.objects.select_subclasses().raw( 120 """ 121 WITH RECURSIVE get_thread(depth, id, path) AS ( 122 123 SELECT 1, st.id, ARRAY[st.id] 124 FROM bookwyrm_status st 125 WHERE id = '%s' AND id = ANY(%s) 126 127 UNION 128 129 SELECT (gt.depth + 1), st.reply_parent_id, path || st.id 130 FROM get_thread gt, bookwyrm_status st 131 132 WHERE st.id = gt.id AND depth < 5 AND st.id = ANY(%s) 133 134 ) 135 136 SELECT * FROM get_thread ORDER BY path DESC; 137 """, 138 params=[status.reply_parent_id or 0, visible_thread, visible_thread], 139 ) 140 children = models.Status.objects.select_subclasses().raw( 141 """ 142 WITH RECURSIVE get_thread(depth, id, path) AS ( 143 144 SELECT 1, st.id, ARRAY[st.id] 145 FROM bookwyrm_status st 146 WHERE reply_parent_id = '%s' AND id = ANY(%s) 147 148 UNION 149 150 SELECT (gt.depth + 1), st.id, path || st.id 151 FROM get_thread gt, bookwyrm_status st 152 153 WHERE st.reply_parent_id = gt.id AND depth < 5 AND st.id = ANY(%s) 154 155 ) 156 157 SELECT * FROM get_thread ORDER BY path; 158 """, 159 params=[status.id, visible_thread, visible_thread], 160 ) 161 162 data = { 163 **feed_page_data(request.user), 164 **{ 165 "status": status, 166 "children": children, 167 "ancestors": ancestors, 168 }, 169 } 170 return TemplateResponse(request, "feed/status.html", data) 171 172 173 class Replies(View): 174 """replies page (a json view of status)""" 175 176 def get(self, request, username, status_id): 177 """ordered collection of replies to a status""" 178 # the html view is the same as Status 179 if not is_api_request(request): 180 status_view = Status.as_view() 181 return status_view(request, username, status_id) 182 183 # the json view is different than Status 184 status = models.Status.objects.get(id=status_id) 185 if 
status.user.localname != username: 186 return HttpResponseNotFound() 187 status.raise_visible_to_user(request.user) 188 189 return ActivitypubResponse(status.to_replies(**request.GET)) 190 191 192 def feed_page_data(user): 193 """info we need for every feed page""" 194 if not user.is_authenticated: 195 return {} 196 197 goal = models.AnnualGoal.objects.filter(user=user, year=timezone.now().year).first() 198 return { 199 "suggested_books": get_suggested_books(user), 200 "goal": goal, 201 "goal_form": forms.GoalForm(), 202 } 203 204 205 def get_suggested_books(user, max_books=5): 206 """helper to get a user's recent books""" 207 book_count = 0 208 preset_shelves = [("reading", max_books), ("read", 2), ("to-read", max_books)] 209 suggested_books = [] 210 for (preset, shelf_max) in preset_shelves: 211 limit = ( 212 shelf_max 213 if shelf_max < (max_books - book_count) 214 else max_books - book_count 215 ) 216 shelf = user.shelf_set.get(identifier=preset) 217 if not shelf.books.exists(): 218 continue 219 220 shelf_preview = { 221 "name": shelf.name, 222 "identifier": shelf.identifier, 223 "books": models.Edition.viewer_aware_objects(user) 224 .filter( 225 shelfbook__shelf=shelf, 226 ) 227 .prefetch_related("authors")[:limit], 228 } 229 suggested_books.append(shelf_preview) 230 book_count += len(shelf_preview["books"]) 231 return suggested_books 232 [end of bookwyrm/views/feed.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bookwyrm/views/feed.py b/bookwyrm/views/feed.py --- a/bookwyrm/views/feed.py +++ b/bookwyrm/views/feed.py @@ -224,6 +224,7 @@ .filter( shelfbook__shelf=shelf, ) + .order_by("-shelfbook__shelved_date") .prefetch_related("authors")[:limit], } suggested_books.append(shelf_preview)
{"golden_diff": "diff --git a/bookwyrm/views/feed.py b/bookwyrm/views/feed.py\n--- a/bookwyrm/views/feed.py\n+++ b/bookwyrm/views/feed.py\n@@ -224,6 +224,7 @@\n .filter(\n shelfbook__shelf=shelf,\n )\n+ .order_by(\"-shelfbook__shelved_date\")\n .prefetch_related(\"authors\")[:limit],\n }\n suggested_books.append(shelf_preview)\n", "issue": "\"Your Books\" doesn't display my most recent title\n**Describe the bug**\r\nFor the last couple of months, your books is not showing anything I've read recently \r\n\r\n**To Reproduce**\r\n![image](https://user-images.githubusercontent.com/1735650/138808998-67a648b2-c114-45cc-8de4-5d024ab28451.png)\r\n\r\n**Expected behavior**\r\nExpect to see the mostly recently read book on your books\r\n\r\n**Screenshots**\r\nIf applicable, add screenshots to help explain your problem.\r\n\r\n**Instance**\r\nBookwrym.social\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\r\n---\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: [e.g. iOS]\r\n - Browser [e.g. chrome, safari]\r\n - Version [e.g. 22]\r\n\r\n**Smartphone (please complete the following information):**\r\n - Device: [e.g. iPhone6]\r\n - OS: [e.g. iOS8.1]\r\n - Browser [e.g. stock browser, safari]\r\n - Version [e.g. 22]\r\n\n\"Your Books\" doesn't display my most recent title\n**Describe the bug**\r\nFor the last couple of months, your books is not showing anything I've read recently \r\n\r\n**To Reproduce**\r\n![image](https://user-images.githubusercontent.com/1735650/138808998-67a648b2-c114-45cc-8de4-5d024ab28451.png)\r\n\r\n**Expected behavior**\r\nExpect to see the mostly recently read book on your books\r\n\r\n**Screenshots**\r\nIf applicable, add screenshots to help explain your problem.\r\n\r\n**Instance**\r\nBookwrym.social\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\r\n---\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: [e.g. iOS]\r\n - Browser [e.g. chrome, safari]\r\n - Version [e.g. 22]\r\n\r\n**Smartphone (please complete the following information):**\r\n - Device: [e.g. iPhone6]\r\n - OS: [e.g. iOS8.1]\r\n - Browser [e.g. stock browser, safari]\r\n - Version [e.g. 
22]\r\n\n", "before_files": [{"content": "\"\"\" non-interactive pages \"\"\"\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\nfrom django.db.models import Q\nfrom django.http import HttpResponseNotFound, Http404\nfrom django.shortcuts import get_object_or_404\nfrom django.template.response import TemplateResponse\nfrom django.utils import timezone\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\n\nfrom bookwyrm import activitystreams, forms, models\nfrom bookwyrm.activitypub import ActivitypubResponse\nfrom bookwyrm.settings import PAGE_LENGTH, STREAMS\nfrom bookwyrm.suggested_users import suggested_users\nfrom .helpers import get_user_from_username\nfrom .helpers import is_api_request, is_bookwyrm_request\n\n\n# pylint: disable= no-self-use\n@method_decorator(login_required, name=\"dispatch\")\nclass Feed(View):\n \"\"\"activity stream\"\"\"\n\n def get(self, request, tab):\n \"\"\"user's homepage with activity feed\"\"\"\n tab = [s for s in STREAMS if s[\"key\"] == tab]\n tab = tab[0] if tab else STREAMS[0]\n\n activities = activitystreams.streams[tab[\"key\"]].get_activity_stream(\n request.user\n )\n paginated = Paginator(activities, PAGE_LENGTH)\n\n suggestions = suggested_users.get_suggestions(request.user)\n\n data = {\n **feed_page_data(request.user),\n **{\n \"user\": request.user,\n \"activities\": paginated.get_page(request.GET.get(\"page\")),\n \"suggested_users\": suggestions,\n \"tab\": tab,\n \"streams\": STREAMS,\n \"goal_form\": forms.GoalForm(),\n \"path\": f\"/{tab['key']}\",\n },\n }\n return TemplateResponse(request, \"feed/feed.html\", data)\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass DirectMessage(View):\n \"\"\"dm view\"\"\"\n\n def get(self, request, username=None):\n \"\"\"like a feed but for dms only\"\"\"\n # remove fancy subclasses of status, keep just good ol' notes\n activities = (\n models.Status.privacy_filter(request.user, privacy_levels=[\"direct\"])\n .filter(\n review__isnull=True,\n comment__isnull=True,\n quotation__isnull=True,\n generatednote__isnull=True,\n )\n .order_by(\"-published_date\")\n )\n\n user = None\n if username:\n try:\n user = get_user_from_username(request.user, username)\n except Http404:\n pass\n if user:\n activities = activities.filter(Q(user=user) | Q(mention_users=user))\n\n paginated = Paginator(activities, PAGE_LENGTH)\n data = {\n **feed_page_data(request.user),\n **{\n \"user\": request.user,\n \"partner\": user,\n \"activities\": paginated.get_page(request.GET.get(\"page\")),\n \"path\": \"/direct-messages\",\n },\n }\n return TemplateResponse(request, \"feed/direct_messages.html\", data)\n\n\nclass Status(View):\n \"\"\"get posting\"\"\"\n\n def get(self, request, username, status_id):\n \"\"\"display a particular status (and replies, etc)\"\"\"\n user = get_user_from_username(request.user, username)\n status = get_object_or_404(\n models.Status.objects.select_subclasses(),\n user=user,\n id=status_id,\n deleted=False,\n )\n # make sure the user is authorized to see the status\n status.raise_visible_to_user(request.user)\n\n if is_api_request(request):\n return ActivitypubResponse(\n status.to_activity(pure=not is_bookwyrm_request(request))\n )\n\n visible_thread = (\n models.Status.privacy_filter(request.user)\n .filter(thread_id=status.thread_id)\n .values_list(\"id\", flat=True)\n )\n visible_thread = list(visible_thread)\n\n ancestors = models.Status.objects.select_subclasses().raw(\n \"\"\"\n WITH 
RECURSIVE get_thread(depth, id, path) AS (\n\n SELECT 1, st.id, ARRAY[st.id]\n FROM bookwyrm_status st\n WHERE id = '%s' AND id = ANY(%s)\n\n UNION\n\n SELECT (gt.depth + 1), st.reply_parent_id, path || st.id\n FROM get_thread gt, bookwyrm_status st\n\n WHERE st.id = gt.id AND depth < 5 AND st.id = ANY(%s)\n\n )\n\n SELECT * FROM get_thread ORDER BY path DESC;\n \"\"\",\n params=[status.reply_parent_id or 0, visible_thread, visible_thread],\n )\n children = models.Status.objects.select_subclasses().raw(\n \"\"\"\n WITH RECURSIVE get_thread(depth, id, path) AS (\n\n SELECT 1, st.id, ARRAY[st.id]\n FROM bookwyrm_status st\n WHERE reply_parent_id = '%s' AND id = ANY(%s)\n\n UNION\n\n SELECT (gt.depth + 1), st.id, path || st.id\n FROM get_thread gt, bookwyrm_status st\n\n WHERE st.reply_parent_id = gt.id AND depth < 5 AND st.id = ANY(%s)\n\n )\n\n SELECT * FROM get_thread ORDER BY path;\n \"\"\",\n params=[status.id, visible_thread, visible_thread],\n )\n\n data = {\n **feed_page_data(request.user),\n **{\n \"status\": status,\n \"children\": children,\n \"ancestors\": ancestors,\n },\n }\n return TemplateResponse(request, \"feed/status.html\", data)\n\n\nclass Replies(View):\n \"\"\"replies page (a json view of status)\"\"\"\n\n def get(self, request, username, status_id):\n \"\"\"ordered collection of replies to a status\"\"\"\n # the html view is the same as Status\n if not is_api_request(request):\n status_view = Status.as_view()\n return status_view(request, username, status_id)\n\n # the json view is different than Status\n status = models.Status.objects.get(id=status_id)\n if status.user.localname != username:\n return HttpResponseNotFound()\n status.raise_visible_to_user(request.user)\n\n return ActivitypubResponse(status.to_replies(**request.GET))\n\n\ndef feed_page_data(user):\n \"\"\"info we need for every feed page\"\"\"\n if not user.is_authenticated:\n return {}\n\n goal = models.AnnualGoal.objects.filter(user=user, year=timezone.now().year).first()\n return {\n \"suggested_books\": get_suggested_books(user),\n \"goal\": goal,\n \"goal_form\": forms.GoalForm(),\n }\n\n\ndef get_suggested_books(user, max_books=5):\n \"\"\"helper to get a user's recent books\"\"\"\n book_count = 0\n preset_shelves = [(\"reading\", max_books), (\"read\", 2), (\"to-read\", max_books)]\n suggested_books = []\n for (preset, shelf_max) in preset_shelves:\n limit = (\n shelf_max\n if shelf_max < (max_books - book_count)\n else max_books - book_count\n )\n shelf = user.shelf_set.get(identifier=preset)\n if not shelf.books.exists():\n continue\n\n shelf_preview = {\n \"name\": shelf.name,\n \"identifier\": shelf.identifier,\n \"books\": models.Edition.viewer_aware_objects(user)\n .filter(\n shelfbook__shelf=shelf,\n )\n .prefetch_related(\"authors\")[:limit],\n }\n suggested_books.append(shelf_preview)\n book_count += len(shelf_preview[\"books\"])\n return suggested_books\n", "path": "bookwyrm/views/feed.py"}]}
3,260
99
gh_patches_debug_25990
rasdani/github-patches
git_diff
pwndbg__pwndbg-1826
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> heap_config should give an example format for GLIBC version The UX for the glibc version setting is not great. 1. The `heap_config` should give user an example format of GLIBC version we do expect. Currently it only shows this: ``` pwndbg> heap_config Name Value (Def) Documentation ----------------------------------------------------------- glibc '' GLIBC version for heuristics ... ``` 2. When one sets the `glibc` parameter, we should have a setter that will validate it against the expected version format. Currently it does not: ``` pwndbg> set glibc as Set GLIBC version for heuristics to 'as'. ``` </issue> <code> [start of pwndbg/glibc.py] 1 """ 2 Get information about the GLibc 3 """ 4 5 import functools 6 import os 7 import re 8 from typing import Optional 9 from typing import Tuple 10 11 import gdb 12 from elftools.elf.relocation import Relocation 13 14 import pwndbg.gdblib.config 15 import pwndbg.gdblib.elf 16 import pwndbg.gdblib.file 17 import pwndbg.gdblib.info 18 import pwndbg.gdblib.memory 19 import pwndbg.gdblib.proc 20 import pwndbg.gdblib.symbol 21 import pwndbg.heap 22 import pwndbg.lib.cache 23 import pwndbg.search 24 25 safe_lnk = pwndbg.gdblib.config.add_param( 26 "safe-linking", 27 None, 28 "whether glibc use safe-linking (on/off/auto)", 29 param_class=gdb.PARAM_AUTO_BOOLEAN, 30 ) 31 32 glibc_version = pwndbg.gdblib.config.add_param( 33 "glibc", "", "GLIBC version for heuristics", scope="heap" 34 ) 35 36 37 @pwndbg.gdblib.proc.OnlyWhenRunning 38 def get_version() -> Optional[Tuple[int, ...]]: 39 if glibc_version.value: 40 ret = re.search(r"(\d+)\.(\d+)", glibc_version.value) 41 if ret: 42 return tuple(int(_) for _ in ret.groups()) 43 else: 44 raise ValueError( 45 f"Invalid GLIBC version: `{glibc_version.value}`, you should provide something like: 2.31 or 2.34" 46 ) 47 return _get_version() 48 49 50 @pwndbg.gdblib.proc.OnlyWhenRunning 51 @pwndbg.lib.cache.cache_until("start", "objfile") 52 def _get_version() -> Optional[Tuple[int, ...]]: 53 if pwndbg.heap.current.libc_has_debug_syms(): 54 addr = pwndbg.gdblib.symbol.address("__libc_version") 55 if addr is not None: 56 ver = pwndbg.gdblib.memory.string(addr) 57 return tuple(int(_) for _ in ver.split(b".")) 58 libc_filename = get_libc_filename_from_info_sharedlibrary() 59 if not libc_filename: 60 return None 61 result = pwndbg.gdblib.elf.dump_section_by_name(libc_filename, ".rodata", try_local_path=True) 62 if not result: 63 return None 64 _, _, data = result 65 banner_start = data.find(b"GNU C Library") 66 if banner_start == -1: 67 return None 68 banner = data[banner_start : data.find(b"\x00", banner_start)] 69 ret = re.search(rb"release version (\d+)\.(\d+)", banner) 70 return tuple(int(_) for _ in ret.groups()) if ret else None 71 72 73 @pwndbg.gdblib.proc.OnlyWhenRunning 74 @pwndbg.lib.cache.cache_until("start", "objfile") 75 def get_libc_filename_from_info_sharedlibrary() -> Optional[str]: 76 """ 77 Get the filename of the libc by parsing the output of `info sharedlibrary`. 78 """ 79 possible_libc_path = [] 80 for path in pwndbg.gdblib.info.sharedlibrary_paths(): 81 basename = os.path.basename( 82 path[7:] if path.startswith("target:") else path 83 ) # "target:" prefix is for remote debugging 84 if basename == "libc.so.6": 85 # The default filename of libc should be libc.so.6, so if we found it, we just return it directly. 
86 return path 87 elif re.search(r"^libc6?[-_\.]", basename): 88 # Maybe user loaded the libc with LD_PRELOAD. 89 # Some common libc names: libc-2.36.so, libc6_2.36-0ubuntu4_amd64.so, libc.so 90 possible_libc_path.append( 91 path 92 ) # We don't return it, maybe there is a libc.so.6 and this match is just a false positive. 93 # TODO: This might fail if user use LD_PRELOAD to load libc with a weird name or there are multiple shared libraries match the pattern. 94 # (But do we really need to support this case? Maybe we can wait until users really need it :P.) 95 if possible_libc_path: 96 return possible_libc_path[0] # just return the first match for now :) 97 return None 98 99 100 @pwndbg.gdblib.proc.OnlyWhenRunning 101 @pwndbg.lib.cache.cache_until("start", "objfile") 102 def dump_elf_data_section() -> Optional[Tuple[int, int, bytes]]: 103 """ 104 Dump .data section of libc ELF file 105 """ 106 libc_filename = get_libc_filename_from_info_sharedlibrary() 107 if not libc_filename: 108 # libc not loaded yet, or it's static linked 109 return None 110 return pwndbg.gdblib.elf.dump_section_by_name(libc_filename, ".data", try_local_path=True) 111 112 113 @pwndbg.gdblib.proc.OnlyWhenRunning 114 @pwndbg.lib.cache.cache_until("start", "objfile") 115 def dump_relocations_by_section_name(section_name: str) -> Optional[Tuple[Relocation, ...]]: 116 """ 117 Dump relocations of a section by section name of libc ELF file 118 """ 119 libc_filename = get_libc_filename_from_info_sharedlibrary() 120 if not libc_filename: 121 # libc not loaded yet, or it's static linked 122 return None 123 return pwndbg.gdblib.elf.dump_relocations_by_section_name( 124 libc_filename, section_name, try_local_path=True 125 ) 126 127 128 @pwndbg.gdblib.proc.OnlyWhenRunning 129 @pwndbg.lib.cache.cache_until("start", "objfile") 130 def get_data_section_address() -> int: 131 """ 132 Find .data section address of libc 133 """ 134 libc_filename = get_libc_filename_from_info_sharedlibrary() 135 if not libc_filename: 136 # libc not loaded yet, or it's static linked 137 return 0 138 # TODO: If we are debugging a remote process, this might not work if GDB cannot load the so file 139 out = pwndbg.gdblib.info.files() 140 for line in out.splitlines(): 141 if line.endswith(" is .data in " + libc_filename): 142 return int(line.split()[0], 16) 143 return 0 144 145 146 @pwndbg.gdblib.proc.OnlyWhenRunning 147 @pwndbg.lib.cache.cache_until("start", "objfile") 148 def get_got_section_address() -> int: 149 """ 150 Find .got section address of libc 151 """ 152 libc_filename = get_libc_filename_from_info_sharedlibrary() 153 if not libc_filename: 154 # libc not loaded yet, or it's static linked 155 return 0 156 # TODO: If we are debugging a remote process, this might not work if GDB cannot load the so file 157 out = pwndbg.gdblib.info.files() 158 for line in out.splitlines(): 159 if line.endswith(" is .got in " + libc_filename): 160 return int(line.split()[0], 16) 161 return 0 162 163 164 def OnlyWhenGlibcLoaded(function): 165 @functools.wraps(function) 166 def _OnlyWhenGlibcLoaded(*a, **kw): 167 if get_version() is not None: 168 return function(*a, **kw) 169 else: 170 print(f"{function.__name__}: GLibc not loaded yet.") 171 172 return _OnlyWhenGlibcLoaded 173 174 175 @OnlyWhenGlibcLoaded 176 def check_safe_linking(): 177 """ 178 Safe-linking is a glibc 2.32 mitigation; see: 179 - https://lanph3re.blogspot.com/2020/08/blog-post.html 180 - https://research.checkpoint.com/2020/safe-linking-eliminating-a-20-year-old-malloc-exploit-primitive/ 181 """ 182 
return (get_version() >= (2, 32) or safe_lnk) and safe_lnk is not False 183 [end of pwndbg/glibc.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pwndbg/glibc.py b/pwndbg/glibc.py --- a/pwndbg/glibc.py +++ b/pwndbg/glibc.py @@ -21,6 +21,7 @@ import pwndbg.heap import pwndbg.lib.cache import pwndbg.search +from pwndbg.color import message safe_lnk = pwndbg.gdblib.config.add_param( "safe-linking", @@ -30,21 +31,28 @@ ) glibc_version = pwndbg.gdblib.config.add_param( - "glibc", "", "GLIBC version for heuristics", scope="heap" + "glibc", "", "GLIBC version for heap heuristics resolution (e.g. 2.31)", scope="heap" ) [email protected](glibc_version) +def set_glibc_version() -> None: + ret = re.search(r"(\d+)\.(\d+)", glibc_version.value) + if ret: + glibc_version.value = tuple(map(int, ret.groups())) + return + + print( + message.warn( + f"Invalid GLIBC version: `{glibc_version.value}`, you should provide something like: 2.31 or 2.34" + ) + ) + glibc_version.revert_default() + + @pwndbg.gdblib.proc.OnlyWhenRunning def get_version() -> Optional[Tuple[int, ...]]: - if glibc_version.value: - ret = re.search(r"(\d+)\.(\d+)", glibc_version.value) - if ret: - return tuple(int(_) for _ in ret.groups()) - else: - raise ValueError( - f"Invalid GLIBC version: `{glibc_version.value}`, you should provide something like: 2.31 or 2.34" - ) - return _get_version() + return glibc_version or _get_version() @pwndbg.gdblib.proc.OnlyWhenRunning
{"golden_diff": "diff --git a/pwndbg/glibc.py b/pwndbg/glibc.py\n--- a/pwndbg/glibc.py\n+++ b/pwndbg/glibc.py\n@@ -21,6 +21,7 @@\n import pwndbg.heap\n import pwndbg.lib.cache\n import pwndbg.search\n+from pwndbg.color import message\n \n safe_lnk = pwndbg.gdblib.config.add_param(\n \"safe-linking\",\n@@ -30,21 +31,28 @@\n )\n \n glibc_version = pwndbg.gdblib.config.add_param(\n- \"glibc\", \"\", \"GLIBC version for heuristics\", scope=\"heap\"\n+ \"glibc\", \"\", \"GLIBC version for heap heuristics resolution (e.g. 2.31)\", scope=\"heap\"\n )\n \n \[email protected](glibc_version)\n+def set_glibc_version() -> None:\n+ ret = re.search(r\"(\\d+)\\.(\\d+)\", glibc_version.value)\n+ if ret:\n+ glibc_version.value = tuple(map(int, ret.groups()))\n+ return\n+\n+ print(\n+ message.warn(\n+ f\"Invalid GLIBC version: `{glibc_version.value}`, you should provide something like: 2.31 or 2.34\"\n+ )\n+ )\n+ glibc_version.revert_default()\n+\n+\n @pwndbg.gdblib.proc.OnlyWhenRunning\n def get_version() -> Optional[Tuple[int, ...]]:\n- if glibc_version.value:\n- ret = re.search(r\"(\\d+)\\.(\\d+)\", glibc_version.value)\n- if ret:\n- return tuple(int(_) for _ in ret.groups())\n- else:\n- raise ValueError(\n- f\"Invalid GLIBC version: `{glibc_version.value}`, you should provide something like: 2.31 or 2.34\"\n- )\n- return _get_version()\n+ return glibc_version or _get_version()\n \n \n @pwndbg.gdblib.proc.OnlyWhenRunning\n", "issue": "heap_config should give an example format for GLIBC version\nThe UX for the glibc version setting is not great.\r\n\r\n1. The `heap_config` should give user an example format of GLIBC version we do expect. Currently it only shows this:\r\n```\r\npwndbg> heap_config\r\nName Value (Def) Documentation\r\n-----------------------------------------------------------\r\nglibc '' GLIBC version for heuristics\r\n...\r\n```\r\n\r\n2. When one sets the `glibc` parameter, we should have a setter that will validate it against the expected version format. 
Currently it does not:\r\n```\r\npwndbg> set glibc as\r\nSet GLIBC version for heuristics to 'as'.\r\n```\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nGet information about the GLibc\n\"\"\"\n\nimport functools\nimport os\nimport re\nfrom typing import Optional\nfrom typing import Tuple\n\nimport gdb\nfrom elftools.elf.relocation import Relocation\n\nimport pwndbg.gdblib.config\nimport pwndbg.gdblib.elf\nimport pwndbg.gdblib.file\nimport pwndbg.gdblib.info\nimport pwndbg.gdblib.memory\nimport pwndbg.gdblib.proc\nimport pwndbg.gdblib.symbol\nimport pwndbg.heap\nimport pwndbg.lib.cache\nimport pwndbg.search\n\nsafe_lnk = pwndbg.gdblib.config.add_param(\n \"safe-linking\",\n None,\n \"whether glibc use safe-linking (on/off/auto)\",\n param_class=gdb.PARAM_AUTO_BOOLEAN,\n)\n\nglibc_version = pwndbg.gdblib.config.add_param(\n \"glibc\", \"\", \"GLIBC version for heuristics\", scope=\"heap\"\n)\n\n\[email protected]\ndef get_version() -> Optional[Tuple[int, ...]]:\n if glibc_version.value:\n ret = re.search(r\"(\\d+)\\.(\\d+)\", glibc_version.value)\n if ret:\n return tuple(int(_) for _ in ret.groups())\n else:\n raise ValueError(\n f\"Invalid GLIBC version: `{glibc_version.value}`, you should provide something like: 2.31 or 2.34\"\n )\n return _get_version()\n\n\[email protected]\[email protected]_until(\"start\", \"objfile\")\ndef _get_version() -> Optional[Tuple[int, ...]]:\n if pwndbg.heap.current.libc_has_debug_syms():\n addr = pwndbg.gdblib.symbol.address(\"__libc_version\")\n if addr is not None:\n ver = pwndbg.gdblib.memory.string(addr)\n return tuple(int(_) for _ in ver.split(b\".\"))\n libc_filename = get_libc_filename_from_info_sharedlibrary()\n if not libc_filename:\n return None\n result = pwndbg.gdblib.elf.dump_section_by_name(libc_filename, \".rodata\", try_local_path=True)\n if not result:\n return None\n _, _, data = result\n banner_start = data.find(b\"GNU C Library\")\n if banner_start == -1:\n return None\n banner = data[banner_start : data.find(b\"\\x00\", banner_start)]\n ret = re.search(rb\"release version (\\d+)\\.(\\d+)\", banner)\n return tuple(int(_) for _ in ret.groups()) if ret else None\n\n\[email protected]\[email protected]_until(\"start\", \"objfile\")\ndef get_libc_filename_from_info_sharedlibrary() -> Optional[str]:\n \"\"\"\n Get the filename of the libc by parsing the output of `info sharedlibrary`.\n \"\"\"\n possible_libc_path = []\n for path in pwndbg.gdblib.info.sharedlibrary_paths():\n basename = os.path.basename(\n path[7:] if path.startswith(\"target:\") else path\n ) # \"target:\" prefix is for remote debugging\n if basename == \"libc.so.6\":\n # The default filename of libc should be libc.so.6, so if we found it, we just return it directly.\n return path\n elif re.search(r\"^libc6?[-_\\.]\", basename):\n # Maybe user loaded the libc with LD_PRELOAD.\n # Some common libc names: libc-2.36.so, libc6_2.36-0ubuntu4_amd64.so, libc.so\n possible_libc_path.append(\n path\n ) # We don't return it, maybe there is a libc.so.6 and this match is just a false positive.\n # TODO: This might fail if user use LD_PRELOAD to load libc with a weird name or there are multiple shared libraries match the pattern.\n # (But do we really need to support this case? 
Maybe we can wait until users really need it :P.)\n if possible_libc_path:\n return possible_libc_path[0] # just return the first match for now :)\n return None\n\n\[email protected]\[email protected]_until(\"start\", \"objfile\")\ndef dump_elf_data_section() -> Optional[Tuple[int, int, bytes]]:\n \"\"\"\n Dump .data section of libc ELF file\n \"\"\"\n libc_filename = get_libc_filename_from_info_sharedlibrary()\n if not libc_filename:\n # libc not loaded yet, or it's static linked\n return None\n return pwndbg.gdblib.elf.dump_section_by_name(libc_filename, \".data\", try_local_path=True)\n\n\[email protected]\[email protected]_until(\"start\", \"objfile\")\ndef dump_relocations_by_section_name(section_name: str) -> Optional[Tuple[Relocation, ...]]:\n \"\"\"\n Dump relocations of a section by section name of libc ELF file\n \"\"\"\n libc_filename = get_libc_filename_from_info_sharedlibrary()\n if not libc_filename:\n # libc not loaded yet, or it's static linked\n return None\n return pwndbg.gdblib.elf.dump_relocations_by_section_name(\n libc_filename, section_name, try_local_path=True\n )\n\n\[email protected]\[email protected]_until(\"start\", \"objfile\")\ndef get_data_section_address() -> int:\n \"\"\"\n Find .data section address of libc\n \"\"\"\n libc_filename = get_libc_filename_from_info_sharedlibrary()\n if not libc_filename:\n # libc not loaded yet, or it's static linked\n return 0\n # TODO: If we are debugging a remote process, this might not work if GDB cannot load the so file\n out = pwndbg.gdblib.info.files()\n for line in out.splitlines():\n if line.endswith(\" is .data in \" + libc_filename):\n return int(line.split()[0], 16)\n return 0\n\n\[email protected]\[email protected]_until(\"start\", \"objfile\")\ndef get_got_section_address() -> int:\n \"\"\"\n Find .got section address of libc\n \"\"\"\n libc_filename = get_libc_filename_from_info_sharedlibrary()\n if not libc_filename:\n # libc not loaded yet, or it's static linked\n return 0\n # TODO: If we are debugging a remote process, this might not work if GDB cannot load the so file\n out = pwndbg.gdblib.info.files()\n for line in out.splitlines():\n if line.endswith(\" is .got in \" + libc_filename):\n return int(line.split()[0], 16)\n return 0\n\n\ndef OnlyWhenGlibcLoaded(function):\n @functools.wraps(function)\n def _OnlyWhenGlibcLoaded(*a, **kw):\n if get_version() is not None:\n return function(*a, **kw)\n else:\n print(f\"{function.__name__}: GLibc not loaded yet.\")\n\n return _OnlyWhenGlibcLoaded\n\n\n@OnlyWhenGlibcLoaded\ndef check_safe_linking():\n \"\"\"\n Safe-linking is a glibc 2.32 mitigation; see:\n - https://lanph3re.blogspot.com/2020/08/blog-post.html\n - https://research.checkpoint.com/2020/safe-linking-eliminating-a-20-year-old-malloc-exploit-primitive/\n \"\"\"\n return (get_version() >= (2, 32) or safe_lnk) and safe_lnk is not False\n", "path": "pwndbg/glibc.py"}]}
2,838
455
gh_patches_debug_35119
rasdani/github-patches
git_diff
Azure__azure-cli-extensions-74
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> az webapp new throws an exception when it can't detect a project type. ### Extension name (the extension in question) webapp ### Description of issue (in as much detail as possible) This should fail gracefully: ``` bash-4.3# cd / bash-4.3# ls appp bin etc lib media proc run srv tmp var azure-cli dev home linuxrc mnt root sbin sys usr bash-4.3# az webapp new -n foo list index out of range Traceback (most recent call last): File "/usr/local/lib/python3.6/site-packages/knack/cli.py", line 194, in invoke cmd_result = self.invocation.execute(args) File "/usr/local/lib/python3.6/site-packages/azure/cli/core/commands/__init__.py", line 331, in execute six.reraise(*sys.exc_info()) File "/usr/local/lib/python3.6/site-packages/six.py", line 693, in reraise raise value File "/usr/local/lib/python3.6/site-packages/azure/cli/core/commands/__init__.py", line 304, in execute result = cmd(params) File "/usr/local/lib/python3.6/site-packages/azure/cli/core/commands/__init__.py", line 168, in __call__ return super(AzCliCommand, self).__call__(*args, **kwargs) File "/usr/local/lib/python3.6/site-packages/knack/commands.py", line 109, in __call__ return self.handler(*args, **kwargs) File "/usr/local/lib/python3.6/site-packages/azure/cli/core/__init__.py", line 348, in default_command_handler result = op(**command_args) File "/root/.azure/cliextensions/webapp/azext_webapp/custom.py", line 50, in create_deploy_webapp lang_details = get_lang_from_content(src_dir) File "/root/.azure/cliextensions/webapp/azext_webapp/create_util.py", line 113, in get_lang_from_content package_netcore_file = os.path.join(src_path, glob.glob("*.csproj")[0]) IndexError: list index out of range bash-4.3# ``` Steps to reproduce: run `az webapp new -n foo` in a *non-empty* directory that does not contain a `package.json` or a `*.csproj` file. </issue> <code> [start of src/webapp/azext_webapp/create_util.py] 1 # -------------------------------------------------------------------------------------------- 2 # Copyright (c) Microsoft Corporation. All rights reserved. 3 # Licensed under the MIT License. See License.txt in the project root for license information. 
4 # -------------------------------------------------------------------------------------------- 5 6 import os 7 import zipfile 8 from azure.cli.core.commands.client_factory import get_mgmt_service_client 9 from azure.mgmt.resource.resources.models import ResourceGroup 10 from ._constants import ( 11 NETCORE_VERSION_DEFAULT, 12 NETCORE_VERSIONS, 13 NODE_VERSION_DEFAULT, 14 NODE_VERSIONS, 15 NETCORE_RUNTIME_NAME, 16 NODE_RUNTIME_NAME) 17 18 19 def _resource_client_factory(cli_ctx, **_): 20 from azure.cli.core.profiles import ResourceType 21 return get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES) 22 23 24 def web_client_factory(cli_ctx, **_): 25 from azure.mgmt.web import WebSiteManagementClient 26 return get_mgmt_service_client(cli_ctx, WebSiteManagementClient) 27 28 29 def zip_contents_from_dir(dirPath, lang): 30 relroot = os.path.abspath(os.path.join(dirPath, os.pardir)) 31 path_and_file = os.path.splitdrive(dirPath)[1] 32 file_val = os.path.split(path_and_file)[1] 33 zip_file_path = relroot + "\\" + file_val + ".zip" 34 abs_src = os.path.abspath(dirPath) 35 with zipfile.ZipFile("{}".format(zip_file_path), "w", zipfile.ZIP_DEFLATED) as zf: 36 for dirname, subdirs, files in os.walk(dirPath): 37 # skip node_modules folder for Node apps, 38 # since zip_deployment will perfom the build operation 39 if lang.lower() == NODE_RUNTIME_NAME and 'node_modules' in subdirs: 40 subdirs.remove('node_modules') 41 elif lang.lower() == NETCORE_RUNTIME_NAME: 42 if 'bin' in subdirs: 43 subdirs.remove('bin') 44 elif 'obj' in subdirs: 45 subdirs.remove('obj') 46 for filename in files: 47 absname = os.path.abspath(os.path.join(dirname, filename)) 48 arcname = absname[len(abs_src) + 1:] 49 zf.write(absname, arcname) 50 return zip_file_path 51 52 53 def get_runtime_version_details(file_path, lang_name): 54 version_detected = None 55 version_to_create = None 56 if lang_name.lower() == NETCORE_RUNTIME_NAME: 57 # method returns list in DESC, pick the first 58 version_detected = parse_netcore_version(file_path)[0] 59 version_to_create = detect_netcore_version_tocreate(version_detected) 60 elif lang_name.lower() == NODE_RUNTIME_NAME: 61 version_detected = parse_node_version(file_path)[0] 62 version_to_create = detect_node_version_tocreate(version_detected) 63 return {'detected': version_detected, 'to_create': version_to_create} 64 65 66 def create_resource_group(cmd, rg_name, location): 67 rcf = _resource_client_factory(cmd.cli_ctx) 68 rg_params = ResourceGroup(location=location) 69 return rcf.resource_groups.create_or_update(rg_name, rg_params) 70 71 72 def check_resource_group_exists(cmd, rg_name): 73 rcf = _resource_client_factory(cmd.cli_ctx) 74 return rcf.resource_groups.check_existence(rg_name) 75 76 77 def check_resource_group_supports_os(cmd, rg_name, location, is_linux): 78 # get all appservice plans from RG 79 client = web_client_factory(cmd.cli_ctx) 80 plans = list(client.app_service_plans.list_by_resource_group(rg_name)) 81 for item in plans: 82 # for Linux if an app with reserved==False exists, ASP doesn't support Linux 83 if is_linux and item.location == location and not item.reserved: 84 return False 85 elif not is_linux and item.location == location and item.reserved: 86 return False 87 return True 88 89 90 def check_if_asp_exists(cmd, rg_name, asp_name): 91 # get all appservice plans from RG 92 client = web_client_factory(cmd.cli_ctx) 93 for item in list(client.app_service_plans.list_by_resource_group(rg_name)): 94 if item.name == asp_name: 95 return True 96 return False 97 98 99 
def check_app_exists(cmd, rg_name, app_name): 100 client = web_client_factory(cmd.cli_ctx) 101 for item in list(client.web_apps.list_by_resource_group(rg_name)): 102 if item.name == app_name: 103 return True 104 return False 105 106 107 def get_lang_from_content(src_path): 108 import glob 109 # NODE: package.json should exisit in the application root dir 110 # NETCORE: NETCORE.csproj should exist in the root dir 111 runtime_details_dict = dict.fromkeys(['language', 'file_loc', 'default_sku']) 112 package_json_file = os.path.join(src_path, 'package.json') 113 package_netcore_file = os.path.join(src_path, glob.glob("*.csproj")[0]) 114 if os.path.isfile(package_json_file): 115 runtime_details_dict['language'] = NODE_RUNTIME_NAME 116 runtime_details_dict['file_loc'] = package_json_file 117 runtime_details_dict['default_sku'] = 'S1' 118 elif os.path.isfile(package_netcore_file): 119 runtime_details_dict['language'] = NETCORE_RUNTIME_NAME 120 runtime_details_dict['file_loc'] = package_netcore_file 121 runtime_details_dict['default_sku'] = 'F1' 122 return runtime_details_dict 123 124 125 def parse_netcore_version(file_path): 126 import xml.etree.ElementTree as ET 127 import re 128 version_detected = ['0.0'] 129 parsed_file = ET.parse(file_path) 130 root = parsed_file.getroot() 131 for target_ver in root.iter('TargetFramework'): 132 version_detected = re.findall(r"\d+\.\d+", target_ver.text) 133 # incase of multiple versions detected, return list in descending order 134 version_detected = sorted(version_detected, key=float, reverse=True) 135 return version_detected 136 137 138 def parse_node_version(file_path): 139 import json 140 import re 141 version_detected = ['0.0'] 142 with open(file_path) as data_file: 143 data = [] 144 for d in find_key_in_json(json.load(data_file), 'node'): 145 non_decimal = re.compile(r'[^\d.]+') 146 # remove the string ~ or > that sometimes exists in version value 147 c = non_decimal.sub('', d) 148 # reduce the version to '6.0' from '6.0.0' 149 data.append(c[:3]) 150 version_detected = sorted(data, key=float, reverse=True) 151 return version_detected 152 153 154 def detect_netcore_version_tocreate(detected_ver): 155 if detected_ver in NETCORE_VERSIONS: 156 return detected_ver 157 return NETCORE_VERSION_DEFAULT 158 159 160 def detect_node_version_tocreate(detected_ver): 161 if detected_ver in NODE_VERSIONS: 162 return detected_ver 163 # get major version & get the closest version from supported list 164 major_ver = float(detected_ver.split('.')[0]) 165 if major_ver < 4: 166 return NODE_VERSION_DEFAULT 167 elif major_ver >= 4 and major_ver < 6: 168 return '4.5' 169 elif major_ver >= 6 and major_ver < 8: 170 return '6.9' 171 return NODE_VERSION_DEFAULT 172 173 174 def find_key_in_json(json_data, key): 175 for k, v in json_data.items(): 176 if key in k: 177 yield v 178 elif isinstance(v, dict): 179 for id_val in find_key_in_json(v, key): 180 yield id_val 181 [end of src/webapp/azext_webapp/create_util.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/webapp/azext_webapp/create_util.py b/src/webapp/azext_webapp/create_util.py --- a/src/webapp/azext_webapp/create_util.py +++ b/src/webapp/azext_webapp/create_util.py @@ -106,16 +106,17 @@ def get_lang_from_content(src_path): import glob - # NODE: package.json should exisit in the application root dir - # NETCORE: NETCORE.csproj should exist in the root dir + # NODE: package.json should exist in the application root dir + # NETCORE: *.csproj should exist in the application root dir runtime_details_dict = dict.fromkeys(['language', 'file_loc', 'default_sku']) package_json_file = os.path.join(src_path, 'package.json') - package_netcore_file = os.path.join(src_path, glob.glob("*.csproj")[0]) + package_netcore_glob = glob.glob("*.csproj") if os.path.isfile(package_json_file): runtime_details_dict['language'] = NODE_RUNTIME_NAME runtime_details_dict['file_loc'] = package_json_file runtime_details_dict['default_sku'] = 'S1' - elif os.path.isfile(package_netcore_file): + elif package_netcore_glob: + package_netcore_file = os.path.join(src_path, package_netcore_glob[0]) runtime_details_dict['language'] = NETCORE_RUNTIME_NAME runtime_details_dict['file_loc'] = package_netcore_file runtime_details_dict['default_sku'] = 'F1' @@ -138,7 +139,6 @@ def parse_node_version(file_path): import json import re - version_detected = ['0.0'] with open(file_path) as data_file: data = [] for d in find_key_in_json(json.load(data_file), 'node'): @@ -148,7 +148,7 @@ # reduce the version to '6.0' from '6.0.0' data.append(c[:3]) version_detected = sorted(data, key=float, reverse=True) - return version_detected + return version_detected or ['0.0'] def detect_netcore_version_tocreate(detected_ver):
{"golden_diff": "diff --git a/src/webapp/azext_webapp/create_util.py b/src/webapp/azext_webapp/create_util.py\n--- a/src/webapp/azext_webapp/create_util.py\n+++ b/src/webapp/azext_webapp/create_util.py\n@@ -106,16 +106,17 @@\n \n def get_lang_from_content(src_path):\n import glob\n- # NODE: package.json should exisit in the application root dir\n- # NETCORE: NETCORE.csproj should exist in the root dir\n+ # NODE: package.json should exist in the application root dir\n+ # NETCORE: *.csproj should exist in the application root dir\n runtime_details_dict = dict.fromkeys(['language', 'file_loc', 'default_sku'])\n package_json_file = os.path.join(src_path, 'package.json')\n- package_netcore_file = os.path.join(src_path, glob.glob(\"*.csproj\")[0])\n+ package_netcore_glob = glob.glob(\"*.csproj\")\n if os.path.isfile(package_json_file):\n runtime_details_dict['language'] = NODE_RUNTIME_NAME\n runtime_details_dict['file_loc'] = package_json_file\n runtime_details_dict['default_sku'] = 'S1'\n- elif os.path.isfile(package_netcore_file):\n+ elif package_netcore_glob:\n+ package_netcore_file = os.path.join(src_path, package_netcore_glob[0])\n runtime_details_dict['language'] = NETCORE_RUNTIME_NAME\n runtime_details_dict['file_loc'] = package_netcore_file\n runtime_details_dict['default_sku'] = 'F1'\n@@ -138,7 +139,6 @@\n def parse_node_version(file_path):\n import json\n import re\n- version_detected = ['0.0']\n with open(file_path) as data_file:\n data = []\n for d in find_key_in_json(json.load(data_file), 'node'):\n@@ -148,7 +148,7 @@\n # reduce the version to '6.0' from '6.0.0'\n data.append(c[:3])\n version_detected = sorted(data, key=float, reverse=True)\n- return version_detected\n+ return version_detected or ['0.0']\n \n \n def detect_netcore_version_tocreate(detected_ver):\n", "issue": "az webapp new throws an exception when it can't detect a project type.\n### Extension name (the extension in question)\r\nwebapp\r\n\r\n### Description of issue (in as much detail as possible)\r\nThis should fail gracefully:\r\n\r\n```\r\nbash-4.3# cd /\r\nbash-4.3# ls\r\nappp bin etc lib media proc run srv tmp var\r\nazure-cli dev home linuxrc mnt root sbin sys usr\r\nbash-4.3# az webapp new -n foo\r\nlist index out of range\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.6/site-packages/knack/cli.py\", line 194, in invoke\r\n cmd_result = self.invocation.execute(args)\r\n File \"/usr/local/lib/python3.6/site-packages/azure/cli/core/commands/__init__.py\", line 331, in execute\r\n six.reraise(*sys.exc_info())\r\n File \"/usr/local/lib/python3.6/site-packages/six.py\", line 693, in reraise\r\n raise value\r\n File \"/usr/local/lib/python3.6/site-packages/azure/cli/core/commands/__init__.py\", line 304, in execute\r\n result = cmd(params)\r\n File \"/usr/local/lib/python3.6/site-packages/azure/cli/core/commands/__init__.py\", line 168, in __call__\r\n return super(AzCliCommand, self).__call__(*args, **kwargs)\r\n File \"/usr/local/lib/python3.6/site-packages/knack/commands.py\", line 109, in __call__\r\n return self.handler(*args, **kwargs)\r\n File \"/usr/local/lib/python3.6/site-packages/azure/cli/core/__init__.py\", line 348, in default_command_handler\r\n result = op(**command_args)\r\n File \"/root/.azure/cliextensions/webapp/azext_webapp/custom.py\", line 50, in create_deploy_webapp\r\n lang_details = get_lang_from_content(src_dir)\r\n File \"/root/.azure/cliextensions/webapp/azext_webapp/create_util.py\", line 113, in get_lang_from_content\r\n package_netcore_file = 
os.path.join(src_path, glob.glob(\"*.csproj\")[0])\r\nIndexError: list index out of range\r\nbash-4.3# \r\n```\r\n\r\nSteps to reproduce: run `az webapp new -n foo` in a *non-empty* directory that does not contain a `package.json` or a `*.csproj` file.\r\n\n", "before_files": [{"content": "# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nimport os\nimport zipfile\nfrom azure.cli.core.commands.client_factory import get_mgmt_service_client\nfrom azure.mgmt.resource.resources.models import ResourceGroup\nfrom ._constants import (\n NETCORE_VERSION_DEFAULT,\n NETCORE_VERSIONS,\n NODE_VERSION_DEFAULT,\n NODE_VERSIONS,\n NETCORE_RUNTIME_NAME,\n NODE_RUNTIME_NAME)\n\n\ndef _resource_client_factory(cli_ctx, **_):\n from azure.cli.core.profiles import ResourceType\n return get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)\n\n\ndef web_client_factory(cli_ctx, **_):\n from azure.mgmt.web import WebSiteManagementClient\n return get_mgmt_service_client(cli_ctx, WebSiteManagementClient)\n\n\ndef zip_contents_from_dir(dirPath, lang):\n relroot = os.path.abspath(os.path.join(dirPath, os.pardir))\n path_and_file = os.path.splitdrive(dirPath)[1]\n file_val = os.path.split(path_and_file)[1]\n zip_file_path = relroot + \"\\\\\" + file_val + \".zip\"\n abs_src = os.path.abspath(dirPath)\n with zipfile.ZipFile(\"{}\".format(zip_file_path), \"w\", zipfile.ZIP_DEFLATED) as zf:\n for dirname, subdirs, files in os.walk(dirPath):\n # skip node_modules folder for Node apps,\n # since zip_deployment will perfom the build operation\n if lang.lower() == NODE_RUNTIME_NAME and 'node_modules' in subdirs:\n subdirs.remove('node_modules')\n elif lang.lower() == NETCORE_RUNTIME_NAME:\n if 'bin' in subdirs:\n subdirs.remove('bin')\n elif 'obj' in subdirs:\n subdirs.remove('obj')\n for filename in files:\n absname = os.path.abspath(os.path.join(dirname, filename))\n arcname = absname[len(abs_src) + 1:]\n zf.write(absname, arcname)\n return zip_file_path\n\n\ndef get_runtime_version_details(file_path, lang_name):\n version_detected = None\n version_to_create = None\n if lang_name.lower() == NETCORE_RUNTIME_NAME:\n # method returns list in DESC, pick the first\n version_detected = parse_netcore_version(file_path)[0]\n version_to_create = detect_netcore_version_tocreate(version_detected)\n elif lang_name.lower() == NODE_RUNTIME_NAME:\n version_detected = parse_node_version(file_path)[0]\n version_to_create = detect_node_version_tocreate(version_detected)\n return {'detected': version_detected, 'to_create': version_to_create}\n\n\ndef create_resource_group(cmd, rg_name, location):\n rcf = _resource_client_factory(cmd.cli_ctx)\n rg_params = ResourceGroup(location=location)\n return rcf.resource_groups.create_or_update(rg_name, rg_params)\n\n\ndef check_resource_group_exists(cmd, rg_name):\n rcf = _resource_client_factory(cmd.cli_ctx)\n return rcf.resource_groups.check_existence(rg_name)\n\n\ndef check_resource_group_supports_os(cmd, rg_name, location, is_linux):\n # get all appservice plans from RG\n client = web_client_factory(cmd.cli_ctx)\n plans = list(client.app_service_plans.list_by_resource_group(rg_name))\n for item in plans:\n # for Linux if an app with reserved==False exists, ASP doesn't support Linux\n if 
is_linux and item.location == location and not item.reserved:\n return False\n elif not is_linux and item.location == location and item.reserved:\n return False\n return True\n\n\ndef check_if_asp_exists(cmd, rg_name, asp_name):\n # get all appservice plans from RG\n client = web_client_factory(cmd.cli_ctx)\n for item in list(client.app_service_plans.list_by_resource_group(rg_name)):\n if item.name == asp_name:\n return True\n return False\n\n\ndef check_app_exists(cmd, rg_name, app_name):\n client = web_client_factory(cmd.cli_ctx)\n for item in list(client.web_apps.list_by_resource_group(rg_name)):\n if item.name == app_name:\n return True\n return False\n\n\ndef get_lang_from_content(src_path):\n import glob\n # NODE: package.json should exisit in the application root dir\n # NETCORE: NETCORE.csproj should exist in the root dir\n runtime_details_dict = dict.fromkeys(['language', 'file_loc', 'default_sku'])\n package_json_file = os.path.join(src_path, 'package.json')\n package_netcore_file = os.path.join(src_path, glob.glob(\"*.csproj\")[0])\n if os.path.isfile(package_json_file):\n runtime_details_dict['language'] = NODE_RUNTIME_NAME\n runtime_details_dict['file_loc'] = package_json_file\n runtime_details_dict['default_sku'] = 'S1'\n elif os.path.isfile(package_netcore_file):\n runtime_details_dict['language'] = NETCORE_RUNTIME_NAME\n runtime_details_dict['file_loc'] = package_netcore_file\n runtime_details_dict['default_sku'] = 'F1'\n return runtime_details_dict\n\n\ndef parse_netcore_version(file_path):\n import xml.etree.ElementTree as ET\n import re\n version_detected = ['0.0']\n parsed_file = ET.parse(file_path)\n root = parsed_file.getroot()\n for target_ver in root.iter('TargetFramework'):\n version_detected = re.findall(r\"\\d+\\.\\d+\", target_ver.text)\n # incase of multiple versions detected, return list in descending order\n version_detected = sorted(version_detected, key=float, reverse=True)\n return version_detected\n\n\ndef parse_node_version(file_path):\n import json\n import re\n version_detected = ['0.0']\n with open(file_path) as data_file:\n data = []\n for d in find_key_in_json(json.load(data_file), 'node'):\n non_decimal = re.compile(r'[^\\d.]+')\n # remove the string ~ or > that sometimes exists in version value\n c = non_decimal.sub('', d)\n # reduce the version to '6.0' from '6.0.0'\n data.append(c[:3])\n version_detected = sorted(data, key=float, reverse=True)\n return version_detected\n\n\ndef detect_netcore_version_tocreate(detected_ver):\n if detected_ver in NETCORE_VERSIONS:\n return detected_ver\n return NETCORE_VERSION_DEFAULT\n\n\ndef detect_node_version_tocreate(detected_ver):\n if detected_ver in NODE_VERSIONS:\n return detected_ver\n # get major version & get the closest version from supported list\n major_ver = float(detected_ver.split('.')[0])\n if major_ver < 4:\n return NODE_VERSION_DEFAULT\n elif major_ver >= 4 and major_ver < 6:\n return '4.5'\n elif major_ver >= 6 and major_ver < 8:\n return '6.9'\n return NODE_VERSION_DEFAULT\n\n\ndef find_key_in_json(json_data, key):\n for k, v in json_data.items():\n if key in k:\n yield v\n elif isinstance(v, dict):\n for id_val in find_key_in_json(v, key):\n yield id_val\n", "path": "src/webapp/azext_webapp/create_util.py"}]}
3,124
499
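The crash in the record above comes from indexing an empty `glob.glob("*.csproj")` result when the directory holds neither a `package.json` nor a `.csproj` file. As a rough illustration of the guard that is needed (a hypothetical helper, not the merged fix), the lookup can be anchored to `src_path` and checked before indexing:

```python
import glob
import os


def find_first_csproj(src_path):
    # Glob relative to src_path rather than the current working directory,
    # and return None instead of raising IndexError when nothing matches.
    matches = glob.glob(os.path.join(src_path, "*.csproj"))
    return matches[0] if matches else None
```

A caller such as `get_lang_from_content` could then treat `None` as "no .NET Core project detected" instead of aborting `az webapp new` with a traceback.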
gh_patches_debug_41802
rasdani/github-patches
git_diff
ESMCI__cime-1034
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> check_input_data not supporting input files outside of input data repo @erichlf has asked for support for the following use case: * Create a partition file in his home directory * Point namelist files to use this partition file * check_input_data should find this file but it doesn't because it insists on looking in the input data repo </issue> <code> [start of utils/python/CIME/check_input_data.py] 1 """ 2 API for checking input for testcase 3 """ 4 5 from CIME.XML.standard_module_setup import * 6 from CIME.utils import get_model, SharedArea 7 8 import fnmatch, glob, shutil 9 10 logger = logging.getLogger(__name__) 11 12 # Should probably be in XML somewhere 13 SVN_LOCS = { 14 "acme" : "https://acme-svn2.ornl.gov/acme-repo/acme/inputdata", 15 "cesm" : "https://svn-ccsm-inputdata.cgd.ucar.edu/trunk/inputdata" 16 } 17 18 def find_files(rootdir, pattern): 19 """ 20 recursively find all files matching a pattern 21 """ 22 result = [] 23 for root, _, files in os.walk(rootdir): 24 for filename in files: 25 if (fnmatch.fnmatch(filename, pattern)): 26 result.append(os.path.join(root, filename)) 27 28 return result 29 30 def download_if_in_repo(svn_loc, input_data_root, rel_path): 31 """ 32 Return True if successfully downloaded 33 """ 34 rel_path = rel_path.strip('/') 35 full_url = os.path.join(svn_loc, rel_path) 36 37 full_path = os.path.join(input_data_root, rel_path) 38 logging.info("Trying to download file: '%s' to path '%s'" % (full_url, full_path)) 39 # Make sure local path exists, create if it does not 40 if(not os.path.exists(os.path.dirname(full_path))): 41 os.makedirs(os.path.dirname(full_path)) 42 43 stat, out, err = run_cmd("svn --non-interactive --trust-server-cert ls %s" % full_url) 44 if (stat != 0): 45 logging.warning("FAIL: SVN repo '%s' does not have file '%s'\nReason:%s\n%s\n" % (svn_loc, full_url, out, err)) 46 return False 47 else: 48 # Use umask to make sure files are group read/writable. As long as parent directories 49 # have +s, then everything should work. 50 with SharedArea(): 51 stat, output, errput = \ 52 run_cmd("svn --non-interactive --trust-server-cert export %s %s" % (full_url, full_path)) 53 if (stat != 0): 54 logging.warning("svn export failed with output: %s and errput %s\n" % (output, errput)) 55 return False 56 else: 57 logging.info("SUCCESS\n") 58 return True 59 60 ############################################################################### 61 def check_all_input_data(case): 62 ############################################################################### 63 64 success = check_input_data(case=case, download=True) 65 expect(success, "Failed to download input data") 66 67 get_refcase = case.get_value("GET_REFCASE") 68 run_type = case.get_value("RUN_TYPE") 69 continue_run = case.get_value("CONTINUE_RUN") 70 71 # We do not fully populate the inputdata directory on every 72 # machine and do not expect every user to download the 3TB+ of 73 # data in our inputdata repository. This code checks for the 74 # existence of inputdata in the local inputdata directory and 75 # attempts to download data from the server if it's needed and 76 # missing. 
77 if get_refcase and run_type != "startup" and not continue_run: 78 din_loc_root = case.get_value("DIN_LOC_ROOT") 79 run_refdate = case.get_value("RUN_REFDATE") 80 run_refcase = case.get_value("RUN_REFCASE") 81 run_refdir = case.get_value("RUN_REFDIR") 82 rundir = case.get_value("RUNDIR") 83 84 refdir = os.path.join(din_loc_root, run_refdir, run_refcase, run_refdate) 85 expect(os.path.isdir(refdir), 86 """ 87 ***************************************************************** 88 prestage ERROR: $refdir is not on local disk 89 obtain this data from the svn input data repository 90 > mkdir -p %s 91 > cd %s 92 > cd .. 93 > svn export --force https://svn-ccsm-inputdata.cgd.ucar.edu/trunk/inputdata/%s 94 or set GET_REFCASE to FALSE in env_run.xml 95 and prestage the restart data to $RUNDIR manually 96 *****************************************************************""" % (refdir, refdir, refdir)) 97 98 logger.info(" - Prestaging REFCASE (%s) to %s" % (refdir, rundir)) 99 100 # prestage the reference case's files. 101 102 if (not os.path.exists(rundir)): 103 logger.debug("Creating run directory: %s"%rundir) 104 os.makedirs(rundir) 105 106 for rcfile in glob.iglob(os.path.join(refdir,"*%s*"%run_refcase)): 107 logger.debug("Staging file %s"%rcfile) 108 rcbaseline = os.path.basename(rcfile) 109 if not os.path.exists("%s/%s" % (rundir, rcbaseline)): 110 os.symlink(rcfile, "%s/%s" % ((rundir, rcbaseline))) 111 112 # copy the refcases' rpointer files to the run directory 113 for rpointerfile in glob.iglob(os.path.join("%s","*rpointer*") % (refdir)): 114 logger.debug("Copy rpointer %s"%rpointerfile) 115 shutil.copy(rpointerfile, rundir) 116 117 118 for cam2file in glob.iglob(os.path.join("%s","*.cam2.*") % rundir): 119 camfile = cam2file.replace("cam2", "cam") 120 os.symlink(cam2file, camfile) 121 122 def check_input_data(case, svn_loc=None, input_data_root=None, data_list_dir="Buildconf", download=False): 123 """ 124 Return True if no files missing 125 """ 126 # Fill in defaults as needed 127 svn_loc = SVN_LOCS[get_model()] if svn_loc is None else svn_loc 128 input_data_root = case.get_value("DIN_LOC_ROOT") if input_data_root is None else input_data_root 129 130 expect(os.path.isdir(input_data_root), "Invalid input_data_root directory: '%s'" % input_data_root) 131 expect(os.path.isdir(data_list_dir), "Invalid data_list_dir directory: '%s'" % data_list_dir) 132 133 data_list_files = find_files(data_list_dir, "*.input_data_list") 134 expect(data_list_files, "No .input_data_list files found in dir '%s'" % data_list_dir) 135 136 no_files_missing = True 137 for data_list_file in data_list_files: 138 logging.info("Loading input file: '%s'" % data_list_file) 139 with open(data_list_file, "r") as fd: 140 lines = fd.readlines() 141 142 for line in lines: 143 line = line.strip() 144 if (line and not line.startswith("#")): 145 tokens = line.split('=') 146 description, full_path = tokens[0].strip(), tokens[1].strip() 147 if(full_path): 148 # expand xml variables 149 full_path = case.get_resolved_value(full_path) 150 rel_path = full_path.replace(input_data_root, "") 151 152 # There are some special values of rel_path that 153 # we need to ignore - some of the component models 154 # set things like 'NULL' or 'same_as_TS' - 155 # basically if rel_path does not contain '/' (a 156 # directory tree) you can assume it's a special 157 # value and ignore it (perhaps with a warning) 158 if ("/" in rel_path and not os.path.exists(full_path)): 159 model = os.path.basename(data_list_file).split('.')[0] 160 
logging.warning("Model %s missing file %s = '%s'" % (model,description,full_path)) 161 162 if (download): 163 success = download_if_in_repo(svn_loc, input_data_root, rel_path) 164 if (not success): 165 # If ACME, try CESM repo as backup 166 if (get_model() == "acme" and svn_loc != SVN_LOCS["cesm"]): 167 success = download_if_in_repo(SVN_LOCS["cesm"], input_data_root, rel_path) 168 if (not success): 169 no_files_missing = False 170 else: 171 no_files_missing = False 172 # if not download 173 else: 174 no_files_missing = False 175 else: 176 logging.debug("Already had input file: '%s'" % full_path) 177 178 else: 179 model = os.path.basename(data_list_file).split('.')[0] 180 logging.warning("Model %s no file specified for %s"%(model,description)) 181 182 return no_files_missing 183 [end of utils/python/CIME/check_input_data.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/utils/python/CIME/check_input_data.py b/utils/python/CIME/check_input_data.py --- a/utils/python/CIME/check_input_data.py +++ b/utils/python/CIME/check_input_data.py @@ -148,32 +148,45 @@ # expand xml variables full_path = case.get_resolved_value(full_path) rel_path = full_path.replace(input_data_root, "") + model = os.path.basename(data_list_file).split('.')[0] - # There are some special values of rel_path that - # we need to ignore - some of the component models - # set things like 'NULL' or 'same_as_TS' - - # basically if rel_path does not contain '/' (a - # directory tree) you can assume it's a special - # value and ignore it (perhaps with a warning) - if ("/" in rel_path and not os.path.exists(full_path)): - model = os.path.basename(data_list_file).split('.')[0] - logging.warning("Model %s missing file %s = '%s'" % (model,description,full_path)) - - if (download): - success = download_if_in_repo(svn_loc, input_data_root, rel_path) - if (not success): - # If ACME, try CESM repo as backup - if (get_model() == "acme" and svn_loc != SVN_LOCS["cesm"]): - success = download_if_in_repo(SVN_LOCS["cesm"], input_data_root, rel_path) - if (not success): - no_files_missing = False - else: - no_files_missing = False - # if not download - else: + if ("/" in rel_path and rel_path == full_path): + # User pointing to a file outside of input_data_root, we cannot determine + # rel_path, and so cannot download the file. If it already exists, we can + # proceed + if not os.path.exists(full_path): + logging.warning(" Model %s missing file %s = '%s'" % (model, description, full_path)) + if download: + logging.warning(" Cannot download file since it lives outside of the input_data_root '%s'" % input_data_root) no_files_missing = False + else: + logging.info(" Found input file: '%s'" % full_path) + else: - logging.debug("Already had input file: '%s'" % full_path) + # There are some special values of rel_path that + # we need to ignore - some of the component models + # set things like 'NULL' or 'same_as_TS' - + # basically if rel_path does not contain '/' (a + # directory tree) you can assume it's a special + # value and ignore it (perhaps with a warning) + if ("/" in rel_path and not os.path.exists(full_path)): + logging.warning(" Model %s missing file %s = '%s'" % (model,description,full_path)) + + if (download): + success = download_if_in_repo(svn_loc, input_data_root, rel_path) + if (not success): + # If ACME, try CESM repo as backup + if (get_model() == "acme" and svn_loc != SVN_LOCS["cesm"]): + success = download_if_in_repo(SVN_LOCS["cesm"], input_data_root, rel_path) + if (not success): + no_files_missing = False + else: + no_files_missing = False + # if not download + else: + no_files_missing = False + else: + logging.info(" Already had input file: '%s'" % full_path) else: model = os.path.basename(data_list_file).split('.')[0]
{"golden_diff": "diff --git a/utils/python/CIME/check_input_data.py b/utils/python/CIME/check_input_data.py\n--- a/utils/python/CIME/check_input_data.py\n+++ b/utils/python/CIME/check_input_data.py\n@@ -148,32 +148,45 @@\n # expand xml variables\n full_path = case.get_resolved_value(full_path)\n rel_path = full_path.replace(input_data_root, \"\")\n+ model = os.path.basename(data_list_file).split('.')[0]\n \n- # There are some special values of rel_path that\n- # we need to ignore - some of the component models\n- # set things like 'NULL' or 'same_as_TS' -\n- # basically if rel_path does not contain '/' (a\n- # directory tree) you can assume it's a special\n- # value and ignore it (perhaps with a warning)\n- if (\"/\" in rel_path and not os.path.exists(full_path)):\n- model = os.path.basename(data_list_file).split('.')[0]\n- logging.warning(\"Model %s missing file %s = '%s'\" % (model,description,full_path))\n-\n- if (download):\n- success = download_if_in_repo(svn_loc, input_data_root, rel_path)\n- if (not success):\n- # If ACME, try CESM repo as backup\n- if (get_model() == \"acme\" and svn_loc != SVN_LOCS[\"cesm\"]):\n- success = download_if_in_repo(SVN_LOCS[\"cesm\"], input_data_root, rel_path)\n- if (not success):\n- no_files_missing = False\n- else:\n- no_files_missing = False\n- # if not download\n- else:\n+ if (\"/\" in rel_path and rel_path == full_path):\n+ # User pointing to a file outside of input_data_root, we cannot determine\n+ # rel_path, and so cannot download the file. If it already exists, we can\n+ # proceed\n+ if not os.path.exists(full_path):\n+ logging.warning(\" Model %s missing file %s = '%s'\" % (model, description, full_path))\n+ if download:\n+ logging.warning(\" Cannot download file since it lives outside of the input_data_root '%s'\" % input_data_root)\n no_files_missing = False\n+ else:\n+ logging.info(\" Found input file: '%s'\" % full_path)\n+\n else:\n- logging.debug(\"Already had input file: '%s'\" % full_path)\n+ # There are some special values of rel_path that\n+ # we need to ignore - some of the component models\n+ # set things like 'NULL' or 'same_as_TS' -\n+ # basically if rel_path does not contain '/' (a\n+ # directory tree) you can assume it's a special\n+ # value and ignore it (perhaps with a warning)\n+ if (\"/\" in rel_path and not os.path.exists(full_path)):\n+ logging.warning(\" Model %s missing file %s = '%s'\" % (model,description,full_path))\n+\n+ if (download):\n+ success = download_if_in_repo(svn_loc, input_data_root, rel_path)\n+ if (not success):\n+ # If ACME, try CESM repo as backup\n+ if (get_model() == \"acme\" and svn_loc != SVN_LOCS[\"cesm\"]):\n+ success = download_if_in_repo(SVN_LOCS[\"cesm\"], input_data_root, rel_path)\n+ if (not success):\n+ no_files_missing = False\n+ else:\n+ no_files_missing = False\n+ # if not download\n+ else:\n+ no_files_missing = False\n+ else:\n+ logging.info(\" Already had input file: '%s'\" % full_path)\n \n else:\n model = os.path.basename(data_list_file).split('.')[0]\n", "issue": "check_input_data not supporting input files outside of input data repo\n@erichlf has asked for support for the following use case:\r\n* Create a partition file in his home directory\r\n* Point namelist files to use this partition file\r\n* check_input_data should find this file but it doesn't because it insists on looking in the input data repo\n", "before_files": [{"content": "\"\"\"\nAPI for checking input for testcase\n\"\"\"\n\nfrom CIME.XML.standard_module_setup import *\nfrom CIME.utils import get_model, 
SharedArea\n\nimport fnmatch, glob, shutil\n\nlogger = logging.getLogger(__name__)\n\n# Should probably be in XML somewhere\nSVN_LOCS = {\n \"acme\" : \"https://acme-svn2.ornl.gov/acme-repo/acme/inputdata\",\n \"cesm\" : \"https://svn-ccsm-inputdata.cgd.ucar.edu/trunk/inputdata\"\n}\n\ndef find_files(rootdir, pattern):\n \"\"\"\n recursively find all files matching a pattern\n \"\"\"\n result = []\n for root, _, files in os.walk(rootdir):\n for filename in files:\n if (fnmatch.fnmatch(filename, pattern)):\n result.append(os.path.join(root, filename))\n\n return result\n\ndef download_if_in_repo(svn_loc, input_data_root, rel_path):\n \"\"\"\n Return True if successfully downloaded\n \"\"\"\n rel_path = rel_path.strip('/')\n full_url = os.path.join(svn_loc, rel_path)\n\n full_path = os.path.join(input_data_root, rel_path)\n logging.info(\"Trying to download file: '%s' to path '%s'\" % (full_url, full_path))\n # Make sure local path exists, create if it does not\n if(not os.path.exists(os.path.dirname(full_path))):\n os.makedirs(os.path.dirname(full_path))\n\n stat, out, err = run_cmd(\"svn --non-interactive --trust-server-cert ls %s\" % full_url)\n if (stat != 0):\n logging.warning(\"FAIL: SVN repo '%s' does not have file '%s'\\nReason:%s\\n%s\\n\" % (svn_loc, full_url, out, err))\n return False\n else:\n # Use umask to make sure files are group read/writable. As long as parent directories\n # have +s, then everything should work.\n with SharedArea():\n stat, output, errput = \\\n run_cmd(\"svn --non-interactive --trust-server-cert export %s %s\" % (full_url, full_path))\n if (stat != 0):\n logging.warning(\"svn export failed with output: %s and errput %s\\n\" % (output, errput))\n return False\n else:\n logging.info(\"SUCCESS\\n\")\n return True\n\n###############################################################################\ndef check_all_input_data(case):\n###############################################################################\n\n success = check_input_data(case=case, download=True)\n expect(success, \"Failed to download input data\")\n\n get_refcase = case.get_value(\"GET_REFCASE\")\n run_type = case.get_value(\"RUN_TYPE\")\n continue_run = case.get_value(\"CONTINUE_RUN\")\n\n # We do not fully populate the inputdata directory on every\n # machine and do not expect every user to download the 3TB+ of\n # data in our inputdata repository. 
This code checks for the\n # existence of inputdata in the local inputdata directory and\n # attempts to download data from the server if it's needed and\n # missing.\n if get_refcase and run_type != \"startup\" and not continue_run:\n din_loc_root = case.get_value(\"DIN_LOC_ROOT\")\n run_refdate = case.get_value(\"RUN_REFDATE\")\n run_refcase = case.get_value(\"RUN_REFCASE\")\n run_refdir = case.get_value(\"RUN_REFDIR\")\n rundir = case.get_value(\"RUNDIR\")\n\n refdir = os.path.join(din_loc_root, run_refdir, run_refcase, run_refdate)\n expect(os.path.isdir(refdir),\n\"\"\"\n*****************************************************************\nprestage ERROR: $refdir is not on local disk\nobtain this data from the svn input data repository\n> mkdir -p %s\n> cd %s\n> cd ..\n> svn export --force https://svn-ccsm-inputdata.cgd.ucar.edu/trunk/inputdata/%s\nor set GET_REFCASE to FALSE in env_run.xml\nand prestage the restart data to $RUNDIR manually\n*****************************************************************\"\"\" % (refdir, refdir, refdir))\n\n logger.info(\" - Prestaging REFCASE (%s) to %s\" % (refdir, rundir))\n\n # prestage the reference case's files.\n\n if (not os.path.exists(rundir)):\n logger.debug(\"Creating run directory: %s\"%rundir)\n os.makedirs(rundir)\n\n for rcfile in glob.iglob(os.path.join(refdir,\"*%s*\"%run_refcase)):\n logger.debug(\"Staging file %s\"%rcfile)\n rcbaseline = os.path.basename(rcfile)\n if not os.path.exists(\"%s/%s\" % (rundir, rcbaseline)):\n os.symlink(rcfile, \"%s/%s\" % ((rundir, rcbaseline)))\n\n # copy the refcases' rpointer files to the run directory\n for rpointerfile in glob.iglob(os.path.join(\"%s\",\"*rpointer*\") % (refdir)):\n logger.debug(\"Copy rpointer %s\"%rpointerfile)\n shutil.copy(rpointerfile, rundir)\n\n\n for cam2file in glob.iglob(os.path.join(\"%s\",\"*.cam2.*\") % rundir):\n camfile = cam2file.replace(\"cam2\", \"cam\")\n os.symlink(cam2file, camfile)\n\ndef check_input_data(case, svn_loc=None, input_data_root=None, data_list_dir=\"Buildconf\", download=False):\n \"\"\"\n Return True if no files missing\n \"\"\"\n # Fill in defaults as needed\n svn_loc = SVN_LOCS[get_model()] if svn_loc is None else svn_loc\n input_data_root = case.get_value(\"DIN_LOC_ROOT\") if input_data_root is None else input_data_root\n\n expect(os.path.isdir(input_data_root), \"Invalid input_data_root directory: '%s'\" % input_data_root)\n expect(os.path.isdir(data_list_dir), \"Invalid data_list_dir directory: '%s'\" % data_list_dir)\n\n data_list_files = find_files(data_list_dir, \"*.input_data_list\")\n expect(data_list_files, \"No .input_data_list files found in dir '%s'\" % data_list_dir)\n\n no_files_missing = True\n for data_list_file in data_list_files:\n logging.info(\"Loading input file: '%s'\" % data_list_file)\n with open(data_list_file, \"r\") as fd:\n lines = fd.readlines()\n\n for line in lines:\n line = line.strip()\n if (line and not line.startswith(\"#\")):\n tokens = line.split('=')\n description, full_path = tokens[0].strip(), tokens[1].strip()\n if(full_path):\n # expand xml variables\n full_path = case.get_resolved_value(full_path)\n rel_path = full_path.replace(input_data_root, \"\")\n\n # There are some special values of rel_path that\n # we need to ignore - some of the component models\n # set things like 'NULL' or 'same_as_TS' -\n # basically if rel_path does not contain '/' (a\n # directory tree) you can assume it's a special\n # value and ignore it (perhaps with a warning)\n if (\"/\" in rel_path and not 
os.path.exists(full_path)):\n model = os.path.basename(data_list_file).split('.')[0]\n logging.warning(\"Model %s missing file %s = '%s'\" % (model,description,full_path))\n\n if (download):\n success = download_if_in_repo(svn_loc, input_data_root, rel_path)\n if (not success):\n # If ACME, try CESM repo as backup\n if (get_model() == \"acme\" and svn_loc != SVN_LOCS[\"cesm\"]):\n success = download_if_in_repo(SVN_LOCS[\"cesm\"], input_data_root, rel_path)\n if (not success):\n no_files_missing = False\n else:\n no_files_missing = False\n # if not download\n else:\n no_files_missing = False\n else:\n logging.debug(\"Already had input file: '%s'\" % full_path)\n\n else:\n model = os.path.basename(data_list_file).split('.')[0]\n logging.warning(\"Model %s no file specified for %s\"%(model,description))\n\n return no_files_missing\n", "path": "utils/python/CIME/check_input_data.py"}]}
2,872
860
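The key observation in the CIME patch above is that `full_path.replace(input_data_root, "")` leaves a path unchanged when the file lives outside the input-data root, so `rel_path == full_path` identifies a user-supplied external file that can be used if present but never downloaded. A condensed restatement of that branching (hypothetical function name, logic mirrored from the diff):

```python
import os


def classify_input_file(full_path, input_data_root):
    # A path outside input_data_root is unchanged by the replace, so it can
    # only be accepted if it already exists locally; it has no repo rel_path.
    rel_path = full_path.replace(input_data_root, "")
    if "/" not in rel_path:
        return "special value"  # e.g. 'NULL' or 'same_as_TS'
    if rel_path == full_path:
        return "external, present" if os.path.exists(full_path) else "external, missing"
    return "in repo" if os.path.exists(full_path) else "candidate for download"
```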
gh_patches_debug_17031
rasdani/github-patches
git_diff
paperless-ngx__paperless-ngx-680
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [BUG] Mail rule filter attachment filename is case sensitive ### Description The "Mail rules" interface says the Filter attachment filename should be case insensitive: > Only consume documents which entirely match this filename if specified. Wildcards such as *.pdf or \*invoice\* are allowed. Case insensitive. The latest revision of mail.py uses fnmatch, which follows the operating system's rules for case sensitivity: https://github.com/paperless-ngx/paperless-ngx/blob/a728502988fe26da9aa2844916dcfcc2455d0be2/src/paperless_mail/mail.py#L286-L288 Issue ported from https://github.com/jonaswinkler/paperless-ng/issues/1394 ### Expected behavior Mail attachments should be matched case insensitive to the Filter attachment filename. ### Steps to reproduce 1. Set up paperless to consume from a mailbox 2. Set up a mail rule with a filter attachment filename of *.pdf 3. Send an attachment ending in *.PDF ### Webserver logs _No response_ ### Screenshots _No response_ ### Paperless-ngx version 1.6.0 ### Host OS Oracle Linux 8 ### Installation method Docker ### Browser _No response_ ### Configuration changes _No response_ ### Other _No response_ </issue> <code> [start of src/paperless_mail/mail.py] 1 import os 2 import tempfile 3 from datetime import date 4 from datetime import timedelta 5 from fnmatch import fnmatch 6 7 import magic 8 import pathvalidate 9 from django.conf import settings 10 from django.db import DatabaseError 11 from django_q.tasks import async_task 12 from documents.loggers import LoggingMixin 13 from documents.models import Correspondent 14 from documents.parsers import is_mime_type_supported 15 from imap_tools import AND 16 from imap_tools import MailBox 17 from imap_tools import MailboxFolderSelectError 18 from imap_tools import MailBoxUnencrypted 19 from imap_tools import MailMessage 20 from imap_tools import MailMessageFlags 21 from paperless_mail.models import MailAccount 22 from paperless_mail.models import MailRule 23 24 25 class MailError(Exception): 26 pass 27 28 29 class BaseMailAction: 30 def get_criteria(self): 31 return {} 32 33 def post_consume(self, M, message_uids, parameter): 34 pass # pragma: nocover 35 36 37 class DeleteMailAction(BaseMailAction): 38 def post_consume(self, M, message_uids, parameter): 39 M.delete(message_uids) 40 41 42 class MarkReadMailAction(BaseMailAction): 43 def get_criteria(self): 44 return {"seen": False} 45 46 def post_consume(self, M, message_uids, parameter): 47 M.flag(message_uids, [MailMessageFlags.SEEN], True) 48 49 50 class MoveMailAction(BaseMailAction): 51 def post_consume(self, M, message_uids, parameter): 52 M.move(message_uids, parameter) 53 54 55 class FlagMailAction(BaseMailAction): 56 def get_criteria(self): 57 return {"flagged": False} 58 59 def post_consume(self, M, message_uids, parameter): 60 M.flag(message_uids, [MailMessageFlags.FLAGGED], True) 61 62 63 def get_rule_action(rule): 64 if rule.action == MailRule.ACTION_FLAG: 65 return FlagMailAction() 66 elif rule.action == MailRule.ACTION_DELETE: 67 return DeleteMailAction() 68 elif rule.action == MailRule.ACTION_MOVE: 69 return MoveMailAction() 70 elif rule.action == MailRule.ACTION_MARK_READ: 71 return MarkReadMailAction() 72 else: 73 raise NotImplementedError("Unknown action.") # pragma: nocover 74 75 76 def make_criterias(rule): 77 maximum_age = date.today() - timedelta(days=rule.maximum_age) 78 criterias = {} 79 if rule.maximum_age > 0: 80 
criterias["date_gte"] = maximum_age 81 if rule.filter_from: 82 criterias["from_"] = rule.filter_from 83 if rule.filter_subject: 84 criterias["subject"] = rule.filter_subject 85 if rule.filter_body: 86 criterias["body"] = rule.filter_body 87 88 return {**criterias, **get_rule_action(rule).get_criteria()} 89 90 91 def get_mailbox(server, port, security): 92 if security == MailAccount.IMAP_SECURITY_NONE: 93 mailbox = MailBoxUnencrypted(server, port) 94 elif security == MailAccount.IMAP_SECURITY_STARTTLS: 95 mailbox = MailBox(server, port, starttls=True) 96 elif security == MailAccount.IMAP_SECURITY_SSL: 97 mailbox = MailBox(server, port) 98 else: 99 raise NotImplementedError("Unknown IMAP security") # pragma: nocover 100 return mailbox 101 102 103 class MailAccountHandler(LoggingMixin): 104 105 logging_name = "paperless_mail" 106 107 def _correspondent_from_name(self, name): 108 try: 109 return Correspondent.objects.get_or_create(name=name)[0] 110 except DatabaseError as e: 111 self.log("error", f"Error while retrieving correspondent {name}: {e}") 112 return None 113 114 def get_title(self, message, att, rule): 115 if rule.assign_title_from == MailRule.TITLE_FROM_SUBJECT: 116 return message.subject 117 118 elif rule.assign_title_from == MailRule.TITLE_FROM_FILENAME: 119 return os.path.splitext(os.path.basename(att.filename))[0] 120 121 else: 122 raise NotImplementedError( 123 "Unknown title selector.", 124 ) # pragma: nocover 125 126 def get_correspondent(self, message: MailMessage, rule): 127 c_from = rule.assign_correspondent_from 128 129 if c_from == MailRule.CORRESPONDENT_FROM_NOTHING: 130 return None 131 132 elif c_from == MailRule.CORRESPONDENT_FROM_EMAIL: 133 return self._correspondent_from_name(message.from_) 134 135 elif c_from == MailRule.CORRESPONDENT_FROM_NAME: 136 from_values = message.from_values 137 if from_values is not None and len(from_values.name) > 0: 138 return self._correspondent_from_name(from_values.name) 139 else: 140 return self._correspondent_from_name(message.from_) 141 142 elif c_from == MailRule.CORRESPONDENT_FROM_CUSTOM: 143 return rule.assign_correspondent 144 145 else: 146 raise NotImplementedError( 147 "Unknwown correspondent selector", 148 ) # pragma: nocover 149 150 def handle_mail_account(self, account): 151 152 self.renew_logging_group() 153 154 self.log("debug", f"Processing mail account {account}") 155 156 total_processed_files = 0 157 158 with get_mailbox( 159 account.imap_server, 160 account.imap_port, 161 account.imap_security, 162 ) as M: 163 164 try: 165 M.login(account.username, account.password) 166 except Exception: 167 raise MailError(f"Error while authenticating account {account}") 168 169 self.log( 170 "debug", 171 f"Account {account}: Processing " f"{account.rules.count()} rule(s)", 172 ) 173 174 for rule in account.rules.order_by("order"): 175 try: 176 total_processed_files += self.handle_mail_rule(M, rule) 177 except Exception as e: 178 self.log( 179 "error", 180 f"Rule {rule}: Error while processing rule: {e}", 181 exc_info=True, 182 ) 183 184 return total_processed_files 185 186 def handle_mail_rule(self, M, rule): 187 188 self.log("debug", f"Rule {rule}: Selecting folder {rule.folder}") 189 190 try: 191 M.folder.set(rule.folder) 192 except MailboxFolderSelectError: 193 raise MailError( 194 f"Rule {rule}: Folder {rule.folder} " 195 f"does not exist in account {rule.account}", 196 ) 197 198 criterias = make_criterias(rule) 199 200 self.log( 201 "debug", 202 f"Rule {rule}: Searching folder with criteria " f"{str(AND(**criterias))}", 203 ) 
204 205 try: 206 messages = M.fetch( 207 criteria=AND(**criterias), 208 mark_seen=False, 209 charset=rule.account.character_set, 210 ) 211 except Exception: 212 raise MailError(f"Rule {rule}: Error while fetching folder {rule.folder}") 213 214 post_consume_messages = [] 215 216 mails_processed = 0 217 total_processed_files = 0 218 219 for message in messages: 220 try: 221 processed_files = self.handle_message(message, rule) 222 if processed_files > 0: 223 post_consume_messages.append(message.uid) 224 225 total_processed_files += processed_files 226 mails_processed += 1 227 except Exception as e: 228 self.log( 229 "error", 230 f"Rule {rule}: Error while processing mail " f"{message.uid}: {e}", 231 exc_info=True, 232 ) 233 234 self.log("debug", f"Rule {rule}: Processed {mails_processed} matching mail(s)") 235 236 self.log( 237 "debug", 238 f"Rule {rule}: Running mail actions on " 239 f"{len(post_consume_messages)} mails", 240 ) 241 242 try: 243 get_rule_action(rule).post_consume( 244 M, 245 post_consume_messages, 246 rule.action_parameter, 247 ) 248 249 except Exception as e: 250 raise MailError( 251 f"Rule {rule}: Error while processing post-consume actions: " f"{e}", 252 ) 253 254 return total_processed_files 255 256 def handle_message(self, message, rule): 257 if not message.attachments: 258 return 0 259 260 self.log( 261 "debug", 262 f"Rule {rule}: " 263 f"Processing mail {message.subject} from {message.from_} with " 264 f"{len(message.attachments)} attachment(s)", 265 ) 266 267 correspondent = self.get_correspondent(message, rule) 268 tag = rule.assign_tag 269 doc_type = rule.assign_document_type 270 271 processed_attachments = 0 272 273 for att in message.attachments: 274 275 if ( 276 not att.content_disposition == "attachment" 277 and rule.attachment_type == MailRule.ATTACHMENT_TYPE_ATTACHMENTS_ONLY 278 ): 279 self.log( 280 "debug", 281 f"Rule {rule}: " 282 f"Skipping attachment {att.filename} " 283 f"with content disposition {att.content_disposition}", 284 ) 285 continue 286 287 if rule.filter_attachment_filename: 288 if not fnmatch(att.filename, rule.filter_attachment_filename): 289 continue 290 291 title = self.get_title(message, att, rule) 292 293 # don't trust the content type of the attachment. Could be 294 # generic application/octet-stream. 
295 mime_type = magic.from_buffer(att.payload, mime=True) 296 297 if is_mime_type_supported(mime_type): 298 299 os.makedirs(settings.SCRATCH_DIR, exist_ok=True) 300 _, temp_filename = tempfile.mkstemp( 301 prefix="paperless-mail-", 302 dir=settings.SCRATCH_DIR, 303 ) 304 with open(temp_filename, "wb") as f: 305 f.write(att.payload) 306 307 self.log( 308 "info", 309 f"Rule {rule}: " 310 f"Consuming attachment {att.filename} from mail " 311 f"{message.subject} from {message.from_}", 312 ) 313 314 async_task( 315 "documents.tasks.consume_file", 316 path=temp_filename, 317 override_filename=pathvalidate.sanitize_filename( 318 att.filename, 319 ), 320 override_title=title, 321 override_correspondent_id=correspondent.id 322 if correspondent 323 else None, 324 override_document_type_id=doc_type.id if doc_type else None, 325 override_tag_ids=[tag.id] if tag else None, 326 task_name=att.filename[:100], 327 ) 328 329 processed_attachments += 1 330 else: 331 self.log( 332 "debug", 333 f"Rule {rule}: " 334 f"Skipping attachment {att.filename} " 335 f"since guessed mime type {mime_type} is not supported " 336 f"by paperless", 337 ) 338 339 return processed_attachments 340 [end of src/paperless_mail/mail.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/paperless_mail/mail.py b/src/paperless_mail/mail.py --- a/src/paperless_mail/mail.py +++ b/src/paperless_mail/mail.py @@ -253,7 +253,7 @@ return total_processed_files - def handle_message(self, message, rule): + def handle_message(self, message, rule) -> int: if not message.attachments: return 0 @@ -285,7 +285,12 @@ continue if rule.filter_attachment_filename: - if not fnmatch(att.filename, rule.filter_attachment_filename): + # Force the filename and pattern to the lowercase + # as this is system dependent otherwise + if not fnmatch( + att.filename.lower(), + rule.filter_attachment_filename.lower(), + ): continue title = self.get_title(message, att, rule)
{"golden_diff": "diff --git a/src/paperless_mail/mail.py b/src/paperless_mail/mail.py\n--- a/src/paperless_mail/mail.py\n+++ b/src/paperless_mail/mail.py\n@@ -253,7 +253,7 @@\n \n return total_processed_files\n \n- def handle_message(self, message, rule):\n+ def handle_message(self, message, rule) -> int:\n if not message.attachments:\n return 0\n \n@@ -285,7 +285,12 @@\n continue\n \n if rule.filter_attachment_filename:\n- if not fnmatch(att.filename, rule.filter_attachment_filename):\n+ # Force the filename and pattern to the lowercase\n+ # as this is system dependent otherwise\n+ if not fnmatch(\n+ att.filename.lower(),\n+ rule.filter_attachment_filename.lower(),\n+ ):\n continue\n \n title = self.get_title(message, att, rule)\n", "issue": "[BUG] Mail rule filter attachment filename is case sensitive\n### Description\r\n\r\nThe \"Mail rules\" interface says the Filter attachment filename should be case insensitive:\r\n\r\n> Only consume documents which entirely match this filename if specified. Wildcards such as *.pdf or \\*invoice\\* are allowed. Case insensitive.\r\n\r\nThe latest revision of mail.py uses fnmatch, which follows the operating system's rules for case sensitivity:\r\n\r\nhttps://github.com/paperless-ngx/paperless-ngx/blob/a728502988fe26da9aa2844916dcfcc2455d0be2/src/paperless_mail/mail.py#L286-L288\r\n\r\nIssue ported from https://github.com/jonaswinkler/paperless-ng/issues/1394\r\n\r\n### Expected behavior\r\n\r\nMail attachments should be matched case insensitive to the Filter attachment filename.\r\n\r\n### Steps to reproduce\r\n\r\n1. Set up paperless to consume from a mailbox\r\n2. Set up a mail rule with a filter attachment filename of *.pdf\r\n3. Send an attachment ending in *.PDF\r\n\r\n### Webserver logs\r\n\r\n_No response_\r\n\r\n### Screenshots\r\n\r\n_No response_\r\n\r\n### Paperless-ngx version\r\n\r\n1.6.0\r\n\r\n### Host OS\r\n\r\nOracle Linux 8\r\n\r\n### Installation method\r\n\r\nDocker\r\n\r\n### Browser\r\n\r\n_No response_\r\n\r\n### Configuration changes\r\n\r\n_No response_\r\n\r\n### Other\r\n\r\n_No response_\n", "before_files": [{"content": "import os\nimport tempfile\nfrom datetime import date\nfrom datetime import timedelta\nfrom fnmatch import fnmatch\n\nimport magic\nimport pathvalidate\nfrom django.conf import settings\nfrom django.db import DatabaseError\nfrom django_q.tasks import async_task\nfrom documents.loggers import LoggingMixin\nfrom documents.models import Correspondent\nfrom documents.parsers import is_mime_type_supported\nfrom imap_tools import AND\nfrom imap_tools import MailBox\nfrom imap_tools import MailboxFolderSelectError\nfrom imap_tools import MailBoxUnencrypted\nfrom imap_tools import MailMessage\nfrom imap_tools import MailMessageFlags\nfrom paperless_mail.models import MailAccount\nfrom paperless_mail.models import MailRule\n\n\nclass MailError(Exception):\n pass\n\n\nclass BaseMailAction:\n def get_criteria(self):\n return {}\n\n def post_consume(self, M, message_uids, parameter):\n pass # pragma: nocover\n\n\nclass DeleteMailAction(BaseMailAction):\n def post_consume(self, M, message_uids, parameter):\n M.delete(message_uids)\n\n\nclass MarkReadMailAction(BaseMailAction):\n def get_criteria(self):\n return {\"seen\": False}\n\n def post_consume(self, M, message_uids, parameter):\n M.flag(message_uids, [MailMessageFlags.SEEN], True)\n\n\nclass MoveMailAction(BaseMailAction):\n def post_consume(self, M, message_uids, parameter):\n M.move(message_uids, parameter)\n\n\nclass FlagMailAction(BaseMailAction):\n def 
get_criteria(self):\n return {\"flagged\": False}\n\n def post_consume(self, M, message_uids, parameter):\n M.flag(message_uids, [MailMessageFlags.FLAGGED], True)\n\n\ndef get_rule_action(rule):\n if rule.action == MailRule.ACTION_FLAG:\n return FlagMailAction()\n elif rule.action == MailRule.ACTION_DELETE:\n return DeleteMailAction()\n elif rule.action == MailRule.ACTION_MOVE:\n return MoveMailAction()\n elif rule.action == MailRule.ACTION_MARK_READ:\n return MarkReadMailAction()\n else:\n raise NotImplementedError(\"Unknown action.\") # pragma: nocover\n\n\ndef make_criterias(rule):\n maximum_age = date.today() - timedelta(days=rule.maximum_age)\n criterias = {}\n if rule.maximum_age > 0:\n criterias[\"date_gte\"] = maximum_age\n if rule.filter_from:\n criterias[\"from_\"] = rule.filter_from\n if rule.filter_subject:\n criterias[\"subject\"] = rule.filter_subject\n if rule.filter_body:\n criterias[\"body\"] = rule.filter_body\n\n return {**criterias, **get_rule_action(rule).get_criteria()}\n\n\ndef get_mailbox(server, port, security):\n if security == MailAccount.IMAP_SECURITY_NONE:\n mailbox = MailBoxUnencrypted(server, port)\n elif security == MailAccount.IMAP_SECURITY_STARTTLS:\n mailbox = MailBox(server, port, starttls=True)\n elif security == MailAccount.IMAP_SECURITY_SSL:\n mailbox = MailBox(server, port)\n else:\n raise NotImplementedError(\"Unknown IMAP security\") # pragma: nocover\n return mailbox\n\n\nclass MailAccountHandler(LoggingMixin):\n\n logging_name = \"paperless_mail\"\n\n def _correspondent_from_name(self, name):\n try:\n return Correspondent.objects.get_or_create(name=name)[0]\n except DatabaseError as e:\n self.log(\"error\", f\"Error while retrieving correspondent {name}: {e}\")\n return None\n\n def get_title(self, message, att, rule):\n if rule.assign_title_from == MailRule.TITLE_FROM_SUBJECT:\n return message.subject\n\n elif rule.assign_title_from == MailRule.TITLE_FROM_FILENAME:\n return os.path.splitext(os.path.basename(att.filename))[0]\n\n else:\n raise NotImplementedError(\n \"Unknown title selector.\",\n ) # pragma: nocover\n\n def get_correspondent(self, message: MailMessage, rule):\n c_from = rule.assign_correspondent_from\n\n if c_from == MailRule.CORRESPONDENT_FROM_NOTHING:\n return None\n\n elif c_from == MailRule.CORRESPONDENT_FROM_EMAIL:\n return self._correspondent_from_name(message.from_)\n\n elif c_from == MailRule.CORRESPONDENT_FROM_NAME:\n from_values = message.from_values\n if from_values is not None and len(from_values.name) > 0:\n return self._correspondent_from_name(from_values.name)\n else:\n return self._correspondent_from_name(message.from_)\n\n elif c_from == MailRule.CORRESPONDENT_FROM_CUSTOM:\n return rule.assign_correspondent\n\n else:\n raise NotImplementedError(\n \"Unknwown correspondent selector\",\n ) # pragma: nocover\n\n def handle_mail_account(self, account):\n\n self.renew_logging_group()\n\n self.log(\"debug\", f\"Processing mail account {account}\")\n\n total_processed_files = 0\n\n with get_mailbox(\n account.imap_server,\n account.imap_port,\n account.imap_security,\n ) as M:\n\n try:\n M.login(account.username, account.password)\n except Exception:\n raise MailError(f\"Error while authenticating account {account}\")\n\n self.log(\n \"debug\",\n f\"Account {account}: Processing \" f\"{account.rules.count()} rule(s)\",\n )\n\n for rule in account.rules.order_by(\"order\"):\n try:\n total_processed_files += self.handle_mail_rule(M, rule)\n except Exception as e:\n self.log(\n \"error\",\n f\"Rule {rule}: Error while 
processing rule: {e}\",\n exc_info=True,\n )\n\n return total_processed_files\n\n def handle_mail_rule(self, M, rule):\n\n self.log(\"debug\", f\"Rule {rule}: Selecting folder {rule.folder}\")\n\n try:\n M.folder.set(rule.folder)\n except MailboxFolderSelectError:\n raise MailError(\n f\"Rule {rule}: Folder {rule.folder} \"\n f\"does not exist in account {rule.account}\",\n )\n\n criterias = make_criterias(rule)\n\n self.log(\n \"debug\",\n f\"Rule {rule}: Searching folder with criteria \" f\"{str(AND(**criterias))}\",\n )\n\n try:\n messages = M.fetch(\n criteria=AND(**criterias),\n mark_seen=False,\n charset=rule.account.character_set,\n )\n except Exception:\n raise MailError(f\"Rule {rule}: Error while fetching folder {rule.folder}\")\n\n post_consume_messages = []\n\n mails_processed = 0\n total_processed_files = 0\n\n for message in messages:\n try:\n processed_files = self.handle_message(message, rule)\n if processed_files > 0:\n post_consume_messages.append(message.uid)\n\n total_processed_files += processed_files\n mails_processed += 1\n except Exception as e:\n self.log(\n \"error\",\n f\"Rule {rule}: Error while processing mail \" f\"{message.uid}: {e}\",\n exc_info=True,\n )\n\n self.log(\"debug\", f\"Rule {rule}: Processed {mails_processed} matching mail(s)\")\n\n self.log(\n \"debug\",\n f\"Rule {rule}: Running mail actions on \"\n f\"{len(post_consume_messages)} mails\",\n )\n\n try:\n get_rule_action(rule).post_consume(\n M,\n post_consume_messages,\n rule.action_parameter,\n )\n\n except Exception as e:\n raise MailError(\n f\"Rule {rule}: Error while processing post-consume actions: \" f\"{e}\",\n )\n\n return total_processed_files\n\n def handle_message(self, message, rule):\n if not message.attachments:\n return 0\n\n self.log(\n \"debug\",\n f\"Rule {rule}: \"\n f\"Processing mail {message.subject} from {message.from_} with \"\n f\"{len(message.attachments)} attachment(s)\",\n )\n\n correspondent = self.get_correspondent(message, rule)\n tag = rule.assign_tag\n doc_type = rule.assign_document_type\n\n processed_attachments = 0\n\n for att in message.attachments:\n\n if (\n not att.content_disposition == \"attachment\"\n and rule.attachment_type == MailRule.ATTACHMENT_TYPE_ATTACHMENTS_ONLY\n ):\n self.log(\n \"debug\",\n f\"Rule {rule}: \"\n f\"Skipping attachment {att.filename} \"\n f\"with content disposition {att.content_disposition}\",\n )\n continue\n\n if rule.filter_attachment_filename:\n if not fnmatch(att.filename, rule.filter_attachment_filename):\n continue\n\n title = self.get_title(message, att, rule)\n\n # don't trust the content type of the attachment. 
Could be\n # generic application/octet-stream.\n mime_type = magic.from_buffer(att.payload, mime=True)\n\n if is_mime_type_supported(mime_type):\n\n os.makedirs(settings.SCRATCH_DIR, exist_ok=True)\n _, temp_filename = tempfile.mkstemp(\n prefix=\"paperless-mail-\",\n dir=settings.SCRATCH_DIR,\n )\n with open(temp_filename, \"wb\") as f:\n f.write(att.payload)\n\n self.log(\n \"info\",\n f\"Rule {rule}: \"\n f\"Consuming attachment {att.filename} from mail \"\n f\"{message.subject} from {message.from_}\",\n )\n\n async_task(\n \"documents.tasks.consume_file\",\n path=temp_filename,\n override_filename=pathvalidate.sanitize_filename(\n att.filename,\n ),\n override_title=title,\n override_correspondent_id=correspondent.id\n if correspondent\n else None,\n override_document_type_id=doc_type.id if doc_type else None,\n override_tag_ids=[tag.id] if tag else None,\n task_name=att.filename[:100],\n )\n\n processed_attachments += 1\n else:\n self.log(\n \"debug\",\n f\"Rule {rule}: \"\n f\"Skipping attachment {att.filename} \"\n f\"since guessed mime type {mime_type} is not supported \"\n f\"by paperless\",\n )\n\n return processed_attachments\n", "path": "src/paperless_mail/mail.py"}]}
3,964
198
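The paperless-ngx fix above works because `fnmatch.fnmatch` defers to the operating system's case rules, while lower-casing both the attachment name and the pattern makes the comparison case-insensitive on every platform. A small standalone check of that behaviour:

```python
from fnmatch import fnmatch


def filename_matches(filename, pattern):
    # Lower-case both sides so '*.pdf' also matches 'Invoice.PDF',
    # regardless of the host file system's case sensitivity.
    return fnmatch(filename.lower(), pattern.lower())


assert filename_matches("Invoice.PDF", "*.pdf")
assert not filename_matches("Invoice.PDF.txt", "*.pdf")
```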
gh_patches_debug_4889
rasdani/github-patches
git_diff
readthedocs__readthedocs.org-4990
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Remove $ before shell commands in docs The developers have a [preference](https://github.com/rtfd/readthedocs.org/pull/4676#discussion_r221400605) to not have a `$` before shell commands in the docs. This makes it easier to copy and paste from our docs. We should remove it everywhere. The following command should show it everywhere. grep -Ri " $ " docs/*.rst docs/*/*.rst </issue> <code> [start of docs/conf.py] 1 # -*- coding: utf-8 -*- 2 3 from __future__ import division, print_function, unicode_literals 4 5 import os 6 import sys 7 8 import sphinx_rtd_theme 9 from recommonmark.parser import CommonMarkParser 10 11 sys.path.insert(0, os.path.abspath('..')) 12 sys.path.append(os.path.dirname(__file__)) 13 os.environ.setdefault("DJANGO_SETTINGS_MODULE", "readthedocs.settings.dev") 14 15 from django.conf import settings 16 from django.utils import timezone 17 18 import django 19 django.setup() 20 21 22 sys.path.append(os.path.abspath('_ext')) 23 extensions = [ 24 'sphinx.ext.autosectionlabel', 25 'sphinx.ext.autodoc', 26 'sphinx.ext.intersphinx', 27 'sphinxcontrib.httpdomain', 28 'djangodocs', 29 'doc_extensions', 30 'sphinx_tabs.tabs', 31 ] 32 templates_path = ['_templates'] 33 34 source_suffix = ['.rst', '.md'] 35 source_parsers = { 36 '.md': CommonMarkParser, 37 } 38 39 master_doc = 'index' 40 project = u'Read the Docs' 41 copyright = '2010-{}, Read the Docs, Inc & contributors'.format( 42 timezone.now().year 43 ) 44 version = '2.7' 45 release = version 46 exclude_patterns = ['_build'] 47 default_role = 'obj' 48 intersphinx_mapping = { 49 'python': ('http://python.readthedocs.io/en/latest/', None), 50 'django': ('http://django.readthedocs.io/en/1.9.x/', None), 51 'sphinx': ('http://sphinx.readthedocs.io/en/latest/', None), 52 } 53 htmlhelp_basename = 'ReadTheDocsdoc' 54 latex_documents = [ 55 ('index', 'ReadTheDocs.tex', u'Read the Docs Documentation', 56 u'Eric Holscher, Charlie Leifer, Bobby Grace', 'manual'), 57 ] 58 man_pages = [ 59 ('index', 'read-the-docs', u'Read the Docs Documentation', 60 [u'Eric Holscher, Charlie Leifer, Bobby Grace'], 1) 61 ] 62 63 exclude_patterns = [ 64 # 'api' # needed for ``make gettext`` to not die. 65 ] 66 67 language = 'en' 68 69 locale_dirs = [ 70 'locale/', 71 ] 72 gettext_compact = False 73 74 html_theme = 'sphinx_rtd_theme' 75 html_static_path = ['_static'] 76 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] 77 html_logo = 'img/logo.svg' 78 html_theme_options = { 79 'logo_only': True, 80 'display_version': False, 81 } 82 83 # Activate autosectionlabel plugin 84 autosectionlabel_prefix_document = True 85 [end of docs/conf.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -28,6 +28,7 @@ 'djangodocs', 'doc_extensions', 'sphinx_tabs.tabs', + 'sphinx-prompt', ] templates_path = ['_templates'] @@ -82,3 +83,7 @@ # Activate autosectionlabel plugin autosectionlabel_prefix_document = True + + +def setup(app): + app.add_stylesheet('css/sphinx_prompt_css.css')
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -28,6 +28,7 @@\n 'djangodocs',\n 'doc_extensions',\n 'sphinx_tabs.tabs',\n+ 'sphinx-prompt',\n ]\n templates_path = ['_templates']\n \n@@ -82,3 +83,7 @@\n \n # Activate autosectionlabel plugin\n autosectionlabel_prefix_document = True\n+\n+\n+def setup(app):\n+ app.add_stylesheet('css/sphinx_prompt_css.css')\n", "issue": "Remove $ before shell commands in docs\nThe developers have a [preference](https://github.com/rtfd/readthedocs.org/pull/4676#discussion_r221400605) to not have a `$` before shell commands in the docs. This makes it easier to copy and paste from our docs. We should remove it everywhere. The following command should show it everywhere.\r\n\r\n grep -Ri \" $ \" docs/*.rst docs/*/*.rst\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom __future__ import division, print_function, unicode_literals\n\nimport os\nimport sys\n\nimport sphinx_rtd_theme\nfrom recommonmark.parser import CommonMarkParser\n\nsys.path.insert(0, os.path.abspath('..'))\nsys.path.append(os.path.dirname(__file__))\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"readthedocs.settings.dev\")\n\nfrom django.conf import settings\nfrom django.utils import timezone\n\nimport django\ndjango.setup()\n\n\nsys.path.append(os.path.abspath('_ext'))\nextensions = [\n 'sphinx.ext.autosectionlabel',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.intersphinx',\n 'sphinxcontrib.httpdomain',\n 'djangodocs',\n 'doc_extensions',\n 'sphinx_tabs.tabs',\n]\ntemplates_path = ['_templates']\n\nsource_suffix = ['.rst', '.md']\nsource_parsers = {\n '.md': CommonMarkParser,\n}\n\nmaster_doc = 'index'\nproject = u'Read the Docs'\ncopyright = '2010-{}, Read the Docs, Inc & contributors'.format(\n timezone.now().year\n)\nversion = '2.7'\nrelease = version\nexclude_patterns = ['_build']\ndefault_role = 'obj'\nintersphinx_mapping = {\n 'python': ('http://python.readthedocs.io/en/latest/', None),\n 'django': ('http://django.readthedocs.io/en/1.9.x/', None),\n 'sphinx': ('http://sphinx.readthedocs.io/en/latest/', None),\n}\nhtmlhelp_basename = 'ReadTheDocsdoc'\nlatex_documents = [\n ('index', 'ReadTheDocs.tex', u'Read the Docs Documentation',\n u'Eric Holscher, Charlie Leifer, Bobby Grace', 'manual'),\n]\nman_pages = [\n ('index', 'read-the-docs', u'Read the Docs Documentation',\n [u'Eric Holscher, Charlie Leifer, Bobby Grace'], 1)\n]\n\nexclude_patterns = [\n # 'api' # needed for ``make gettext`` to not die.\n]\n\nlanguage = 'en'\n\nlocale_dirs = [\n 'locale/',\n]\ngettext_compact = False\n\nhtml_theme = 'sphinx_rtd_theme'\nhtml_static_path = ['_static']\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\nhtml_logo = 'img/logo.svg'\nhtml_theme_options = {\n 'logo_only': True,\n 'display_version': False,\n}\n\n# Activate autosectionlabel plugin\nautosectionlabel_prefix_document = True\n", "path": "docs/conf.py"}]}
1,344
122
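With `sphinx-prompt` enabled as in the patch above, shell snippets in the .rst sources are typically rewritten as `.. prompt:: bash` directives, so the rendered `$` is drawn by the extension and excluded from copy-paste. Finding the remaining literal prompts can be done with the `grep` from the issue; a rough Python counterpart of that sweep (illustrative, not part of the Read the Docs sources):

```python
import pathlib
import re

PROMPT_LINE = re.compile(r"^\s*\$ ", re.MULTILINE)


def find_leftover_prompts(docs_dir="docs"):
    # List every .rst file that still contains an indented "$ " prompt line,
    # mirroring the `grep -Ri " $ " docs/*.rst docs/*/*.rst` check.
    hits = []
    for path in pathlib.Path(docs_dir).rglob("*.rst"):
        if PROMPT_LINE.search(path.read_text(encoding="utf-8")):
            hits.append(path)
    return hits
```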
gh_patches_debug_36067
rasdani/github-patches
git_diff
mampfes__hacs_waste_collection_schedule-594
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> provide custom HEADERS in ical source Hello, I'm trying to download an ics schedule from my providers homepage. It only works when a "Referer":"<url>" Header is set. Otherwise a 403 Error occure. I manually changed the fixed Header (currently the user-agent) in the ics.py, but that will be overridden in future updates, i guess. My question (or feature request, i suppose) is, is there a way to provide such a custom Header in the source configuration for ics? </issue> <code> [start of custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py] 1 import datetime 2 import logging 3 from os import getcwd 4 from pathlib import Path 5 6 import requests 7 from waste_collection_schedule import Collection # type: ignore[attr-defined] 8 from waste_collection_schedule.service.ICS import ICS 9 from waste_collection_schedule.service.ICS_v1 import ICS_v1 10 11 TITLE = "ICS" 12 DESCRIPTION = "Source for ICS based schedules." 13 URL = None 14 TEST_CASES = { 15 "Dortmund, Dudenstr. 5": { 16 "url": "https://www.edg.de/ical/kalender.ics?Strasse=Dudenstr.&Hausnummer=5&Erinnerung=-1&Abfallart=1,2,3,4" 17 }, 18 "Leipzig, Sandgrubenweg 27": { 19 "url": "https://stadtreinigung-leipzig.de/wir-kommen-zu-ihnen/abfallkalender/ical.ics?position_nos=38296&name=Sandgrubenweg%2027" 20 }, 21 "Ludwigsburg": { 22 "url": "https://www.avl-ludwigsburg.de/fileadmin/Files/Abfallkalender/ICS/Privat/Privat_{%Y}_Ossweil.ics" 23 }, 24 "Esslingen, Bahnhof": { 25 "url": "https://api.abfall.io/?kh=DaA02103019b46345f1998698563DaAd&t=ics&s=1a862df26f6943997cef90233877a4fe" 26 }, 27 "Test File": { 28 # Path is used here to allow to call the Source from any location. 29 # This is not required in a yaml configuration! 30 "file": str(Path(__file__).resolve().parents[1].joinpath("test/test.ics")) 31 }, 32 "Test File (recurring)": { 33 # Path is used here to allow to call the Source from any location. 34 # This is not required in a yaml configuration! 35 "file": str(Path(__file__).resolve().parents[1].joinpath("test/recurring.ics")) 36 }, 37 "München, Bahnstr. 
11": { 38 "url": "https://www.awm-muenchen.de/entsorgen/abfuhrkalender?tx_awmabfuhrkalender_abfuhrkalender%5Bhausnummer%5D=11&tx_awmabfuhrkalender_abfuhrkalender%5Bleerungszyklus%5D%5BB%5D=1%2F2%3BU&tx_awmabfuhrkalender_abfuhrkalender%5Bleerungszyklus%5D%5BP%5D=1%2F2%3BG&tx_awmabfuhrkalender_abfuhrkalender%5Bleerungszyklus%5D%5BR%5D=001%3BU&tx_awmabfuhrkalender_abfuhrkalender%5Bsection%5D=ics&tx_awmabfuhrkalender_abfuhrkalender%5Bsinglestandplatz%5D=false&tx_awmabfuhrkalender_abfuhrkalender%5Bstandplatzwahl%5D=true&tx_awmabfuhrkalender_abfuhrkalender%5Bstellplatz%5D%5Bbio%5D=70024507&tx_awmabfuhrkalender_abfuhrkalender%5Bstellplatz%5D%5Bpapier%5D=70024507&tx_awmabfuhrkalender_abfuhrkalender%5Bstellplatz%5D%5Brestmuell%5D=70024507&tx_awmabfuhrkalender_abfuhrkalender%5Bstrasse%5D=bahnstr.&tx_awmabfuhrkalender_abfuhrkalender%5Byear%5D={%Y}", 39 "version": 1, 40 }, 41 "Buxtehude, Am Berg": { 42 "url": "https://abfall.landkreis-stade.de/api_v2/collection_dates/1/ort/10/strasse/90/hausnummern/1/abfallarten/R02-R04-B02-D04-D12-P04-R12-R14-W0-R22-R24-R31/kalender.ics" 43 }, 44 # "Hausmüllinfo: ASR Chemnitz": { 45 # "url": "https://asc.hausmuell.info/ics/ics.php", 46 # "method": "POST", 47 # "params": { 48 # "hidden_id_egebiet": 439087, 49 # "input_ort": "Chemnitz", 50 # "input_str": "Straße der Nationen", 51 # "input_hnr": 2, 52 # "hidden_send_btn": "ics", 53 # # "hiddenYear": 2021, 54 # "hidden_id_ort": 10, 55 # "hidden_id_ortsteil": 0, 56 # "hidden_id_str": 17814, 57 # "hidden_id_hnr": 5538100, 58 # "hidden_kalenderart": "privat", 59 # "showBinsBio": "on", 60 # "showBinsRest": "on", 61 # "showBinsRest_rc": "on", 62 # "showBinsPapier": "on", 63 # "showBinsOrganic": "on", 64 # "showBinsXmas": "on", 65 # "showBinsDsd": "on", 66 # "showBinsProb": "on", 67 # }, 68 # "year_field": "hiddenYear", 69 # }, 70 "Abfall Zollernalbkreis, Ebingen": { 71 "url": "https://www.abfallkalender-zak.de", 72 "params": { 73 "city": "2,3,4", 74 "street": "3", 75 "types[]": [ 76 "restmuell", 77 "gelbersack", 78 "papiertonne", 79 "biomuell", 80 "gruenabfall", 81 "schadstoffsammlung", 82 "altpapiersammlung", 83 "schrottsammlung", 84 "weihnachtsbaeume", 85 "elektrosammlung", 86 ], 87 "go_ics": "Download", 88 }, 89 "year_field": "year", 90 }, 91 "Detmold": { 92 "url": "https://abfuhrkalender.detmold.de/icsmaker.php", 93 "method": "GET", 94 "params": {"strid": 338}, 95 "year_field": "year", 96 }, 97 "EAW Rheingau Taunus": { 98 "url": "https://www.eaw-rheingau-taunus.de/abfallsammlung/abfuhrtermine/feed.ics?tx_vierwdeaw_garbagecalendarics%5Baction%5D=ics&tx_vierwdeaw_garbagecalendarics%5Bcontroller%5D=GarbageCalendar&tx_vierwdeaw_garbagecalendarics%5Bstreet%5D=38", 99 "split_at": ",", 100 }, 101 "Recollect, Ottawa": { 102 "url": "https://recollect.a.ssl.fastly.net/api/places/BCCDF30E-578B-11E4-AD38-5839C200407A/services/208/events.en.ics", 103 "split_at": "\\, [and ]*", 104 }, 105 "Frankfurt am Main, Achenbachstrasse 3": { 106 "url": "https://www.fes-frankfurt.de/abfallkalender/QWNoZW5iYWNoc3RyLnwzfDYwNTk2.ics" 107 }, 108 "Erlensee, Am Haspel": { 109 "url": "https://sperrmuell.erlensee.de/?type=reminder", 110 "method": "POST", 111 "params": { 112 "street": 8, 113 "eventType[]": [27, 23, 19, 20, 21, 24, 22, 25, 26], 114 "timeframe": 23, 115 "download": "ical", 116 }, 117 }, 118 } 119 120 121 HEADERS = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"} 122 _LOGGER = logging.getLogger(__name__) 123 124 125 class Source: 126 def __init__( 127 self, 128 url=None, 129 file=None, 130 offset=None, 131 params=None, 132 
year_field=None, 133 method="GET", 134 regex=None, 135 split_at=None, 136 version=2, 137 verify_ssl=True, 138 ): 139 self._url = url 140 self._file = file 141 if bool(self._url is not None) == bool(self._file is not None): 142 raise RuntimeError("Specify either url or file") 143 if version == 1: 144 self._ics = ICS_v1(offset=offset, split_at=split_at, regex=regex) 145 else: 146 self._ics = ICS(offset=offset, split_at=split_at, regex=regex) 147 self._params = params 148 self._year_field = year_field # replace this field in params with current year 149 self._method = method # The method to send the params 150 self._verify_ssl = verify_ssl 151 152 def fetch(self): 153 if self._url is not None: 154 if "{%Y}" in self._url or self._year_field is not None: 155 # url contains wildcard or params contains year field 156 now = datetime.datetime.now() 157 158 # replace year in url 159 url = self._url.replace("{%Y}", str(now.year)) 160 161 # replace year in params 162 if self._year_field is not None: 163 if self._params is None: 164 raise RuntimeError("year_field specified without params") 165 self._params[self._year_field] = str(now.year) 166 167 entries = self.fetch_url(url, self._params) 168 169 if now.month == 12: 170 # also get data for next year if we are already in december 171 url = self._url.replace("{%Y}", str(now.year + 1)) 172 if self._year_field is not None: 173 self._params[self._year_field] = str(now.year + 1) 174 175 try: 176 entries.extend(self.fetch_url(url, self._params)) 177 except Exception: 178 # ignore if fetch for next year fails 179 pass 180 return entries 181 else: 182 return self.fetch_url(self._url, self._params) 183 elif self._file is not None: 184 return self.fetch_file(self._file) 185 186 def fetch_url(self, url, params=None): 187 # get ics file 188 if self._method == "GET": 189 r = requests.get( 190 url, params=params, headers=HEADERS, verify=self._verify_ssl 191 ) 192 elif self._method == "POST": 193 r = requests.post( 194 url, data=params, headers=HEADERS, verify=self._verify_ssl 195 ) 196 else: 197 raise RuntimeError( 198 "Error: unknown method to fetch URL, use GET or POST; got {self._method}" 199 ) 200 r.raise_for_status() 201 202 r.encoding = "utf-8" # requests doesn't guess the encoding correctly 203 return self._convert(r.text) 204 205 def fetch_file(self, file): 206 try: 207 f = open(file) 208 except FileNotFoundError as e: 209 _LOGGER.error(f"Working directory: '{getcwd()}'") 210 raise 211 return self._convert(f.read()) 212 213 def _convert(self, data): 214 dates = self._ics.convert(data) 215 216 entries = [] 217 for d in dates: 218 entries.append(Collection(d[0], d[1])) 219 return entries 220 [end of custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py --- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py +++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py @@ -19,7 +19,7 @@ "url": "https://stadtreinigung-leipzig.de/wir-kommen-zu-ihnen/abfallkalender/ical.ics?position_nos=38296&name=Sandgrubenweg%2027" }, "Ludwigsburg": { - "url": "https://www.avl-ludwigsburg.de/fileadmin/Files/Abfallkalender/ICS/Privat/Privat_{%Y}_Ossweil.ics" + "url": "https://kundenportal.avl-lb.de/WasteManagementLudwigsburg/WasteManagementServiceServlet?ApplicationName=Calendar&SubmitAction=sync&StandortID=950230001&AboID=8188&Fra=BT;RT;PT;LT;GT" }, "Esslingen, Bahnhof": { "url": "https://api.abfall.io/?kh=DaA02103019b46345f1998698563DaAd&t=ics&s=1a862df26f6943997cef90233877a4fe" @@ -135,6 +135,7 @@ split_at=None, version=2, verify_ssl=True, + headers={}, ): self._url = url self._file = file @@ -148,6 +149,8 @@ self._year_field = year_field # replace this field in params with current year self._method = method # The method to send the params self._verify_ssl = verify_ssl + self._headers = HEADERS + self._headers.update(headers) def fetch(self): if self._url is not None: @@ -187,11 +190,11 @@ # get ics file if self._method == "GET": r = requests.get( - url, params=params, headers=HEADERS, verify=self._verify_ssl + url, params=params, headers=self._headers, verify=self._verify_ssl ) elif self._method == "POST": r = requests.post( - url, data=params, headers=HEADERS, verify=self._verify_ssl + url, data=params, headers=self._headers, verify=self._verify_ssl ) else: raise RuntimeError(
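For illustration only, a minimal sketch of how a custom Referer header could be passed once the headers argument from the patch above exists. The import path, the example URL, and the header value are assumptions made up for this sketch; only the headers={} keyword and its merge into the module-level HEADERS dict come from the diff.

# Minimal usage sketch of the patched ics.py Source (assumptions noted below).
# Assumed: the source module is importable as waste_collection_schedule.source.ics;
# the URL and Referer below are placeholders, not a real provider endpoint.
from waste_collection_schedule.source.ics import Source

source = Source(
    url="https://provider.example/abfallkalender.ics",   # placeholder ICS endpoint that returns 403 without a Referer
    headers={"Referer": "https://provider.example/"},    # merged into the default HEADERS by __init__
)

for collection in source.fetch():
    # fetch() returns Collection objects built by _convert() from the parsed ICS dates
    print(collection)

One behavioural detail of the patch as written: self._headers = HEADERS binds the module-level dict itself, so update(headers) also mutates HEADERS and the custom header carries over to every later Source instance in the same process; copying first (for example self._headers = dict(HEADERS)) would keep instances independent. The sketch above simply follows the patch as given.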
{"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py\n@@ -19,7 +19,7 @@\n \"url\": \"https://stadtreinigung-leipzig.de/wir-kommen-zu-ihnen/abfallkalender/ical.ics?position_nos=38296&name=Sandgrubenweg%2027\"\n },\n \"Ludwigsburg\": {\n- \"url\": \"https://www.avl-ludwigsburg.de/fileadmin/Files/Abfallkalender/ICS/Privat/Privat_{%Y}_Ossweil.ics\"\n+ \"url\": \"https://kundenportal.avl-lb.de/WasteManagementLudwigsburg/WasteManagementServiceServlet?ApplicationName=Calendar&SubmitAction=sync&StandortID=950230001&AboID=8188&Fra=BT;RT;PT;LT;GT\"\n },\n \"Esslingen, Bahnhof\": {\n \"url\": \"https://api.abfall.io/?kh=DaA02103019b46345f1998698563DaAd&t=ics&s=1a862df26f6943997cef90233877a4fe\"\n@@ -135,6 +135,7 @@\n split_at=None,\n version=2,\n verify_ssl=True,\n+ headers={},\n ):\n self._url = url\n self._file = file\n@@ -148,6 +149,8 @@\n self._year_field = year_field # replace this field in params with current year\n self._method = method # The method to send the params\n self._verify_ssl = verify_ssl\n+ self._headers = HEADERS\n+ self._headers.update(headers)\n \n def fetch(self):\n if self._url is not None:\n@@ -187,11 +190,11 @@\n # get ics file\n if self._method == \"GET\":\n r = requests.get(\n- url, params=params, headers=HEADERS, verify=self._verify_ssl\n+ url, params=params, headers=self._headers, verify=self._verify_ssl\n )\n elif self._method == \"POST\":\n r = requests.post(\n- url, data=params, headers=HEADERS, verify=self._verify_ssl\n+ url, data=params, headers=self._headers, verify=self._verify_ssl\n )\n else:\n raise RuntimeError(\n", "issue": "provide custom HEADERS in ical source\nHello, \r\n\r\nI'm trying to download an ics schedule from my providers homepage. It only works when a \"Referer\":\"<url>\" Header is set. Otherwise a 403 Error occure. I manually changed the fixed Header (currently the user-agent) in the ics.py, but that will be overridden in future updates, i guess.\r\n\r\nMy question (or feature request, i suppose) is, is there a way to provide such a custom Header in the source configuration for ics?\n", "before_files": [{"content": "import datetime\nimport logging\nfrom os import getcwd\nfrom pathlib import Path\n\nimport requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS import ICS\nfrom waste_collection_schedule.service.ICS_v1 import ICS_v1\n\nTITLE = \"ICS\"\nDESCRIPTION = \"Source for ICS based schedules.\"\nURL = None\nTEST_CASES = {\n \"Dortmund, Dudenstr. 
5\": {\n \"url\": \"https://www.edg.de/ical/kalender.ics?Strasse=Dudenstr.&Hausnummer=5&Erinnerung=-1&Abfallart=1,2,3,4\"\n },\n \"Leipzig, Sandgrubenweg 27\": {\n \"url\": \"https://stadtreinigung-leipzig.de/wir-kommen-zu-ihnen/abfallkalender/ical.ics?position_nos=38296&name=Sandgrubenweg%2027\"\n },\n \"Ludwigsburg\": {\n \"url\": \"https://www.avl-ludwigsburg.de/fileadmin/Files/Abfallkalender/ICS/Privat/Privat_{%Y}_Ossweil.ics\"\n },\n \"Esslingen, Bahnhof\": {\n \"url\": \"https://api.abfall.io/?kh=DaA02103019b46345f1998698563DaAd&t=ics&s=1a862df26f6943997cef90233877a4fe\"\n },\n \"Test File\": {\n # Path is used here to allow to call the Source from any location.\n # This is not required in a yaml configuration!\n \"file\": str(Path(__file__).resolve().parents[1].joinpath(\"test/test.ics\"))\n },\n \"Test File (recurring)\": {\n # Path is used here to allow to call the Source from any location.\n # This is not required in a yaml configuration!\n \"file\": str(Path(__file__).resolve().parents[1].joinpath(\"test/recurring.ics\"))\n },\n \"M\u00fcnchen, Bahnstr. 11\": {\n \"url\": \"https://www.awm-muenchen.de/entsorgen/abfuhrkalender?tx_awmabfuhrkalender_abfuhrkalender%5Bhausnummer%5D=11&tx_awmabfuhrkalender_abfuhrkalender%5Bleerungszyklus%5D%5BB%5D=1%2F2%3BU&tx_awmabfuhrkalender_abfuhrkalender%5Bleerungszyklus%5D%5BP%5D=1%2F2%3BG&tx_awmabfuhrkalender_abfuhrkalender%5Bleerungszyklus%5D%5BR%5D=001%3BU&tx_awmabfuhrkalender_abfuhrkalender%5Bsection%5D=ics&tx_awmabfuhrkalender_abfuhrkalender%5Bsinglestandplatz%5D=false&tx_awmabfuhrkalender_abfuhrkalender%5Bstandplatzwahl%5D=true&tx_awmabfuhrkalender_abfuhrkalender%5Bstellplatz%5D%5Bbio%5D=70024507&tx_awmabfuhrkalender_abfuhrkalender%5Bstellplatz%5D%5Bpapier%5D=70024507&tx_awmabfuhrkalender_abfuhrkalender%5Bstellplatz%5D%5Brestmuell%5D=70024507&tx_awmabfuhrkalender_abfuhrkalender%5Bstrasse%5D=bahnstr.&tx_awmabfuhrkalender_abfuhrkalender%5Byear%5D={%Y}\",\n \"version\": 1,\n },\n \"Buxtehude, Am Berg\": {\n \"url\": \"https://abfall.landkreis-stade.de/api_v2/collection_dates/1/ort/10/strasse/90/hausnummern/1/abfallarten/R02-R04-B02-D04-D12-P04-R12-R14-W0-R22-R24-R31/kalender.ics\"\n },\n # \"Hausm\u00fcllinfo: ASR Chemnitz\": {\n # \"url\": \"https://asc.hausmuell.info/ics/ics.php\",\n # \"method\": \"POST\",\n # \"params\": {\n # \"hidden_id_egebiet\": 439087,\n # \"input_ort\": \"Chemnitz\",\n # \"input_str\": \"Stra\u00dfe der Nationen\",\n # \"input_hnr\": 2,\n # \"hidden_send_btn\": \"ics\",\n # # \"hiddenYear\": 2021,\n # \"hidden_id_ort\": 10,\n # \"hidden_id_ortsteil\": 0,\n # \"hidden_id_str\": 17814,\n # \"hidden_id_hnr\": 5538100,\n # \"hidden_kalenderart\": \"privat\",\n # \"showBinsBio\": \"on\",\n # \"showBinsRest\": \"on\",\n # \"showBinsRest_rc\": \"on\",\n # \"showBinsPapier\": \"on\",\n # \"showBinsOrganic\": \"on\",\n # \"showBinsXmas\": \"on\",\n # \"showBinsDsd\": \"on\",\n # \"showBinsProb\": \"on\",\n # },\n # \"year_field\": \"hiddenYear\",\n # },\n \"Abfall Zollernalbkreis, Ebingen\": {\n \"url\": \"https://www.abfallkalender-zak.de\",\n \"params\": {\n \"city\": \"2,3,4\",\n \"street\": \"3\",\n \"types[]\": [\n \"restmuell\",\n \"gelbersack\",\n \"papiertonne\",\n \"biomuell\",\n \"gruenabfall\",\n \"schadstoffsammlung\",\n \"altpapiersammlung\",\n \"schrottsammlung\",\n \"weihnachtsbaeume\",\n \"elektrosammlung\",\n ],\n \"go_ics\": \"Download\",\n },\n \"year_field\": \"year\",\n },\n \"Detmold\": {\n \"url\": \"https://abfuhrkalender.detmold.de/icsmaker.php\",\n \"method\": \"GET\",\n \"params\": {\"strid\": 
338},\n \"year_field\": \"year\",\n },\n \"EAW Rheingau Taunus\": {\n \"url\": \"https://www.eaw-rheingau-taunus.de/abfallsammlung/abfuhrtermine/feed.ics?tx_vierwdeaw_garbagecalendarics%5Baction%5D=ics&tx_vierwdeaw_garbagecalendarics%5Bcontroller%5D=GarbageCalendar&tx_vierwdeaw_garbagecalendarics%5Bstreet%5D=38\",\n \"split_at\": \",\",\n },\n \"Recollect, Ottawa\": {\n \"url\": \"https://recollect.a.ssl.fastly.net/api/places/BCCDF30E-578B-11E4-AD38-5839C200407A/services/208/events.en.ics\",\n \"split_at\": \"\\\\, [and ]*\",\n },\n \"Frankfurt am Main, Achenbachstrasse 3\": {\n \"url\": \"https://www.fes-frankfurt.de/abfallkalender/QWNoZW5iYWNoc3RyLnwzfDYwNTk2.ics\"\n },\n \"Erlensee, Am Haspel\": {\n \"url\": \"https://sperrmuell.erlensee.de/?type=reminder\",\n \"method\": \"POST\",\n \"params\": {\n \"street\": 8,\n \"eventType[]\": [27, 23, 19, 20, 21, 24, 22, 25, 26],\n \"timeframe\": 23,\n \"download\": \"ical\",\n },\n },\n}\n\n\nHEADERS = {\"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64)\"}\n_LOGGER = logging.getLogger(__name__)\n\n\nclass Source:\n def __init__(\n self,\n url=None,\n file=None,\n offset=None,\n params=None,\n year_field=None,\n method=\"GET\",\n regex=None,\n split_at=None,\n version=2,\n verify_ssl=True,\n ):\n self._url = url\n self._file = file\n if bool(self._url is not None) == bool(self._file is not None):\n raise RuntimeError(\"Specify either url or file\")\n if version == 1:\n self._ics = ICS_v1(offset=offset, split_at=split_at, regex=regex)\n else:\n self._ics = ICS(offset=offset, split_at=split_at, regex=regex)\n self._params = params\n self._year_field = year_field # replace this field in params with current year\n self._method = method # The method to send the params\n self._verify_ssl = verify_ssl\n\n def fetch(self):\n if self._url is not None:\n if \"{%Y}\" in self._url or self._year_field is not None:\n # url contains wildcard or params contains year field\n now = datetime.datetime.now()\n\n # replace year in url\n url = self._url.replace(\"{%Y}\", str(now.year))\n\n # replace year in params\n if self._year_field is not None:\n if self._params is None:\n raise RuntimeError(\"year_field specified without params\")\n self._params[self._year_field] = str(now.year)\n\n entries = self.fetch_url(url, self._params)\n\n if now.month == 12:\n # also get data for next year if we are already in december\n url = self._url.replace(\"{%Y}\", str(now.year + 1))\n if self._year_field is not None:\n self._params[self._year_field] = str(now.year + 1)\n\n try:\n entries.extend(self.fetch_url(url, self._params))\n except Exception:\n # ignore if fetch for next year fails\n pass\n return entries\n else:\n return self.fetch_url(self._url, self._params)\n elif self._file is not None:\n return self.fetch_file(self._file)\n\n def fetch_url(self, url, params=None):\n # get ics file\n if self._method == \"GET\":\n r = requests.get(\n url, params=params, headers=HEADERS, verify=self._verify_ssl\n )\n elif self._method == \"POST\":\n r = requests.post(\n url, data=params, headers=HEADERS, verify=self._verify_ssl\n )\n else:\n raise RuntimeError(\n \"Error: unknown method to fetch URL, use GET or POST; got {self._method}\"\n )\n r.raise_for_status()\n\n r.encoding = \"utf-8\" # requests doesn't guess the encoding correctly\n return self._convert(r.text)\n\n def fetch_file(self, file):\n try:\n f = open(file)\n except FileNotFoundError as e:\n _LOGGER.error(f\"Working directory: '{getcwd()}'\")\n raise\n return self._convert(f.read())\n\n def _convert(self, data):\n 
dates = self._ics.convert(data)\n\n entries = []\n for d in dates:\n entries.append(Collection(d[0], d[1]))\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py"}]}
3,928
629