Dataset Viewer
Auto-converted to Parquet

Column             Type            Range / distinct values
-----------------  --------------  ------------------------------------------
repo               string          lengths 10 to 49
pull_number        int64           1 to 1.11k
instance_id        string          lengths 14 to 53
issue_numbers      string          lengths 5 to 18
base_commit        string          lengths 40 to 40
patch              string          lengths 157 to 698k
test_patch         string          lengths 95 to 589k
problem_statement  string          lengths 10 to 8.34k
hints_text         string          181 distinct values
created_at         timestamp[us]   2012-09-08 14:57:53 to 2025-04-20 02:55:45
version            string          1 distinct value
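For context, rows with this schema are typically read with the Hugging Face `datasets` library. The dataset's repo id is not shown on this page, so the name below is a placeholder:

```python
from datasets import load_dataset

# "<namespace>/<dataset>" is a placeholder -- the real repo id is not shown here.
ds = load_dataset("<namespace>/<dataset>", split="train")

row = ds[0]
print(row["instance_id"])  # e.g. "maykinmedia__django-log-outgoing-requests-35"
print(row["base_commit"])  # the 40-character commit SHA that `patch` applies to
```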
repo:          maykinmedia/django-log-outgoing-requests
pull_number:   35
instance_id:   maykinmedia__django-log-outgoing-requests-35
issue_numbers: ['34']
base_commit:   978b5938099a53ea7e452e9c957883aa66144dd6

patch:
diff --git a/log_outgoing_requests/handlers.py b/log_outgoing_requests/handlers.py
index 4dbbca4..697b811 100644
--- a/log_outgoing_requests/handlers.py
+++ b/log_outgoing_requests/handlers.py
@@ -92,14 +92,18 @@ def emit(self, record: AnyLogRecord):
             "url": request.url if request else "(unknown)",
             "hostname": parsed_url.netloc if parsed_url else "(unknown)",
             "params": parsed_url.params if parsed_url else "(unknown)",
-            "status_code": response.status_code if response else None,
+            "status_code": response.status_code if response is not None else None,
             "method": request.method if request else "(unknown)",
             "timestamp": timestamp,
-            "response_ms": int(response.elapsed.total_seconds() * 1000)
-            if response
-            else 0,
+            "response_ms": (
+                int(response.elapsed.total_seconds() * 1000)
+                if response is not None
+                else 0
+            ),
             "req_headers": self.format_headers(scrubbed_req_headers),
-            "res_headers": self.format_headers(response.headers if response else {}),
+            "res_headers": self.format_headers(
+                response.headers if response is not None else {}
+            ),
             "trace": "\n".join(format_exception(exception)) if exception else "",
         }

@@ -121,7 +125,7 @@ def emit(self, record: AnyLogRecord):

         # check response
         if (
-            response
+            response is not None
             and (
                 processed_response_body := process_body(response, config)
             ).allow_saving_to_db
diff --git a/log_outgoing_requests/utils.py b/log_outgoing_requests/utils.py
index be00f5b..6663613 100644
--- a/log_outgoing_requests/utils.py
+++ b/log_outgoing_requests/utils.py
@@ -113,9 +113,9 @@ def check_content_type(content_type: str) -> bool:
     For patterns containing a wildcard ("text/*"), check if `content_type.pattern`
     is a substring of any pattern contained in the list.
     """
-    allowed_content_types: Iterable[
-        ContentType
-    ] = settings.LOG_OUTGOING_REQUESTS_CONTENT_TYPES
+    allowed_content_types: Iterable[ContentType] = (
+        settings.LOG_OUTGOING_REQUESTS_CONTENT_TYPES
+    )
     regular_patterns = [
         item.pattern for item in allowed_content_types if not item.pattern.endswith("*")
     ]
@@ -133,9 +133,9 @@ def get_default_encoding(content_type_pattern: str) -> str:
     """
     Get the default encoding for the `ContentType` with the associated pattern.
     """
-    allowed_content_types: Iterable[
-        ContentType
-    ] = settings.LOG_OUTGOING_REQUESTS_CONTENT_TYPES
+    allowed_content_types: Iterable[ContentType] = (
+        settings.LOG_OUTGOING_REQUESTS_CONTENT_TYPES
+    )
     regular_types = [
         item for item in allowed_content_types if not item.pattern.endswith("*")

test_patch:
diff --git a/tests/conftest.py b/tests/conftest.py
index 0cf6fba..84041c1 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -69,6 +69,25 @@ def request_mock_kwargs():
     }


[email protected]
+def request_mock_kwargs_error():
+    return {
+        "url": "http://example.com:8000/some-path-that-doesnt-exist?version=2.0",
+        "status_code": 404,
+        "content": b"404 Not Found",
+        "request_headers": {
+            "Authorization": "test",
+            "Content-Type": "application/json",
+            "Content-Length": "24",
+        },
+        "headers": {
+            "Date": "Tue, 21 Mar 2023 15:24:08 GMT",
+            "Content-Type": "text/plain",
+            "Content-Length": "13",
+        },
+    }
+
+
 @pytest.fixture
 def request_mock_kwargs_binary():
     return {
diff --git a/tests/test_logging.py b/tests/test_logging.py
index 9c62774..46030c0 100644
--- a/tests/test_logging.py
+++ b/tests/test_logging.py
@@ -1,6 +1,8 @@
 """Integration tests for the core functionality of the library"""

+import datetime
 import logging
+from unittest.mock import patch

 import pytest
 import requests
@@ -10,6 +12,11 @@
 from log_outgoing_requests.models import OutgoingRequestsLog


+def set_elapsed(response, *args, **kwargs):
+    response.elapsed = datetime.timedelta(seconds=2)
+    return response
+
+
 #
 # Local pytest fixtures
 #
@@ -111,6 +118,53 @@ def test_data_is_saved(request_mock_kwargs, request_variants, expected_headers):
         assert request_log.res_body_encoding == "utf-8"


[email protected]_db
+@freeze_time("2021-10-18 13:00:00")
+def test_data_is_saved_for_error_response(
+    request_mock_kwargs_error, request_variants, expected_headers
+):
+    for method, request_func, request_mock in request_variants:
+        request_mock(**request_mock_kwargs_error)
+        with patch(
+            "requests.sessions.default_hooks", return_value={"response": [set_elapsed]}
+        ):
+            response = request_func(
+                request_mock_kwargs_error["url"],
+                headers=request_mock_kwargs_error["request_headers"],
+                json={"test": "request data"},
+            )
+
+        assert response.status_code == 404
+
+        request_log = OutgoingRequestsLog.objects.last()
+
+        assert request_log.method == method
+        assert request_log.status_code == 404
+        assert request_log.hostname == "example.com:8000"
+        assert request_log.params == ""
+        assert request_log.query_params == "version=2.0"
+        assert request_log.response_ms == 2000
+        assert request_log.trace == ""
+        assert str(request_log) == "example.com:8000 at 2021-10-18 13:00:00+00:00"
+        assert (
+            request_log.timestamp.strftime("%Y-%m-%d %H:%M:%S") == "2021-10-18 13:00:00"
+        )
+        # headers
+        assert request_log.req_headers == expected_headers
+        assert (
+            request_log.res_headers == "Date: Tue, 21 Mar 2023 15:24:08 GMT\n"
+            "Content-Type: text/plain\nContent-Length: 13"
+        )
+        # request body
+        assert request_log.req_content_type == "application/json"
+        assert bytes(request_log.req_body) == b'{"test": "request data"}'
+        assert request_log.req_body_encoding == "utf-8"
+        # response body
+        assert request_log.res_content_type == "text/plain"
+        assert bytes(request_log.res_body) == b"404 Not Found"
+        assert request_log.res_body_encoding == "utf-8"
+
+
 #
 # test decoding of binary content
 #
problem_statement:
Certain response values are not logged for responses with an error status code

The handler sets certain values (such as `status_code` and `response_ms`) only after checking whether the `response` is truthy, but error responses evaluate as `False`.

Code: https://github.com/maykinmedia/django-log-outgoing-requests/blob/main/log_outgoing_requests/handlers.py#L95

```
(Pdb) response
<Response [404]>
(Pdb) bool(response)
False
```
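This truthiness pitfall is standard `requests` behavior: `Response.__bool__` delegates to `Response.ok`, which is `False` for any 4xx/5xx status code. A minimal sketch, constructing a bare `Response` purely for illustration:

```python
from requests.models import Response

response = Response()
response.status_code = 404

# Response.__bool__ returns Response.ok, which is False for 4xx/5xx
# statuses, so `if response:` wrongly skips the logging branch for errors.
assert bool(response) is False

# The guard adopted by the patch distinguishes "no response" from
# "error response":
assert response is not None
```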
created_at: 2024-02-08T15:24:25
version:    -1.0
repo:          DiamondLightSource/ispyb-api
pull_number:   73
instance_id:   DiamondLightSource__ispyb-api-73
issue_numbers: ['68']
base_commit:   7be86ec8aff0d1a2aecdb99efeb858f16992bff4

patch:
diff --git a/ispyb/model/__init__.py b/ispyb/model/__init__.py
index c75c2f60..4f6d1ec3 100644
--- a/ispyb/model/__init__.py
+++ b/ispyb/model/__init__.py
@@ -7,9 +7,6 @@ class DBCache(object):
      in self._data. Cached data should be accessed as self._data.
      On first uncached access reload() is called.'''

-  def __init__(self):
-    '''Data has not yet been loaded from the database.'''
-
   def load(self):
     '''Ensure data is loaded from the database.'''
     if not self.cached:
@@ -95,6 +92,10 @@ def __nonzero__(self):
     '''Python 2: value when used in bool() context.'''
     return bool(self._value)

+  def __hash__(self):
+    '''Pass on the hash value of the inner object.'''
+    return hash(self._value)
+

 def add_properties(objectclass, property_list):
   '''Generate class properties for a model that provide read-only access
diff --git a/ispyb/model/datacollection.py b/ispyb/model/datacollection.py
index 861d3bcf..d701c651 100644
--- a/ispyb/model/datacollection.py
+++ b/ispyb/model/datacollection.py
@@ -29,7 +29,7 @@ def __init__(self, dcid, db_area, preload=None):

   def reload(self):
     '''Load/update information from the database.'''
-    self._data = self._db.retrieve_data_collection_main(self._dcid)[0]
+    self._data = self._db.retrieve_data_collection(self._dcid)[0]

   @property
   def dcid(self):
@@ -98,15 +98,26 @@ def __str__(self):
       ))).format(self)

 ispyb.model.add_properties(DataCollection, (
+    ('comment', 'comments', 'A free comment field for the data collection.'),
     ('dcgid', 'groupId', 'Returns the Data Collection Group ID associated with this data collection. '
                          'You can use .group to get the data collection group model object instead'),
+    ('detector_distance', 'detectorDistance', 'Distance from the sample to the detector in mm'),
+    ('detector_2theta', 'detector2Theta', '2Theta angle between the main beam and the detector normal in degrees'),
     ('file_template', 'fileTemplate', 'Template for file names with the character \'#\' standing in for image number digits.'),
     ('file_directory', 'imgDir', 'Fully qualified path to the image files'),
     ('time_start', 'startTime', None),
+    ('time_exposure', 'exposureTime', 'Exposure time per frame in seconds'),
     ('time_end', 'endTime', None),
     ('image_count', 'noImages', None),
     ('image_start_number', 'startImgNumber', None),
+    ('resolution', 'resolution', 'Inscribed resolution circle in Angstrom. Currently only well-defined for data collections with 2theta=0'),
     ('status', 'status', 'Returns a string representing the current data collection status.'),
+    ('snapshot1', 'snapshot1', 'One of four possible fields to store file paths to image files relating to the data collection'),
+    ('snapshot2', 'snapshot2', 'One of four possible fields to store file paths to image files relating to the data collection'),
+    ('snapshot3', 'snapshot3', 'One of four possible fields to store file paths to image files relating to the data collection'),
+    ('snapshot4', 'snapshot4', 'One of four possible fields to store file paths to image files relating to the data collection'),
+    ('transmission', 'transmission', 'Beam transmission, in per cent'),
+    ('wavelength', 'wavelength', 'Beam wavelength in Angstrom'),
 ))

test_patch:
diff --git a/tests/model/test_datacollection.py b/tests/model/test_datacollection.py
index 9efe3efc..3cea6d4e 100644
--- a/tests/model/test_datacollection.py
+++ b/tests/model/test_datacollection.py
@@ -4,72 +4,110 @@
 import mock
 import pytest

+
 def test_datacollection_model_retrieves_database_records():
-  db, record = mock.Mock(), mock.Mock()
-  db.retrieve_data_collection_main.return_value = [record]
+    db, record = mock.Mock(), mock.Mock()
+    db.retrieve_data_collection.return_value = [record]

-  dc = ispyb.model.datacollection.DataCollection(1234, db)
-  assert not db.retrieve_data_collection_main.called
-  assert '1234' in str(dc)
-  assert '1234' in repr(dc)
-  assert 'uncached' in repr(dc)
+    dc = ispyb.model.datacollection.DataCollection(1234, db)
+    assert not db.retrieve_data_collection.called
+    assert "1234" in str(dc)
+    assert "1234" in repr(dc)
+    assert "uncached" in repr(dc)

-  dc.load()
-  db.retrieve_data_collection_main.assert_called_once_with(1234)
-  assert dc._data == record
-  assert '1234' in repr(dc)
-  assert 'cached' in repr(dc) and 'uncached' not in repr(dc)
+    dc.load()
+    db.retrieve_data_collection.assert_called_once_with(1234)
+    assert dc._data == record
+    assert "1234" in repr(dc)
+    assert "cached" in repr(dc) and "uncached" not in repr(dc)

-  # Test caching behaviour
-  dc.load()
-  db.retrieve_data_collection_main.assert_called_once()
+    # Test caching behaviour
+    dc.load()
+    db.retrieve_data_collection.assert_called_once()


 def test_datacollection_model_accepts_preloading():
-  db, record = mock.Mock(), mock.Mock()
+    db, record = mock.Mock(), mock.Mock()

-  dc = ispyb.model.datacollection.DataCollection(1234, db, preload=record)
-  assert dc._data == record
+    dc = ispyb.model.datacollection.DataCollection(1234, db, preload=record)
+    assert dc._data == record

-  dc.load()
-  assert not db.retrieve_data_collection_main.called
+    dc.load()
+    assert not db.retrieve_data_collection.called


 database_column_to_attribute_name = {
-    "groupId": None,
-    "detectorId": None,
+    "apertureSizeX": None,
+    "axisEnd": None,
+    "axisRange": None,
+    "axisStart": None,
+    "beamSizeAtSampleX": None,
+    "beamSizeAtSampleY": None,
+    "bestWilsonPlotPath": None,
     "blSubSampleId": None,
+    "chiStart": None,
+    "comments": "comment",
     "dcNumber": None,
-    "startTime": "time_start",
+    "detector2Theta": "detector_2theta",
+    "detectorDistance": "detector_distance",
+    "detectorId": None,
     "endTime": "time_end",
-    "status": None,
-    "noImages": "image_count",
-    "startImgNumber": "image_start_number",
-    "noPasses": None,
+    "exposureTime": "time_exposure",
+    "fileTemplate": "file_template",
+    "flux": None,
+    "fluxEnd": None,
+    "focalSpotSizeAtSampleX": None,
+    "focalSpotSizeAtSampleY": None,
+    "groupId": None,
+    "imgContainerSubPath": None,
     "imgDir": None,
     "imgPrefix": None,
     "imgSuffix": None,
-    "fileTemplate": None,
-    "snapshot1": None,
-    "snapshot2": None,
-    "snapshot3": None,
-    "snapshot4": None,
-    "comments": None,
-}
-record = {
-    k: getattr(mock.sentinel, k)
-    for k in database_column_to_attribute_name
+    "kappaStart": None,
+    "noImages": "image_count",
+    "noPasses": None,
+    "omegaStart": None,
+    "overlap": None,
+    "phiStart": None,
+    "resolution": "resolution",
+    "resolutionAtCorner": None,
+    "rotationAxis": None,
+    "slitGapHorizontal": None,
+    "slitGapVertical": None,
+    "snapshot1": "snapshot1",
+    "snapshot2": "snapshot2",
+    "snapshot3": "snapshot3",
+    "snapshot4": "snapshot4",
+    "startImgNumber": "image_start_number",
+    "startTime": "time_start",
+    "status": "status",
+    "synchrotronMode": None,
+    "transmission": "transmission",
+    "undulatorGap1": None,
+    "undulatorGap2": None,
+    "undulatorGap3": None,
+    "wavelength": "wavelength",
+    "xBeam": None,
+    "yBeam": None,
 }
+record = {k: getattr(mock.sentinel, k) for k in database_column_to_attribute_name}
 record["imgDir"] = "/path/to/some/images/"
 record["fileTemplate"] = "file_####.cbf"

[email protected]('column,attribute', filter(lambda ca: ca[1], database_column_to_attribute_name.items()))
+
[email protected](
+    "column,attribute",
+    filter(lambda ca: ca[1], database_column_to_attribute_name.items()),
+)
 def test_datacollection_model_attributes_return_correct_values(column, attribute):
-  dc = ispyb.model.datacollection.DataCollection(1234, None, preload=record)
-  assert getattr(dc, attribute) == record[column]
+    dc = ispyb.model.datacollection.DataCollection(1234, None, preload=record)
+    assert getattr(dc, attribute) == record[column]
+

[email protected]('printed_attribute', ('startTime', 'endTime', 'imgDir', 'fileTemplate'))
[email protected](
+    "printed_attribute", ("startTime", "endTime", "imgDir", "fileTemplate")
+)
 def test_pretty_printing_datacollection_shows_attribute(printed_attribute):
-  dc_str = str(ispyb.model.datacollection.DataCollection(1234, None, preload=record))
-  assert "1234" in dc_str
-  assert str(record[printed_attribute]) in dc_str
+    dc_str = str(ispyb.model.datacollection.DataCollection(1234, None, preload=record))
+    assert "1234" in dc_str
+    assert str(record[printed_attribute]) in dc_str
problem_statement:
Make extended data collection information accessible via DCID model

Currently it is not possible to access e.g. the resolution column of a data collection when given the DCID.
hints_text:
Yes, I think that calls for a new stored procedure. The `retrieve_dc_main_v2` is meant to retrieve only a certain subset of the columns, as we also have `upsert_dc_main_v2` that upserts the same columns, and `update_dc_experiment_v2` which updates the "experiment" columns, which include resolution.

I will write a new stored procedure that retrieves all the columns (except deprecated ones and other undesirables).

OK, I've made a PR for this: https://github.com/DiamondLightSource/ispyb-api/pull/71

Thanks, I'll pick it up from here.
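For illustration, a sketch of what the patch enables: the new `add_properties` entries expose the extra columns as read-only attributes on the model. The preloaded record below is a made-up stand-in (mirroring the library's own tests) so no database connection is needed:

```python
import ispyb.model.datacollection

# Hypothetical record values, preloaded so no DB connection is required.
record = {"resolution": 1.8, "wavelength": 0.9762, "transmission": 100.0}
dc = ispyb.model.datacollection.DataCollection(1234, None, preload=record)

print(dc.resolution)    # inscribed resolution circle, in Angstrom
print(dc.wavelength)    # beam wavelength in Angstrom
print(dc.transmission)  # beam transmission, in per cent
```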
created_at: 2019-04-16T11:53:05
version:    -1.0
repo:          maykinmedia/django-log-outgoing-requests
pull_number:   4
instance_id:   maykinmedia__django-log-outgoing-requests-4
issue_numbers: ['2']
base_commit:   32d109c0de5b8adf99b58653de3a5420c6917f68

patch:
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4e1c27c..92204c8 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -15,11 +15,8 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python: ["3.7", "3.8", "3.9", "3.10"] + python: ["3.8", "3.9", "3.10"] django: ["3.2", "4.1"] - exclude: - - python: "3.7" - django: "4.1" name: Run the test suite (Python ${{ matrix.python }}, Django ${{ matrix.django }}) diff --git a/README.rst b/README.rst index 2ad5883..f8b9f5c 100644 --- a/README.rst +++ b/README.rst @@ -97,6 +97,17 @@ To use this with your project you need to follow these steps: } LOG_OUTGOING_REQUESTS_DB_SAVE = True # save logs enabled/disabled based on the boolean value + LOG_OUTGOING_REQUESTS_DB_SAVE_BODY = True # save request/response body + LOG_OUTGOING_REQUESTS_EMIT_BODY = True # log request/response body + LOG_OUTGOING_REQUESTS_CONTENT_TYPES = [ + "text/*", + "application/json", + "application/xml", + "application/soap+xml", + ] # save request/response bodies with matching content type + LOG_OUTGOING_REQUESTS_MAX_CONTENT_LENGTH = 524_288 # maximal size (in bytes) for the request/response body + LOG_OUTGOING_REQUESTS_LOG_BODY_TO_STDOUT = True + #. Run the migrations @@ -112,8 +123,9 @@ To use this with your project you need to follow these steps: res = requests.get("https://httpbin.org/json") print(res.json()) -#. Check stdout for the printable output, and navigate to ``/admin/log_outgoing_requests/outgoingrequestslog/`` to see - the saved log records +#. Check stdout for the printable output, and navigate to ``Admin > Miscellaneous > Outgoing Requests Logs`` + to see the saved log records. In order to override the settings for saving logs, navigate to + ``Admin > Miscellaneous > Outgoing Requests Log Configuration``. Local development diff --git a/docs/quickstart.rst b/docs/quickstart.rst index d1fabcb..24c7b9e 100644 --- a/docs/quickstart.rst +++ b/docs/quickstart.rst @@ -58,6 +58,17 @@ Installation } LOG_OUTGOING_REQUESTS_DB_SAVE = True # save logs enabled/disabled based on the boolean value + LOG_OUTGOING_REQUESTS_DB_SAVE_BODY = True # save request/response body + LOG_OUTGOING_REQUESTS_EMIT_BODY = True # log request/response body + LOG_OUTGOING_REQUESTS_CONTENT_TYPES = [ + "text/*", + "application/json", + "application/xml", + "application/soap+xml", + ] # save request/response bodies with matching content type + LOG_OUTGOING_REQUESTS_MAX_CONTENT_LENGTH = 524_288 # maximal size (in bytes) for the request/response body + LOG_OUTGOING_REQUESTS_LOG_BODY_TO_STDOUT = True + #. Run ``python manage.py migrate`` to create the necessary database tables. 
diff --git a/log_outgoing_requests/admin.py b/log_outgoing_requests/admin.py index fdc9531..bb536a7 100644 --- a/log_outgoing_requests/admin.py +++ b/log_outgoing_requests/admin.py @@ -1,7 +1,11 @@ +from django import forms +from django.conf import settings from django.contrib import admin from django.utils.translation import gettext as _ -from .models import OutgoingRequestsLog +from solo.admin import SingletonModelAdmin + +from .models import OutgoingRequestsLog, OutgoingRequestsLogConfig @admin.register(OutgoingRequestsLog) @@ -31,10 +35,16 @@ class OutgoingRequestsLogAdmin(admin.ModelAdmin): "response_ms", "timestamp", ) - list_filter = ("method", "status_code", "hostname") + list_filter = ("method", "timestamp", "status_code", "hostname") search_fields = ("url", "params", "hostname") date_hierarchy = "timestamp" show_full_result_count = False + change_form_template = "log_outgoing_requests/change_form.html" + + class Media: + css = { + "all": ("log_outgoing_requests/css/admin.css",), + } def has_add_permission(self, request): return False @@ -42,7 +52,26 @@ def has_add_permission(self, request): def has_change_permission(self, request, obj=None): return False + @admin.display(description=_("Query parameters")) def query_params(self, obj): return obj.query_params - query_params.short_description = _("Query parameters") + +class ConfigAdminForm(forms.ModelForm): + class Meta: + model = OutgoingRequestsLogConfig + fields = "__all__" + help_texts = { + "save_to_db": _( + "Whether request logs should be saved to the database (default: {default})." + ).format(default=settings.LOG_OUTGOING_REQUESTS_DB_SAVE), + "save_body": _( + "Whether the body of the request and response should be logged (default: " + "{default})." + ).format(default=settings.LOG_OUTGOING_REQUESTS_DB_SAVE_BODY), + } + + [email protected](OutgoingRequestsLogConfig) +class OutgoingRequestsLogConfigAdmin(SingletonModelAdmin): + form = ConfigAdminForm diff --git a/log_outgoing_requests/compat.py b/log_outgoing_requests/compat.py new file mode 100644 index 0000000..2992046 --- /dev/null +++ b/log_outgoing_requests/compat.py @@ -0,0 +1,60 @@ +import django + +# Taken from djangorestframework, see +# https://github.com/encode/django-rest-framework/blob/376a5cbbba3f8df9c9db8c03a7c8fa2a6e6c05f4/rest_framework/compat.py#LL156C1-L177C10 +# +# License: +# +# Copyright © 2011-present, [Encode OSS Ltd](https://www.encode.io/). +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +if django.VERSION >= (4, 2): + # Django 4.2+: use the stock parse_header_parameters function + # Note: Django 4.1 also has an implementation of parse_header_parameters + # which is slightly different from the one in 4.2, it needs + # the compatibility shim as well. + from django.utils.http import parse_header_parameters # type: ignore +else: + # Django <= 4.1: create a compatibility shim for parse_header_parameters + from django.http.multipartparser import parse_header + + def parse_header_parameters(line): + # parse_header works with bytes, but parse_header_parameters + # works with strings. Call encode to convert the line to bytes. + main_value_pair, params = parse_header(line.encode()) + return main_value_pair, { + # parse_header will convert *some* values to string. + # parse_header_parameters converts *all* values to string. + # Make sure all values are converted by calling decode on + # any remaining non-string values. + k: v if isinstance(v, str) else v.decode() + for k, v in params.items() + } + + +__all__ = ["parse_header_parameters"] diff --git a/log_outgoing_requests/constants.py b/log_outgoing_requests/constants.py new file mode 100644 index 0000000..87522f0 --- /dev/null +++ b/log_outgoing_requests/constants.py @@ -0,0 +1,8 @@ +from django.db import models +from django.utils.translation import gettext_lazy as _ + + +class SaveLogsChoice(models.TextChoices): + use_default = "use_default", _("Use default") + yes = "yes", _("Yes") + no = "no", _("No") diff --git a/log_outgoing_requests/datastructures.py b/log_outgoing_requests/datastructures.py new file mode 100644 index 0000000..136b69a --- /dev/null +++ b/log_outgoing_requests/datastructures.py @@ -0,0 +1,27 @@ +""" +Datastructure(s) for use in settings.py + +Note: do not place any Django-specific imports in this file, as +it must be imported in settings.py. 
+""" + +from dataclasses import dataclass +from typing import Union + + +@dataclass +class ContentType: + """ + Data class for keeping track of content types and associated default encodings + """ + + pattern: str + default_encoding: str + + +@dataclass +class ProcessedBody: + allow_saving_to_db: bool + content: Union[bytes, str] + content_type: str + encoding: str diff --git a/log_outgoing_requests/formatters.py b/log_outgoing_requests/formatters.py index 8144286..047ad4a 100644 --- a/log_outgoing_requests/formatters.py +++ b/log_outgoing_requests/formatters.py @@ -1,30 +1,42 @@ import logging import textwrap +from django.conf import settings + class HttpFormatter(logging.Formatter): def _formatHeaders(self, d): return "\n".join(f"{k}: {v}" for k, v in d.items()) + def _formatBody(self, content: str, request_or_response: str) -> str: + if settings.LOG_OUTGOING_REQUESTS_EMIT_BODY: + return f"\n{request_or_response} body:\n{content}" + return "" + def formatMessage(self, record): result = super().formatMessage(record) - if record.name == "requests": - result += textwrap.dedent( - """ - ---------------- request ---------------- - {req.method} {req.url} - {reqhdrs} - ---------------- response ---------------- - {res.status_code} {res.reason} {res.url} - {reshdrs} + if record.name != "requests": + return result + result += textwrap.dedent( """ - ).format( - req=record.req, - res=record.res, - reqhdrs=self._formatHeaders(record.req.headers), - reshdrs=self._formatHeaders(record.res.headers), - ) + ---------------- request ---------------- + {req.method} {req.url} + {reqhdrs} {request_body} + + ---------------- response ---------------- + {res.status_code} {res.reason} {res.url} + {reshdrs} {response_body} + + """ + ).format( + req=record.req, + res=record.res, + reqhdrs=self._formatHeaders(record.req.headers), + reshdrs=self._formatHeaders(record.res.headers), + request_body=self._formatBody(record.req.body, "Request"), + response_body=self._formatBody(record.res.content, "Response"), + ) return result diff --git a/log_outgoing_requests/handlers.py b/log_outgoing_requests/handlers.py index 631f147..f067be2 100644 --- a/log_outgoing_requests/handlers.py +++ b/log_outgoing_requests/handlers.py @@ -1,44 +1,91 @@ +# NOTE: Avoid import Django specifics at the module level to prevent circular imports. +# The handler is loaded eagerly at django startup when configuring settings. 
import logging import traceback +from datetime import datetime +from logging import LogRecord +from typing import Union, cast from urllib.parse import urlparse -from django.conf import settings +from requests.models import PreparedRequest, Response + + +class RequestLogRecord(LogRecord): + requested_at: datetime + req: PreparedRequest + res: Response + + +AnyLogRecord = Union[LogRecord, RequestLogRecord] + + +def is_request_log_record(record: AnyLogRecord) -> bool: + attrs = ("requested_at", "req", "res") + if any(not hasattr(record, attr) for attr in attrs): + return False + return True class DatabaseOutgoingRequestsHandler(logging.Handler): - def emit(self, record): - if settings.LOG_OUTGOING_REQUESTS_DB_SAVE: - from .models import OutgoingRequestsLog - - trace = None - - # save only the requests coming from the library requests - if record and record.getMessage() == "Outgoing request": - safe_req_headers = record.req.headers.copy() - - if "Authorization" in safe_req_headers: - safe_req_headers["Authorization"] = "***hidden***" - - if record.exc_info: - trace = traceback.format_exc() - - parsed_url = urlparse(record.req.url) - kwargs = { - "url": record.req.url, - "hostname": parsed_url.hostname, - "params": parsed_url.params, - "status_code": record.res.status_code, - "method": record.req.method, - "req_content_type": record.req.headers.get("Content-Type", ""), - "res_content_type": record.res.headers.get("Content-Type", ""), - "timestamp": record.requested_at, - "response_ms": int(record.res.elapsed.total_seconds() * 1000), - "req_headers": self.format_headers(safe_req_headers), - "res_headers": self.format_headers(record.res.headers), - "trace": trace, - } - - OutgoingRequestsLog.objects.create(**kwargs) + def emit(self, record: AnyLogRecord): + from .models import OutgoingRequestsLog, OutgoingRequestsLogConfig + from .utils import process_body + + config = cast(OutgoingRequestsLogConfig, OutgoingRequestsLogConfig.get_solo()) + if not config.save_logs_enabled: + return + + # skip requests not coming from the library requests + if not record or not is_request_log_record(record): + return + # Typescript type predicates would be cool here :) + record = cast(RequestLogRecord, record) + + scrubbed_req_headers = record.req.headers.copy() + + if "Authorization" in scrubbed_req_headers: + scrubbed_req_headers["Authorization"] = "***hidden***" + + trace = traceback.format_exc() if record.exc_info else "" + + parsed_url = urlparse(record.req.url) + kwargs = { + "url": record.req.url, + "hostname": parsed_url.netloc, + "params": parsed_url.params, + "status_code": record.res.status_code, + "method": record.req.method, + "timestamp": record.requested_at, + "response_ms": int(record.res.elapsed.total_seconds() * 1000), + "req_headers": self.format_headers(scrubbed_req_headers), + "res_headers": self.format_headers(record.res.headers), + "trace": trace, + } + + if config.save_body_enabled: + # check request + processed_request_body = process_body(record.req, config) + if processed_request_body.allow_saving_to_db: + kwargs.update( + { + "req_content_type": processed_request_body.content_type, + "req_body": processed_request_body.content, + "req_body_encoding": processed_request_body.encoding, + } + ) + + # check response + processed_response_body = process_body(record.res, config) + if processed_response_body.allow_saving_to_db: + kwargs.update( + { + "res_content_type": processed_response_body.content_type, + "res_body": processed_response_body.content, + "res_body_encoding": 
processed_response_body.encoding, + } + ) + + OutgoingRequestsLog.objects.create(**kwargs) def format_headers(self, headers): return "\n".join(f"{k}: {v}" for k, v in headers.items()) diff --git a/log_outgoing_requests/log_requests.py b/log_outgoing_requests/log_requests.py index e32a513..f89dd79 100644 --- a/log_outgoing_requests/log_requests.py +++ b/log_outgoing_requests/log_requests.py @@ -20,16 +20,16 @@ def install_outgoing_requests_logging(): Log all outgoing requests which are made by the library requests during a session. """ - if hasattr(Session, "_original_request"): + if hasattr(Session, "_initial_request"): logger.debug( - "Session is already patched OR has an ``_original_request`` attribute." + "Session is already patched OR has an ``_initial_request`` attribute." ) return - Session._original_request = Session.request + Session._initial_request = Session.request # type: ignore def new_request(self, *args, **kwargs): self.hooks["response"].append(hook_requests_logging) - return self._original_request(*args, **kwargs) + return self._initial_request(*args, **kwargs) Session.request = new_request diff --git a/log_outgoing_requests/migrations/0002_outgoingrequestslogconfig_and_more.py b/log_outgoing_requests/migrations/0002_outgoingrequestslogconfig_and_more.py new file mode 100644 index 0000000..3c99adc --- /dev/null +++ b/log_outgoing_requests/migrations/0002_outgoingrequestslogconfig_and_more.py @@ -0,0 +1,179 @@ +# Generated by Django 3.2.1 on 2023-06-02 16:22 + +import django.core.validators +from django.db import migrations, models + +import log_outgoing_requests.models + + +class Migration(migrations.Migration): + dependencies = [ + ("log_outgoing_requests", "0001_initial"), + ] + + operations = [ + migrations.CreateModel( + name="OutgoingRequestsLogConfig", + fields=[ + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ( + "save_to_db", + models.CharField( + choices=[ + ("use_default", "Use default"), + ("yes", "Yes"), + ("no", "No"), + ], + default="use_default", + max_length=11, + verbose_name="Save logs to database", + ), + ), + ( + "save_body", + models.CharField( + choices=[ + ("use_default", "Use default"), + ("yes", "Yes"), + ("no", "No"), + ], + default="use_default", + max_length=11, + verbose_name="Save request + response body", + ), + ), + ( + "max_content_length", + models.IntegerField( + default=log_outgoing_requests.models.get_default_max_content_length, + help_text="The maximal size of the request/response content (in bytes). 
If 'Require content length' is not checked, this setting has no effect.", + validators=[django.core.validators.MinValueValidator(0)], + verbose_name="Maximal content size", + ), + ), + ], + options={ + "verbose_name": "Outgoing Requests Log Configuration", + }, + ), + migrations.AddField( + model_name="outgoingrequestslog", + name="req_body", + field=models.BinaryField( + default=b"", help_text="The request body.", verbose_name="Request body" + ), + ), + migrations.AddField( + model_name="outgoingrequestslog", + name="req_body_encoding", + field=models.CharField(default="", max_length=24), + ), + migrations.AddField( + model_name="outgoingrequestslog", + name="res_body", + field=models.BinaryField( + default=b"", + help_text="The response body.", + verbose_name="Response body", + ), + ), + migrations.AddField( + model_name="outgoingrequestslog", + name="res_body_encoding", + field=models.CharField(default="", max_length=24), + ), + migrations.AlterField( + model_name="outgoingrequestslog", + name="hostname", + field=models.CharField( + default="", + help_text="The netloc/hostname part of the url.", + max_length=255, + verbose_name="Hostname", + ), + ), + migrations.AlterField( + model_name="outgoingrequestslog", + name="method", + field=models.CharField( + blank=True, + help_text="The type of request method.", + max_length=10, + verbose_name="Method", + ), + ), + migrations.AlterField( + model_name="outgoingrequestslog", + name="req_content_type", + field=models.CharField( + default="", + help_text="The content type of the request.", + max_length=50, + verbose_name="Request content type", + ), + ), + migrations.AlterField( + model_name="outgoingrequestslog", + name="req_headers", + field=models.TextField( + default="", + help_text="The request headers.", + verbose_name="Request headers", + ), + ), + migrations.AlterField( + model_name="outgoingrequestslog", + name="res_content_type", + field=models.CharField( + default="", + help_text="The content type of the response.", + max_length=50, + verbose_name="Response content type", + ), + ), + migrations.AlterField( + model_name="outgoingrequestslog", + name="res_headers", + field=models.TextField( + default="", + help_text="The response headers.", + verbose_name="Response headers", + ), + ), + migrations.AlterField( + model_name="outgoingrequestslog", + name="response_ms", + field=models.PositiveIntegerField( + default=0, + help_text="This is the response time in ms.", + verbose_name="Response in ms", + ), + ), + migrations.AlterField( + model_name="outgoingrequestslog", + name="trace", + field=models.TextField( + default="", + help_text="Text providing information in case of request failure.", + verbose_name="Trace", + ), + ), + migrations.AlterField( + model_name="outgoingrequestslog", + name="url", + field=models.URLField( + default="", + help_text="The url of the outgoing request.", + max_length=1000, + verbose_name="URL", + ), + ), + ] diff --git a/log_outgoing_requests/models.py b/log_outgoing_requests/models.py index 215e481..81249cb 100644 --- a/log_outgoing_requests/models.py +++ b/log_outgoing_requests/models.py @@ -1,15 +1,24 @@ +import logging +from typing import Union from urllib.parse import urlparse +from django.conf import settings +from django.core.validators import MinValueValidator from django.db import models from django.utils.functional import cached_property from django.utils.translation import gettext_lazy as _ +from solo.models import SingletonModel # type: ignore + +from .constants import SaveLogsChoice + +logger = 
logging.getLogger(__name__) + class OutgoingRequestsLog(models.Model): url = models.URLField( verbose_name=_("URL"), max_length=1000, - blank=True, default="", help_text=_("The url of the outgoing request."), ) @@ -19,8 +28,7 @@ class OutgoingRequestsLog(models.Model): verbose_name=_("Hostname"), max_length=255, default="", - blank=True, - help_text=_("The hostname part of the url."), + help_text=_("The netloc/hostname part of the url."), ) params = models.TextField( verbose_name=_("Parameters"), @@ -36,7 +44,6 @@ class OutgoingRequestsLog(models.Model): method = models.CharField( verbose_name=_("Method"), max_length=10, - default="", blank=True, help_text=_("The type of request method."), ) @@ -44,32 +51,45 @@ class OutgoingRequestsLog(models.Model): verbose_name=_("Request content type"), max_length=50, default="", - blank=True, help_text=_("The content type of the request."), ) res_content_type = models.CharField( verbose_name=_("Response content type"), max_length=50, default="", - blank=True, help_text=_("The content type of the response."), ) req_headers = models.TextField( verbose_name=_("Request headers"), - blank=True, - null=True, + default="", help_text=_("The request headers."), ) res_headers = models.TextField( verbose_name=_("Response headers"), - blank=True, - null=True, + default="", help_text=_("The response headers."), ) + req_body = models.BinaryField( + verbose_name=_("Request body"), + default=b"", + help_text=_("The request body."), + ) + res_body = models.BinaryField( + verbose_name=_("Response body"), + default=b"", + help_text=_("The response body."), + ) + req_body_encoding = models.CharField( + max_length=24, + default="", + ) + res_body_encoding = models.CharField( + max_length=24, + default="", + ) response_ms = models.PositiveIntegerField( verbose_name=_("Response in ms"), default=0, - blank=True, help_text=_("This is the response time in ms."), ) timestamp = models.DateTimeField( @@ -78,8 +98,7 @@ class OutgoingRequestsLog(models.Model): ) trace = models.TextField( verbose_name=_("Trace"), - blank=True, - null=True, + default="", help_text=_("Text providing information in case of request failure."), ) @@ -99,3 +118,88 @@ def url_parsed(self): @property def query_params(self): return self.url_parsed.query + + def _decode_body(self, content: Union[bytes, memoryview], encoding: str) -> str: + """ + Decode body for use in template. + + If the stored encoding is not found (either because it is empty or because of + spelling errors etc.), we decode "blindly", replacing chars that could not be + decoded. + + Inspired on :meth:`requests.models.Response.text`, which is Apache 2.0 licensed. + """ + try: + return str(content, encoding, errors="replace") + except LookupError: + # A LookupError is raised if the encoding was not found which could + # indicate a misspelling or similar mistake. + return str(content, errors="replace") + + @cached_property + def request_body_decoded(self) -> str: + """ + Decoded request body for use in template. + """ + return self._decode_body(self.req_body, self.req_body_encoding) + + @cached_property + def response_body_decoded(self) -> str: + """ + Decoded response body for use in template. + """ + return self._decode_body(self.res_body, self.res_body_encoding) + + +def get_default_max_content_length(): + """ + Get default value for max content length from settings. 
+ """ + return settings.LOG_OUTGOING_REQUESTS_MAX_CONTENT_LENGTH + + +class OutgoingRequestsLogConfig(SingletonModel): + """Configuration options for request logging.""" + + save_to_db = models.CharField( + _("Save logs to database"), + max_length=11, + choices=SaveLogsChoice.choices, + default=SaveLogsChoice.use_default, + ) + save_body = models.CharField( + _("Save request + response body"), + max_length=11, + choices=SaveLogsChoice.choices, + default=SaveLogsChoice.use_default, + ) + max_content_length = models.IntegerField( + _("Maximal content size"), + validators=[MinValueValidator(0)], + default=get_default_max_content_length, + help_text=_( + "The maximal size of the request/response content (in bytes). " + "If 'Require content length' is not checked, this setting has no effect." + ), + ) + + @property + def save_logs_enabled(self): + """ + Use configuration option or settings to determine if logs should be saved. + """ + if self.save_to_db == SaveLogsChoice.use_default: + return settings.LOG_OUTGOING_REQUESTS_DB_SAVE + return self.save_to_db == SaveLogsChoice.yes + + @property + def save_body_enabled(self): + """ + Use configuration option or settings to determine if log bodies should be saved. + """ + if self.save_body == SaveLogsChoice.use_default: + return settings.LOG_OUTGOING_REQUESTS_DB_SAVE_BODY + return self.save_body == SaveLogsChoice.yes + + class Meta: + verbose_name = _("Outgoing Requests Log Configuration") diff --git a/log_outgoing_requests/static/log_outgoing_requests/css/admin.css b/log_outgoing_requests/static/log_outgoing_requests/css/admin.css new file mode 100644 index 0000000..5654b4d --- /dev/null +++ b/log_outgoing_requests/static/log_outgoing_requests/css/admin.css @@ -0,0 +1,9 @@ +.field-res_body { + display: inline-block; + width: var(--dlor-body-width, 40rem); + font-family: monospace; + line-height: 1.5rem; +} +input[name="max_content_length"] { + width: 7.25em; +} diff --git a/log_outgoing_requests/templates/log_outgoing_requests/change_form.html b/log_outgoing_requests/templates/log_outgoing_requests/change_form.html new file mode 100644 index 0000000..2bbabca --- /dev/null +++ b/log_outgoing_requests/templates/log_outgoing_requests/change_form.html @@ -0,0 +1,16 @@ +{% extends "admin/change_form.html" %} + +{% block after_field_sets %} +<div class="form-row field-req_body"> + <div> + <label>Request body</label> + <div class="request-body-decoded">{{ original.request_body_decoded }}</div> + </div> +</div> +<div class="form-row field-res_body"> + <div> + <label>Response body</label> + <div class="response-body-decoded">{{ original.response_body_decoded }}</div> + </div> +</div> +{% endblock %} diff --git a/log_outgoing_requests/utils.py b/log_outgoing_requests/utils.py new file mode 100644 index 0000000..80b6d38 --- /dev/null +++ b/log_outgoing_requests/utils.py @@ -0,0 +1,161 @@ +"""Tests for the utility functions""" + +import logging +from typing import Iterable, Tuple, Union + +from django.conf import settings + +from requests import PreparedRequest, Response + +from .compat import parse_header_parameters +from .datastructures import ContentType, ProcessedBody +from .models import OutgoingRequestsLogConfig + +logger = logging.getLogger(__name__) + +HttpObj = Union[PreparedRequest, Response] + + +def process_body(http_obj: HttpObj, config: OutgoingRequestsLogConfig) -> ProcessedBody: + """ + Process a request or response body by parsing the meta information. 
+ """ + content_type, encoding = parse_content_type_header(http_obj) + if not encoding: + encoding = get_default_encoding(content_type) + allow_persisting = check_content_type(content_type) and check_content_length( + http_obj, config=config + ) + content = _get_body(http_obj) if allow_persisting else b"" + return ProcessedBody( + allow_saving_to_db=allow_persisting, + content=content or b"", + content_type=content_type, + encoding=encoding, + ) + + +# +# Handler utilities +# +def _get_body(http_obj: HttpObj) -> Union[bytes, str, None]: + return http_obj.content if isinstance(http_obj, Response) else http_obj.body + + +def _get_content_length(http_obj: HttpObj) -> str: + """ + Try to determine the size of a request/response content. + + Try `Content-Length` header first. If not present, try to + determine the size by reading `len(body)`. The entire content + is thereby read into memory (the assumption being that the content + will eventually be consumed anyway). + """ + content_length = http_obj.headers.get("Content-Length", "") + + if not content_length: + body = _get_body(http_obj) + if body is not None: + content_length = str(len(body)) + + return content_length + + +def check_content_length( + http_obj: HttpObj, + config: "OutgoingRequestsLogConfig", +) -> bool: + """ + Check `content_length` against settings. + + If `content_length` could not be determined (i.e. `content_length` == ""), the test + passes with a warning. + """ + content_length = _get_content_length(http_obj) + + if not content_length: + # for logging: get netloc (response) or url (request) + target = getattr(http_obj, "netloc", "") or http_obj.url + logger.warning( + "Content length of the request/response (request netloc: %s) could not be determined." + % target + ) + return True + + max_content_length = config.max_content_length + + return int(content_length) <= max_content_length + + +def parse_content_type_header(http_obj: HttpObj) -> Tuple[str, str]: + """ + Wrapper around Django's `parse_header`. + + If a charset/encoding is found, we replace the representation of it with a string. + + :returns: a `tuple` (content_type, encoding) + """ + content_type_line = http_obj.headers.get("Content-Type", "") + if not content_type_line: + return ("", "") + + content_type, params = parse_header_parameters(content_type_line) + encoding = params.get("charset", "") + return content_type, encoding + + +def check_content_type(content_type: str) -> bool: + """ + Check `content_type` against settings. + + The string patterns of the content types specified under `LOG_OUTGOING_REQUESTS_ + CONTENT_TYPES` are split into two groups. For regular patterns not containing a + wildcard ("text/xml"), check if `content_type.pattern` is included in the list. + For patterns containing a wildcard ("text/*"), check if `content_type.pattern` + is a substring of any pattern contained in the list. + """ + allowed_content_types: Iterable[ + ContentType + ] = settings.LOG_OUTGOING_REQUESTS_CONTENT_TYPES + regular_patterns = [ + item.pattern for item in allowed_content_types if not item.pattern.endswith("*") + ] + wildcard_patterns = [ + item.pattern for item in allowed_content_types if item.pattern.endswith("*") + ] + + if content_type in regular_patterns: + return True + + return any(content_type.startswith(pattern[:-1]) for pattern in wildcard_patterns) + + +def get_default_encoding(content_type_pattern: str) -> str: + """ + Get the default encoding for the `ContentType` with the associated pattern. 
+ """ + allowed_content_types: Iterable[ + ContentType + ] = settings.LOG_OUTGOING_REQUESTS_CONTENT_TYPES + + regular_types = [ + item for item in allowed_content_types if not item.pattern.endswith("*") + ] + wildcard_types = [ + item for item in allowed_content_types if item.pattern.endswith("*") + ] + + content_type = next( + (item for item in regular_types if item.pattern == content_type_pattern), None + ) + if content_type is None: + content_type = next( + ( + item + for item in wildcard_types + if content_type_pattern.startswith(item.pattern[:-1]) + ), + None, + ) + + return content_type.default_encoding if content_type else "" diff --git a/setup.cfg b/setup.cfg index 7fe5f25..5598da6 100644 --- a/setup.cfg +++ b/setup.cfg @@ -24,7 +24,6 @@ classifiers = Operating System :: Unix Operating System :: MacOS Operating System :: Microsoft :: Windows - Programming Language :: Python :: 3.7 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 Programming Language :: Python :: 3.10 @@ -37,6 +36,7 @@ packages = find: install_requires = django >= 3.2 requests + django-solo tests_require = pytest pytest-django @@ -46,6 +46,7 @@ tests_require = flake8 freezegun requests-mock + pyquery [options.packages.find] include = @@ -56,6 +57,7 @@ include = tests = pytest pytest-django + pyquery tox isort black @@ -93,6 +95,8 @@ sections=FUTURE,STDLIB,DJANGO,THIRDPARTY,FIRSTPARTY,LOCALFOLDER [tool:pytest] testpaths = tests DJANGO_SETTINGS_MODULE=testapp.settings +filterwarnings = + error:DateTimeField .* received a naive datetime:RuntimeWarning:django.db.models.fields [pep8] [flake8] diff --git a/tox.ini b/tox.ini index 8dcf981..c363d71 100644 --- a/tox.ini +++ b/tox.ini @@ -1,6 +1,5 @@ [tox] envlist = - py37-django32 py{38,39,310}-django{32,41} isort black @@ -10,7 +9,6 @@ skip_missing_interpreters = true [gh-actions] python = - 3.7: py37 3.8: py38 3.9: py39 3.10: py310 @@ -24,7 +22,13 @@ DJANGO = setenv = DJANGO_SETTINGS_MODULE=testapp.settings PYTHONPATH={toxinidir} +passenv = + PGPORT + DB_USER + DB_HOST + DB_PASSWORD extras = + db tests coverage deps = @@ -55,6 +59,7 @@ basepython=python changedir=docs skipsdist=true extras = + db tests docs commands=
diff --git a/testapp/settings.py b/testapp/settings.py index 15ab347..adf56b0 100644 --- a/testapp/settings.py +++ b/testapp/settings.py @@ -1,5 +1,6 @@ import os +from log_outgoing_requests.datastructures import ContentType from log_outgoing_requests.formatters import HttpFormatter BASE_DIR = os.path.abspath(os.path.dirname(__file__)) @@ -16,6 +17,7 @@ INSTALLED_APPS = [ "django.contrib.contenttypes", "django.contrib.auth", + "django.contrib.messages", "django.contrib.sessions", "django.contrib.admin", "log_outgoing_requests", @@ -91,6 +93,16 @@ # LOG OUTGOING REQUESTS # LOG_OUTGOING_REQUESTS_DB_SAVE = True +LOG_OUTGOING_REQUESTS_DB_SAVE_BODY = True +LOG_OUTGOING_REQUESTS_CONTENT_TYPES = [ + ContentType(pattern="application/json", default_encoding="utf-8"), + ContentType(pattern="application/soap+xml", default_encoding="utf-8"), + ContentType(pattern="application/xml", default_encoding="utf-8"), + ContentType(pattern="text/xml", default_encoding="iso-8859-1"), + ContentType(pattern="text/*", default_encoding="utf-8"), +] +LOG_OUTGOING_REQUESTS_EMIT_BODY = True +LOG_OUTGOING_REQUESTS_MAX_CONTENT_LENGTH = 524_288 # 0.5 MB ROOT_URLCONF = "testapp.urls" diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..9762ff1 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,59 @@ +"""Global pytest fixtures""" + +import pytest + +from log_outgoing_requests.datastructures import ContentType + + +# +# default settings +# [email protected] +def default_settings(settings): + settings.LOG_OUTGOING_REQUESTS_CONTENT_TYPES = [ + ContentType(pattern="application/json", default_encoding="utf-8"), + ContentType(pattern="application/soap+xml", default_encoding="utf-8"), + ContentType(pattern="application/xml", default_encoding="utf-8"), + ContentType(pattern="text/xml", default_encoding="iso-8859-1"), + ContentType(pattern="text/*", default_encoding="utf-8"), + ] + return settings + + +# +# requests data +# [email protected] +def request_mock_kwargs(): + return { + "url": "http://example.com:8000/some-path?version=2.0", + "status_code": 200, + "json": {"test": "response data"}, + "request_headers": { + "Authorization": "test", + "Content-Type": "application/json", + "Content-Length": "24", + }, + "headers": { + "Date": "Tue, 21 Mar 2023 15:24:08 GMT", + "Content-Type": "application/json", + "Content-Length": "25", + }, + } + + [email protected] +def request_mock_kwargs_binary(): + return { + "url": "http://example.com:8000/some-path?version=2.0", + "status_code": 200, + "content": b"{\x03\xff\x00d", + "request_headers": { + "Authorization": "test", + "Content-Type": "binary", + }, + "headers": { + "Date": "Tue, 21 Mar 2023 15:24:08 GMT", + "Content-Type": "binary", + }, + } diff --git a/tests/test_admin.py b/tests/test_admin.py new file mode 100644 index 0000000..5238036 --- /dev/null +++ b/tests/test_admin.py @@ -0,0 +1,96 @@ +"""Tests for the admin interface""" + +from django.urls import reverse +from django.utils import timezone + +import pytest +import requests +from pyquery import PyQuery + +from log_outgoing_requests.models import OutgoingRequestsLog, OutgoingRequestsLogConfig + + +# +# test display +# [email protected]_db +def test_decoded_content_display(admin_client): + """Assert that decoded request/response bodies are properly displayed""" + + log = OutgoingRequestsLog.objects.create( + id=1, + req_body=b"I'm a lumberjack and I'm okay.", + res_body=b"I sleep all night and work all day.", + timestamp=timezone.now(), + ) + url = reverse( + 
"admin:log_outgoing_requests_outgoingrequestslog_change", args=(log.pk,) + ) + + response = admin_client.get(url) + assert response.status_code == 200 + + html = response.content.decode("utf-8") + doc = PyQuery(html) + request_body = doc.find(".request-body-decoded").text() + response_body = doc.find(".response-body-decoded").text() + + assert request_body == "I'm a lumberjack and I'm okay." + assert response_body == "I sleep all night and work all day." + + +# +# test override of settings +# [email protected]_db +def test_admin_override_db_save(requests_mock, request_mock_kwargs): + """Assert that saving logs can be disabled in admin""" + + config = OutgoingRequestsLogConfig.get_solo() + config.save_to_db = "no" + config.save() + + requests_mock.get(**request_mock_kwargs) + requests.get( + request_mock_kwargs["url"], headers=request_mock_kwargs["request_headers"] + ) + + request_log = OutgoingRequestsLog.objects.last() + + assert request_log is None + + [email protected]_db +def test_admin_override_save_body(requests_mock, request_mock_kwargs): + """Assert that saving body can be disabled in admin""" + + config = OutgoingRequestsLogConfig.get_solo() + config.save_body = "no" + config.save() + + requests_mock.get(**request_mock_kwargs) + requests.get( + request_mock_kwargs["url"], headers=request_mock_kwargs["request_headers"] + ) + + request_log = OutgoingRequestsLog.objects.last() + + assert request_log.res_body == b"" + + [email protected]_db +def test_admin_override_max_content_length(requests_mock, request_mock_kwargs): + """Assert that `max_content_length` can be overriden in admin""" + + config = OutgoingRequestsLogConfig.get_solo() + config.max_content_length = "10" + config.save() + + requests_mock.get(**request_mock_kwargs) + requests.get( + request_mock_kwargs["url"], headers=request_mock_kwargs["request_headers"] + ) + + request_log = OutgoingRequestsLog.objects.last() + + assert request_log.res_body == b"" diff --git a/tests/test_formatter.py b/tests/test_formatter.py new file mode 100644 index 0000000..ded4c45 --- /dev/null +++ b/tests/test_formatter.py @@ -0,0 +1,46 @@ +"""Tests for the HttpFormatter helper class""" + +import logging + +import pytest +import requests + +from log_outgoing_requests.formatters import HttpFormatter + + [email protected]_db [email protected]( + "log_body, expected", + [ + (True, True), + (False, False), + ], +) +def test_formatter( + requests_mock, + request_mock_kwargs, + caplog, + settings, + log_body, + expected, +): + """Assert that request/response bodies are (not) saved if setting is enabled (disabled)""" + + settings.LOG_OUTGOING_REQUESTS_EMIT_BODY = log_body + + formatter = HttpFormatter() + + with caplog.at_level(logging.DEBUG): + requests_mock.post(**request_mock_kwargs) + requests.post( + request_mock_kwargs["url"], + headers=request_mock_kwargs["request_headers"], + json={"test": "request data"}, + ) + + record = caplog.records[1] + + res = formatter.formatMessage(record) + + assert ('{"test": "request data"}' in res) is expected + assert ('{"test": "response data"}' in res) is expected diff --git a/tests/test_logging.py b/tests/test_logging.py index e0f8dd2..f926e01 100644 --- a/tests/test_logging.py +++ b/tests/test_logging.py @@ -1,141 +1,244 @@ -from django.test import TestCase, override_settings +"""Integration tests for the core functionality of the library""" +import logging + +import pytest import requests -import requests_mock from freezegun import freeze_time +from log_outgoing_requests.datastructures import ContentType from 
log_outgoing_requests.models import OutgoingRequestsLog -@requests_mock.Mocker() +# +# Local pytest fixtures +# [email protected]() +def request_variants(requests_mock): + return [ + ("GET", requests.get, requests_mock.get), + ("POST", requests.post, requests_mock.post), + ("PUT", requests.put, requests_mock.put), + ("PATCH", requests.patch, requests_mock.patch), + ("DELETE", requests.delete, requests_mock.delete), + ("HEAD", requests.head, requests_mock.head), + ] + + [email protected]() +def expected_headers(): + return ( + f"User-Agent: python-requests/{requests.__version__}\n" + "Accept-Encoding: gzip, deflate\n" + "Accept: */*\n" + "Connection: keep-alive\n" + "Authorization: ***hidden***\n" + "Content-Type: application/json\n" + "Content-Length: 24" + ) + + +# +# Tests +# [email protected]_db +def test_data_is_logged(requests_mock, request_mock_kwargs, caplog): + with caplog.at_level(logging.DEBUG): + requests_mock.get(**request_mock_kwargs) + requests.get( + request_mock_kwargs["url"], headers=request_mock_kwargs["request_headers"] + ) + + records = caplog.records + assert records[1].levelname == "DEBUG" + assert records[1].name == "requests" + assert records[1].msg == "Outgoing request" + + [email protected]_db @freeze_time("2021-10-18 13:00:00") -class OutgoingRequestsLoggingTests(TestCase): - def _setUpMocks(self, m): - m.get( - "http://example.com/some-path?version=2.0", - status_code=200, - content=b"some content", +def test_data_is_saved(request_mock_kwargs, request_variants, expected_headers): + for method, request_func, request_mock in request_variants: + request_mock(**request_mock_kwargs) + response = request_func( + request_mock_kwargs["url"], + headers=request_mock_kwargs["request_headers"], + json={"test": "request data"}, ) - @override_settings(LOG_OUTGOING_REQUESTS_DB_SAVE=True) - def test_outgoing_requests_are_logged(self, m): - self._setUpMocks(m) - - with self.assertLogs("requests", level="DEBUG") as logs: - requests.get("http://example.com/some-path?version=2.0") - - self.assertEqual(logs.output, ["DEBUG:requests:Outgoing request"]) - self.assertEqual(logs.records[0].name, "requests") - self.assertEqual(logs.records[0].getMessage(), "Outgoing request") - self.assertEqual(logs.records[0].levelname, "DEBUG") - - @override_settings(LOG_OUTGOING_REQUESTS_DB_SAVE=True) - def test_expected_data_is_saved_when_saving_enabled(self, m): - methods = [ - ("GET", requests.get, m.get), - ("POST", requests.post, m.post), - ("PUT", requests.put, m.put), - ("PATCH", requests.patch, m.patch), - ("DELETE", requests.delete, m.delete), - ("HEAD", requests.head, m.head), - ] - - for method, func, mocked in methods: - with self.subTest(): - mocked( - "http://example.com/some-path?version=2.0", - status_code=200, - json={"test": "data"}, - request_headers={ - "Authorization": "test", - "Content-Type": "text/html", - }, - headers={ - "Date": "Tue, 21 Mar 2023 15:24:08 GMT", - "Content-Type": "application/json", - }, - ) - expected_req_headers = ( - f"User-Agent: python-requests/{requests.__version__}\n" - "Accept-Encoding: gzip, deflate\n" - "Accept: */*\n" - "Connection: keep-alive\n" - "Authorization: ***hidden***\n" - "Content-Type: text/html" - ) - if method not in ["HEAD", "GET"]: - expected_req_headers += "\nContent-Length: 0" - - response = func( - "http://example.com/some-path?version=2.0", - headers={"Authorization": "test", "Content-Type": "text/html"}, - ) - - request_log = OutgoingRequestsLog.objects.last() - - self.assertEqual( - request_log.url, 
"http://example.com/some-path?version=2.0" - ) - self.assertEqual(request_log.hostname, "example.com") - self.assertEqual(request_log.params, "") - self.assertEqual(request_log.query_params, "version=2.0") - self.assertEqual(response.status_code, 200) - self.assertEqual(request_log.method, method) - self.assertEqual(request_log.req_content_type, "text/html") - self.assertEqual(request_log.res_content_type, "application/json") - self.assertEqual(request_log.response_ms, 0) - self.assertEqual(request_log.req_headers, expected_req_headers) - self.assertEqual( - request_log.res_headers, - "Date: Tue, 21 Mar 2023 15:24:08 GMT\nContent-Type: application/json", - ) - self.assertEqual( - request_log.timestamp.strftime("%Y-%m-%d %H:%M:%S"), - "2021-10-18 13:00:00", - ) - self.assertIsNone(request_log.trace) - - @override_settings(LOG_OUTGOING_REQUESTS_DB_SAVE=True) - def test_authorization_header_is_hidden(self, m): - self._setUpMocks(m) + assert response.status_code == 200 - requests.get( - "http://example.com/some-path?version=2.0", - headers={"Authorization": "test"}, + request_log = OutgoingRequestsLog.objects.last() + + assert request_log.method == method + assert request_log.hostname == "example.com:8000" + assert request_log.params == "" + assert request_log.query_params == "version=2.0" + assert request_log.response_ms == 0 + assert request_log.trace == "" + assert str(request_log) == "example.com:8000 at 2021-10-18 13:00:00+00:00" + assert ( + request_log.timestamp.strftime("%Y-%m-%d %H:%M:%S") == "2021-10-18 13:00:00" + ) + # headers + assert request_log.req_headers == expected_headers + assert ( + request_log.res_headers == "Date: Tue, 21 Mar 2023 15:24:08 GMT\n" + "Content-Type: application/json\nContent-Length: 25" + ) + # request body + assert request_log.req_content_type == "application/json" + assert bytes(request_log.req_body) == b'{"test": "request data"}' + assert request_log.req_body_encoding == "utf-8" + # response body + assert request_log.res_content_type == "application/json" + assert bytes(request_log.res_body) == b'{"test": "response data"}' + assert request_log.res_body_encoding == "utf-8" + + +# +# test decoding of binary content +# [email protected]( + "content, encoding, expected", + [ + (b"test\x03\xff\xff{\x03}", "utf-8", "test\x03��{\x03}"), + (b"test\x03\xff\xff{\x03}", "utx-99", "test\x03��{\x03}"), + (b"test{\x03\xff\xff\x00d", "", "test{\x03��\x00d"), + ], +) [email protected]_db +def test_decoding_of_binary_content( + content, encoding, expected, requests_mock, request_mock_kwargs_binary, settings +): + """ + Assert that decoding of binary contents works with: + - correct encoding + - wrong (e.g. 
misspelled) encoding + - missing encoding + """ + settings.LOG_OUTGOING_REQUESTS_CONTENT_TYPES = [ + ContentType(pattern="binary", default_encoding=encoding) + ] + + request_mock_kwargs_binary["content"] = content + + requests_mock.post(**request_mock_kwargs_binary) + response = requests.post( + request_mock_kwargs_binary["url"], + headers=request_mock_kwargs_binary["request_headers"], + data=content, + ) + + assert response.status_code == 200 + + request_log = OutgoingRequestsLog.objects.last() + + assert request_log.response_body_decoded == expected + assert request_log.request_body_decoded == expected + + [email protected]_db +def test_authorization_header_is_hidden(requests_mock, request_mock_kwargs): + requests_mock.get(**request_mock_kwargs) + requests.get( + request_mock_kwargs["url"], headers=request_mock_kwargs["request_headers"] + ) + + log = OutgoingRequestsLog.objects.get() + + assert "Authorization: ***hidden***" in log.req_headers + + [email protected]_db +def test_disable_save_db(request_mock_kwargs, request_variants, caplog, settings): + """Assert that data is logged but not saved to DB when setting is disabled""" + + settings.LOG_OUTGOING_REQUESTS_DB_SAVE = False + + for method, request_func, request_mock in request_variants: + with caplog.at_level(logging.DEBUG): + request_mock(**request_mock_kwargs) + response = request_func( + request_mock_kwargs["url"], + headers=request_mock_kwargs["request_headers"], + json={"test": "request data"}, + ) + + assert response.status_code == 200 + + # data is logged + records = caplog.records + assert records[1].levelname == "DEBUG" + assert records[1].name == "requests" + assert records[1].msg == "Outgoing request" + + # data is not saved + assert OutgoingRequestsLog.objects.exists() is False + + [email protected]_db +def test_disable_save_body(request_mock_kwargs, request_variants, settings): + """Assert that request/response bodies are not saved when setting is disabled""" + + settings.LOG_OUTGOING_REQUESTS_DB_SAVE_BODY = False + + for method, request_func, request_mock in request_variants: + request_mock(**request_mock_kwargs) + response = request_func( + request_mock_kwargs["url"], + headers=request_mock_kwargs["request_headers"], + json={"test": "request data"}, ) - log = OutgoingRequestsLog.objects.get() - self.assertIn("Authorization: ***hidden***", log.req_headers) + assert response.status_code == 200 - @override_settings(LOG_OUTGOING_REQUESTS_DB_SAVE=False) - def test_data_is_not_saved_when_saving_disabled(self, m): - self._setUpMocks(m) + request_log = OutgoingRequestsLog.objects.last() - with self.assertLogs("requests", level="DEBUG") as logs: - requests.get("http://example.com/some-path?version=2.0") + assert bytes(request_log.req_body) == b"" + assert bytes(request_log.res_body) == b"" - self.assertEqual(logs.output, ["DEBUG:requests:Outgoing request"]) - self.assertEqual(logs.records[0].name, "requests") - self.assertEqual(logs.records[0].getMessage(), "Outgoing request") - self.assertEqual(logs.records[0].levelname, "DEBUG") - self.assertFalse(OutgoingRequestsLog.objects.exists()) - @override_settings(LOG_OUTGOING_REQUESTS_DB_SAVE=False) - def test_outgoing_requests_are_logged_when_saving_disabled(self, m): - self._setUpMocks(m) [email protected]_db +def test_content_type_not_allowed(request_mock_kwargs, request_variants, settings): + """Assert that request/response bodies are not saved when content type is not allowed""" - with self.assertLogs("requests", level="DEBUG") as logs: - 
requests.get("http://example.com/some-path?version=2.0") + settings.LOG_OUTGOING_REQUESTS_CONTENT_TYPES = [ + ContentType(pattern="text/*", default_encoding="utf-8") + ] - self.assertEqual(logs.output, ["DEBUG:requests:Outgoing request"]) - self.assertEqual(logs.records[0].name, "requests") - self.assertEqual(logs.records[0].getMessage(), "Outgoing request") - self.assertEqual(logs.records[0].levelname, "DEBUG") + for method, request_func, request_mock in request_variants: + request_mock(**request_mock_kwargs) + response = request_func( + request_mock_kwargs["url"], + headers=request_mock_kwargs["request_headers"], + json={"test": "request data"}, + ) + + assert response.status_code == 200 + + request_log = OutgoingRequestsLog.objects.last() + + assert bytes(request_log.req_body) == b"" + assert bytes(request_log.res_body) == b"" + + [email protected]_db +def test_content_length_exceeded(request_mock_kwargs, request_variants, settings): + """Assert that body is not saved when content-length exceeds pre-defined max""" + + settings.LOG_OUTGOING_REQUESTS_MAX_CONTENT_LENGTH = 10 + + for method, request_func, request_mock in request_variants: + request_mock(**request_mock_kwargs) + response = request_func( + request_mock_kwargs["url"], + headers=request_mock_kwargs["request_headers"], + json={"test": "request data"}, + ) - @override_settings(LOG_OUTGOING_REQUESTS_DB_SAVE=False) - def test_request_data_is_not_saved_when_saving_disabled(self, m): - self._setUpMocks(m) + assert response.status_code == 200 - requests.get("http://example.com/some-path?version=2.0") + request_log = OutgoingRequestsLog.objects.last() - self.assertFalse(OutgoingRequestsLog.objects.exists()) + assert bytes(request_log.res_body) == b"" diff --git a/tests/test_utils.py b/tests/test_utils.py new file mode 100644 index 0000000..be1bd45 --- /dev/null +++ b/tests/test_utils.py @@ -0,0 +1,167 @@ +"""Tests for the utility functions""" + +import logging + +import pytest +import requests + +from log_outgoing_requests.datastructures import ContentType +from log_outgoing_requests.models import OutgoingRequestsLogConfig +from log_outgoing_requests.utils import ( + check_content_length, + check_content_type, + get_default_encoding, + parse_content_type_header, +) + + +# +# test check_content_length +# [email protected]( + "max_content_length, expected", + [ + (1048, True), + (12, False), + ], +) [email protected]_db +def test_check_content_length( + max_content_length, + requests_mock, + request_mock_kwargs, + expected, +): + config = OutgoingRequestsLogConfig.objects.create( + max_content_length=max_content_length, + ) + + # we check the functionality of determining missing content length + # only for the response (the requests library automatically inserts + # the content length for the request) + del request_mock_kwargs["headers"]["Content-Length"] + + mock = requests_mock.post(**request_mock_kwargs) + response = requests.post( + request_mock_kwargs["url"], + headers=request_mock_kwargs["request_headers"], + json={"test": "request data"}, + ) + + assert response.status_code == 200 + + result_request = check_content_length(mock.last_request, config=config) + assert result_request is expected + + result_response = check_content_length(response, config=config) + assert result_response is expected + + +# +# test parse_content_type_header +# [email protected]( + "content_type_header, expected_content_type, expected_encoding", + [ + ("text/xml; charset=us-ascii", "text/xml", "us-ascii"), + ("text/xml", "text/xml", ""), + 
("application/json", "application/json", ""), + ("", "", ""), + ], +) [email protected]_db +def test_parse_content_type_header( + content_type_header, + requests_mock, + request_mock_kwargs, + expected_content_type, + expected_encoding, +): + request_mock_kwargs["request_headers"]["Content-Type"] = content_type_header + request_mock_kwargs["headers"]["Content-Type"] = content_type_header + + mock = requests_mock.post(**request_mock_kwargs) + response = requests.post( + request_mock_kwargs["url"], + headers=request_mock_kwargs["request_headers"], + json={"test": "request data"}, + ) + + assert response.status_code == 200 + + # check request + parsed_request_header = parse_content_type_header(mock.last_request) + assert parsed_request_header[0] == expected_content_type + assert parsed_request_header[1] == expected_encoding + + # check response + parsed_response_header = parse_content_type_header(response) + assert parsed_response_header[0] == expected_content_type + assert parsed_response_header[1] == expected_encoding + + +# +# test check_content_type +# [email protected]( + "allowed_content_types, content_type_pattern, expected", + [ + ([ContentType("text/xml", "iso-8859-1")], "text/xml", True), + ([ContentType("text/xml", "iso-8859-1")], "text/html", False), + ([ContentType("text/xml", "iso-8859-1")], "video/mp4", False), + ([ContentType("text/*", "utf-8")], "text/html", True), + ], +) [email protected]_db +def test_check_content_type( + allowed_content_types, + content_type_pattern, + expected, + settings, +): + settings.LOG_OUTGOING_REQUESTS_CONTENT_TYPES = allowed_content_types + + result = check_content_type(content_type_pattern) + assert result is expected + + +# +# test get_default_encoding +# [email protected]( + "content_type_pattern, expected", + [ + ("text/html", "utf-8"), + ("text/xml", "iso-8859-1"), + ("application/json", "utf-8"), + ("application/unknown", ""), + ], +) [email protected]_db +def test_get_default_encoding( + content_type_pattern, + expected, +): + result = get_default_encoding(content_type_pattern) + assert result == expected + + [email protected]_db +def test_logger_warning_missing_content_length( + requests_mock, request_mock_kwargs, caplog +): + del request_mock_kwargs["request_headers"]["Content-Length"] + + with caplog.at_level(logging.DEBUG): + requests_mock.get(**request_mock_kwargs) + requests.get( + request_mock_kwargs["url"], headers=request_mock_kwargs["request_headers"] + ) + + records = caplog.records + assert records[1].levelname == "WARNING" + assert records[1].name == "log_outgoing_requests.utils" + assert ( + records[1].msg + == "Content length of the request/response (request netloc: example.com:8000) could not be determined." + )
Request: Display body of request (I'll skip the part about security/privacy) Just the outgoing request doesn't tell the whole story, especially in a system where requests to a single endpoint accept all kinds of requests, like an ESB. Such endpoints can respond with a 200 but in fact return an exception in the body. Wishes for this feature:
- [x] Enable body logging for x minutes
- [x] Purge body logs after x time
- [ ] Style the body syntax
- [ ] Show a "copy" button
- [ ] Show a "delete" button
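A hypothetical sketch of the first two wishlist items (time-boxed body logging with automatic expiry). The `save_body_until` field and the boolean `save_body` used here are illustrative assumptions, not fields the library actually has:

```python
from datetime import timedelta

from django.utils import timezone


def body_logging_enabled(config):
    # Log bodies only while the (assumed) expiry timestamp lies in the future.
    if not config.save_body:
        return False
    return config.save_body_until is None or timezone.now() < config.save_body_until


def enable_body_logging(config, minutes=30):
    # Turn body logging on for a limited window, after which it expires.
    config.save_body = True
    config.save_body_until = timezone.now() + timedelta(minutes=minutes)
    config.save()
```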
2023-05-10T06:30:24
-1.0
nazebzurati/jeng
3
nazebzurati__jeng-3
['2']
df3ceb80a63e6767b1a24115116328cea1c744c5
diff --git a/MANIFEST.in b/MANIFEST.in index ef0fb69..9eac73d 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,4 +1,8 @@ +include *.in include *.md +include *.properties include pytest.ini +recursive-include src *.WSDL +recursive-include tests *.csv recursive-include tests *.py recursive-include tests *.xml diff --git a/setup.py b/setup.py index c3ad5a9..5231652 100644 --- a/setup.py +++ b/setup.py @@ -5,7 +5,7 @@ setup( name="jeng", - version="0.0.4", + version="0.0.6", license="MIT", description="A simple WITSML client with utilities.", long_description=long_description, @@ -15,6 +15,7 @@ url="https://github.com/nazebzurati/jeng", packages=find_packages("src"), package_dir={"": "src"}, + include_package_data=True, py_modules=[ "jeng", ], diff --git a/src/jeng/client.py b/src/jeng/client.py index dfdf548..74d7691 100644 --- a/src/jeng/client.py +++ b/src/jeng/client.py @@ -1,3 +1,5 @@ +import os + import requests import urllib3 from requests import Session @@ -5,6 +7,7 @@ from zeep import Client, proxy, xsd from zeep.transports import Transport +import jeng from jeng import exception # disabling urllib warnings @@ -16,12 +19,13 @@ class WitsmlClient: def __init__(self): self.__client = None + self.__service = None self.__session = Session() def __test(self): # exception will be caught by function caller return ( - self.__client.service.WMLS_GetBaseMsg( + self.__service.WMLS_GetBaseMsg( ReturnValueIn=1, ) ).strip() == "Function completed successfully" @@ -38,7 +42,7 @@ def connect( Parameters ---------- url : str - WSDL from a WITSML Store web service URL (usually ends with '?wsdl') + WITSML Store service endpoint username : str Username for user authentication password : str @@ -49,15 +53,20 @@ def connect( bool Status of the connection (True is OK) """ + wsdl_file_path = os.path.join(jeng.__path__[0], "xml", "WMLS.WSDL") + witsml_binding_uri = "{http://www.witsml.org/wsdl/120}StoreSoapBinding" self.__session.auth = HTTPBasicAuth(username, password) try: - self.__client = Client(url, transport=Transport(session=self.__session)) + self.__client = Client(transport=Transport(session=self.__session), wsdl=wsdl_file_path) + self.__service = self.__client.create_service(witsml_binding_uri, url) return self.__test() except requests.exceptions.SSLError: self.__session.verify = False - self.__client = Client(url, transport=Transport(session=self.__session)) + self.__client = Client(transport=Transport(session=self.__session), wsdl=wsdl_file_path) + self.__service = self.__client.create_service(witsml_binding_uri, url) return self.__test() - except Exception: + except Exception as e: + print(str(e)) return False def service(self) -> proxy.ServiceProxy: @@ -69,7 +78,7 @@ def service(self) -> proxy.ServiceProxy: zeep.proxy.ServiceProxy Service proxy for calling API functions """ - return self.__client.service + return self.__service def get_from_store( self, @@ -95,7 +104,7 @@ def get_from_store( API call reply """ try: - return self.__client.service.WMLS_GetFromStore( + return self.__service.WMLS_GetFromStore( WMLtypeIn=wml_type_in, QueryIn=xml_in, OptionsIn=f"returnElements={return_element}", @@ -125,7 +134,7 @@ def add_to_store( API call reply """ try: - return self.__client.service.WMLS_AddToStore( + return self.__service.WMLS_AddToStore( WMLtypeIn=wml_type_in, XMLin=xml_in, OptionsIn=xsd.SkipValue, @@ -155,7 +164,7 @@ def update_in_store( API call reply. 
""" try: - return self.__client.service.WMLS_UpdateInStore( + return self.__service.WMLS_UpdateInStore( WMLtypeIn=wml_type_in, XMLin=xml_in, OptionsIn=xsd.SkipValue, @@ -185,7 +194,7 @@ def delete_from_store( API call reply. """ try: - return self.__client.service.WMLS_DeleteFromStore( + return self.__service.WMLS_DeleteFromStore( WMLtypeIn=wml_type_in, QueryIn=xml_in, OptionsIn=xsd.SkipValue, diff --git a/src/jeng/xml/WMLS.WSDL b/src/jeng/xml/WMLS.WSDL new file mode 100644 index 0000000..dbf2694 --- /dev/null +++ b/src/jeng/xml/WMLS.WSDL @@ -0,0 +1,243 @@ +<?xml version='1.0' encoding='UTF-8'?> + +<definitions name='WMLS' targetNamespace='http://www.witsml.org/wsdl/120' + xmlns:wsdlns='http://www.witsml.org/wsdl/120' + xmlns:soap='http://schemas.xmlsoap.org/wsdl/soap/' + xmlns:xsd='http://www.w3.org/2001/XMLSchema' + xmlns='http://schemas.xmlsoap.org/wsdl/'> + + <documentation>WITSML Version 1.2.0 STORE interface WSDL file</documentation> + + <!-- Abstract Definitions Section - <types>, <message> and <portType> elements --> + + <!-- <types> element declares user-defined machine and language independent data types, and is + not needed for the WITSML STORE interface, which uses only W3C-defined data types --> + + <!-- <message> elements define request/response messages and their parameters--> + + <message name='Store.WMLS_AddToStore'> + <part name='WMLtypeIn' type='xsd:string' /> + <part name='XMLin' type='xsd:string' /> + <part name='OptionsIn' type='xsd:string' /> + <part name='CapabilitiesIn' type='xsd:string' /> + </message> + <message name='Store.WMLS_AddToStoreResponse'> + <part name='Result' type='xsd:short' /> + <part name='SuppMsgOut' type='xsd:string' /> + </message> + + <message name='Store.WMLS_DeleteFromStore'> + <part name='WMLtypeIn' type='xsd:string' /> + <part name='QueryIn' type='xsd:string' /> + <part name='OptionsIn' type='xsd:string' /> + <part name='CapabilitiesIn' type='xsd:string' /> + </message> + <message name='Store.WMLS_DeleteFromStoreResponse'> + <part name='Result' type='xsd:short' /> + <part name='SuppMsgOut' type='xsd:string' /> + </message> + + <message name='Store.WMLS_GetBaseMsg'> + <part name='ReturnValueIn' type='xsd:short' /> + </message> + <message name='Store.WMLS_GetBaseMsgResponse'> + <part name='Result' type='xsd:string' /> + </message> + + <message name='Store.WMLS_GetCap'> + <part name='OptionsIn' type='xsd:string' /> + </message> + <message name='Store.WMLS_GetCapResponse'> + <part name='Result' type='xsd:short' /> + <part name='CapabilitiesOut' type='xsd:string' /> + <part name='SuppMsgOut' type='xsd:string' /> + </message> + + <message name='Store.WMLS_GetFromStore'> + <part name='WMLtypeIn' type='xsd:string' /> + <part name='QueryIn' type='xsd:string' /> + <part name='OptionsIn' type='xsd:string' /> + <part name='CapabilitiesIn' type='xsd:string' /> + </message> + <message name='Store.WMLS_GetFromStoreResponse'> + <part name='Result' type='xsd:short' /> + <part name='XMLout' type='xsd:string' /> + <part name='SuppMsgOut' type='xsd:string' /> + </message> + + <message name='Store.WMLS_GetVersion'> + </message> + <message name='Store.WMLS_GetVersionResponse'> + <part name='Result' type='xsd:string' /> + </message> + + <message name='Store.WMLS_UpdateInStore'> + <part name='WMLtypeIn' type='xsd:string' /> + <part name='XMLin' type='xsd:string' /> + <part name='OptionsIn' type='xsd:string' /> + <part name='CapabilitiesIn' type='xsd:string' /> + </message> + <message name='Store.WMLS_UpdateInStoreResponse'> + <part name='Result' type='xsd:short' 
/> + <part name='SuppMsgOut' type='xsd:string' /> + </message> + + <!-- <portType> element groups the functions (operations) into an interface --> + + <portType name='StoreSoapPort'> + + <!-- <operation> elements define the function signatures (operation name and parameters) + and associate the input and output messages --> + + <!-- parameterOrder attribute values must be separated by a single space --> + + <operation name='WMLS_AddToStore' + parameterOrder='WMLtypeIn XMLin OptionsIn CapabilitiesIn SuppMsgOut'> + <input message='wsdlns:Store.WMLS_AddToStore' /> + <output message='wsdlns:Store.WMLS_AddToStoreResponse' /> + </operation> + + <operation name='WMLS_DeleteFromStore' + parameterOrder='WMLtypeIn QueryIn OptionsIn CapabilitiesIn SuppMsgOut'> + <input message='wsdlns:Store.WMLS_DeleteFromStore' /> + <output message='wsdlns:Store.WMLS_DeleteFromStoreResponse' /> + </operation> + + <operation name='WMLS_GetBaseMsg' parameterOrder='ReturnValueIn'> + <input message='wsdlns:Store.WMLS_GetBaseMsg' /> + <output message='wsdlns:Store.WMLS_GetBaseMsgResponse' /> + </operation> + + <operation name='WMLS_GetCap' + parameterOrder='OptionsIn CapabilitiesOut SuppMsgOut'> + <input message='wsdlns:Store.WMLS_GetCap' /> + <output message='wsdlns:Store.WMLS_GetCapResponse' /> + </operation> + + <operation name='WMLS_GetFromStore' + parameterOrder='WMLtypeIn QueryIn OptionsIn CapabilitiesIn XMLout SuppMsgOut'> + <input message='wsdlns:Store.WMLS_GetFromStore' /> + <output message='wsdlns:Store.WMLS_GetFromStoreResponse' /> + </operation> + + <operation name='WMLS_GetVersion'> + <input message='wsdlns:Store.WMLS_GetVersion' /> + <output message='wsdlns:Store.WMLS_GetVersionResponse' /> + </operation> + + <operation name='WMLS_UpdateInStore' + parameterOrder='WMLtypeIn XMLin OptionsIn CapabilitiesIn SuppMsgOut'> + <input message='wsdlns:Store.WMLS_UpdateInStore' /> + <output message='wsdlns:Store.WMLS_UpdateInStoreResponse' /> + </operation> + + </portType> + + <!-- Concrete Definitions Section - <binding> and <service> elements --> + + <!-- <binding> specifies the protocol binding for each operation in the <portType> section --> + + <binding name='StoreSoapBinding' type='wsdlns:StoreSoapPort'> + + <soap:binding style='rpc' transport='http://schemas.xmlsoap.org/soap/http' /> + + <operation name='WMLS_AddToStore'> + <soap:operation + soapAction='http://www.witsml.org/action/120/Store.WMLS_AddToStore' /> + <input> + <soap:body use='encoded' namespace='http://www.witsml.org/message/120' + encodingStyle='http://schemas.xmlsoap.org/soap/encoding/' /> + </input> + <output> + <soap:body use='encoded' namespace='http://www.witsml.org/message/120' + encodingStyle='http://schemas.xmlsoap.org/soap/encoding/' /> + </output> + </operation> + + <operation name='WMLS_DeleteFromStore'> + <soap:operation + soapAction='http://www.witsml.org/action/120/Store.WMLS_DeleteFromStore' /> + <input> + <soap:body use='encoded' namespace='http://www.witsml.org/message/120' + encodingStyle='http://schemas.xmlsoap.org/soap/encoding/' /> + </input> + <output> + <soap:body use='encoded' namespace='http://www.witsml.org/message/120' + encodingStyle='http://schemas.xmlsoap.org/soap/encoding/' /> + </output> + </operation> + + <operation name='WMLS_GetBaseMsg'> + <soap:operation + soapAction='http://www.witsml.org/action/120/Store.WMLS_GetBaseMsg' /> + <input> + <soap:body use='encoded' namespace='http://www.witsml.org/message/120' + encodingStyle='http://schemas.xmlsoap.org/soap/encoding/' /> + </input> + <output> + <soap:body 
use='encoded' namespace='http://www.witsml.org/message/120' + encodingStyle='http://schemas.xmlsoap.org/soap/encoding/' /> + </output> + </operation> + + <operation name='WMLS_GetCap'> + <soap:operation + soapAction='http://www.witsml.org/action/120/Store.WMLS_GetCap' /> + <input> + <soap:body use='encoded' namespace='http://www.witsml.org/message/120' + encodingStyle='http://schemas.xmlsoap.org/soap/encoding/' /> + </input> + <output> + <soap:body use='encoded' namespace='http://www.witsml.org/message/120' + encodingStyle='http://schemas.xmlsoap.org/soap/encoding/' /> + </output> + </operation> + + <operation name='WMLS_GetFromStore'> + <soap:operation soapAction='http://www.witsml.org/action/120/Store.WMLS_GetFromStore' /> + <input> + <soap:body use='encoded' namespace='http://www.witsml.org/message/120' + encodingStyle='http://schemas.xmlsoap.org/soap/encoding/' /> + </input> + <output> + <soap:body use='encoded' namespace='http://www.witsml.org/message/120' + encodingStyle='http://schemas.xmlsoap.org/soap/encoding/' /> + </output> + </operation> + + <operation name='WMLS_GetVersion'> + <soap:operation soapAction='http://www.witsml.org/action/120/Store.WMLS_GetVersion' /> + <input> + <soap:body use='encoded' namespace='http://www.witsml.org/message/120' + encodingStyle='http://schemas.xmlsoap.org/soap/encoding/' /> + </input> + <output> + <soap:body use='encoded' namespace='http://www.witsml.org/message/120' + encodingStyle='http://schemas.xmlsoap.org/soap/encoding/' /> + </output> + </operation> + + <operation name='WMLS_UpdateInStore'> + <soap:operation + soapAction='http://www.witsml.org/action/120/Store.WMLS_UpdateInStore' /> + <input> + <soap:body use='encoded' namespace='http://www.witsml.org/message/120' + encodingStyle='http://schemas.xmlsoap.org/soap/encoding/' /> + </input> + <output> + <soap:body use='encoded' namespace='http://www.witsml.org/message/120' + encodingStyle='http://schemas.xmlsoap.org/soap/encoding/' /> + </output> + </operation> + + </binding> + + <!-- <service> specifies the portType for each binding and the URL of the service --> + + <service name='WMLS'> + <port name='StoreSoapPort' binding='wsdlns:StoreSoapBinding'> + <soap:address location='http://yourorg.com/yourwebservice' /> + </port> + </service> + +</definitions> \ No newline at end of file
diff --git a/.github/workflows/unit-test.yml b/.github/workflows/unit-test.yml index 234f971..60dadd5 100644 --- a/.github/workflows/unit-test.yml +++ b/.github/workflows/unit-test.yml @@ -6,8 +6,6 @@ name: Unit test on: push: branches: [ "main" ] - pull_request: - branches: [ "main" ] jobs: build: diff --git a/tests/common.py b/tests/common.py index 045c48b..0c5983b 100644 --- a/tests/common.py +++ b/tests/common.py @@ -5,7 +5,7 @@ from jeng import model from jeng.client import WitsmlClient -QUERY_PATH = "tests/query" +QUERY_PATH = "tests/xml" SAMPLE_PATH = "tests/sample" SAMPLE_TIME_FORMAT = "%H:%M:%S/%d-%b-%Y" CONNECTION_URL = os.environ.get("JENG_CONN_URL") diff --git a/tests/query/log_delete.xml b/tests/xml/log_delete.xml similarity index 100% rename from tests/query/log_delete.xml rename to tests/xml/log_delete.xml diff --git a/tests/query/log_reply_data.xml b/tests/xml/log_reply_data.xml similarity index 100% rename from tests/query/log_reply_data.xml rename to tests/xml/log_reply_data.xml diff --git a/tests/query/log_reply_empty_column.xml b/tests/xml/log_reply_empty_column.xml similarity index 100% rename from tests/query/log_reply_empty_column.xml rename to tests/xml/log_reply_empty_column.xml diff --git a/tests/query/log_reply_empty_columns.xml b/tests/xml/log_reply_empty_columns.xml similarity index 100% rename from tests/query/log_reply_empty_columns.xml rename to tests/xml/log_reply_empty_columns.xml diff --git a/tests/query/log_reply_insufficient_column.xml b/tests/xml/log_reply_insufficient_column.xml similarity index 100% rename from tests/query/log_reply_insufficient_column.xml rename to tests/xml/log_reply_insufficient_column.xml diff --git a/tests/query/log_reply_insufficient_data.xml b/tests/xml/log_reply_insufficient_data.xml similarity index 100% rename from tests/query/log_reply_insufficient_data.xml rename to tests/xml/log_reply_insufficient_data.xml diff --git a/tests/query/log_reply_no_column.xml b/tests/xml/log_reply_no_column.xml similarity index 100% rename from tests/query/log_reply_no_column.xml rename to tests/xml/log_reply_no_column.xml diff --git a/tests/query/log_reply_no_data.xml b/tests/xml/log_reply_no_data.xml similarity index 100% rename from tests/query/log_reply_no_data.xml rename to tests/xml/log_reply_no_data.xml diff --git a/tests/query/log_reply_no_data_value.xml b/tests/xml/log_reply_no_data_value.xml similarity index 100% rename from tests/query/log_reply_no_data_value.xml rename to tests/xml/log_reply_no_data_value.xml diff --git a/tests/query/well_create.xml b/tests/xml/well_create.xml similarity index 100% rename from tests/query/well_create.xml rename to tests/xml/well_create.xml diff --git a/tests/query/well_delete.xml b/tests/xml/well_delete.xml similarity index 100% rename from tests/query/well_delete.xml rename to tests/xml/well_delete.xml diff --git a/tests/query/well_read.xml b/tests/xml/well_read.xml similarity index 100% rename from tests/query/well_read.xml rename to tests/xml/well_read.xml diff --git a/tests/query/well_update.xml b/tests/xml/well_update.xml similarity index 100% rename from tests/query/well_update.xml rename to tests/xml/well_update.xml diff --git a/tests/query/wellbore_create.xml b/tests/xml/wellbore_create.xml similarity index 100% rename from tests/query/wellbore_create.xml rename to tests/xml/wellbore_create.xml diff --git a/tests/query/wellbore_delete.xml b/tests/xml/wellbore_delete.xml similarity index 100% rename from tests/query/wellbore_delete.xml rename to tests/xml/wellbore_delete.xml
Unable to connect to a server when the WSDL and the service endpoint differ
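The patch above addresses this by loading a bundled copy of `WMLS.WSDL` and binding it to the runtime endpoint with zeep's `create_service`. A minimal sketch of that pattern, assuming the WSDL file is on disk (the endpoint URL is illustrative):

```python
from requests import Session
from zeep import Client
from zeep.transports import Transport

# Load the WSDL from a local file instead of the server, then bind the
# StoreSoapBinding declared in it to the actual service endpoint, which may
# differ from the <soap:address> placeholder inside the WSDL.
session = Session()
client = Client("WMLS.WSDL", transport=Transport(session=session))
service = client.create_service(
    "{http://www.witsml.org/wsdl/120}StoreSoapBinding",
    "https://witsml.example.org/store",  # illustrative endpoint
)
print(service.WMLS_GetVersion())
```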
2022-12-26T04:07:18
-1.0
DiamondLightSource/ispyb-api
46
DiamondLightSource__ispyb-api-46
['3']
09dd224f8a861ab0019b223e1dfb7bcbbc5bfab2
diff --git a/conf/config.example.cfg b/conf/config.example.cfg index bd9d446e..d1d52723 100644 --- a/conf/config.example.cfg +++ b/conf/config.example.cfg @@ -5,4 +5,3 @@ pw = host = localhost port = 3306 db = ispybtest -conn_inactivity = 360 diff --git a/ispyb/__init__.py b/ispyb/__init__.py index 3073240f..377498b1 100644 --- a/ispyb/__init__.py +++ b/ispyb/__init__.py @@ -6,7 +6,7 @@ import ConfigParser as configparser import logging -__version__ = '4.11.1' +__version__ = '4.12.0' _log = logging.getLogger('ispyb') diff --git a/ispyb/connector/mysqlsp/main.py b/ispyb/connector/mysqlsp/main.py index f39556a4..e5ba3fdb 100644 --- a/ispyb/connector/mysqlsp/main.py +++ b/ispyb/connector/mysqlsp/main.py @@ -1,9 +1,9 @@ -import datetime +from __future__ import absolute_import, division, print_function + import os import sys -import traceback import threading -import time +import traceback import ispyb.interface.connection import mysql.connector @@ -17,7 +17,7 @@ class ISPyBMySQLSPConnector(ispyb.interface.connection.IF): def __init__(self, user=None, pw=None, host='localhost', db=None, port=3306, conn_inactivity=360): self.lock = threading.Lock() - self.connect(user=user, pw=pw, host=host, db=db, port=port, conn_inactivity=conn_inactivity) + self.connect(user=user, pw=pw, host=host, db=db, port=port) def __enter__(self): if hasattr(self, 'conn') and self.conn is not None: @@ -30,23 +30,15 @@ def __exit__(self, type, value, traceback): def connect(self, user=None, pw=None, host='localhost', db=None, port=3306, conn_inactivity=360): self.disconnect() - self.user = user - self.pw = pw - self.host = host - self.db = db - self.port = port - self.conn_inactivity = int(conn_inactivity) self.conn = mysql.connector.connect(user=user, password=pw, host=host, database=db, port=int(port)) - if self.conn is not None: - self.conn.autocommit=True - else: - raise ISPyBConnectionException - self.last_activity_ts = time.time() + if not self.conn: + raise ISPyBConnectionException('Could not connect to database') + self.conn.autocommit = True def __del__(self): self.disconnect() @@ -61,17 +53,13 @@ def get_data_area_package(self): return 'ispyb.sp' def create_cursor(self, dictionary=False): - if time.time() - self.last_activity_ts > self.conn_inactivity: - # re-connect: - self.connect(self.user, self.pw, self.host, self.db, self.port) - self.last_activity_ts = time.time() - if self.conn is None: - raise ISPyBConnectionException - - cursor = self.conn.cursor(dictionary=dictionary) - if cursor is None: - raise ISPyBConnectionException - return cursor + if not self.conn: + raise ISPyBConnectionException('Not connected to database') + self.conn.ping(reconnect=True) + cursor = self.conn.cursor(dictionary=dictionary) + if not cursor: + raise ISPyBConnectionException('Could not create database cursor') + return cursor def call_sp_write(self, procname, args): with self.lock: diff --git a/ispyb/model/__future__.py b/ispyb/model/__future__.py index f69d9e2e..0f9367a9 100644 --- a/ispyb/model/__future__.py +++ b/ispyb/model/__future__.py @@ -15,7 +15,7 @@ _db_config = None -def enable(configuration_file): +def enable(configuration_file, section='ispyb'): '''Enable access to features that are currently under development.''' global _db, _db_cc, _db_config @@ -37,19 +37,46 @@ def enable(configuration_file): cfgparser = configparser.RawConfigParser() if not cfgparser.read(configuration_file): raise RuntimeError('Could not read from configuration file %s' % configuration_file) - cfgsection = dict(cfgparser.items('ispyb')) + 
cfgsection = dict(cfgparser.items(section)) host = cfgsection.get('host') port = cfgsection.get('port', 3306) - database = cfgsection.get('database') - username = cfgsection.get('username') - password = cfgsection.get('password') + database = cfgsection.get('database', cfgsection.get('db')) + username = cfgsection.get('username', cfgsection.get('user')) + password = cfgsection.get('password', cfgsection.get('pw')) # Open a direct MySQL connection _db = mysql.connector.connect(host=host, port=port, user=username, password=password, database=database) _db.autocommit = True - _db_cc = DictionaryContextcursorFactory(_db.cursor) _db_config = configuration_file + class DictionaryCursorContextManager(object): + '''This class creates dictionary cursors for mysql.connector connections. + By using a context manager it is ensured that cursors are closed + immediately after use. + Cursors created with this context manager return results as a dictionary + and offer a .run() function, which is an alias to .execute that accepts + query parameters as function parameters rather than a list. + ''' + + def __enter__(cm): + '''Enter context. Ensure the database is alive and return a cursor + with an extra .run() function.''' + _db.ping(reconnect=True) + cm.cursor = _db.cursor(dictionary=True) + + def flat_execute(stmt, *parameters): + '''Pass all given function parameters as a list to the existing + .execute() function.''' + return cm.cursor.execute(stmt, parameters) + setattr(cm.cursor, 'run', flat_execute) + return cm.cursor + + def __exit__(cm, *args): + '''Leave context. Close cursor. Destroy reference.''' + cm.cursor.close() + cm.cursor = None + _db_cc = DictionaryCursorContextManager + import ispyb.model.datacollection ispyb.model.datacollection.DataCollection.integrations = _get_linked_autoprocintegration_for_dc import ispyb.model.gridinfo @@ -57,52 +84,6 @@ def enable(configuration_file): import ispyb.model.processingprogram ispyb.model.processingprogram.ProcessingProgram.reload = _get_autoprocprogram -class DictionaryContextcursorFactory(object): - '''This class creates dictionary context manager objects for mysql.connector - cursors. By using a context manager it is ensured that cursors are - closed immediately after use. - Context managers created via this factory return results as a dictionary - by default, and offer a .run() function, which is an alias to .execute - that accepts query parameters as function parameters rather than a list. - ''' - - def __init__(self, cursor_factory_function): - '''Set up the context manager factory.''' - - class ContextManager(object): - '''The context manager object which is actually used in the - with .. as ..: - clause.''' - - def __init__(cm, parameters): - '''Store any constructor parameters, given as dictionary, so that they - can be passed to the cursor factory later.''' - cm.cursorparams = { 'dictionary': True } - cm.cursorparams.update(parameters) - - def __enter__(cm): - '''Enter context. Instantiate and return the actual cursor using the - given constructor, parameters, and an extra .run() function.''' - cm.cursor = cursor_factory_function(**cm.cursorparams) - - def flat_execute(stmt, *parameters): - '''Pass all given function parameters as a list to the existing - .execute() function.''' - return cm.cursor.execute(stmt, parameters) - setattr(cm.cursor, 'run', flat_execute) - return cm.cursor - - def __exit__(cm, *args): - '''Leave context. Close cursor. 
Destroy reference.''' - cm.cursor.close() - cm.cursor = None - - self._contextmanager_factory = ContextManager - - def __call__(self, **parameters): - '''Creates and returns a context manager object.''' - return self._contextmanager_factory(parameters) - def _get_gridinfo(self): # https://jira.diamond.ac.uk/browse/MXSW-1173 with _db_cc() as cursor:
diff --git a/tests/conftest.py b/tests/conftest.py index 6f3fc3f5..75a4f925 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -4,6 +4,7 @@ import os +import ispyb import pytest @pytest.fixture diff --git a/tests/test_misc.py b/tests/test_misc.py index 07977e10..522d16fb 100644 --- a/tests/test_misc.py +++ b/tests/test_misc.py @@ -3,7 +3,10 @@ import threading import context +import ispyb import ispyb.exception +import ispyb.model.__future__ +import mysql.connector.errors import pytest def test_multi_threads_upsert(testconfig): @@ -42,3 +45,45 @@ def test_retrieve_failure(testconfig): with ispyb.open(testconfig) as conn: with pytest.raises(ispyb.exception.ISPyBNoResultException): rs = conn.mx_acquisition.retrieve_data_collection_main(0) + +def test_database_reconnects_on_connection_failure(testconfig, testdb): + ispyb.model.__future__.enable(testconfig, section='ispyb_mysql_sp') + + # Create minimal data collection and data collection group for test + params = testdb.mx_acquisition.get_data_collection_group_params() + params['parentid'] = 55168 + dcgid = testdb.mx_acquisition.insert_data_collection_group(list(params.values())) + assert dcgid, "Could not create dummy data collection group" + params = testdb.mx_acquisition.get_data_collection_params() + params['parentid'] = dcgid + dcid = testdb.mx_acquisition.insert_data_collection(list(params.values())) + assert dcid, "Could not create dummy data collection" + + # Test the database connections + # This goes from DCID to DCGID using the default connection, + # and looks into the GridInfo table using the __future__ connection. + assert bool(testdb.get_data_collection(dcid).group.gridinfo) is False + + fconn = ispyb.model.__future__._db + iconn = testdb.conn + + # Break both connections from the server side + c = iconn.cursor() + with pytest.raises(mysql.connector.errors.DatabaseError): + c.execute("KILL CONNECTION_ID();") + c.close() + + c = fconn.cursor() + with pytest.raises(mysql.connector.errors.DatabaseError): + c.execute("KILL CONNECTION_ID();") + c.close() + + # Confirm both connections are broken + with pytest.raises(mysql.connector.errors.OperationalError): + iconn.cursor() + + with pytest.raises(mysql.connector.errors.OperationalError): + fconn.cursor() + + # Test implicit reconnect + assert bool(testdb.get_data_collection(dcid).group.gridinfo) is False
Protection against DB connection loss The library should handle database connection loss by catching the exception and attempting to reconnect. It could also 'ping' at set intervals to keep the connection alive.
Re-connect after a certain number of seconds of inactivity: https://github.com/DiamondLightSource/ispyb-api/commit/03056677eec33d4b53103b97a5ada471e14a9f1f
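The patch replaces the inactivity-timestamp bookkeeping with mysql-connector's built-in ping. A minimal sketch of that approach (credentials are placeholders):

```python
import mysql.connector

# Placeholder credentials, for illustration only.
conn = mysql.connector.connect(
    user="user", password="secret", host="localhost", database="ispyb"
)


def create_cursor(dictionary=False):
    # ping(reconnect=True) transparently re-establishes a dropped connection
    # before a cursor is handed out, so callers never see a stale connection.
    conn.ping(reconnect=True)
    return conn.cursor(dictionary=dictionary)
```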
2018-09-24T08:10:25
-1.0
VirologyCharite/gb2seq
12
VirologyCharite__gb2seq-12
['11']
6ae2edf0f5d5599d22b84b42d4e7fcd309f260c4
diff --git a/CHANGELOG.md b/CHANGELOG.md index 88e0d73..be3bcab 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.2.22 August 27, 2024 + +Improved dealing with features that have multiple genome ranges to fix +https://github.com/VirologyCharite/gb2seq/issues/11 + ## 0.2.21 August 10, 2023 Added `--translated` option to `bin/describe-genome.py`. This will extract all the features that are translated. diff --git a/bin/describe-feature.py b/bin/describe-feature.py index 9e37dfd..8cbd4a6 100755 --- a/bin/describe-feature.py +++ b/bin/describe-feature.py @@ -69,7 +69,8 @@ def reportGenomeFeature(features, name, alignment, maxSequenceLength, oneBased): should be reported. @param maxSequenceLength: The maximum sequence length to print. Longer sequences will be truncated. Use 0 or C{None} to skip printing sequences. - @param oneBased: If true, print one-based sites instead of zero-based offsets. + @param oneBased: A C{bool}. If true, print one-based sites instead of zero-based + offsets. """ print(f" Genome {alignment.genome.id}:") @@ -90,9 +91,11 @@ def reportGenomeFeature(features, name, alignment, maxSequenceLength, oneBased): absoluteStop = len(gappedSequence[:alignedStop].replace("-", "")) _, genomeNt = alignment.ntSequences(name, raiseOnReferenceGaps=False) - print(f" start: {absoluteStart + bool(oneBased)}") + print(f" start: {absoluteStart + oneBased}") print(f" stop: {absoluteStop}") print(f" length (nt): {len(genomeNt.sequence)}") + print(f" aligned (to ref) start: {alignedStart + oneBased}") + print(f" aligned (to ref) stop: {alignedStop}") if maxSequenceLength: print( diff --git a/gb2seq/__init__.py b/gb2seq/__init__.py index a550e5e..df61e9a 100644 --- a/gb2seq/__init__.py +++ b/gb2seq/__init__.py @@ -2,4 +2,4 @@ class Gb2SeqError(Exception): "A gb2seq library error occurred." -__version__ = "0.2.21" +__version__ = "0.2.22" diff --git a/gb2seq/alignment.py b/gb2seq/alignment.py index 9756801..7a4b667 100644 --- a/gb2seq/alignment.py +++ b/gb2seq/alignment.py @@ -12,7 +12,6 @@ from gb2seq.translate import ( translate, translateSARS2Spike, - TranslationError, TranslatedReferenceAndGenomeLengthError, ) from gb2seq.variants import VARIANTS @@ -641,7 +640,7 @@ def _getChanges( if aa else self.ntSequences(featureName) ) - except TranslationError as e: + except Gb2SeqError as e: if onError == "raise": raise elif onError == "print": diff --git a/gb2seq/features.py b/gb2seq/features.py index 042688e..86c2791 100644 --- a/gb2seq/features.py +++ b/gb2seq/features.py @@ -11,7 +11,7 @@ from importlib_resources import files, as_file -# from warnings import warn +from warnings import warn import json import argparse @@ -255,30 +255,56 @@ def _initializeFromGenBankRecord( if type_ not in alsoInclude: continue + for optional in "translation", "note": + try: + value[optional] = feature.qualifiers[optional][0] + except KeyError: + pass + start = int(feature.location.start) stop = int(feature.location.end) genomeRanges = GenomeRanges(str(feature.location)) + nRanges = len(genomeRanges.ranges) + + if nRanges == 0: + raise ValueError("No genome ranges present for feature {name!r}.") - # We can only handle a single range at the moment. 
- if len(genomeRanges.ranges) == 1: - assert start == genomeRanges.ranges[0][0] - assert stop == genomeRanges.ranges[0][1] - forward = genomeRanges.ranges[0][2] - elif self.sars2 and name == "ORF1ab polyprotein": - assert len(genomeRanges.ranges) == 2 - assert start == genomeRanges.ranges[0][0] - assert stop == genomeRanges.ranges[1][1] - forward = True + # If we just have one range, check that the given high-level start and stop + # attributes match the start and end of the range. The situation with + # multiple ranges is more complicated (e.g., the HBV polymerase of + # NC_001896.1 starts at 2309 and goes to 1637). + # + # We should probably ignore the "location" start and use the ranges. But + # then we should generalize to be more sophisticate regarding start/stop, + # translation, etc. + if nRanges == 1: + rangeStart, rangeStop = genomeRanges.ranges[0][:2] + assert start == rangeStart, ( + f"Record start offset {start} does not match first genome range " + f"start {rangeStart}." + ) + assert stop == rangeStop, ( + f"Record stop offset {stop} does not match first genome range " + f"stop {rangeStop}." + ) + + directions = set(genomeRange[2] for genomeRange in genomeRanges.ranges) + if len(directions) == 1: + # All ranges have the same orientation. + forward = directions.pop() else: - if not self.sars2: - # At some point (soon) we should emit a warning. But let's first try - # to fix things so we can translate anything. - # - # warn( - # f"Multiple reference genome ranges {genomeRanges} found " - # f"for feature {name!r} will not be translated reliably." - # ) - pass + # The genome ranges have mixed orientations. If there is no translation + # present (from a GenBank record), warn that we do not yet support + # translation for this feature (this would be easy to add - we should do + # it!). + forward = None + + if "translation" not in value: + warn( + f"The reference genome ranges {genomeRanges} " + f"for feature {name!r} do not all have the same orientation. " + f"This feature will not be translated reliably!" + ) sequence = str(record.seq)[start:stop] @@ -292,12 +318,6 @@ def _initializeFromGenBankRecord( } ) - for optional in "translation", "note": - try: - value[optional] = feature.qualifiers[optional][0] - except KeyError: - pass - # If there is a translation, add an amino acid '*' stop # indicator if there is not one already and the sequence ends # with a stop codon. diff --git a/gb2seq/sars2.py b/gb2seq/sars2.py index 344aca3..3b28e81 100644 --- a/gb2seq/sars2.py +++ b/gb2seq/sars2.py @@ -1,9 +1,69 @@ +from gb2seq import Gb2SeqError + + +class NoSlipperySequenceError(Gb2SeqError): + "No slippery sequence could be found in a genome." + + +class NoStopCodonError(Gb2SeqError): + "No stop codon was found downstream from the slippery sequence." + + +class StopCodonTooDistantError(Gb2SeqError): + "The stop codon following the slippery sequence was too far away." + + +# The maximum difference (number of nucleotides) to allow between the +# offset of the start of the slippery sequence and the downstream stop +# codon. +_MAX_DISTANCE_TO_STOP = 20 + +SLIPPERY_SEQUENCE = "TTTAAAC" + +_SLIPPERY_LEN = len(SLIPPERY_SEQUENCE) + + +def getORF1abSequence(seq): + # See Fields Virology (figure 10.6a on page 421, 7th edition or + # figure 28.7a on page 836, 6th edition) plus + # https://www.ncbi.nlm.nih.gov/nuccore/NC_045512 for details of + # what happens below. 
Note that the nucelotide sequence we are + # passed is the one that's made from the alignment with the + # reference ORF1ab nucleotide sequence (in sequence.py) and so is + # just that ORF and does not include the leading ~265 nucleotides + # of the 5' UTR. As a result, the offset used to begin the search + # for the slippery sequence is 13000, which is chosen to be a bit + # before 13462 - 265. There are various occurrences of the + # slippery sequence in the reference genome (and hence probably in + # other CoV genomes), but only one in this region and with a stop + # codon shortly (up to _MAX_DISTANCE_TO_STOP nucleotides) downstream. + offset = seq.find(SLIPPERY_SEQUENCE, 13000) + stop = seq.find("TAA", offset + _SLIPPERY_LEN) + if offset == -1: + raise NoSlipperySequenceError("No slippery sequence found.") + if stop == -1: + raise NoStopCodonError( + f"Could not find a stop codon downstream from the start of " + f"the slippery sequence at site {offset + 1}." + ) + if stop - offset > _MAX_DISTANCE_TO_STOP: + raise StopCodonTooDistantError( + f"The stop codon was too far ({stop - offset} nucleotides) " + f"downstream (max allowed distance is " + f"{_MAX_DISTANCE_TO_STOP}) from the start of the slippery " + f"sequence at site {offset + 1}." + ) + + return seq[: offset + _SLIPPERY_LEN] + seq[offset + _SLIPPERY_LEN - 1 :] + + # Provide convenient aliases for SARS-CoV-2 feature names. The alias is the # key, the canonical name (as found in the GenBank file) is the value. # # Alphanumeric feature aliases must have lower case keys. If not they will not # be detected (and the test suite will fail). + SARS_COV_2_ALIASES = { "2": "2'-O-ribose methyltransferase", "3clpro": "3C-like proteinase", diff --git a/gb2seq/translate.py b/gb2seq/translate.py index bd64534..f4e8c87 100644 --- a/gb2seq/translate.py +++ b/gb2seq/translate.py @@ -3,6 +3,7 @@ from typing import Dict, List, Optional from gb2seq import Gb2SeqError +from gb2seq.sars2 import getORF1abSequence from dark.aaVars import CODONS, STOP_CODONS from dark.reads import AARead @@ -12,18 +13,6 @@ class TranslationError(Gb2SeqError): "Error when using custom translation of sequences." -class NoSlipperySequenceError(TranslationError): - "No slippery sequence could be found in a genome." - - -class NoStopCodonError(TranslationError): - "No stop codon was found downstream from the slippery sequence." - - -class StopCodonTooDistantError(TranslationError): - "The stop codon following the slippery sequence was too far away." - - class TranslatedSequenceLengthError(TranslationError): "A sequence to be translated has an incorrect length." @@ -43,15 +32,6 @@ class TranslatedGapLengthError(TranslationError): + [("---", "-")] ) -# The maximum difference (number of nucleotides) to allow between the -# offset of the start of the slippery sequence and the downstream stop -# codon. -_MAX_DISTANCE_TO_STOP = 20 - -SLIPPERY_SEQUENCE = "TTTAAAC" - -_SLIPPERY_LEN = len(SLIPPERY_SEQUENCE) - def translate( seq: str, @@ -70,38 +50,8 @@ def translate( translate into '-' or 'X'. @return: A translated C{str} amino acid sequence. """ - if name == "ORF1ab polyprotein": - # See Fields Virology (figure 10.6a on page 421, 7th edition or - # figure 28.7a on page 836, 6th edition) plus - # https://www.ncbi.nlm.nih.gov/nuccore/NC_045512 for details of - # what happens below. 
Note that the nucelotide sequence we are - # passed is the one that's made from the alignment with the - # reference ORF1ab nucleotide sequence (in sequence.py) and so is - # just that ORF and does not include the leading ~265 nucleotides - # of the 5' UTR. As a result, the offset used to begin the search - # for the slippery sequence is 13000, which is chosen to be a bit - # before 13462 - 265. There are various occurrences of the - # slippery sequence in the reference genome (and hence probably in - # other CoV genomes), but only one in this region and with a stop - # codon shortly (up to _MAX_DISTANCE_TO_STOP nucleotides) downstream. - offset = seq.find(SLIPPERY_SEQUENCE, 13000) - stop = seq.find("TAA", offset + _SLIPPERY_LEN) - if offset == -1: - raise NoSlipperySequenceError("No slippery sequence found.") - if stop == -1: - raise NoStopCodonError( - f"Could not find a stop codon downstream from the start of " - f"the slippery sequence at site {offset + 1}." - ) - if stop - offset > _MAX_DISTANCE_TO_STOP: - raise StopCodonTooDistantError( - f"The stop codon was too far ({stop - offset} nucleotides) " - f"downstream (max allowed distance is " - f"{_MAX_DISTANCE_TO_STOP}) from the start of the slippery " - f"sequence at site {offset + 1}." - ) - - seq = seq[: offset + _SLIPPERY_LEN] + seq[offset + _SLIPPERY_LEN - 1 :] + if sars2 and name == "ORF1ab polyprotein": + seq = getORF1abSequence(seq) # Pad with 'N' to avoid a 'BiopythonWarning: Partial codon' warning. remainder = len(seq) % 3
diff --git a/test/test_alignment.py b/test/test_alignment.py index c567376..b156121 100644 --- a/test/test_alignment.py +++ b/test/test_alignment.py @@ -18,7 +18,7 @@ ) from gb2seq.change import splitChange from gb2seq.features import Features, AmbiguousFeatureError, MissingFeatureError -from gb2seq.translate import NoSlipperySequenceError +from gb2seq.sars2 import NoSlipperySequenceError from .fasta import getSequence diff --git a/test/test_translate.py b/test/test_translate.py index d928e4d..77de7f0 100644 --- a/test/test_translate.py +++ b/test/test_translate.py @@ -2,12 +2,15 @@ from unittest import TestCase from dark.reads import AARead -from gb2seq.translate import ( - KNOWN_INSERTIONS, +from gb2seq.sars2 import ( + SLIPPERY_SEQUENCE, NoSlipperySequenceError, NoStopCodonError, - SLIPPERY_SEQUENCE, StopCodonTooDistantError, +) + +from gb2seq.translate import ( + KNOWN_INSERTIONS, TranslatedReferenceAndGenomeLengthError, TranslatedSequenceLengthError, getSubstitutionsString, @@ -27,7 +30,12 @@ def testNoSlipperySequencs(self): """ error = r"^No slippery sequence found\.$" self.assertRaisesRegex( - NoSlipperySequenceError, error, translate, "AAATTT", "ORF1ab polyprotein" + NoSlipperySequenceError, + error, + translate, + "AAATTT", + name="ORF1ab polyprotein", + sars2=True, ) def testNoStopCodonFollowingTheSlipperySequence(self): @@ -41,7 +49,12 @@ def testNoStopCodonFollowingTheSlipperySequence(self): ) sequence = "A" * 13000 + SLIPPERY_SEQUENCE self.assertRaisesRegex( - NoStopCodonError, error, translate, sequence, "ORF1ab polyprotein" + NoStopCodonError, + error, + translate, + sequence, + name="ORF1ab polyprotein", + sars2=True, ) def testDistantStopCodonFollowingTheSlipperySequence(self): @@ -56,7 +69,12 @@ def testDistantStopCodonFollowingTheSlipperySequence(self): ) sequence = "A" * 13000 + SLIPPERY_SEQUENCE + "A" * 100 + "TAA" self.assertRaisesRegex( - StopCodonTooDistantError, error, translate, sequence, "ORF1ab polyprotein" + StopCodonTooDistantError, + error, + translate, + sequence, + name="ORF1ab polyprotein", + sars2=True, ) def testEmpty(self):
local variable 'forward' referenced before assignment Using the `NC_001896.1` GenBank file (fetched with the dark-matter script, but you can just download it yourself if you don't have that) `ncbi-fetch-id.py --format gb NC_001896.1 > NC_001896.1.gb`, I get this:
```sh
$ describe-feature.py --reference NC_001896.1.gb
Traceback (most recent call last):
  File "/Users/terry/charite/gb2seq/bin/describe-feature.py", line 251, in <module>
    main(args)
  File "/Users/terry/charite/gb2seq/bin/describe-feature.py", line 123, in main
    features = Features(
  File "/Users/terry/charite/gb2seq/gb2seq/features.py", line 136, in __init__
    self._initializeFromGenBankRecord(record, alsoInclude)
  File "/Users/terry/charite/gb2seq/gb2seq/features.py", line 287, in _initializeFromGenBankRecord
    "forward": forward,
UnboundLocalError: local variable 'forward' referenced before assignment
```
This occurs because HBV has multiple genome ranges specified for some genes (it is circular, so the gene offsets cross zero).
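A minimal reproduction of the failure mode and the orientation-set fix the patch applies (variable names and offsets are illustrative):

```python
# Two ranges for one gene on a circular genome, e.g. a gene crossing offset
# zero; each tuple is (start, stop, forward).
ranges = [(2309, 3182, True), (0, 1637, True)]

# The original code only assigned 'forward' in the single-range branch and in
# a SARS-CoV-2 special case, so any other multi-range feature fell through and
# using 'forward' raised UnboundLocalError.

# The fix derives the orientation from the set of strand flags across all
# ranges, falling back to None when the orientations are mixed.
directions = {forward for _start, _stop, forward in ranges}
forward = directions.pop() if len(directions) == 1 else None
print(forward)  # True
```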
2024-08-27T03:11:44
-1.0
pythonpune/linkstatus
13
pythonpune__linkstatus-13
['12']
4e5bed12ebf51dc5e375aead266716ae7ff4aa69
diff --git a/linkstatus/linkstatus.py b/linkstatus/linkstatus.py index 40e68ea..2b6f5c2 100644 --- a/linkstatus/linkstatus.py +++ b/linkstatus/linkstatus.py @@ -5,6 +5,7 @@ import click import requests +from linkstatus.parser import link_validator from linkstatus.parser import parse_file @@ -61,7 +62,7 @@ def main(source, recursive, timeout, retry): for f in files: links = parse_file(f) - + links = link_validator(links) if links: click.echo(click.style("Links in File: '{}'".format(f), bg="blue", fg="white")) diff --git a/linkstatus/parser.py b/linkstatus/parser.py index c4534c8..1c40a5e 100644 --- a/linkstatus/parser.py +++ b/linkstatus/parser.py @@ -3,10 +3,9 @@ import markdown - REGULAR_EXP = r"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+" -LINKS = namedtuple("LINKS", ["line", "urls", "skip"]) +LINKS = namedtuple("LINKS", ["line", "urls", "skip", "valid"]) def parse_line(line): @@ -42,5 +41,37 @@ def parse_file(file_path): line_links = parse_line(line) if line_links: skip = True if "noqa" in line else False - links.append(LINKS(line=line_number + 1, urls=line_links, skip=skip)) + links.append(LINKS(line=line_number + 1, urls=line_links, skip=skip, valid=False)) return links + + +def link_validator(links_list): + """Validate link + Args: + links_list: List of links. + + Return: + Named tuple of the valid and invalid links. + """ + validated_list = [] + + regex = re.compile( + r"^(?:http|ftp)s?://" # http:// or https:// + r"(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|" + # for domain + r"localhost|" # localhost... + r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})" # ...or ip + r"(?::\d+)?" # optional port + r"(?:/?|[/?]\S+)$", + re.IGNORECASE, + ) + + for link in links_list: + urls = [] + for i in link.urls: + if re.match(regex, i): + urls.append(i) + else: + validated_list.append(LINKS(line=link.line, urls=[i], valid=False, skip=True)) + validated_list.append(LINKS(line=link.line, urls=urls, skip=False, valid=True)) + return validated_list
diff --git a/tests/dir/links_markdown.md b/tests/dir/links_markdown.md index 492cf1b..075d978 100644 --- a/tests/dir/links_markdown.md +++ b/tests/dir/links_markdown.md @@ -32,3 +32,11 @@ Some text to show that the reference links can follow later. [link text itself]: http://www.reddit.com <!--noqa--> [broken link](https://github.com/pythonpune/linkstatus) + +https://github.com//pythonpune/ + +http://<hostname>:<port> + +https://<hostname>:<port>/pages + +file:///etc/hosts
A validator is needed to check links before checking reachability. I found one issue with the script: if you pass URLs like `https://<hostname>:<port>`, the status shows as `None` and the link is listed as down. This needs to be fixed: if a URL is not valid, the link should be skipped.
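The merged patch above addresses this with a syntactic pre-check before any HTTP request is made. A rough standalone sketch of the same validate-then-check idea (the regex is the Django-derived URL pattern mirrored from the diff; `split_valid` is a hypothetical helper name):

```python
import re

# Django-derived URL pattern, as used in the patch above.
URL_RE = re.compile(
    r"^(?:http|ftp)s?://"
    r"(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|"
    r"localhost|"
    r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})"
    r"(?::\d+)?"
    r"(?:/?|[/?]\S+)$",
    re.IGNORECASE,
)

def split_valid(urls):
    """Partition URLs so placeholders like https://<hostname>:<port> are skipped."""
    valid = [u for u in urls if URL_RE.match(u)]
    skipped = [u for u in urls if not URL_RE.match(u)]
    return valid, skipped

print(split_valid(["https://example.com/page", "http://<hostname>:<port>"]))
# (['https://example.com/page'], ['http://<hostname>:<port>'])
```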
2019-12-07T05:22:45
-1.0
ab5424/agility
31
ab5424__agility-31
['30']
201a875e29d5b2d15cf6e8242c636f3f2988548b
diff --git a/README.md b/README.md index 9092aeb..64f443f 100755 --- a/README.md +++ b/README.md @@ -1,5 +1,6 @@ [![Documentation Status](https://readthedocs.org/projects/agility1/badge/?version=latest)](https://agility1.readthedocs.io/en/latest/?badge=latest) [![Coverage Status](https://coveralls.io/repos/github/ab5424/agility/badge.svg?branch=main)](https://coveralls.io/github/ab5424/agility?branch=main) +[![code coverage](https://img.shields.io/codecov/c/gh/ab5424/agility)](https://codecov.io/gh/ab5424/agility) [![pre-commit.ci status](https://results.pre-commit.ci/badge/github/ab5424/agility/main.svg)](https://results.pre-commit.ci/latest/github/ab5424/agility/main) [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/ab5424/agility/HEAD)
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 3a0a3e9..efae58f 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -42,8 +42,9 @@ jobs: sudo apt update && sudo apt install -y libegl1-mesa-dev - name: pytest run: | - pytest tests + pytest --cov=agility --cov-report=xml tests - name: Upload coverage reports to Codecov uses: codecov/codecov-action@v3 + if: matrix.python-version == '3.11' && matrix.os == 'ubuntu-latest' env: CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
Fix coverage not working
2024-01-26T14:27:21
-1.0
maykinmedia/django-log-outgoing-requests
16
maykinmedia__django-log-outgoing-requests-16
['14']
3998158bd80bfb635e88dde18efbb8fceb5faf81
diff --git a/log_outgoing_requests/admin.py b/log_outgoing_requests/admin.py index a9f3b48..e9921b2 100644 --- a/log_outgoing_requests/admin.py +++ b/log_outgoing_requests/admin.py @@ -1,3 +1,5 @@ +from urllib.parse import urlparse + from django import forms from django.contrib import admin from django.utils.translation import gettext as _ @@ -12,7 +14,7 @@ class OutgoingRequestsLogAdmin(admin.ModelAdmin): list_display = ( "hostname", - "url", + "truncated_url", "params", "status_code", "method", @@ -93,6 +95,20 @@ def request_body(self, obj) -> str: def response_body(self, obj) -> str: return obj.response_body_decoded or "-" + def truncated_url(self, obj): + parsed_url = urlparse(obj.url) + path = parsed_url.path + max_length = 200 + path_length = len(path) + + if path_length <= max_length: + return path + + half_length = (max_length - 3) // 2 + left_half = path[:half_length] + right_half = path[-half_length:] + return left_half + " \u2026 " + right_half + class ConfigAdminForm(forms.ModelForm): class Meta:
diff --git a/tests/test_admin.py b/tests/test_admin.py index 283b4d9..82ee05c 100644 --- a/tests/test_admin.py +++ b/tests/test_admin.py @@ -94,3 +94,40 @@ def test_admin_override_max_content_length(requests_mock, request_mock_kwargs): request_log = OutgoingRequestsLog.objects.last() assert request_log.res_body == b"" + + [email protected]_db +def test_list_url_is_trucated_over_200_chars(admin_client): + OutgoingRequestsLog.objects.create( + id=1, + url="https://example.com/a1b2c3d4e5f6g7h8i9j0k1l2m3n4o5p6q7r8s9t1u2v3w4x5y6z1a2b3c4d5e6f7g8h9i0j1k2l3m4n5o6p7q8r9s1t2u3v4w5x6y7z/some-path-3894ndjidjd93djjd3eu9jjddu9eu93j3e39ei9idjd3ddksdj9393/some-path/some-path-as-well/skdlkdlskdksdkd9828393jdd", + timestamp=timezone.now(), + ) + url = reverse("admin:log_outgoing_requests_outgoingrequestslog_changelist") + + response = admin_client.get(url) + html = response.content.decode("utf-8") + doc = PyQuery(html) + truncated_url = doc.find(".field-truncated_url").text() + + assert ( + truncated_url + == "/a1b2c3d4e5f6g7h8i9j0k1l2m3n4o5p6q7r8s9t1u2v3w4x5y6z1a2b3c4d5e6f7g8h9i0j1k2l3m4n5o6p7q8r9s1t2u3v4w \u2026 d93djjd3eu9jjddu9eu93j3e39ei9idjd3ddksdj9393/some-path/some-path-as-well/skdlkdlskdksdkd9828393jdd" + ) + + [email protected]_db +def test_list_url_is_not_trucated_under_200_chars(admin_client): + OutgoingRequestsLog.objects.create( + id=1, + url="https://example.com/a1b2c3d4e/some-path", + timestamp=timezone.now(), + ) + url = reverse("admin:log_outgoing_requests_outgoingrequestslog_changelist") + + response = admin_client.get(url) + html = response.content.decode("utf-8") + doc = PyQuery(html) + truncated_url = doc.find(".field-truncated_url").text() + + assert truncated_url == "/a1b2c3d4e/some-path"
Cut off the URL admin list field after 200 chars. To prevent the other columns from moving out of view: ![image](https://github.com/maykinmedia/django-log-outgoing-requests/assets/96970/70909c82-80ae-4030-b123-4fbef328f10d) Ideally, you can leave out the domain (since it's already in the first column), and the URL path would be nicest if it looks like `/some/very/very/very ... /very/long/path` (so ellipses in the middle).
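The middle-ellipsis behaviour is a small string exercise; here is a minimal standalone sketch of the idea (mirroring the `truncated_url` method in the patch above, outside the Django admin for brevity):

```python
from urllib.parse import urlparse

def truncate_path(url, max_length=200):
    """Return the URL path only, shortened with an ellipsis in the middle."""
    path = urlparse(url).path
    if len(path) <= max_length:
        return path
    half = (max_length - 3) // 2  # leave room for " … "
    return path[:half] + " \u2026 " + path[-half:]

print(truncate_path("https://example.com/a1b2c3d4e/some-path"))  # unchanged
print(len(truncate_path("https://example.com/" + "x" * 300)))   # 199
```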
2023-07-28T07:46:09
-1.0
laughingman7743/BigQuery-DatasetManager
3
laughingman7743__BigQuery-DatasetManager-3
['2']
555792046ce2e7664229a54cce1f3a6bb516980a
diff --git a/README.rst b/README.rst index 3559c85..a4799df 100644 --- a/README.rst +++ b/README.rst @@ -44,7 +44,7 @@ The resource representation of the dataset is described in `YAML format`_. description: null default_table_expiration_ms: null location: US - access_grants: + access_entries: - role: OWNER entity_type: specialGroup entity_id: projectOwners @@ -62,48 +62,48 @@ See `the official documentation of BigQuery Datasets`_ for details of key names. -+---------------+-------------+-----------+---------+----------------------------------------------------------+ -| Key name | Value | Description | -+===============+=============+===========+=========+==========================================================+ -| name | str | The name of the dataset. | -+---------------+-------------+-----------+---------+----------------------------------------------------------+ -| friendly_name | str | Title of the dataset. | -+---------------+-------------+-----------+---------+----------------------------------------------------------+ -| description | str | Description of the dataset. | -+---------------+-------------+-----------+---------+----------------------------------------------------------+ -| default_table_expiration_ms | int | Default expiration time for tables in the dataset. | -+---------------+-------------+-----------+---------+----------------------------------------------------------+ -| location | str | Location in which the dataset is hosted. | -+---------------+-------------+-----------+---------+----------------------------------------------------------+ -| access_grants | seq | Roles granted to entities for this dataset. | -+---------------+-------------+-----------+---------+----------------------------------------------------------+ -| access_grants | role | str | Role granted to the entity. One of | -| | | | | -| | | | * ``OWNER`` | -| | | | * ``WRITER`` | -| | | | * ``READER`` | -| | | | | -| | | | May also be ``None`` if the ``entity_type`` is ``view``. | -+ +-------------+-----------+---------+----------------------------------------------------------+ -| | entity_type | str | Type of entity being granted the role. One of | -| | | | | -| | | | * ``userByEmail`` | -| | | | * ``groupByEmail`` | -| | | | * ``domain`` | -| | | | * ``specialGroup`` | -| | | | * ``view`` | -+ +-------------+-----------+---------+----------------------------------------------------------+ -| | entity_id | | str/map | ID of entity being granted the role. | -+ + +-----------+---------+----------------------------------------------------------+ -| | | datasetId | str | The ID of the dataset containing this table. | -| | | | | (Specified when ``entity_type`` is ``view``.) | -+ + +-----------+---------+----------------------------------------------------------+ -| | | projectId | str | The ID of the project containing this table. | -| | | | | (Specified when ``entity_type`` is ``view``.) | -+ + +-----------+---------+----------------------------------------------------------+ -| | | tableId | str | The ID of the table. | -| | | | | (Specified when ``entity_type`` is ``view``.) | -+---------------+-------------+-----------+---------+----------------------------------------------------------+ ++----------------+-------------+-----------+---------+----------------------------------------------------------+ +| Key name | Value | Description | ++================+=============+===========+=========+==========================================================+ +| name | str | The name of the dataset. | ++----------------+-------------+-----------+---------+----------------------------------------------------------+ +| friendly_name | str | Title of the dataset. | ++----------------+-------------+-----------+---------+----------------------------------------------------------+ +| description | str | Description of the dataset. | ++----------------+-------------+-----------+---------+----------------------------------------------------------+ +| default_table_expiration_ms | int | Default expiration time for tables in the dataset. | ++----------------+-------------+-----------+---------+----------------------------------------------------------+ +| location | str | Location in which the dataset is hosted. | ++----------------+-------------+-----------+---------+----------------------------------------------------------+ +| access_entries | seq | Represent grant of an access role to an entity. | ++----------------+-------------+-----------+---------+----------------------------------------------------------+ +| access_entries | role | str | Role granted to the entity. One of | +| | | | | +| | | | * ``OWNER`` | +| | | | * ``WRITER`` | +| | | | * ``READER`` | +| | | | | +| | | | May also be ``None`` if the ``entity_type`` is ``view``. | ++ +-------------+-----------+---------+----------------------------------------------------------+ +| | entity_type | str | Type of entity being granted the role. One of | +| | | | | +| | | | * ``userByEmail`` | +| | | | * ``groupByEmail`` | +| | | | * ``domain`` | +| | | | * ``specialGroup`` | +| | | | * ``view`` | ++ +-------------+-----------+---------+----------------------------------------------------------+ +| | entity_id | | str/map | ID of entity being granted the role. | ++ + +-----------+---------+----------------------------------------------------------+ +| | | datasetId | str | The ID of the dataset containing this table. | +| | | | | (Specified when ``entity_type`` is ``view``.) | ++ + +-----------+---------+----------------------------------------------------------+ +| | | projectId | str | The ID of the project containing this table. | +| | | | | (Specified when ``entity_type`` is ``view``.) | ++ + +-----------+---------+----------------------------------------------------------+ +| | | tableId | str | The ID of the table. | +| | | | | (Specified when ``entity_type`` is ``view``.) | ++----------------+-------------+-----------+---------+----------------------------------------------------------+ .. _`the official documentation of BigQuery Datasets`: https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets diff --git a/bqdm/cli.py b/bqdm/cli.py index 9f2a945..5405243 100755 --- a/bqdm/cli.py +++ b/bqdm/cli.py @@ -12,7 +12,7 @@ import bqdm.message as msg from bqdm import CONTEXT_SETTINGS from bqdm.action import DatasetAction -from bqdm.model import BigQueryDataset, BigQueryAccessGrant +from bqdm.model import BigQueryDataset, BigQueryAccessEntry from bqdm.util import list_local_datasets, ordered_dict_constructor, str_representer @@ -24,7 +24,7 @@ yaml.add_representer(str, str_representer) yaml.add_representer(unicode, str_representer) yaml.add_representer(BigQueryDataset, BigQueryDataset.represent) -yaml.add_representer(BigQueryAccessGrant, BigQueryAccessGrant.represent) +yaml.add_representer(BigQueryAccessEntry, BigQueryAccessEntry.represent) yaml.add_constructor('tag:yaml.org,2002:map', ordered_dict_constructor) diff --git a/bqdm/model.py b/bqdm/model.py index c835def..c93266d 100644 --- a/bqdm/model.py +++ b/bqdm/model.py @@ -3,51 +3,51 @@ from collections import OrderedDict from future.utils import iteritems -from google.cloud.bigquery.dataset import Dataset, AccessGrant +from google.cloud.bigquery.dataset import Dataset, AccessEntry class BigQueryDataset(object): def __init__(self, name, friendly_name, description, - default_table_expiration_ms, location, access_grants): + default_table_expiration_ms, location, access_entries): self.name = name self.friendly_name = friendly_name self.description = description self.default_table_expiration_ms = default_table_expiration_ms self.location = location - self.access_grants = access_grants + self.access_entries = access_entries @staticmethod def from_dict(value): - access_grants = value.get('access_grants', None) - if access_grants: - access_grants = [BigQueryAccessGrant.from_dict(a) for a in access_grants] + access_entries = value.get('access_entries', None) + if access_entries: + access_entries = [BigQueryAccessEntry.from_dict(a) for a in access_entries] return BigQueryDataset(value.get('name', None), value.get('friendly_name', None), value.get('description', None), value.get('default_table_expiration_ms', None), value.get('location', None), - access_grants) + access_entries) @staticmethod def from_dataset(value): value.reload() - access_grants = value.access_grants - if access_grants: - access_grants = [BigQueryAccessGrant.from_access_grant(a) for a in access_grants] + access_entries = value.access_entries + if access_entries: + access_entries = [BigQueryAccessEntry.from_access_entry(a) for a in access_entries] return BigQueryDataset(value.name, value.friendly_name, value.description, value.default_table_expiration_ms, value.location, - access_grants) + access_entries) @staticmethod def to_dataset(client, value): - access_grants = value.access_grants - if access_grants: - access_grants = tuple([BigQueryAccessGrant.to_access_grant(a) for a in access_grants]) - dataset = Dataset(value.name, client, access_grants) + access_entries = value.access_entries + if access_entries: + access_entries = tuple([BigQueryAccessEntry.to_access_entry(a) for a in access_entries]) + dataset = Dataset(value.name, client, access_entries) dataset.friendly_name = value.friendly_name dataset.description = value.description dataset.default_table_expiration_ms = value.default_table_expiration_ms @@ -64,7 +64,7 @@ def represent(dumper, value): ('description', value.description), ('default_table_expiration_ms', value.default_table_expiration_ms), ('location', value.location), - ('access_grants', value.access_grants), + ('access_entries', value.access_entries), ) ) @@ -74,8 +74,8 @@ def _key(self): self.description, self.default_table_expiration_ms, self.location, - frozenset(self.access_grants) if self.access_grants is not None - else self.access_grants,) + frozenset(self.access_entries) if self.access_entries is not None - else self.access_entries,) def __eq__(self, other): if not isinstance(other, BigQueryDataset): @@ -92,7 +92,7 @@ def __repr__(self): return 'BigQueryDataset{0}'.format(self._key()) -class BigQueryAccessGrant(object): +class BigQueryAccessEntry(object): def __init__(self, role, entity_type, entity_id): self.role = role @@ -101,21 +101,21 @@ def __init__(self, role, entity_type, entity_id): @staticmethod def from_dict(value): - return BigQueryAccessGrant( + return BigQueryAccessEntry( value.get('role', None), value.get('entity_type', None), value.get('entity_id', None),) @staticmethod - def from_access_grant(value): - return BigQueryAccessGrant( + def from_access_entry(value): + return BigQueryAccessEntry( value.role, value.entity_type, value.entity_id,) @staticmethod - def to_access_grant(value): - return AccessGrant(value.role, value.entity_type, value.entity_id) + def to_access_entry(value): + return AccessEntry(value.role, value.entity_type, value.entity_id) @staticmethod def represent(dumper, value): @@ -135,7 +135,7 @@ def _key(self): if isinstance(self.entity_id, (dict, OrderedDict,)) else self.entity_id,) def __eq__(self, other): - if not isinstance(other, BigQueryAccessGrant): + if not isinstance(other, BigQueryAccessEntry): return NotImplemented return self._key() == other._key() @@ -146,4 +146,4 @@ def __hash__(self): return hash(self._key()) def __repr__(self): - return 'BigQueryAccessGrant{0}'.format(self._key()) + return 'BigQueryAccessEntry{0}'.format(self._key()) diff --git a/setup.py b/setup.py index 6808225..742814f 100755 --- a/setup.py +++ b/setup.py @@ -30,7 +30,7 @@ 'future', 'click>=6.0', 'PyYAML>=3.12', - 'google-cloud-bigquery>=0.27.0', + 'google-cloud-bigquery==0.28.0', ], tests_require=[ 'pytest',
diff --git a/tests/__init__.py b/tests/__init__.py index 688a9d5..95bd52a 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -2,10 +2,10 @@ import yaml from bqdm.util import ordered_dict_constructor, str_representer -from bqdm.model import BigQueryDataset, BigQueryAccessGrant +from bqdm.model import BigQueryDataset, BigQueryAccessEntry yaml.add_representer(str, str_representer) yaml.add_representer(BigQueryDataset, BigQueryDataset.represent) -yaml.add_representer(BigQueryAccessGrant, BigQueryAccessGrant.represent) +yaml.add_representer(BigQueryAccessEntry, BigQueryAccessEntry.represent) yaml.add_constructor('tag:yaml.org,2002:map', ordered_dict_constructor) diff --git a/tests/test_action.py b/tests/test_action.py index ad52c1b..16b2cd9 100644 --- a/tests/test_action.py +++ b/tests/test_action.py @@ -2,7 +2,7 @@ import unittest from bqdm.action import DatasetAction -from bqdm.model import BigQueryDataset, BigQueryAccessGrant +from bqdm.model import BigQueryDataset, BigQueryAccessEntry class TestAction(unittest.TestCase): @@ -75,7 +75,7 @@ def test_get_change_datasets(self): 24 * 30 * 60 * 1000, 'US', [ - BigQueryAccessGrant( + BigQueryAccessEntry( None, 'view', { @@ -149,7 +149,7 @@ def test_get_destroy_datasets(self): 24 * 30 * 60 * 1000, 'US', [ - BigQueryAccessGrant( + BigQueryAccessEntry( None, 'view', { @@ -223,7 +223,7 @@ def test_get_intersection_datasets(self): 24 * 30 * 60 * 1000, 'US', [ - BigQueryAccessGrant( + BigQueryAccessEntry( None, 'view', { diff --git a/tests/test_model.py b/tests/test_model.py index 653d826..032c42b 100644 --- a/tests/test_model.py +++ b/tests/test_model.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- import unittest -from bqdm.model import BigQueryDataset, BigQueryAccessGrant +from bqdm.model import BigQueryDataset, BigQueryAccessEntry class TestModel(unittest.TestCase): @@ -44,18 +44,18 @@ def test_eq_dataset(self): self.assertNotEqual(dataset1, dataset4) self.assertNotEqual(dataset3, dataset4) - def test_eq_dataset_with_access_grant(self): - access_grant1 = BigQueryAccessGrant( + def test_eq_dataset_with_access_entry(self): + access_entry1 = BigQueryAccessEntry( 'OWNER', 'specialGroup', 'projectOwners' ) - access_grant2 = BigQueryAccessGrant( + access_entry2 = BigQueryAccessEntry( 'OWNER', 'specialGroup', 'projectOwners' ) - access_grant3 = BigQueryAccessGrant( + access_entry3 = BigQueryAccessEntry( None, 'view', { @@ -65,69 +65,69 @@ def test_eq_dataset_with_access_grant(self): } ) - dataset_with_access_grant1 = BigQueryDataset( + dataset_with_access_entry1 = BigQueryDataset( 'test', 'test_friendly_name', 'test_description', 24 * 30 * 60 * 1000, 'US', - [access_grant1] + [access_entry1] ) - dataset_with_access_grant2 = BigQueryDataset( + dataset_with_access_entry2 = BigQueryDataset( 'test', 'test_friendly_name', 'test_description', 24 * 30 * 60 * 1000, 'US', - [access_grant2] + [access_entry2] ) - dataset_with_access_grant3 = BigQueryDataset( + dataset_with_access_entry3 = BigQueryDataset( 'test', 'test_friendly_name', 'test_description', 24 * 30 * 60 * 1000, 'US', - [access_grant3] + [access_entry3] ) - dataset_with_access_grant4 = BigQueryDataset( + dataset_with_access_entry4 = BigQueryDataset( 'foo', 'bar', 'test_description', 24 * 30 * 60 * 1000, 'US', - [access_grant1] + [access_entry1] ) - dataset_with_access_grant5 = BigQueryDataset( + dataset_with_access_entry5 = BigQueryDataset( 'foo', 'bar', 'test_description', 24 * 30 * 60 * 1000, 'US', - [access_grant1, access_grant3] + [access_entry1, access_entry3] ) - dataset_with_access_grant6 = BigQueryDataset( + dataset_with_access_entry6 = BigQueryDataset( 'foo', 'bar', 'test_description', 24 * 30 * 60 * 1000, 'US', - [access_grant3, access_grant1] + [access_entry3, access_entry1] ) - dataset_with_access_grant7 = BigQueryDataset( + dataset_with_access_entry7 = BigQueryDataset( 'foo', 'bar', 'test_description', 24 * 30 * 60 * 1000, 'US', - [access_grant1, access_grant2] + [access_entry1, access_entry2] ) - self.assertEqual(dataset_with_access_grant1, dataset_with_access_grant2) - self.assertNotEqual(dataset_with_access_grant1, dataset_with_access_grant3) - self.assertNotEqual(dataset_with_access_grant1, dataset_with_access_grant4) - self.assertNotEqual(dataset_with_access_grant3, dataset_with_access_grant4) - self.assertEqual(dataset_with_access_grant5, dataset_with_access_grant6) - self.assertEqual(dataset_with_access_grant4, dataset_with_access_grant7) - self.assertNotEqual(dataset_with_access_grant6, dataset_with_access_grant7) + self.assertEqual(dataset_with_access_entry1, dataset_with_access_entry2) + self.assertNotEqual(dataset_with_access_entry1, dataset_with_access_entry3) + self.assertNotEqual(dataset_with_access_entry1, dataset_with_access_entry4) + self.assertNotEqual(dataset_with_access_entry3, dataset_with_access_entry4) + self.assertEqual(dataset_with_access_entry5, dataset_with_access_entry6) + self.assertEqual(dataset_with_access_entry4, dataset_with_access_entry7) + self.assertNotEqual(dataset_with_access_entry6, dataset_with_access_entry7) def test_dataset_from_dict(self): dataset = BigQueryDataset( @@ -144,7 +144,7 @@ def test_dataset_from_dict(self): 'description': 'test_description', 'default_table_expiration_ms': 24 * 30 * 60 * 1000, 'location': 'US', - 'access_grants': None + 'access_entris': None }) dataset_from_dict2 = BigQueryDataset.from_dict({ 'name': 'test', @@ -152,7 +152,7 @@ def test_dataset_from_dict(self): 'description': 'test_description', 'default_table_expiration_ms': None, 'location': 'US', - 'access_grants': None + 'access_entries': None }) dataset_from_dict3 = BigQueryDataset.from_dict({ 'name': 'foo', @@ -160,19 +160,19 @@ def test_dataset_from_dict(self): 'description': 'test_description', 'default_table_expiration_ms': None, 'location': 'US', - 'access_grants': None + 'access_entries': None }) self.assertEqual(dataset, dataset_from_dict1) self.assertNotEqual(dataset, dataset_from_dict2) self.assertNotEqual(dataset, dataset_from_dict3) - def test_dataset_from_dict_with_access_grant(self): - access_grant1 = BigQueryAccessGrant( + def test_dataset_from_dict_with_access_entry(self): + access_entry1 = BigQueryAccessEntry( 'OWNER', 'specialGroup', 'projectOwners' ) - access_grant2 = BigQueryAccessGrant( + access_entry2 = BigQueryAccessEntry( None, 'view', { @@ -182,45 +182,45 @@ def test_dataset_from_dict_with_access_grant(self): } ) - dataset_with_access_grant1 = BigQueryDataset( + dataset_with_access_entry1 = BigQueryDataset( 'test', 'test_friendly_name', 'test_description', 24 * 30 * 60 * 1000, 'US', - [access_grant1] + [access_entry1] ) - dataset_with_access_grant2 = BigQueryDataset( + dataset_with_access_entry2 = BigQueryDataset( 'test', 'test_friendly_name', 'test_description', 24 * 30 * 60 * 1000, 'US', - [access_grant2] + [access_entry2] ) - dataset_with_access_grant3 = BigQueryDataset( + dataset_with_access_entry3 = BigQueryDataset( 'test', 'test_friendly_name', 'test_description', 24 * 30 * 60 * 1000, 'US', - [access_grant1, access_grant2] + [access_entry1, access_entry2] ) - dataset_with_access_grant4 = BigQueryDataset( + dataset_with_access_entry4 = BigQueryDataset( 'test', 'test_friendly_name', 'test_description', 24 * 30 * 60 * 1000, 'US', - [access_grant1, access_grant1] + [access_entry1, access_entry1] ) - dataset_with_access_grant_from_dict1 = BigQueryDataset.from_dict({ + dataset_with_access_entry_from_dict1 = BigQueryDataset.from_dict({ 'name': 'test', 'friendly_name': 'test_friendly_name', 'description': 'test_description', 'default_table_expiration_ms': 24 * 30 * 60 * 1000, 'location': 'US', - 'access_grants': [ + 'access_entries': [ { 'role': 'OWNER', 'entity_type': 'specialGroup', @@ -228,13 +228,13 @@ def test_dataset_from_dict_with_access_grant(self): } ] }) - dataset_with_access_grant_from_dict2 = BigQueryDataset.from_dict({ + dataset_with_access_entry_from_dict2 = BigQueryDataset.from_dict({ 'name': 'test', 'friendly_name': 'test_friendly_name', 'description': 'test_description', 'default_table_expiration_ms': 24 * 30 * 60 * 1000, 'location': 'US', - 'access_grants': [ + 'access_entries': [ { 'role': None, 'entity_type': 'view', @@ -246,13 +246,13 @@ def test_dataset_from_dict_with_access_grant(self): } ] }) - dataset_with_access_grant_from_dict3 = BigQueryDataset.from_dict({ + dataset_with_access_entry_from_dict3 = BigQueryDataset.from_dict({ 'name': 'test', 'friendly_name': 'test_friendly_name', 'description': 'test_description', 'default_table_expiration_ms': 24 * 30 * 60 * 1000, 'location': 'US', - 'access_grants': [ + 'access_entries': [ { 'role': 'OWNER', 'entity_type': 'specialGroup', @@ -269,13 +269,13 @@ def test_dataset_from_dict_with_access_grant(self): } ] }) - dataset_with_access_grant_from_dict4 = BigQueryDataset.from_dict({ + dataset_with_access_entry_from_dict4 = BigQueryDataset.from_dict({ 'name': 'test', 'friendly_name': 'test_friendly_name', 'description': 'test_description', 'default_table_expiration_ms': 24 * 30 * 60 * 1000, 'location': 'US', - 'access_grants': [ + 'access_entries': [ { 'role': 'OWNER', 'entity_type': 'specialGroup', @@ -288,15 +288,15 @@ def test_dataset_from_dict_with_access_grant(self): } ] }) - self.assertEqual(dataset_with_access_grant1, dataset_with_access_grant_from_dict1) - self.assertNotEqual(dataset_with_access_grant1, dataset_with_access_grant_from_dict2) - self.assertNotEqual(dataset_with_access_grant1, dataset_with_access_grant_from_dict3) - self.assertEqual(dataset_with_access_grant2, dataset_with_access_grant_from_dict2) - self.assertNotEqual(dataset_with_access_grant2, dataset_with_access_grant_from_dict1) - self.assertNotEqual(dataset_with_access_grant2, dataset_with_access_grant_from_dict3) - self.assertEqual(dataset_with_access_grant3, dataset_with_access_grant_from_dict3) - self.assertNotEqual(dataset_with_access_grant3, dataset_with_access_grant_from_dict1) - self.assertNotEqual(dataset_with_access_grant3, dataset_with_access_grant_from_dict2) - self.assertEqual(dataset_with_access_grant4, dataset_with_access_grant_from_dict4) - self.assertEqual(dataset_with_access_grant1, dataset_with_access_grant_from_dict4) - self.assertEqual(dataset_with_access_grant4, dataset_with_access_grant_from_dict1) + self.assertEqual(dataset_with_access_entry1, dataset_with_access_entry_from_dict1) + self.assertNotEqual(dataset_with_access_entry1, dataset_with_access_entry_from_dict2) + self.assertNotEqual(dataset_with_access_entry1, dataset_with_access_entry_from_dict3) + self.assertEqual(dataset_with_access_entry2, dataset_with_access_entry_from_dict2) + self.assertNotEqual(dataset_with_access_entry2, dataset_with_access_entry_from_dict1) + self.assertNotEqual(dataset_with_access_entry2, dataset_with_access_entry_from_dict3) + self.assertEqual(dataset_with_access_entry3, dataset_with_access_entry_from_dict3) + self.assertNotEqual(dataset_with_access_entry3, dataset_with_access_entry_from_dict1) + self.assertNotEqual(dataset_with_access_entry3, dataset_with_access_entry_from_dict2) + self.assertEqual(dataset_with_access_entry4, dataset_with_access_entry_from_dict4) + self.assertEqual(dataset_with_access_entry1, dataset_with_access_entry_from_dict4) + self.assertEqual(dataset_with_access_entry4, dataset_with_access_entry_from_dict1) diff --git a/tests/test_util.py b/tests/test_util.py index 618d9b0..c609b81 100644 --- a/tests/test_util.py +++ b/tests/test_util.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- import unittest -from bqdm.model import BigQueryDataset, BigQueryAccessGrant +from bqdm.model import BigQueryDataset, BigQueryAccessEntry from bqdm.util import dump_dataset @@ -22,11 +22,11 @@ def test_dump_dataset(self): description: test_description default_table_expiration_ms: 43200000 location: US -access_grants: null +access_entries: null """ self.assertEqual(actual_dump_data1, expected_dump_data1) - access_grant2 = BigQueryAccessGrant( + access_entry2 = BigQueryAccessEntry( 'OWNER', 'specialGroup', 'projectOwners' @@ -37,7 +37,7 @@ def test_dump_dataset(self): 'test_description', 24 * 30 * 60 * 1000, 'US', - [access_grant2] + [access_entry2] ) actual_dump_data2 = dump_dataset(dataset2) expected_dump_data2 = """name: test2 @@ -45,14 +45,14 @@ def test_dump_dataset(self): description: test_description default_table_expiration_ms: 43200000 location: US -access_grants: +access_entries: - role: OWNER entity_type: specialGroup entity_id: projectOwners """ self.assertEqual(actual_dump_data2, expected_dump_data2) - access_grant3 = BigQueryAccessGrant( + access_entry3 = BigQueryAccessEntry( None, 'view', { @@ -67,7 +67,7 @@ def test_dump_dataset(self): 'test_description', 24 * 30 * 60 * 1000, 'US', - [access_grant3] + [access_entry3] ) actual_dump_data3 = dump_dataset(dataset3) expected_dump_data3 = """name: test3 @@ -75,7 +75,7 @@ def test_dump_dataset(self): description: test_description default_table_expiration_ms: 43200000 location: US -access_grants: +access_entries: - role: null entity_type: view entity_id: @@ -91,7 +91,7 @@ def test_dump_dataset(self): 'test_description', 24 * 30 * 60 * 1000, 'US', - [access_grant2, access_grant3] + [access_entry2, access_entry3] ) actual_dump_data4 = dump_dataset(dataset4) expected_dump_data4 = """name: test4 @@ -99,7 +99,7 @@ def test_dump_dataset(self): description: test_description default_table_expiration_ms: 43200000 location: US -access_grants: +access_entries: - role: OWNER entity_type: specialGroup entity_id: projectOwners
Migrating to Python Client Library v0.28 https://github.com/GoogleCloudPlatform/google-cloud-python/releases/tag/bigquery-0.28.0
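For code that must straddle the 0.27/0.28 boundary during this migration, a guarded import is one option. A hedged sketch (assumption: only the `AccessGrant` → `AccessEntry` rename matters to the caller; the 0.28 release notes list further API changes, and the merged patch instead pins `google-cloud-bigquery==0.28.0`):

```python
# Compatibility shim for the rename this issue tracks; illustrative only.
try:
    from google.cloud.bigquery.dataset import AccessEntry
except ImportError:  # pre-0.28 client still exposes AccessGrant
    from google.cloud.bigquery.dataset import AccessGrant as AccessEntry

owner = AccessEntry("OWNER", "specialGroup", "projectOwners")
print(owner)
```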
2017-11-03T05:20:26
-1.0
CraveFood/django-duprequests
2
CraveFood__django-duprequests-2
['1']
5ffa4d96104bc75816dc0bcbb1dbf20430701974
diff --git a/LICENSE b/LICENSE index 45fb8ff..8e54ac2 100644 --- a/LICENSE +++ b/LICENSE @@ -28,4 +28,3 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/duprequests/middleware.py b/duprequests/middleware.py index 674633e..07c5bf7 100644 --- a/duprequests/middleware.py +++ b/duprequests/middleware.py @@ -1,41 +1,66 @@ - -from uuid import uuid4 +from uuid import uuid4, uuid5, NAMESPACE_DNS +from urllib.parse import urlencode from django.conf import settings from django.core.cache import caches from django.http.response import HttpResponseNotModified + try: from django.utils.deprecation import MiddlewareMixin -except ImportError: # pragma: nocover +except ImportError: # pragma: nocover MiddlewareMixin = object -CACHE_NAME = getattr(settings, 'DUPLICATED_REQUESTS_CACHE_NAME', 'default') -CACHE_TIMEOUT = getattr(settings, 'DUPLICATED_REQUESTS_CACHE_TIMEOUT', 5) -COOKIE_NAME = getattr(settings, 'DUPLICATED_REQUESTS_COOKIE_NAME', - 'dj-request-id') -COOKIE_PREFIX = getattr(settings, 'DUPLICATED_REQUESTS_COOKIE_PREFIX', - 'request-id-') +CACHE_NAME = getattr(settings, "DUPLICATED_REQUESTS_CACHE_NAME", "default") +CACHE_TIMEOUT = getattr(settings, "DUPLICATED_REQUESTS_CACHE_TIMEOUT", 5) +COOKIE_NAME = getattr(settings, "DUPLICATED_REQUESTS_COOKIE_NAME", "dj-request-id") +COOKIE_PREFIX = getattr(settings, "DUPLICATED_REQUESTS_COOKIE_PREFIX", "request-id-") class DropDuplicatedRequests(MiddlewareMixin): - """Middleware that drops requests made in quick succession. + """ + Middleware that drops requests made in quick succession. + Uses Django's caching system to check/save each request. + """ - Uses Django's caching system to check/save each request.""" + def _get_request_hash(self, request): + """ + Generates a unique key based on request path, method, body and arguments + """ + hash_value = uuid5( + NAMESPACE_DNS, + request.path_info + + "--" + + request.method.lower() + + "--" + + urlencode(request.GET) + + "--" + + request.body.decode("utf-8"), + ).node + return str(hash_value) def process_request(self, request): - if not request.method.lower() in ('post', 'put', 'delete', 'patch'): + """ + Stores a unique key per request in the cache, if it already exists, returns 304 + """ + if not request.method.lower() in ("post", "put", "delete", "patch"): return - cache_key = request.COOKIES.get(COOKIE_NAME) - if not cache_key: + cookie_value = request.COOKIES.get(COOKIE_NAME) + if not cookie_value: return + cache_key = cookie_value + self._get_request_hash(request) + cache = caches[CACHE_NAME] if cache_key in cache: return HttpResponseNotModified() cache.set(cache_key, True, CACHE_TIMEOUT) def process_response(self, request, response): + """ + Sends a cookie with a unique hash to identify requests that are the same + but from different sources + """ response.set_cookie(COOKIE_NAME, COOKIE_PREFIX + uuid4().hex) return response
diff --git a/duprequests/tests.py b/duprequests/tests.py index ecdfe73..62af352 100644 --- a/duprequests/tests.py +++ b/duprequests/tests.py @@ -1,5 +1,6 @@ - from unittest import TestCase +from uuid import uuid5, NAMESPACE_DNS +from urllib.parse import urlencode from django.conf import settings from django.core.cache import caches @@ -10,9 +11,8 @@ from .middleware import DropDuplicatedRequests -CACHE_NAME = getattr(settings, 'DUPLICATED_REQUESTS_CACHE_NAME', 'default') -COOKIE_NAME = getattr(settings, 'DUPLICATED_REQUESTS_COOKIE_NAME', - 'dj-request-id') +CACHE_NAME = getattr(settings, "DUPLICATED_REQUESTS_CACHE_NAME", "default") +COOKIE_NAME = getattr(settings, "DUPLICATED_REQUESTS_COOKIE_NAME", "dj-request-id") class TestDropDuplicatedRequests(TestCase): @@ -24,7 +24,7 @@ def tearDown(self): cache = caches[CACHE_NAME] cache.clear() - def _call_view_using_middleware(self, method, set_cookie=True): + def _call_view_using_middleware(self, method, set_cookie=True, path="/", body={}): class TestView(View): def get(self, request): return HttpResponse() @@ -32,9 +32,9 @@ def get(self, request): put = post = patch = delete = get # Get a new request and process it using middleware - request = getattr(self.factory, method)('/') + request = getattr(self.factory, method)(path, body) if set_cookie: - request.COOKIES[COOKIE_NAME] = 'not-so-unique-id' + request.COOKIES[COOKIE_NAME] = "not-so-unique-id" response = self.middleware.process_request(request) if response is None: @@ -42,39 +42,81 @@ def get(self, request): return self.middleware.process_response(request, response) def test_double_get(self): - response_1 = self._call_view_using_middleware('get') + response_1 = self._call_view_using_middleware("get") self.assertEqual(response_1.status_code, 200) - response_2 = self._call_view_using_middleware('get') + response_2 = self._call_view_using_middleware("get") self.assertEqual(response_2.status_code, 200) def test_double_post(self): - response_1 = self._call_view_using_middleware('post') + response_1 = self._call_view_using_middleware("post", body={"a": "a"}) self.assertEqual(response_1.status_code, 200) - response_2 = self._call_view_using_middleware('post') + response_2 = self._call_view_using_middleware("post", body={"a": "a"}) self.assertEqual(response_2.status_code, 304) def test_double_post_without_cookie(self): - response_1 = self._call_view_using_middleware('post', False) + response_1 = self._call_view_using_middleware("post", False) self.assertEqual(response_1.status_code, 200) - response_2 = self._call_view_using_middleware('post', False) + response_2 = self._call_view_using_middleware("post", False) self.assertEqual(response_2.status_code, 200) def test_double_put(self): - response_1 = self._call_view_using_middleware('put') + response_1 = self._call_view_using_middleware("put") self.assertEqual(response_1.status_code, 200) - response_2 = self._call_view_using_middleware('put') + response_2 = self._call_view_using_middleware("put") self.assertEqual(response_2.status_code, 304) def test_double_patch(self): - response_1 = self._call_view_using_middleware('patch') + response_1 = self._call_view_using_middleware("patch") self.assertEqual(response_1.status_code, 200) - response_2 = self._call_view_using_middleware('patch') + response_2 = self._call_view_using_middleware("patch") self.assertEqual(response_2.status_code, 304) def test_double_delete(self): - response_1 = self._call_view_using_middleware('delete') + response_1 = self._call_view_using_middleware("delete") + self.assertEqual(response_1.status_code, 200) + response_2 = self._call_view_using_middleware("delete") + self.assertEqual(response_2.status_code, 304) + + def test_double_requests_different_method(self): + response_1 = self._call_view_using_middleware("patch") + self.assertEqual(response_1.status_code, 200) + response_2 = self._call_view_using_middleware("put") + self.assertEqual(response_2.status_code, 200) + + def test_double_requests_different_path(self): + response_1 = self._call_view_using_middleware("put", path="/123") + self.assertEqual(response_1.status_code, 200) + response_2 = self._call_view_using_middleware("put", path="/456") + self.assertEqual(response_2.status_code, 200) + + def test_double_requests_same_path(self): + response_1 = self._call_view_using_middleware("put", path="/123") + self.assertEqual(response_1.status_code, 200) + response_2 = self._call_view_using_middleware("put", path="/123") + self.assertEqual(response_2.status_code, 304) + + def test_double_requests_different_get_params(self): + response_1 = self._call_view_using_middleware("put", path="/?a=123") + self.assertEqual(response_1.status_code, 200) + response_2 = self._call_view_using_middleware("put", path="/?a=456") + self.assertEqual(response_2.status_code, 200) + + def test_double_requests_same_get_params(self): + response_1 = self._call_view_using_middleware("put", path="/?a=123") + self.assertEqual(response_1.status_code, 200) + response_2 = self._call_view_using_middleware("put", path="/?a=123") + self.assertEqual(response_2.status_code, 304) + + def test_double_requests_different_body(self): + response_1 = self._call_view_using_middleware("put", body={"a": "b"}) + self.assertEqual(response_1.status_code, 200) + response_2 = self._call_view_using_middleware("put", body={"a": "c"}) + self.assertEqual(response_2.status_code, 200) + + def test_double_requests_same_body(self): + response_1 = self._call_view_using_middleware("put", body={"a": "b"}) self.assertEqual(response_1.status_code, 200) - response_2 = self._call_view_using_middleware('delete') + response_2 = self._call_view_using_middleware("put", body={"a": "b"}) self.assertEqual(response_2.status_code, 304) def test_set_cookie(self):
Allow multiple requests at the same time if they are for different paths Discussion at https://cravefoodsystems.slack.com/archives/C0FLWNK0D/p1585835789007900 ## Context This middleware is responsible for dropping a second/duplicated request. For example, if a client makes two consecutive requests to the same URL, the second should not be processed: 1st: POST /v1/account/user/ -> Returns HTTP 200 2nd: POST /v1/account/user/ -> Returns HTTP 304 ## Current Behavior The middleware does not differentiate URLs, so this happens: 1st: PUT /v1/account/user/123 -> Returns HTTP 200 2nd: PUT /v1/account/user/345 -> Returns HTTP 304 Note there are 2 different URLs, so the second should be processed correctly, not return 304. Real example: ![image](https://user-images.githubusercontent.com/9268203/89820417-5c791980-db23-11ea-9661-f3e1b5478318.png) ## Expected Behavior I should be able to make two consecutive requests to different URLs: 1st: PUT /v1/account/user/123 -> Returns HTTP 200 2nd: PUT /v1/account/user/345 -> Returns HTTP 200
I have a branch with an initial solution I started 3 months ago: https://github.com/CraveFood/django-duprequests/tree/fix-multiple-requests-per-path Whoever picks this issue, let me know so we can have a chat ;) We should also consider the same URLs with different payloads: ![2020-08-11 09 46 51](https://user-images.githubusercontent.com/9268203/90019386-b7765200-dc84-11ea-9246-115da451b024.gif)
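The merged middleware (see the diff above) resolves this by folding path, method, query string, and body into the cache key, so only genuinely identical requests collide. A minimal standalone sketch of that fingerprinting idea (`request_fingerprint` is a hypothetical helper; the merged code does the equivalent inside `DropDuplicatedRequests._get_request_hash`):

```python
from urllib.parse import urlencode
from uuid import NAMESPACE_DNS, uuid5

def request_fingerprint(cookie_value, method, path, query=None, body=b""):
    """Build a cache key that only collides for truly identical requests."""
    parts = "--".join(
        [path, method.lower(), urlencode(query or {}), body.decode("utf-8")]
    )
    return cookie_value + str(uuid5(NAMESPACE_DNS, parts).node)

a = request_fingerprint("cookie-1", "PUT", "/v1/account/user/123")
b = request_fingerprint("cookie-1", "PUT", "/v1/account/user/345")
print(a != b)  # True: different paths no longer share a cache key
```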
2020-08-18T14:06:54
-1.0