diff --git a/data/.editorconfig b/data/.editorconfig new file mode 100644 index 0000000000000000000000000000000000000000..d4a2c4405ec2e962c521a13af91bf5f7098a62a8 --- /dev/null +++ b/data/.editorconfig @@ -0,0 +1,21 @@ +# http://editorconfig.org + +root = true + +[*] +indent_style = space +indent_size = 4 +trim_trailing_whitespace = true +insert_final_newline = true +charset = utf-8 +end_of_line = lf + +[*.bat] +indent_style = tab +end_of_line = crlf + +[LICENSE] +insert_final_newline = false + +[Makefile] +indent_style = tab diff --git a/data/.pre-commit-config.yaml b/data/.pre-commit-config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..61fd4955f771d72871db72e8d8a2932dd32fe4e5 --- /dev/null +++ b/data/.pre-commit-config.yaml @@ -0,0 +1,44 @@ +fail_fast: false +default_language_version: + python: python3 +default_stages: + - pre-commit + - pre-push +minimum_pre_commit_version: 2.16.0 +repos: + - repo: https://github.com/rbubley/mirrors-prettier + rev: v3.4.2 + hooks: + - id: prettier + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.8.6 + hooks: + - id: ruff + args: [--fix, --exit-non-zero-on-fix, --unsafe-fixes] + - id: ruff-format + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v5.0.0 + hooks: + - id: detect-private-key + - id: check-ast + - id: end-of-file-fixer + - id: mixed-line-ending + args: [--fix=lf] + - id: trailing-whitespace + - id: check-case-conflict + - repo: https://github.com/pre-commit/mirrors-mypy + rev: v1.14.1 + hooks: + - id: mypy + args: [--no-strict-optional, --ignore-missing-imports] + additional_dependencies: + ["types-setuptools", "types-requests", "types-attrs"] + - repo: local + hooks: + - id: forbid-to-commit + name: Don't commit rej files + entry: | + Cannot commit .rej files. These indicate merge conflicts that arise during automated template updates. + Fix the merge conflicts manually and remove the .rej files. + language: fail + files: '.*\.rej$' diff --git a/data/.readthedocs.yml b/data/.readthedocs.yml new file mode 100644 index 0000000000000000000000000000000000000000..4dc2dd86f9b290464dd205d3fa42e678c195f3e7 --- /dev/null +++ b/data/.readthedocs.yml @@ -0,0 +1,18 @@ +version: 2 +build: + os: ubuntu-24.04 + tools: + python: "3.12" + commands: + - asdf plugin add uv + - asdf install uv latest + - asdf global uv latest + - uv venv + - uv pip install .[docs] + - .venv/bin/python -m sphinx -T -b html -d docs/_build/doctrees -D language=en docs $READTHEDOCS_OUTPUT/html +sphinx: + configuration: docs/conf.py + fail_on_warning: false + +submodules: + include: all diff --git a/data/CODE_OF_CONDUCT.md b/data/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000000000000000000000000000000000..39816a93ae214898564a95ed3cdc7deae2adbfe2 --- /dev/null +++ b/data/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our +project and our community a harassment-free experience for everyone, +regardless of age, body size, disability, ethnicity, gender identity and +expression, level of experience, nationality, personal appearance, race, +religion, or sexual identity and orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +- Using welcoming and inclusive language +- Being respectful of differing viewpoints and experiences +- Gracefully accepting constructive criticism +- Focusing on what is best for the community +- Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +- The use of sexualized language or imagery and unwelcome sexual + attention or advances +- Trolling, insulting/derogatory comments, and personal or political + attacks +- Public or private harassment +- Publishing others’ private information, such as a physical or + electronic address, without explicit permission +- Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of +acceptable behavior and are expected to take appropriate and fair +corrective action in response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, +or reject comments, commits, code, wiki edits, issues, and other +contributions that are not aligned to this Code of Conduct, or to ban +temporarily or permanently any contributor for other behaviors that they +deem inappropriate, threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public +spaces when an individual is representing the project or its community. +Examples of representing a project or community include using an +official project e-mail address, posting via an official social media +account, or acting as an appointed representative at an online or +offline event. Representation of a project may be further defined and +clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may +be reported by opening an issue. The project team +will review and investigate all complaints, and will respond in a way +that it deems appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an +incident. Further details of specific enforcement policies may be posted +separately. + +Project maintainers who do not follow or enforce the Code of Conduct in +good faith may face temporary or permanent repercussions as determined +by other members of the project’s leadership. + +## Attribution + +This Code of Conduct is adapted from the Contributor Covenant, version 1.4, +available at <https://www.contributor-covenant.org/version/1/4/code-of-conduct.html> diff --git a/data/LICENSE b/data/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..9bb82f124a2bd3aa0bb0b5ed7b1be7c30999626e --- /dev/null +++ b/data/LICENSE @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity.
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2022 Lukas Heumos + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
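A note on the `forbid-to-commit` hook in `.pre-commit-config.yaml` earlier in this diff: pre-commit's `fail` language needs no script; it simply prints the `entry` text and fails whenever a staged file matches `files: '.*\.rej$'`. A minimal standalone sketch of the same check in Python follows; the `find_rej_files` helper is hypothetical and not part of this diff:

```python
# Hypothetical standalone equivalent of the `forbid-to-commit` hook above.
# It fails if any *.rej file (a rejected patch left behind by an automated
# template update) is present in the working tree.
import sys
from pathlib import Path


def find_rej_files(root: str = ".") -> list[Path]:
    """Collect *.rej files, skipping anything inside the .git directory."""
    return [p for p in Path(root).rglob("*.rej") if ".git" not in p.parts]


if __name__ == "__main__":
    rej_files = find_rej_files()
    if rej_files:
        print("Cannot commit .rej files - fix the merge conflicts and remove them:")
        for path in rej_files:
            print(f"  {path}")
        sys.exit(1)
```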
diff --git a/data/codecov.yml b/data/codecov.yml new file mode 100644 index 0000000000000000000000000000000000000000..e89fa9401b4cfb240c0fbcf014b4854f875b5ec1 --- /dev/null +++ b/data/codecov.yml @@ -0,0 +1,13 @@ +comment: false +coverage: + status: + project: + default: + target: auto + patch: + default: + enabled: no + ignore: + - "test/" +github_checks: + annotations: false diff --git a/data/docs/Makefile b/data/docs/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..679df64c22c3e330aa275b070d74861f4cd1f650 --- /dev/null +++ b/data/docs/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = python -msphinx +SPHINXPROJ = ehrapy +SOURCEDIR = . +BUILDDIR = _build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/data/docs/_ext/edit_on_github.py b/data/docs/_ext/edit_on_github.py new file mode 100644 index 0000000000000000000000000000000000000000..746d2c415abbfda91c2b5865a20f480f3bd792c4 --- /dev/null +++ b/data/docs/_ext/edit_on_github.py @@ -0,0 +1,50 @@ +"""Based on gist.github.com/MantasVaitkunas/7c16de233812adcb7028.""" + +import os +import warnings +from typing import Any, Optional + +from sphinx.application import Sphinx + +__licence__ = "BSD (3 clause)" + + +def get_github_repo(app: Sphinx, path: str) -> str: + if path.endswith(".ipynb"): + return str(app.config.github_nb_repo) + if "auto_examples" in path: + return str(app.config.github_nb_repo) + if "auto_tutorials" in path: + return str(app.config.github_nb_repo) + return str(app.config.github_repo) + + +def _html_page_context( + app: Sphinx, _pagename: str, templatename: str, context: dict[str, Any], doctree: Any | None +) -> None: + # doctree is None - otherwise viewcode fails + if templatename != "page.html" or doctree is None: + return + + if not app.config.github_repo: + return + + if not app.config.github_nb_repo: + nb_repo = f"{app.config.github_repo}_notebooks" + app.config.github_nb_repo = nb_repo + + path = os.path.relpath(doctree.get("source"), app.builder.srcdir) + repo = get_github_repo(app, path) + + # For sphinx_rtd_theme. 
+ context["display_github"] = True + context["github_user"] = "theislab" + context["github_version"] = "master" + context["github_repo"] = repo + context["conf_py_path"] = "/docs/source/" + + +def setup(app: Sphinx) -> None: + app.add_config_value("github_nb_repo", "", True) + app.add_config_value("github_repo", "", True) + app.connect("html-page-context", _html_page_context) diff --git a/data/docs/_ext/typed_returns.py b/data/docs/_ext/typed_returns.py new file mode 100644 index 0000000000000000000000000000000000000000..1ed8577181aea00f0e8574d1b7699d79de84a28a --- /dev/null +++ b/data/docs/_ext/typed_returns.py @@ -0,0 +1,27 @@ +import re +from collections.abc import Iterable, Iterator + +from sphinx.application import Sphinx +from sphinx.ext.napoleon import NumpyDocstring + + +def _process_return(lines: Iterable[str]) -> Iterator[str]: + for line in lines: + m = re.fullmatch(r"(?P\w+)\s+:\s+(?P[\w.]+)", line) + if m: + # Once this is in scanpydoc, we can use the fancy hover stuff + yield f'**{m["param"]}** : :class:`~{m["type"]}`' + else: + yield line + + +def _parse_returns_section(self: NumpyDocstring, section: str) -> list[str]: + lines_raw = list(_process_return(self._dedent(self._consume_to_next_section()))) + lines: list[str] = self._format_block(":returns: ", lines_raw) + if lines and lines[-1]: + lines.append("") + return lines + + +def setup(app: Sphinx) -> None: + NumpyDocstring._parse_returns_section = _parse_returns_section diff --git a/data/docs/_static/SCVI_LICENSE b/data/docs/_static/SCVI_LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..12a37ecac53ffde80522e3b465f7e21807422f18 --- /dev/null +++ b/data/docs/_static/SCVI_LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2020 Romain Lopez, Adam Gayoso, Galen Xing, Yosef Lab +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/data/docs/_static/css/overwrite.css b/data/docs/_static/css/overwrite.css new file mode 100644 index 0000000000000000000000000000000000000000..d2d885858239424234333d35206e8ac17f5f745f --- /dev/null +++ b/data/docs/_static/css/overwrite.css @@ -0,0 +1,74 @@ +/* + Furo CSS variables + https://github.com/pradyunsg/furo/blob/main/src/furo/assets/styles/variables/_index.scss + https://github.com/pradyunsg/furo/blob/main/src/furo/theme/partials/_head_css_variables.html + https://github.com/streamlink/streamlink/blob/17a4088c38709123c0bcab4a150549bd16d19e07/docs/_static/styles/custom.css +*/ + +/* for the sphinx design cards */ +body { + --sd-color-shadow: dimgrey; +} + +dt:target, +span.highlighted { + background-color: #f0f0f0; +} + +dl.citation > dt { + float: left; + margin-right: 15px; + font-weight: bold; +} + +/* Parameters: normalize size and capitalization */ +dl .field-list dt { + font-size: var(--font-size--normal) !important; + text-transform: none !important; +} + +/* examples and headings in classes */ +p.rubric { + font-size: var(--font-size--normal); + text-transform: none; + font-weight: 500; +} + +/* adapted from https://github.com/dask/dask-sphinx-theme/blob/main/dask_sphinx_theme/static/css/nbsphinx.css */ + +.nbinput .prompt, +.nboutput .prompt { + display: none; +} +.nboutput .stderr { + display: none; +} + +div.nblast.container { + padding-bottom: 10px !important; + padding-right: 0px; + padding-left: 0px; +} + +div.nbinput.container { + padding-top: 10px !important; + padding-right: 0px; + padding-left: 0px; +} + +div.nbinput.container div.input_area div[class*="highlight"] > pre { + padding: 10px !important; + margin: 0; +} + +p.topic-title { + margin-top: 0; +} + +/* so that API methods are small in the sidebar */ +li.toctree-l3 { + font-size: 81.25% !important; +} +li.toctree-l4 { + font-size: 75% !important; +} diff --git a/data/docs/_static/css/sphinx_gallery.css b/data/docs/_static/css/sphinx_gallery.css new file mode 100644 index 0000000000000000000000000000000000000000..746692047ae1a31b9a2d65acece89d05a70c653b --- /dev/null +++ b/data/docs/_static/css/sphinx_gallery.css @@ -0,0 +1,27 @@ +.sphx-glr-thumbcontainer { + background: inherit !important; + min-height: 250px !important; + margin: 10px !important; +} + +.sphx-glr-thumbcontainer .headerlink { + display: none !important; +} + +div.sphx-glr-thumbcontainer span { + font-style: normal !important; +} + +.sphx-glr-thumbcontainer a.internal { + padding: 140px 10px 0 !important; +} + +.sphx-glr-thumbcontainer .figure { + width: 200px !important; +} + +.sphx-glr-thumbcontainer .figure.align-center { + text-align: center; + margin-left: 0%; + transform: translate(0%); +} diff --git a/data/docs/_static/docstring_previews/catplot.png b/data/docs/_static/docstring_previews/catplot.png new file mode 100644 index 0000000000000000000000000000000000000000..cfad54f0e87715d646994f5040b1db5e4c9968da --- /dev/null +++ b/data/docs/_static/docstring_previews/catplot.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4111d49fc8d05678805696de956a1eb9a93ebcb49448a1964ea4af8fb15a60a6 +size 15921 diff --git a/data/docs/_static/docstring_previews/clustermap.png b/data/docs/_static/docstring_previews/clustermap.png new file mode 100644 index 0000000000000000000000000000000000000000..b4ddc5b41830762172ff2719d61340c3c63fcbe3 --- /dev/null +++ b/data/docs/_static/docstring_previews/clustermap.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid
sha256:734ceb4fa4d94f73802ac4dc4164cb9688da39d1fd62aae36d148c2500758d6f +size 94060 diff --git a/data/docs/_static/docstring_previews/cohort_tracking.png b/data/docs/_static/docstring_previews/cohort_tracking.png new file mode 100644 index 0000000000000000000000000000000000000000..21cbe17568e57a6bc8da8e96dbeb71f74aa2dd06 --- /dev/null +++ b/data/docs/_static/docstring_previews/cohort_tracking.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:941041aa584bdc023b77b392867828e1c4cb26264cc8e0fad7e4a6eb10b14cd6 +size 44578 diff --git a/data/docs/_static/docstring_previews/dendrogram.png b/data/docs/_static/docstring_previews/dendrogram.png new file mode 100644 index 0000000000000000000000000000000000000000..e9d4b7e7a989936211f0735fcc20739c821a6bd9 --- /dev/null +++ b/data/docs/_static/docstring_previews/dendrogram.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f2b45acd706a513a7723ec7a715f64f56ea0765c4f0c233be19cd17052df648f +size 2239 diff --git a/data/docs/_static/docstring_previews/diffmap.png b/data/docs/_static/docstring_previews/diffmap.png new file mode 100644 index 0000000000000000000000000000000000000000..abb0ffde2bb5771ad316288dfaeca13374679175 --- /dev/null +++ b/data/docs/_static/docstring_previews/diffmap.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:354257844c68bd847a85555d1fbf5d4df098b6f657c7b51b99ea74751a705325 +size 39173 diff --git a/data/docs/_static/docstring_previews/dotplot.png b/data/docs/_static/docstring_previews/dotplot.png new file mode 100644 index 0000000000000000000000000000000000000000..456b360818a734d861fcad5488795e5cc981849b --- /dev/null +++ b/data/docs/_static/docstring_previews/dotplot.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:25120ea73174fbcfac9744758624c36413e2c661a3c2a0a4f062f1bac8721680 +size 67731 diff --git a/data/docs/_static/docstring_previews/dpt_groups_pseudotime.png b/data/docs/_static/docstring_previews/dpt_groups_pseudotime.png new file mode 100644 index 0000000000000000000000000000000000000000..da314940bee4d7baf38b68ec171841d25c93eb81 --- /dev/null +++ b/data/docs/_static/docstring_previews/dpt_groups_pseudotime.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ef6c92d26aa5009c12dda15cf80eaf6a6897832419e71d7a0d9d79f16197dec +size 16262 diff --git a/data/docs/_static/docstring_previews/dpt_timeseries.png b/data/docs/_static/docstring_previews/dpt_timeseries.png new file mode 100644 index 0000000000000000000000000000000000000000..df95daa7d3203b0cb7596d4363ddaf1488345dea --- /dev/null +++ b/data/docs/_static/docstring_previews/dpt_timeseries.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2f623747c293092f3a72a2eeb253eb25e2f2fa65cf082c6ccc95009f0cb65bfe +size 57505 diff --git a/data/docs/_static/docstring_previews/draw_graph_1.png b/data/docs/_static/docstring_previews/draw_graph_1.png new file mode 100644 index 0000000000000000000000000000000000000000..b42dcf7d1c9a5b3855a0c66b47cfaec4cb850282 --- /dev/null +++ b/data/docs/_static/docstring_previews/draw_graph_1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f485ee8d0c164b69e0fbf7f21f25ce2ce08c66dc0474788f3c4cb0634064eff6 +size 54015 diff --git a/data/docs/_static/docstring_previews/draw_graph_2.png b/data/docs/_static/docstring_previews/draw_graph_2.png new file mode 100644 index 0000000000000000000000000000000000000000..eb64f07e115c014c8fcce0902fa8a33ef8a0c100 --- /dev/null +++ 
b/data/docs/_static/docstring_previews/draw_graph_2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e7e0ebee347d874e902b6e9814b22d4c73f9734e8018a4a8261b2f52bab681dc +size 190161 diff --git a/data/docs/_static/docstring_previews/embedding.png b/data/docs/_static/docstring_previews/embedding.png new file mode 100644 index 0000000000000000000000000000000000000000..b6e02ccaa223b10cc9fb4d6d278a681d8c86b2e4 --- /dev/null +++ b/data/docs/_static/docstring_previews/embedding.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:70020811aac4b174a9db0917e45c8a238b4691cb2d0b03e1074603b8e67a3e44 +size 66419 diff --git a/data/docs/_static/docstring_previews/embedding_density.png b/data/docs/_static/docstring_previews/embedding_density.png new file mode 100644 index 0000000000000000000000000000000000000000..0095fb12a8befcebef8360b2ec6aa187f31898cd --- /dev/null +++ b/data/docs/_static/docstring_previews/embedding_density.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e058de31724c512346905544266f31c604ca4b29410d7903d1fa207a0147d0e6 +size 331234 diff --git a/data/docs/_static/docstring_previews/feature_importances.png b/data/docs/_static/docstring_previews/feature_importances.png new file mode 100644 index 0000000000000000000000000000000000000000..7143bee1b8b57fc842aafa7a3d5c7e1b51b8e431 --- /dev/null +++ b/data/docs/_static/docstring_previews/feature_importances.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b2b4640a7a9ba4233efbf4079f4fe83b5ddb59040eea02e90d8ede3442ee7577 +size 20235 diff --git a/data/docs/_static/docstring_previews/flowchart.png b/data/docs/_static/docstring_previews/flowchart.png new file mode 100644 index 0000000000000000000000000000000000000000..4f2e8634fada87b7fd52f8acf6b4a2fedb0b0fa5 --- /dev/null +++ b/data/docs/_static/docstring_previews/flowchart.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:636355b18f48d787f3b99f756fe972025d9b007718fda72984ccc28408bc66de +size 23797 diff --git a/data/docs/_static/docstring_previews/heatmap.png b/data/docs/_static/docstring_previews/heatmap.png new file mode 100644 index 0000000000000000000000000000000000000000..efb2e1b26cbd960e682df8c8c0ace8610ddc5344 --- /dev/null +++ b/data/docs/_static/docstring_previews/heatmap.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:274fbef4457bb4fcd1955b67d714f6aa5743c9a8238e5b3e85178c32de44e6a1 +size 30453 diff --git a/data/docs/_static/docstring_previews/kmf_plot_1.png b/data/docs/_static/docstring_previews/kmf_plot_1.png new file mode 100644 index 0000000000000000000000000000000000000000..cd729709ab4f34f450af49b5409ac7169e27eb7e --- /dev/null +++ b/data/docs/_static/docstring_previews/kmf_plot_1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:543ad3a670bb812e7c884f8eaf9c5f64e516c7119d823de9b6b2bd790ecf601e +size 11876 diff --git a/data/docs/_static/docstring_previews/kmf_plot_2.png b/data/docs/_static/docstring_previews/kmf_plot_2.png new file mode 100644 index 0000000000000000000000000000000000000000..b228c03f5f30f6d33e6482777c0f1527a755b61e --- /dev/null +++ b/data/docs/_static/docstring_previews/kmf_plot_2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5f678d85cf57d79c80988084e59dc2dfb65ef9b23b5f62e18b9b8c94eaf5c6f5 +size 8639 diff --git a/data/docs/_static/docstring_previews/matrixplot.png b/data/docs/_static/docstring_previews/matrixplot.png new file mode 100644 index 
0000000000000000000000000000000000000000..e5afa05067ecd2ebd8b74aea9af1868ff2e6122b --- /dev/null +++ b/data/docs/_static/docstring_previews/matrixplot.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2e078b623437bd6a69bc64bb894e00d0dff6bf3e72aa38faefe9bc10289dd29b +size 12184 diff --git a/data/docs/_static/docstring_previews/missingno_barplot.png b/data/docs/_static/docstring_previews/missingno_barplot.png new file mode 100644 index 0000000000000000000000000000000000000000..d4097f028b533f25741cc83a18e41648f1d1e439 --- /dev/null +++ b/data/docs/_static/docstring_previews/missingno_barplot.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:986ffaec3ce897d79256d21c3bedb5f4c2e2117ae02dabc657ba3b11ea45aa34 +size 117306 diff --git a/data/docs/_static/docstring_previews/missingno_dendrogram.png b/data/docs/_static/docstring_previews/missingno_dendrogram.png new file mode 100644 index 0000000000000000000000000000000000000000..bae22ae3bec1dae06ad4a6c7104f5a5af25649c3 --- /dev/null +++ b/data/docs/_static/docstring_previews/missingno_dendrogram.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1af725952674a10588d763648699e436c452adff2aa4d1d51cdf09d6e02edba7 +size 60750 diff --git a/data/docs/_static/docstring_previews/missingno_heatmap.png b/data/docs/_static/docstring_previews/missingno_heatmap.png new file mode 100644 index 0000000000000000000000000000000000000000..b2c4625cbaf43dd7b09032f707d2258bf8774155 --- /dev/null +++ b/data/docs/_static/docstring_previews/missingno_heatmap.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f33200f32b3dbcd0934f8467eefb3da2f96bb1a867e835f9482b8bafad5b348c +size 130069 diff --git a/data/docs/_static/docstring_previews/missingno_matrix.png b/data/docs/_static/docstring_previews/missingno_matrix.png new file mode 100644 index 0000000000000000000000000000000000000000..7f3c891fabe1efab669ea9d94f4f72fab1232565 --- /dev/null +++ b/data/docs/_static/docstring_previews/missingno_matrix.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4253a91e1139873b910f873d63e7994262b3f84cc21b6b4ab2fd131b814a230c +size 96653 diff --git a/data/docs/_static/docstring_previews/ols_plot_1.png b/data/docs/_static/docstring_previews/ols_plot_1.png new file mode 100644 index 0000000000000000000000000000000000000000..85d9ff9d01bd296ca6bbd6e34502fcbc5bcaf885 --- /dev/null +++ b/data/docs/_static/docstring_previews/ols_plot_1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2da204bcd471e0c88dbce57966ef480f8fcd2f5e066d7bef0da15d4760a5c14b +size 24094 diff --git a/data/docs/_static/docstring_previews/ols_plot_2.png b/data/docs/_static/docstring_previews/ols_plot_2.png new file mode 100644 index 0000000000000000000000000000000000000000..148364164268b2f96858d596138b987aae77e0a9 --- /dev/null +++ b/data/docs/_static/docstring_previews/ols_plot_2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fc699b36b3f877e47e72609bd7be6b4f6ac7977a2783aa9cf30709aedd819c6b +size 24634 diff --git a/data/docs/_static/docstring_previews/ols_plot_3.png b/data/docs/_static/docstring_previews/ols_plot_3.png new file mode 100644 index 0000000000000000000000000000000000000000..538edf3a2c8b58451bb7304e867a8bcd1d776aa8 --- /dev/null +++ b/data/docs/_static/docstring_previews/ols_plot_3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b2231ec77eb60d9559d02ea2e5ccae722f0f5ce61c6a18595e6de56f52973eb +size 11407 diff 
--git a/data/docs/_static/docstring_previews/paga.png b/data/docs/_static/docstring_previews/paga.png new file mode 100644 index 0000000000000000000000000000000000000000..b42dcf7d1c9a5b3855a0c66b47cfaec4cb850282 --- /dev/null +++ b/data/docs/_static/docstring_previews/paga.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f485ee8d0c164b69e0fbf7f21f25ce2ce08c66dc0474788f3c4cb0634064eff6 +size 54015 diff --git a/data/docs/_static/docstring_previews/pca.png b/data/docs/_static/docstring_previews/pca.png new file mode 100644 index 0000000000000000000000000000000000000000..f3d580fe49f38c28be50adf4d256434a510946e2 --- /dev/null +++ b/data/docs/_static/docstring_previews/pca.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d778c72952aa5bd17db39298abe85fb10824dec67e57001d7e9aa2974e2e5b01 +size 46622 diff --git a/data/docs/_static/docstring_previews/pca_loadings.png b/data/docs/_static/docstring_previews/pca_loadings.png new file mode 100644 index 0000000000000000000000000000000000000000..508daac83f334af4f5611f0130ce78477ff7b71d --- /dev/null +++ b/data/docs/_static/docstring_previews/pca_loadings.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ea11b49cabd4c632ad7209dd0244af50ac7fa3e71d6a25550f3b0d1c4234ed7b +size 59450 diff --git a/data/docs/_static/docstring_previews/pca_overview_1.png b/data/docs/_static/docstring_previews/pca_overview_1.png new file mode 100644 index 0000000000000000000000000000000000000000..f3d580fe49f38c28be50adf4d256434a510946e2 --- /dev/null +++ b/data/docs/_static/docstring_previews/pca_overview_1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d778c72952aa5bd17db39298abe85fb10824dec67e57001d7e9aa2974e2e5b01 +size 46622 diff --git a/data/docs/_static/docstring_previews/pca_overview_2.png b/data/docs/_static/docstring_previews/pca_overview_2.png new file mode 100644 index 0000000000000000000000000000000000000000..508daac83f334af4f5611f0130ce78477ff7b71d --- /dev/null +++ b/data/docs/_static/docstring_previews/pca_overview_2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ea11b49cabd4c632ad7209dd0244af50ac7fa3e71d6a25550f3b0d1c4234ed7b +size 59450 diff --git a/data/docs/_static/docstring_previews/pca_overview_3.png b/data/docs/_static/docstring_previews/pca_overview_3.png new file mode 100644 index 0000000000000000000000000000000000000000..7a00e01b13ec493cc7b2aff522c75bd656ad5ffd --- /dev/null +++ b/data/docs/_static/docstring_previews/pca_overview_3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:999654b2a1fc96dad339099a3eb5aa5d3baee5b0294b420aba869edf4d66480b +size 10392 diff --git a/data/docs/_static/docstring_previews/pca_variance_ratio.png b/data/docs/_static/docstring_previews/pca_variance_ratio.png new file mode 100644 index 0000000000000000000000000000000000000000..0257fb82f4703105edf78d3fae5fe7e5e3c4f561 --- /dev/null +++ b/data/docs/_static/docstring_previews/pca_variance_ratio.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a6e06b803e043b6f1f4f87c0c79961fb05ad76932dc556c7cfcbbaeb13be79dd +size 6976 diff --git a/data/docs/_static/docstring_previews/rank_features_groups.png b/data/docs/_static/docstring_previews/rank_features_groups.png new file mode 100644 index 0000000000000000000000000000000000000000..25b6c7684db7e19b4306fd5b52809a7f8daf6ff6 --- /dev/null +++ b/data/docs/_static/docstring_previews/rank_features_groups.png @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:661d53eaa3bc74bdaf22cde22106cb6299ff83707c0faa629f9731744add5b00 +size 49056 diff --git a/data/docs/_static/docstring_previews/rank_features_groups_dotplot.png b/data/docs/_static/docstring_previews/rank_features_groups_dotplot.png new file mode 100644 index 0000000000000000000000000000000000000000..0a73c425a0a9111ded7f8e61350ec9da7bf1b8dd --- /dev/null +++ b/data/docs/_static/docstring_previews/rank_features_groups_dotplot.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d103da011f7efa4d20b383d1b92dd5c0137aa056e93551e89a5a8f962917982e +size 66387 diff --git a/data/docs/_static/docstring_previews/rank_features_groups_heatmap.png b/data/docs/_static/docstring_previews/rank_features_groups_heatmap.png new file mode 100644 index 0000000000000000000000000000000000000000..c9742462ac787b35428b6c39b05a9d1ff603b203 --- /dev/null +++ b/data/docs/_static/docstring_previews/rank_features_groups_heatmap.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7403ce3bcdb6380ab35052c2b843c306063e878da464ab34c9f335304e2be1b1 +size 57381 diff --git a/data/docs/_static/docstring_previews/rank_features_groups_matrixplot.png b/data/docs/_static/docstring_previews/rank_features_groups_matrixplot.png new file mode 100644 index 0000000000000000000000000000000000000000..46c4d123cceeeff78b21fdb703afc4ea166bf76b --- /dev/null +++ b/data/docs/_static/docstring_previews/rank_features_groups_matrixplot.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:45e2b413999d85d9db4439f19e9c01acf084bce0905ee1f429014f14be9477f0 +size 24132 diff --git a/data/docs/_static/docstring_previews/rank_features_groups_stacked_violin.png b/data/docs/_static/docstring_previews/rank_features_groups_stacked_violin.png new file mode 100644 index 0000000000000000000000000000000000000000..9f87922624f8f2e5574066eb345366378927abcb --- /dev/null +++ b/data/docs/_static/docstring_previews/rank_features_groups_stacked_violin.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bbcaee842b0c5f456404577661e10babab6324bef3e7f359f66eafb817332f71 +size 26721 diff --git a/data/docs/_static/docstring_previews/rank_features_groups_tracksplot.png b/data/docs/_static/docstring_previews/rank_features_groups_tracksplot.png new file mode 100644 index 0000000000000000000000000000000000000000..2210d8ce34f66bb9e0c6c34221ddaec3822fd698 --- /dev/null +++ b/data/docs/_static/docstring_previews/rank_features_groups_tracksplot.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bcbd51b6982ea18694c54c22a8ffb2c632fa33d9bce6b3892f3e213cd84ca1ba +size 277851 diff --git a/data/docs/_static/docstring_previews/rank_features_groups_violin_1.png b/data/docs/_static/docstring_previews/rank_features_groups_violin_1.png new file mode 100644 index 0000000000000000000000000000000000000000..3f83a3473e1b16c2fb755b153fd023b1d13d1932 --- /dev/null +++ b/data/docs/_static/docstring_previews/rank_features_groups_violin_1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:549d57eed2b553c277ee94e7a1960f675d7503f9a4906416933903a65a0408e6 +size 33384 diff --git a/data/docs/_static/docstring_previews/rank_features_groups_violin_2.png b/data/docs/_static/docstring_previews/rank_features_groups_violin_2.png new file mode 100644 index 0000000000000000000000000000000000000000..6a61d704e8e8649f72730351ffe143ac558a0c86 --- /dev/null +++ b/data/docs/_static/docstring_previews/rank_features_groups_violin_2.png @@ 
-0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3458ea277f9b8cfe6e1bcb2eac1f8767b973670f5014f3d8fc37541a1da0f7b2 +size 30777 diff --git a/data/docs/_static/docstring_previews/rank_features_groups_violin_3.png b/data/docs/_static/docstring_previews/rank_features_groups_violin_3.png new file mode 100644 index 0000000000000000000000000000000000000000..11ac764a81a35df7312f376260c937c08a8df5fa --- /dev/null +++ b/data/docs/_static/docstring_previews/rank_features_groups_violin_3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d3def4dc52d13b09e387c76cca1bc071a11d1802b965b56455adc831d0d0337 +size 20942 diff --git a/data/docs/_static/docstring_previews/rank_features_groups_violin_4.png b/data/docs/_static/docstring_previews/rank_features_groups_violin_4.png new file mode 100644 index 0000000000000000000000000000000000000000..12e982fe77ace908abea0bf722d0eb91016acca7 --- /dev/null +++ b/data/docs/_static/docstring_previews/rank_features_groups_violin_4.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5500245a438799e2b044e9b10c5318f02466dc9f4b26f54d047d6ea6fb7a9a05 +size 24729 diff --git a/data/docs/_static/docstring_previews/scatter.png b/data/docs/_static/docstring_previews/scatter.png new file mode 100644 index 0000000000000000000000000000000000000000..6c1b7e3a8516b29f25e2492ba99f467b227137cd --- /dev/null +++ b/data/docs/_static/docstring_previews/scatter.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5819e5336198f89c83b5e05584ee9ef3f5d0cd225d51e981533b99e06c12d438 +size 79628 diff --git a/data/docs/_static/docstring_previews/stacked_violin.png b/data/docs/_static/docstring_previews/stacked_violin.png new file mode 100644 index 0000000000000000000000000000000000000000..b717ad97b3656cf956296205dbef7ba48e6ca6fc --- /dev/null +++ b/data/docs/_static/docstring_previews/stacked_violin.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ef57a8949cf5ebeb568649ac79115329cb3e56fa95bdb3bef0d731eb1ec65bb1 +size 29876 diff --git a/data/docs/_static/docstring_previews/tracksplot.png b/data/docs/_static/docstring_previews/tracksplot.png new file mode 100644 index 0000000000000000000000000000000000000000..2728e398494473af903c5afe5f7f971ae0be32e6 --- /dev/null +++ b/data/docs/_static/docstring_previews/tracksplot.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:989bbf3c3214c12f179cc8bc28b3a97dd95f193262a01e87d4e3ef73e57b7a5f +size 110702 diff --git a/data/docs/_static/docstring_previews/tsne_1.png b/data/docs/_static/docstring_previews/tsne_1.png new file mode 100644 index 0000000000000000000000000000000000000000..7c76cbb559e8248d2883a5ab9d2cbd95a1c39c2a --- /dev/null +++ b/data/docs/_static/docstring_previews/tsne_1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f48d7c50627a4ad577a1fe8baae7381397eb5c3135e9815820c3c735cc9d83ec +size 34743 diff --git a/data/docs/_static/docstring_previews/tsne_2.png b/data/docs/_static/docstring_previews/tsne_2.png new file mode 100644 index 0000000000000000000000000000000000000000..c2a0148047f13273c51ffd0f518621c54344058b --- /dev/null +++ b/data/docs/_static/docstring_previews/tsne_2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:499053e7245d301a6631564af64489eefd1709f2ed0c719b35b4ea74e1f85c16 +size 133404 diff --git a/data/docs/_static/docstring_previews/tsne_3.png b/data/docs/_static/docstring_previews/tsne_3.png new file mode 100644 index 
0000000000000000000000000000000000000000..324141f26643aac3273271361c7141c0b430a251 --- /dev/null +++ b/data/docs/_static/docstring_previews/tsne_3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:88128052f81f44f3e918c5547ae21778c2692ee4c031654845341fc71d655fda +size 56384 diff --git a/data/docs/_static/docstring_previews/umap_1.png b/data/docs/_static/docstring_previews/umap_1.png new file mode 100644 index 0000000000000000000000000000000000000000..8763ef07c7982494e240bcb6c89af639cce6d72a --- /dev/null +++ b/data/docs/_static/docstring_previews/umap_1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:341babaca11944d99c646a7b39a38bdcae24b3440f9d1348a2f9614f4c133d0a +size 40303 diff --git a/data/docs/_static/docstring_previews/umap_2.png b/data/docs/_static/docstring_previews/umap_2.png new file mode 100644 index 0000000000000000000000000000000000000000..83cbc9d5f6396ca46562e828e46f4bd7d7be2d2c --- /dev/null +++ b/data/docs/_static/docstring_previews/umap_2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe304d9252450fb1baa92f0cb4e0e585a1c80d820fb28e0531dfe45f6c830156 +size 160594 diff --git a/data/docs/_static/docstring_previews/umap_3.png b/data/docs/_static/docstring_previews/umap_3.png new file mode 100644 index 0000000000000000000000000000000000000000..9dccb3dfffe343d7e9dde526fecc6959ec47dede --- /dev/null +++ b/data/docs/_static/docstring_previews/umap_3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8274418265bf173a580951cb93e4597a9fc4b373acf275a4eb4c98ca18fce0b2 +size 67044 diff --git a/data/docs/_static/docstring_previews/violin.png b/data/docs/_static/docstring_previews/violin.png new file mode 100644 index 0000000000000000000000000000000000000000..48b8ddbd517fac9251bbbd91947efe1562ebb88b --- /dev/null +++ b/data/docs/_static/docstring_previews/violin.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7259b9c41e7290d4ff2b268112a0e6b70f9abfb66dbd4c1d36390bac0233a7fd +size 49061 diff --git a/data/docs/_static/ehrapy_logos/ehrapy_logo.png b/data/docs/_static/ehrapy_logos/ehrapy_logo.png new file mode 100644 index 0000000000000000000000000000000000000000..fe018c95fb864bdfc4dda712cce1c8fe1648b52f --- /dev/null +++ b/data/docs/_static/ehrapy_logos/ehrapy_logo.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd439f040d89371ddc5b2d668f9c2cd71e4f3fd335262b67d8b7a583b6c76215 +size 508830 diff --git a/data/docs/_static/ehrapy_logos/ehrapy_logo_wide.png b/data/docs/_static/ehrapy_logos/ehrapy_logo_wide.png new file mode 100644 index 0000000000000000000000000000000000000000..1e60d1a15172824b99cc5c133f3d0af4e59777ac --- /dev/null +++ b/data/docs/_static/ehrapy_logos/ehrapy_logo_wide.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bfd3fa85371b30a349f7da60270938d3dcebe3ebeae01c81a66e7fa8bc9f7847 +size 250170 diff --git a/data/docs/_static/ehrapy_logos/ehrapy_logo_wide_2.png b/data/docs/_static/ehrapy_logos/ehrapy_logo_wide_2.png new file mode 100644 index 0000000000000000000000000000000000000000..1af23568691e42c3b69a8a75c32f59aec1287fbf --- /dev/null +++ b/data/docs/_static/ehrapy_logos/ehrapy_logo_wide_2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2a81f6f19b839215215b01ba77867d12203133cd67efa3744206d8236d80f5d3 +size 308095 diff --git a/data/docs/_static/ehrapy_logos/ehrapy_pure.png b/data/docs/_static/ehrapy_logos/ehrapy_pure.png new file mode 100644 index 
0000000000000000000000000000000000000000..201dd334f5c4fee146d00499121c9782e54034d2 --- /dev/null +++ b/data/docs/_static/ehrapy_logos/ehrapy_pure.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b416b1e0d48ac1bad4fe355e1edf5b4507f58599bc6f41973654bff5635077ca +size 465802 diff --git a/data/docs/_static/icons/code-24px.svg b/data/docs/_static/icons/code-24px.svg new file mode 100644 index 0000000000000000000000000000000000000000..db92e812a3db79cf90295f506c3b8ac722362af8 --- /dev/null +++ b/data/docs/_static/icons/code-24px.svg @@ -0,0 +1 @@ + diff --git a/data/docs/_static/icons/computer-24px.svg b/data/docs/_static/icons/computer-24px.svg new file mode 100644 index 0000000000000000000000000000000000000000..a01910bbe23b1e80b0ec6474239d5ac364ce9098 --- /dev/null +++ b/data/docs/_static/icons/computer-24px.svg @@ -0,0 +1 @@ + diff --git a/data/docs/_static/icons/library_books-24px.svg b/data/docs/_static/icons/library_books-24px.svg new file mode 100644 index 0000000000000000000000000000000000000000..d34970449d91c675348523a7c951b944d752f9a9 --- /dev/null +++ b/data/docs/_static/icons/library_books-24px.svg @@ -0,0 +1 @@ + diff --git a/data/docs/_static/icons/play_circle_outline-24px.svg b/data/docs/_static/icons/play_circle_outline-24px.svg new file mode 100644 index 0000000000000000000000000000000000000000..4982801ab44636c610b61af4442c176770dee542 --- /dev/null +++ b/data/docs/_static/icons/play_circle_outline-24px.svg @@ -0,0 +1 @@ + diff --git a/data/docs/_static/placeholder.png b/data/docs/_static/placeholder.png new file mode 100644 index 0000000000000000000000000000000000000000..387034b234426ba45e63e899e84e074d8311b21e --- /dev/null +++ b/data/docs/_static/placeholder.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:91cc7fa66bde36142966e71de4f889ec9a061c729f36422d659b9d7e80f385ae +size 8227 diff --git a/data/docs/_static/readme_overview.png b/data/docs/_static/readme_overview.png new file mode 100644 index 0000000000000000000000000000000000000000..0f6c661192ae566bd17b78f6b1cc4ca9a2d3739f --- /dev/null +++ b/data/docs/_static/readme_overview.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ac3138242410ac380826d6e39efe031bf3fb864a6dc2f93ef516b1bfee95f9af +size 273298 diff --git a/data/docs/_static/tutorials/bias.png b/data/docs/_static/tutorials/bias.png new file mode 100644 index 0000000000000000000000000000000000000000..d2812f9931809bd688eed22f0df8945c2e8b8a30 --- /dev/null +++ b/data/docs/_static/tutorials/bias.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:15bd29b67b587c504bad376b12fb3cb16abf73d2e18b92faf7021f057837431c +size 5807 diff --git a/data/docs/_static/tutorials/catheter.png b/data/docs/_static/tutorials/catheter.png new file mode 100644 index 0000000000000000000000000000000000000000..36e5ed202fc82c76a51bfc05c267e118a9e34495 --- /dev/null +++ b/data/docs/_static/tutorials/catheter.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:536bebc22d88c3aebeead4db96ebd24eeeabbfa7fe19c221fa512254399ce30d +size 30294 diff --git a/data/docs/_static/tutorials/causal_inference.png b/data/docs/_static/tutorials/causal_inference.png new file mode 100644 index 0000000000000000000000000000000000000000..589c8a488f3835d3059ba2b56d436f989aca80a8 --- /dev/null +++ b/data/docs/_static/tutorials/causal_inference.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:81ed71c08e5e3b0d4e73068f222e8c254adf3317219483f13e57caaa434a69bf +size 35492 diff 
--git a/data/docs/_static/tutorials/cohort_tracking.png b/data/docs/_static/tutorials/cohort_tracking.png new file mode 100644 index 0000000000000000000000000000000000000000..8d87e722667eae98eb5ac365fe20942bcf2e9be4 --- /dev/null +++ b/data/docs/_static/tutorials/cohort_tracking.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:44f2e193dd74dce789dd83c11b94c4f8facb5e7b3238bfa4032aa74c57dac17b +size 27390 diff --git a/data/docs/_static/tutorials/fate.png b/data/docs/_static/tutorials/fate.png new file mode 100644 index 0000000000000000000000000000000000000000..e397957c4950673dd5d0f39e6038a20d389c0dbb --- /dev/null +++ b/data/docs/_static/tutorials/fate.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4c1ed22ce885db4fbc68b1271ef0dae8d16e1d365726873a3f6562f06f200cf6 +size 85210 diff --git a/data/docs/_static/tutorials/fhir.png b/data/docs/_static/tutorials/fhir.png new file mode 100644 index 0000000000000000000000000000000000000000..09ec8a609737f05eee67b8b6ff8c9be868435b43 --- /dev/null +++ b/data/docs/_static/tutorials/fhir.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f84680d6d4a66c5922cd41f61e9a111a1d47505d9443bd9ff1e048e944778496 +size 134887 diff --git a/data/docs/_static/tutorials/machine_learning.png b/data/docs/_static/tutorials/machine_learning.png new file mode 100644 index 0000000000000000000000000000000000000000..5d89ac1a7b195feb6e9366a9cbce92b3cc4e9ac9 --- /dev/null +++ b/data/docs/_static/tutorials/machine_learning.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e97ab717effb0084e43691b65f18785e61a40ca28f2c44992028824cef6a80f +size 40862 diff --git a/data/docs/_static/tutorials/nlp.png b/data/docs/_static/tutorials/nlp.png new file mode 100644 index 0000000000000000000000000000000000000000..b99c905de02bfcb0e526fcff6f87f1f3ef5ae348 --- /dev/null +++ b/data/docs/_static/tutorials/nlp.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:426ae837c71cae0b0a15a38b1e872c501a8b15111cfd7651c69e555d56f0d5c6 +size 33093 diff --git a/data/docs/_static/tutorials/ontology.png b/data/docs/_static/tutorials/ontology.png new file mode 100644 index 0000000000000000000000000000000000000000..346ecf78a4b034c3105ec5b1fb5f10ac2de5bcdd --- /dev/null +++ b/data/docs/_static/tutorials/ontology.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1fc9befa9e0ec81ec86bdcefb2832b9fa1fd1fb6e49aa68a36706c632e5f1831 +size 33037 diff --git a/data/docs/_static/tutorials/out_of_core.png b/data/docs/_static/tutorials/out_of_core.png new file mode 100644 index 0000000000000000000000000000000000000000..d78ad2192221ff0e1e5d7a4feca5506b84b3ee73 --- /dev/null +++ b/data/docs/_static/tutorials/out_of_core.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:165fb6c2e3fd8bd1778359c63f6452379513b5fbe40cea31a3d4401b7339f13a +size 18964 diff --git a/data/docs/_static/tutorials/patient_trajectory.png b/data/docs/_static/tutorials/patient_trajectory.png new file mode 100644 index 0000000000000000000000000000000000000000..1343ffc4e2f1e2fb3e14e409d5c70c9a352f6227 --- /dev/null +++ b/data/docs/_static/tutorials/patient_trajectory.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c85fba7c4ca1b56f494c429ba15967fd2201ca666e30e475a28d9c3433c081f +size 302149 diff --git a/data/docs/_static/tutorials/placeholder.png b/data/docs/_static/tutorials/placeholder.png new file mode 100644 index 
0000000000000000000000000000000000000000..387034b234426ba45e63e899e84e074d8311b21e --- /dev/null +++ b/data/docs/_static/tutorials/placeholder.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:91cc7fa66bde36142966e71de4f889ec9a061c729f36422d659b9d7e80f385ae +size 8227 diff --git a/data/docs/_static/tutorials/survival.png b/data/docs/_static/tutorials/survival.png new file mode 100644 index 0000000000000000000000000000000000000000..ce77ec0280b9a2e9708d08dc1ba93df58fe967fe --- /dev/null +++ b/data/docs/_static/tutorials/survival.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e4937a0eb97c40f88e0e753415630612914fb012380bdec5b0c5dcfd1922442 +size 28674 diff --git a/data/docs/_templates/autosummary/class.rst b/data/docs/_templates/autosummary/class.rst new file mode 100644 index 0000000000000000000000000000000000000000..49f45edd68fd68cbffa9cf94e74e7b14de035495 --- /dev/null +++ b/data/docs/_templates/autosummary/class.rst @@ -0,0 +1,65 @@ +{{ fullname | escape | underline}} + +.. currentmodule:: {{ module }} + +.. add toctree option to make autodoc generate the pages + +.. autoclass:: {{ objname }} + +{% block attributes %} +{% if attributes %} +Attributes table +~~~~~~~~~~~~~~~~~~ + +.. autosummary:: +{% for item in attributes %} + ~{{ fullname }}.{{ item }} +{%- endfor %} +{% endif %} +{% endblock %} + +{% block methods %} +{% if methods %} +Methods table +~~~~~~~~~~~~~ + +.. autosummary:: +{% for item in methods %} + {%- if item != '__init__' %} + ~{{ fullname }}.{{ item }} + {%- endif -%} +{%- endfor %} +{% endif %} +{% endblock %} + +{% block attributes_documentation %} +{% if attributes %} +Attributes +~~~~~~~~~~~ + +{% for item in attributes %} +{{ item }} +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoattribute:: {{ [objname, item] | join(".") }} +{%- endfor %} + +{% endif %} +{% endblock %} + +{% block methods_documentation %} +{% if methods %} +Methods +~~~~~~~ + +{% for item in methods %} +{%- if item != '__init__' %} +{{ item }} +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. automethod:: {{ [objname, item] | join(".") }} +{%- endif -%} +{%- endfor %} + +{% endif %} +{% endblock %} diff --git a/data/docs/_templates/class_no_inherited.rst b/data/docs/_templates/class_no_inherited.rst new file mode 100644 index 0000000000000000000000000000000000000000..9e8bd3ebcae43ca3dae39e6c139c8947e576a263 --- /dev/null +++ b/data/docs/_templates/class_no_inherited.rst @@ -0,0 +1,69 @@ +{{ fullname | escape | underline}} + +.. currentmodule:: {{ module }} + +.. add toctree option to make autodoc generate the pages + +.. autoclass:: {{ objname }} + :show-inheritance: + +{% block attributes %} +{% if attributes %} +Attributes table +~~~~~~~~~~~~~~~~ + +.. autosummary:: +{% for item in attributes %} + {%- if item not in inherited_members%} + ~{{ fullname }}.{{ item }} + {%- endif -%} +{%- endfor %} +{% endif %} +{% endblock %} + + +{% block methods %} +{% if methods %} +Methods table +~~~~~~~~~~~~~~ + +.. autosummary:: +{% for item in methods %} + {%- if item != '__init__' and item not in inherited_members%} + ~{{ fullname }}.{{ item }} + {%- endif -%} + +{%- endfor %} +{% endif %} +{% endblock %} + +{% block attributes_documentation %} +{% if attributes %} +Attributes +~~~~~~~~~~ + +{% for item in attributes %} +{{ item }} +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. 
autoattribute:: {{ [objname, item] | join(".") }} +{%- endfor %} + +{% endif %} +{% endblock %} + +{% block methods_documentation %} +{% if methods %} +Methods +~~~~~~~ + +{% for item in methods %} +{%- if item != '__init__' and item not in inherited_members%} +{{ item }} +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +.. automethod:: {{ [objname, item] | join(".") }} +{%- endif -%} +{%- endfor %} + +{% endif %} +{% endblock %} diff --git a/data/docs/conf.py b/data/docs/conf.py new file mode 100644 index 0000000000000000000000000000000000000000..dd46ddabaff2c5cff217b40a697926c4f080f690 --- /dev/null +++ b/data/docs/conf.py @@ -0,0 +1,185 @@ +#!/usr/bin/env python +# mypy: ignore-errors + +import sys +from datetime import datetime +from importlib.metadata import metadata +from pathlib import Path + +HERE = Path(__file__).parent +sys.path[:0] = [str(HERE.parent), str(HERE / "extensions")] + +needs_sphinx = "4.3" + +info = metadata("ehrapy") +project_name = info["Name"] +author = info["Author"] +copyright = f"{datetime.now():%Y}, {author}." +version = info["Version"] +urls = dict(pu.split(", ") for pu in info.get_all("Project-URL")) +repository_url = urls["Source"] +release = info["Version"] +github_repo = "ehrapy" + +extensions = [ + "myst_parser", + "sphinx.ext.autodoc", + "sphinx.ext.intersphinx", + "sphinx.ext.viewcode", + "nbsphinx", + "nbsphinx_link", + "sphinx.ext.mathjax", + "sphinx.ext.napoleon", + "sphinx_autodoc_typehints", # needs to be after napoleon + "sphinx.ext.autosummary", + "sphinx_copybutton", + "sphinx_gallery.load_style", + "sphinx_remove_toctrees", + "sphinx_design", +] + +# remove_from_toctrees = ["tutorials/notebooks/*", "api/reference/*"] + +# for sharing urls with nice info +ogp_site_url = "https://ehrapy.readthedocs.io/en/latest/" +ogp_image = "https://ehrapy.readthedocs.io/en/latest//_static/logo.png" + +# nbsphinx specific settings +exclude_patterns = [ + "_build", + "Thumbs.db", + ".DS_Store", + "auto_*/**.ipynb", + "auto_*/**.md5", + "auto_*/**.py", + "**.ipynb_checkpoints", +] +nbsphinx_execute = "never" + +templates_path = ["_templates"] +# source_suffix = ".md" + +autosummary_generate = True +autodoc_member_order = "bysource" +napoleon_google_docstring = True # for pytorch lightning +napoleon_numpy_docstring = True +napoleon_include_init_with_doc = False +napoleon_use_rtype = True # having a separate entry generally helps readability +napoleon_use_param = True +napoleon_custom_sections = [("Params", "Parameters")] +todo_include_todos = False +numpydoc_show_class_members = False +annotate_defaults = True # scanpydoc option, look into why we need this +myst_enable_extensions = [ + "colon_fence", + "dollarmath", + "amsmath", +] + +master_doc = "index" + +intersphinx_mapping = { + "anndata": ("https://anndata.readthedocs.io/en/stable/", None), + "ipython": ("https://ipython.readthedocs.io/en/stable/", None), + "matplotlib": ("https://matplotlib.org/", None), + "numpy": ("https://numpy.org/doc/stable/", None), + "pandas": ("https://pandas.pydata.org/docs/", None), + "python": ("https://docs.python.org/3", None), + "scipy": ("https://docs.scipy.org/doc/scipy/reference/", None), + "pynndescent": ("https://pynndescent.readthedocs.io/en/latest/", None), + "sklearn": ("https://scikit-learn.org/stable/", None), + "torch": ("https://pytorch.org/docs/master/", None), + "scanpy": ("https://scanpy.readthedocs.io/en/stable/", None), + "pytorch_lightning": ("https://pytorch-lightning.readthedocs.io/en/stable/", None), + "pyro": ("http://docs.pyro.ai/en/stable/", 
None), + "pymde": ("https://pymde.org/", None), + "flax": ("https://flax.readthedocs.io/en/latest/", None), + "jax": ("https://jax.readthedocs.io/en/latest/", None), + "lamin": ("https://lamin.ai/docs", None), + "lifelines": ("https://lifelines.readthedocs.io/en/latest/", None), +} + +language = "en" + +typehints_defaults = "comma" + +pygments_style = "default" +pygments_dark_style = "native" + + +# html_show_sourcelink = True +html_theme = "furo" + +html_title = "ehrapy" +html_logo = "_static/ehrapy_logos/ehrapy_pure.png" + +html_theme_options = { + "sidebar_hide_name": True, + "light_css_variables": { + "color-brand-primary": "#003262", + "color-brand-content": "#003262", + "admonition-font-size": "var(--font-size-normal)", + "admonition-title-font-size": "var(--font-size-normal)", + "code-font-size": "var(--font-size--small)", + }, +} +html_static_path = ["_static"] +html_css_files = ["css/override.css", "css/sphinx_gallery.css"] +html_show_sphinx = False + + +nbsphinx_prolog = r""" +.. raw:: html + +{{% set docname = env.doc2path(env.docname, base=None).split("/")[-1] %}} + +.. raw:: html + + + +.. raw:: html + +
+    <div class="admonition note">
+      <p class="admonition-title">Note</p>
+      <p>
+        This page was generated from {docname}.
+        Some tutorial content may look better in light mode.
+      </p>
+    </div>
+""".format(version=version, docname="{{ docname|e }}") +nbsphinx_thumbnails = { + "tutorials/notebooks/ehrapy_introduction": "_static/ehrapy_logos/ehrapy_pure.png", + "tutorials/notebooks/mimic_2_introduction": "_static/tutorials/catheter.png", + "tutorials/notebooks/mimic_2_fate": "_static/tutorials/fate.png", + "tutorials/notebooks/mimic_2_survival_analysis": "_static/tutorials/survival.png", + "tutorials/notebooks/mimic_2_causal_inference": "_static/tutorials/causal_inference.png", + "tutorials/notebooks/medcat": "_static/tutorials/nlp.png", + "tutorials/notebooks/ml_usecases": "_static/tutorials/machine_learning.png", + "tutorials/notebooks/ontology_mapping": "_static/tutorials/ontology.png", + "tutorials/notebooks/fhir": "_static/tutorials/fhir.png", + "tutorials/notebooks/cohort_tracking": "_static/tutorials/cohort_tracking.png", + "tutorials/notebooks/bias": "_static/tutorials/bias.png", + "tutorials/notebooks/out_of_core": "_static/tutorials/out_of_core.png", + "tutorials/notebooks/patient_trajectory": "_static/tutorials/patient_trajectory.png", +} diff --git a/data/docs/contributing.md b/data/docs/contributing.md new file mode 100644 index 0000000000000000000000000000000000000000..0a5b318e34aafc51a49c7cfe9d98e6411beae9ba --- /dev/null +++ b/data/docs/contributing.md @@ -0,0 +1,183 @@ +# Contributing guide + +Scanpy provides extensive [developer documentation][scanpy developer guide], most of which applies to this repo, too. +This document will not reproduce the entire content from there. Instead, it aims at summarizing the most important +information to get you started on contributing. + +We assume that you are already familiar with git and with making pull requests on GitHub. If not, please refer +to the [scanpy developer guide][]. + +## Installing dev dependencies + +In addition to the packages needed to _use_ this package, you need additional python packages to _run tests_ and _build +the documentation_. It's easy to install them using `pip`: + +```bash +cd ehrapy +pip install -e ".[dev,test,docs]" +``` + +## Code-style + +This project uses [pre-commit][] to enforce consistent code-styles. On every commit, pre-commit checks will either +automatically fix issues with the code, or raise an error message. + +To enable pre-commit locally, simply run + +```bash +pre-commit install +``` + +in the root of the repository. Pre-commit will automatically download all dependencies when it is run for the first time. + +Alternatively, you can rely on the [pre-commit.ci][] service enabled on GitHub. If you didn't run `pre-commit` before +pushing changes to GitHub it will automatically commit fixes to your pull request, or show an error message. + +If pre-commit.ci added a commit on a branch you still have been working on locally, simply use + +```bash +git pull --rebase +``` + +to integrate the changes into yours. +While the [pre-commit.ci][] is useful, we strongly encourage installing and running pre-commit locally first to understand its usage. + +Finally, most editors have an _autoformat on save_ feature. Consider enabling this option for [black][black-editors] +and [prettier][prettier-editors]. + +[black-editors]: https://black.readthedocs.io/en/stable/integrations/editors.html +[prettier-editors]: https://prettier.io/docs/en/editors.html + +## Writing tests + +```{note} +Remember to first install the package with `pip install -e ".[dev,test,docs]"` +``` + +This package uses the [pytest][] for automated testing. 
+
+## Publishing a release
+
+### Updating the version number
+
+Before making a release, you need to update the version number. Please adhere to [Semantic Versioning][semver], in brief:
+
+> Given a version number MAJOR.MINOR.PATCH, increment the:
+>
+> 1. MAJOR version when you make incompatible API changes,
+> 2. MINOR version when you add functionality in a backwards compatible manner, and
+> 3. PATCH version when you make backwards compatible bug fixes.
+>
+> Additional labels for pre-release and build metadata are available as extensions to the MAJOR.MINOR.PATCH format.
+
+Once you are done, create a git tag for the new version and run
+
+```
+git push --tags
+```
+
+to publish the created tag on GitHub.
+
+### Building and publishing the package on PyPI
+
+Python packages are not distributed as source code, but as _distributions_. The most common distribution format is the so-called _wheel_. To build a _wheel_, run
+
+```bash
+python -m build
+```
+
+This command creates a _source archive_ and a _wheel_, which are required for publishing your package to [PyPI][]. These files are created directly in the root of the repository.
+
+Before uploading them to [PyPI][] you can check that your _distribution_ is valid by running:
+
+```bash
+twine check dist/*
+```
+
+and finally publishing it with:
+
+```bash
+twine upload dist/*
+```
+
+Provide your username and password when requested and then go check out your package on [PyPI][]!
+
+For more information, follow the [Python packaging tutorial][].
+
+It is possible to automate this with GitHub actions, see also [this feature request][pypi-feature-request]
+in the cookiecutter-scverse template.
+
+[python packaging tutorial]: https://packaging.python.org/en/latest/tutorials/packaging-projects/#generating-distribution-archives
+[pypi-feature-request]: https://github.com/scverse/cookiecutter-scverse/issues/88
+
+## Writing documentation
+
+Please write documentation for new or changed features and use cases. This project uses [sphinx][] with the following features:
+
+- the [myst][] extension allows writing documentation in markdown/Markedly Structured Text
+- Google-style docstrings
+- Jupyter notebooks as tutorials through [myst-nb][] (see [Tutorials with myst-nb](#tutorials-with-myst-nb-and-jupyter-notebooks))
+- [Sphinx autodoc typehints][], to automatically reference annotated input and output types
+- Citations (like {cite:p}`Virshup_2023`) can be included with [sphinxcontrib-bibtex](https://sphinxcontrib-bibtex.readthedocs.io/)
+
+See the [scanpy developer docs](https://scanpy.readthedocs.io/en/latest/dev/documentation.html) for more information
+on how to write documentation.
+
+### Tutorials with myst-nb and jupyter notebooks
+
+The documentation is set up to render Jupyter notebooks stored in the `docs/tutorials` directory using [myst-nb][].
+Currently, only notebooks in `.ipynb` format are supported; they are included with both their input and output cells.
+
+These notebooks come from [ehrapy-tutorials](https://github.com/theislab/ehrapy-tutorials), which is a git submodule of ehrapy.
+Each notebook is also registered with a gallery thumbnail in `docs/conf.py`, as sketched below.
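+
+For instance, adding a new notebook means adding an entry to the `nbsphinx_thumbnails`
+dictionary (a minimal sketch; the notebook name `my_new_tutorial` is hypothetical, and
+`placeholder.png` is the generic thumbnail shipped in `_static/tutorials`):
+
+```python
+# docs/conf.py -- register a gallery thumbnail for a (hypothetical) new notebook.
+nbsphinx_thumbnails = {
+    # ... existing entries ...
+    "tutorials/notebooks/my_new_tutorial": "_static/tutorials/placeholder.png",
+}
+```
+
+The notebook is then listed in the `nbgallery` block of `docs/tutorials/index.md` so it appears in the tutorials gallery.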
+
+#### Hints
+
+- If you refer to objects from other packages, please add an entry to `intersphinx_mapping` in `docs/conf.py`. Only
+  if you do so can Sphinx automatically create a link to the external documentation.
+- If building the documentation fails because of a missing link that is outside your control, you can add an entry to
+  the `nitpick_ignore` list in `docs/conf.py`.
+
+#### Building the docs locally
+
+```bash
+cd docs
+make html
+open _build/html/index.html
+```
+
+[scanpy developer guide]: https://scanpy.readthedocs.io/en/latest/dev/index.html
+[cookiecutter-scverse-instance]: https://cookiecutter-scverse-instance.readthedocs.io/en/latest/template_usage.html
+[github quickstart guide]: https://docs.github.com/en/get-started/quickstart/create-a-repo?tool=webui
+[codecov]: https://about.codecov.io/sign-up/
+[codecov docs]: https://docs.codecov.com/docs
+[codecov bot]: https://docs.codecov.com/docs/team-bot
+[codecov app]: https://github.com/apps/codecov
+[pre-commit.ci]: https://pre-commit.ci/
+[readthedocs.org]: https://readthedocs.org/
+[myst-nb]: https://myst-nb.readthedocs.io/en/latest/
+[jupytext]: https://jupytext.readthedocs.io/en/latest/
+[pre-commit]: https://pre-commit.com/
+[anndata]: https://github.com/scverse/anndata
+[mudata]: https://github.com/scverse/mudata
+[pytest]: https://docs.pytest.org/
+[semver]: https://semver.org/
+[sphinx]: https://www.sphinx-doc.org/en/master/
+[myst]: https://myst-parser.readthedocs.io/en/latest/intro.html
+[numpydoc-napoleon]: https://www.sphinx-doc.org/en/master/usage/extensions/napoleon.html
+[numpydoc]: https://numpydoc.readthedocs.io/en/latest/format.html
+[sphinx autodoc typehints]: https://github.com/tox-dev/sphinx-autodoc-typehints
+[pypi]: https://pypi.org/
diff --git a/data/docs/index.md b/data/docs/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..03a0987d561ce970ccaf7e028efa6cfd96685b10
--- /dev/null
+++ b/data/docs/index.md
@@ -0,0 +1,68 @@
+ehrapy logo
+
+ehrapy is a modular open-source Python framework designed for exploratory end-to-end analysis of heterogeneous epidemiology and electronic health record data.
+
+```{eval-rst}
+.. card:: Installation :octicon:`plug;1em;`
+    :link: installation
+    :link-type: doc
+
+    New to *ehrapy*? Check out the installation guide.
+```
+
+```{eval-rst}
+.. card:: API reference :octicon:`book;1em;`
+    :link: usage/usage
+    :link-type: doc
+
+    The API reference contains a detailed description of the ehrapy API.
+```
+
+```{eval-rst}
+.. card:: Tutorials :octicon:`play;1em;`
+    :link: tutorials/index
+    :link-type: doc
+
+    The tutorials walk you through real-world applications of ehrapy.
+```
+
+```{eval-rst}
+.. card:: Discussion :octicon:`megaphone;1em;`
+    :link: https://discourse.scverse.org/
+
+    Need help? Reach out on our forum to get your questions answered!
+
+```
+
+```{eval-rst}
+.. card:: GitHub :octicon:`mark-github;1em;`
+    :link: https://github.com/theislab/ehrapy
+
+    Find a bug? Interested in improving ehrapy? Check out our GitHub for the latest developments.
+
+```
+
+```{toctree}
+:caption: 'Contents:'
+:hidden: true
+:maxdepth: 3
+
+installation
+usage/usage
+tutorials/index
+contributing
+references
+```
+
+# Citation
+
+[Exploratory electronic health record analysis with ehrapy. Lukas Heumos, Philipp Ehmele, Tim Treis, Julius Upmeier zu Belzen, Altana Namsaraeva, Nastassya Horlava, Vladimir A. Shitov, Xinyue Zhang, Luke Zappia, Rainer Knoll, Niklas J.
Lang, Leon Hetzel, Isaac Virshup, Lisa Sikkema, Eljas Roellin, Fabiola Curion, Roland Eils, Herbert B. Schiller, Anne Hilgendorff, Fabian J. Theis.
+medRxiv 2023.12.11.23299816; doi: https://doi.org/10.1101/2023.12.11.23299816](https://www.medrxiv.org/content/10.1101/2023.12.11.23299816v1).
+
+# Indices and tables
+
+- {ref}`genindex`
+- {ref}`modindex`
+- {ref}`search`
+
+[scanpy genome biology (2018)]: https://doi.org/10.1186/s13059-017-1382-0
diff --git a/data/docs/installation.md b/data/docs/installation.md
new file mode 100644
index 0000000000000000000000000000000000000000..b349394e08939983ed272295841ea4c26c40512c
--- /dev/null
+++ b/data/docs/installation.md
@@ -0,0 +1,62 @@
+```{highlight} shell
+
+```
+
+# Installation
+
+## Stable release
+
+To install ehrapy, run this command in your terminal:
+
+```console
+pip install ehrapy
+```
+
+This is the preferred method to install ehrapy, as it will always install the most recent stable release.
+
+If you don't have [pip] installed, this [Python installation guide] can guide you through the process.
+
+If you run into "RuntimeError: CMake must be installed to build qdldl", ensure that you have CMake installed to build lightgbm.
+Run `conda install -c anaconda cmake` and `conda install -c conda-forge lightgbm` to do so.
+
+If you intend to run MedCAT, you have to install a language model, for example:
+
+```console
+python -m spacy download en_core_web_sm
+```
+
+## From sources
+
+The sources for ehrapy can be downloaded from the [GitHub repo].
+
+You can either clone the public repository:
+
+```console
+git clone git://github.com/theislab/ehrapy
+```
+
+Or download the [tarball]:
+
+```console
+curl -OJL https://github.com/theislab/ehrapy/tarball/master
+```
+
+## MedCAT/Spacy language models
+
+If you want to use MedCAT with ehrapy, you first have to install the `medcat` extra:
+
+```console
+pip install ehrapy[medcat]
+```
+
+Available language models are:
+
+- en_core_web_md (python -m spacy download en_core_web_md)
+- en-core-sci-sm (pip install )
+- en-core-sci-md (pip install )
+- en-core-sci-lg (pip install )
+
+[github repo]: https://github.com/theislab/ehrapy
+[pip]: https://pip.pypa.io
+[python installation guide]: http://docs.python-guide.org/en/latest/starting/installation/
+[tarball]: https://github.com/theislab/ehrapy/tarball/master
diff --git a/data/docs/make.bat b/data/docs/make.bat
new file mode 100644
index 0000000000000000000000000000000000000000..2e87c2f6765ddac42a3a0bc37830a38624f666ce
--- /dev/null
+++ b/data/docs/make.bat
@@ -0,0 +1,36 @@
+@ECHO OFF
+
+pushd %~dp0
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+    set SPHINXBUILD=python -msphinx
+)
+set SOURCEDIR=.
+set BUILDDIR=_build
+set SPHINXPROJ=ehrapy
+
+if "%1" == "" goto help
+
+%SPHINXBUILD% >NUL 2>NUL
+if errorlevel 9009 (
+    echo.
+    echo.The Sphinx module was not found. Make sure you have Sphinx installed,
+    echo.then set the SPHINXBUILD environment variable to point to the full
+    echo.path of the 'sphinx-build' executable. Alternatively you may add the
+    echo.Sphinx directory to PATH.
+    echo.
+ echo.If you don't have Sphinx installed, grab it from + echo.http://sphinx-doc.org/ + exit /b 1 +) + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% + +:end +popd diff --git a/data/docs/references.bib b/data/docs/references.bib new file mode 100644 index 0000000000000000000000000000000000000000..d742f2b8cac500123868c717a00b2d18fc54d211 --- /dev/null +++ b/data/docs/references.bib @@ -0,0 +1,931 @@ +@article{Virshup_2023, + doi = {10.1038/s41587-023-01733-8}, + url = {https://doi.org/10.1038%2Fs41587-023-01733-8}, + year = 2023, + month = {apr}, + publisher = {Springer Science and Business Media {LLC}}, + author = {Isaac Virshup and Danila Bredikhin and Lukas Heumos and Giovanni Palla and Gregor Sturm and Adam Gayoso and Ilia Kats and Mikaela Koutrouli and Philipp Angerer and Volker Bergen and Pierre Boyeau and Maren Büttner and Gokcen Eraslan and David Fischer and Max Frank and Justin Hong and Michal Klein and Marius Lange and Romain Lopez and Mohammad Lotfollahi and Malte D. Luecken and Fidel Ramirez and Jeffrey Regier and Sergei Rybakov and Anna C. Schaar and Valeh Valiollah Pour Amiri and Philipp Weiler and Galen Xing and Bonnie Berger and Dana Pe'er and Aviv Regev and Sarah A. Teichmann and Francesca Finotello and F. Alexander Wolf and Nir Yosef and Oliver Stegle and Fabian J. Theis and}, + title = {The scverse project provides a computational ecosystem for single-cell omics data analysis}, + journal = {Nature Biotechnology} +} + +@misc{Amid2019, + author = {Amid, Ehsan and Warmuth, Manfred K.}, + doi = {10.48550/ARXIV.1910.00204}, + url = {https://arxiv.org/abs/1910.00204}, + keywords = {Machine Learning (cs.LG), Machine Learning (stat.ML), FOS: Computer and information sciences, FOS: Computer and information sciences}, + title = {TriMap: Large-scale Dimensionality Reduction Using Triplets}, + publisher = {arXiv}, + year = {2019}, + copyright = {arXiv.org perpetual, non-exclusive license}, +} + +@article{Amir2013, + author = {Amir, El-ad David and Davis, Kara L and Tadmor, Michelle D and Simonds, Erin F and Levine, Jacob H and Bendall, Sean C and Shenfeld, Daniel K and Krishnaswamy, Smita and Nolan, Garry P and Pe’er, Dana}, + title = {viSNE enables visualization of high dimensional single-cell data and reveals phenotypic heterogeneity of leukemia}, + volume = {31}, + issn = {1546-1696}, + url = {https://doi.org/10.1038/nbt.2594}, + doi = {10.1038/nbt.2594}, + number = {6}, + journal = {Nature Biotechnology}, + publisher = {Springer Science and Business Media LLC}, + year = {2013}, + month = {may}, + pages = {545--552}, +} + +@article{Angerer2015, + author = {Angerer, Philipp and Haghverdi, Laleh and Büttner, Maren and Theis, Fabian J. and Marr, Carsten and Buettner, Florian}, + title = {destiny: diffusion maps for large-scale single-cell data in R}, + volume = {32}, + issn = {1367-4803}, + url = {https://doi.org/10.1093/bioinformatics/btv715}, + doi = {10.1093/bioinformatics/btv715}, + number = {8}, + journal = {Bioinformatics}, + publisher = {Oxford University Press (OUP)}, + year = {2015}, + month = {dec}, + pages = {1241--1243}, +} + +@article{Baron2016, + author = {Baron, Maayan and Veres, Adrian and Wolock, Samuel L. and Faust, Aubrey L. and Gaujoux, Renaud and Vetere, Amedeo and Ryu, Jennifer Hyoje and Wagner, Bridget K. and Shen-Orr, Shai S. and Klein, Allon M. and Melton, Douglas A. 
and Yanai, Itai}, + title = {A Single-Cell Transcriptomic Map of the Human and Mouse Pancreas Reveals Inter- and Intra-cell Population Structure}, + volume = {3}, + issn = {2405-4712}, + url = {https://doi.org/10.1016/j.cels.2016.08.011}, + doi = {10.1016/j.cels.2016.08.011}, + number = {4}, + journal = {Cell Systems}, + publisher = {Elsevier BV}, + year = {2016}, + month = {oct}, + pages = {346--360.e4}, +} + +@article{Becht2018, + author = {Becht, Etienne and McInnes, Leland and Healy, John and Dutertre, Charles-Antoine and Kwok, Immanuel W H and Ng, Lai Guan and Ginhoux, Florent and Newell, Evan W}, + title = {Dimensionality reduction for visualizing single-cell data using UMAP}, + volume = {37}, + issn = {1546-1696}, + url = {https://doi.org/10.1038/nbt.4314}, + doi = {10.1038/nbt.4314}, + number = {1}, + journal = {Nature Biotechnology}, + publisher = {Springer Science and Business Media LLC}, + year = {2018}, + month = {dec}, + pages = {38--44}, +} + +@article{Bernstein2020, + author = {Bernstein, Nicholas J. and Fong, Nicole L. and Lam, Irene and Roy, Margaret A. and Hendrickson, David G. and Kelley, David R.}, + title = {Solo: Doublet Identification in Single-Cell RNA-Seq via Semi-Supervised Deep Learning}, + volume = {11}, + issn = {2405-4712}, + url = {https://doi.org/10.1016/j.cels.2020.05.010}, + doi = {10.1016/j.cels.2020.05.010}, + number = {1}, + journal = {Cell Systems}, + publisher = {Elsevier BV}, + year = {2020}, + month = {jul}, + pages = {95--101.e5}, +} + +@article{Blondel2008, + author = {Blondel, Vincent D and Guillaume, Jean-Loup and Lambiotte, Renaud and Lefebvre, Etienne}, + title = {Fast unfolding of communities in large networks}, + volume = {2008}, + issn = {1742-5468}, + url = {https://doi.org/10.1088/1742-5468/2008/10/P10008}, + doi = {10.1088/1742-5468/2008/10/p10008}, + number = {10}, + journal = {Journal of Statistical Mechanics: Theory and Experiment}, + publisher = {IOP Publishing}, + year = {2008}, + month = {oct}, + pages = {P10008}, +} + +@article{Burczynski2006, + author = {Burczynski, Michael E. and Peterson, Ron L. and Twine, Natalie C. and Zuberek, Krystyna A. and Brodeur, Brendan J. and Casciotti, Lori and Maganti, Vasu and Reddy, Padma S. and Strahs, Andrew and Immermann, Fred and Spinelli, Walter and Schwertschlag, Ulrich and Slager, Anna M. and Cotreau, Monette M. 
and Dorner, Andrew J.}, + title = {Molecular Classification of Crohn’s Disease and Ulcerative Colitis Patients Using Transcriptional Profiles in Peripheral Blood Mononuclear Cells}, + volume = {8}, + issn = {1525-1578}, + url = {https://doi.org/10.2353/jmoldx.2006.050079}, + doi = {10.2353/jmoldx.2006.050079}, + number = {1}, + journal = {The Journal of Molecular Diagnostics}, + publisher = {Elsevier BV}, + year = {2006}, + month = {feb}, + pages = {51--61}, +} + +@article{Butler2018, + author = {Butler, Andrew and Hoffman, Paul and Smibert, Peter and Papalexi, Efthymia and Satija, Rahul}, + title = {Integrating single-cell transcriptomic data across different conditions, technologies, and species}, + volume = {36}, + issn = {1546-1696}, + url = {https://doi.org/10.1038/nbt.4096}, + doi = {10.1038/nbt.4096}, + number = {5}, + journal = {Nature Biotechnology}, + publisher = {Springer Science and Business Media LLC}, + year = {2018}, + month = {apr}, + pages = {411--420}, +} + +@misc{Chippada2018, + author = {Chippada, Bhargav}, + title = {ForceAtlas2 for Python}, + year = {2018}, + publisher = {GitHub}, + journal = {GitHub repository}, + url = {https://github.com/bhargavchippada/forceatlas2}, + howpublished = {\url{https://github.com/bhargavchippada/forceatlas2}}, +} + +@article{Coifman2005, + author = {Coifman, R. R. and Lafon, S. and Lee, A. B. and Maggioni, M. and Nadler, B. and Warner, F. and Zucker, S. W.}, + title = {Geometric diffusions as a tool for harmonic analysis and structure definition of data: Diffusion maps}, + volume = {102}, + issn = {1091-6490}, + url = {https://doi.org/10.1073/pnas.0500334102}, + doi = {10.1073/pnas.0500334102}, + number = {21}, + journal = {Proceedings of the National Academy of Sciences}, + publisher = {Proceedings of the National Academy of Sciences}, + year = {2005}, + month = {may}, + pages = {7426--7431}, +} + +@article{Csardi2006, + author = {Csárdi, G. and Nepusz, T.}, + added-at = {2011-03-14T01:02:14.000+0100}, + url = {https://www.bibsonomy.org/bibtex/252e3e774bac8424cc9a3845a9c597344/lantiq}, + groups = {public}, + journal = {InterJournal Complex Systems}, + keywords = {networks graphs}, + pages = {1695}, + title = {The igraph software package for complex network research}, + year = {2006}, +} + +@article{Eraslan2019, + author = {Eraslan, G\"{o}kcen and Simon, Lukas M. and Mircea, Maria and Mueller, Nikola S. and Theis, Fabian J.}, + title = {Single-cell RNA-seq denoising using a deep count autoencoder}, + volume = {10}, + issn = {2041-1723}, + url = {https://doi.org/10.1038/s41467-018-07931-2}, + doi = {10.1038/s41467-018-07931-2}, + number = {1}, + journal = {Nature Communications}, + publisher = {Springer Science and Business Media LLC}, + year = {2019}, + month = {jan}, +} + +@misc{Fechtner2018, + author = {Fechtner, Ron}, + title = {PyPairs - A python scRNA-Seq classifier}, + year = {2018}, + publisher = {GitHub}, + journal = {GitHub repository}, + url = {https://github.com/rfechtner/pypairs}, + howpublished = {\url{https://github.com/rfechtner/pypairs}}, +} + +@article{Fruchterman1991, + author = {Fruchterman, Thomas M. J. and Reingold, Edward M.}, + title = {Graph drawing by force‐directed placement}, + volume = {21}, + issn = {1097-024X}, + url = {https://doi.org/10.1002/spe.4380211102}, + doi = {10.1002/spe.4380211102}, + number = {11}, + journal = {Software: Practice and Experience}, + publisher = {Wiley}, + year = {1991}, + month = {nov}, + pages = {1129--1164}, +} + +@article{Gardner2000, + author = {Gardner, Timothy S. 
and Cantor, Charles R. and Collins, James J.}, + title = {Construction of a genetic toggle switch in Escherichia coli}, + volume = {403}, + issn = {1476-4687}, + url = {https://doi.org/10.1038/35002131}, + doi = {10.1038/35002131}, + number = {6767}, + journal = {Nature}, + publisher = {Springer Science and Business Media LLC}, + year = {2000}, + month = {jan}, + pages = {339--342}, +} + +@article{Haghverdi2015, + author = {Haghverdi, Laleh and Buettner, Florian and Theis, Fabian J.}, + title = {Diffusion maps for high-dimensional single-cell analysis of differentiation data}, + volume = {31}, + issn = {1367-4803}, + url = {https://doi.org/10.1093/bioinformatics/btv325}, + doi = {10.1093/bioinformatics/btv325}, + number = {18}, + journal = {Bioinformatics}, + publisher = {Oxford University Press (OUP)}, + year = {2015}, + month = {may}, + pages = {2989--2998}, +} + +@article{Haghverdi2016, + author = {Haghverdi, Laleh and Büttner, Maren and Wolf, F Alexander and Buettner, Florian and Theis, Fabian J}, + title = {Diffusion pseudotime robustly reconstructs lineage branching}, + volume = {13}, + issn = {1548-7105}, + url = {https://doi.org/10.1038/nmeth.3971}, + doi = {10.1038/nmeth.3971}, + number = {10}, + journal = {Nature Methods}, + publisher = {Springer Science and Business Media LLC}, + year = {2016}, + month = {aug}, + pages = {845--848}, +} + +@article{Haghverdi2018, + author = {Haghverdi, Laleh and Lun, Aaron T L and Morgan, Michael D and Marioni, John C}, + title = {Batch effects in single-cell RNA-sequencing data are corrected by matching mutual nearest neighbors}, + volume = {36}, + issn = {1546-1696}, + url = {https://doi.org/10.1038/nbt.4091}, + doi = {10.1038/nbt.4091}, + number = {5}, + journal = {Nature Biotechnology}, + publisher = {Springer Science and Business Media LLC}, + year = {2018}, + month = {apr}, + pages = {421--427}, +} + +@article{Hie2019, + author = {Hie, Brian and Bryson, Bryan and Berger, Bonnie}, + title = {Efficient integration of heterogeneous single-cell transcriptomes using Scanorama}, + volume = {37}, + issn = {1546-1696}, + url = {https://doi.org/10.1038/s41587-019-0113-3}, + doi = {10.1038/s41587-019-0113-3}, + number = {6}, + journal = {Nature Biotechnology}, + publisher = {Springer Science and Business Media LLC}, + year = {2019}, + month = {may}, + pages = {685--691}, +} + +@article{Islam2011, + author = {Islam, Saiful and Kjällquist, Una and Moliner, Annalena and Zajac, Pawel and Fan, Jian-Bing and Lönnerberg, Peter and Linnarsson, Sten}, + title = {Characterization of the single-cell transcriptional landscape by highly multiplex RNA-seq}, + volume = {21}, + issn = {1088-9051}, + url = {https://doi.org/10.1101/gr.110882.110}, + doi = {10.1101/gr.110882.110}, + number = {7}, + journal = {Genome Research}, + publisher = {Cold Spring Harbor Laboratory}, + year = {2011}, + month = {may}, + pages = {1160--1167}, +} + +@article{Jacomy2014, + author = {Jacomy, Mathieu and Venturini, Tommaso and Heymann, Sebastien and Bastian, Mathieu}, + editor = {Muldoon, Mark R.}, + title = {ForceAtlas2, a Continuous Graph Layout Algorithm for Handy Network Visualization Designed for the Gephi Software}, + volume = {9}, + issn = {1932-6203}, + url = {https://doi.org/10.1371/journal.pone.0098679}, + doi = {10.1371/journal.pone.0098679}, + number = {6}, + journal = {PLoS ONE}, + publisher = {Public Library of Science (PLoS)}, + year = {2014}, + month = {jun}, + pages = {e98679}, +} + +@article{Johnson2006, + author = {Johnson, W. 
Evan and Li, Cheng and Rabinovic, Ariel}, + title = {Adjusting batch effects in microarray expression data using empirical Bayes methods}, + volume = {8}, + issn = {1465-4644}, + url = {https://doi.org/10.1093/biostatistics/kxj037}, + doi = {10.1093/biostatistics/kxj037}, + number = {1}, + journal = {Biostatistics}, + publisher = {Oxford University Press (OUP)}, + year = {2006}, + month = {apr}, + pages = {118--127}, +} + +@misc{Kang2018, + author = {Kang, Chris}, + title = {mnnpy - MNN-correct in python}, + year = {2018}, + publisher = {GitHub}, + journal = {GitHub repository}, + url = {https://github.com/chriscainx/mnnpy}, + howpublished = {\url{https://github.com/chriscainx/mnnpy}}, +} + +@article{Korsunsky2019, + author = {Korsunsky, Ilya and Millard, Nghia and Fan, Jean and Slowikowski, Kamil and Zhang, Fan and Wei, Kevin and Baglaenko, Yuriy and Brenner, Michael and Loh, Po-ru and Raychaudhuri, Soumya}, + title = {Fast, sensitive and accurate integration of single-cell data with Harmony}, + volume = {16}, + issn = {1548-7105}, + url = {https://doi.org/10.1038/s41592-019-0619-0}, + doi = {10.1038/s41592-019-0619-0}, + number = {12}, + journal = {Nature Methods}, + publisher = {Springer Science and Business Media LLC}, + year = {2019}, + month = {nov}, + pages = {1289--1296}, +} + +@article{Krumsiek2011, + author = {Krumsiek, Jan and Marr, Carsten and Schroeder, Timm and Theis, Fabian J.}, + editor = {Pesce, Maurizio}, + title = {Hierarchical Differentiation of Myeloid Progenitors Is Encoded in the Transcription Factor Network}, + volume = {6}, + issn = {1932-6203}, + url = {https://doi.org/10.1371/journal.pone.0022649}, + doi = {10.1371/journal.pone.0022649}, + number = {8}, + journal = {PLoS ONE}, + publisher = {Public Library of Science (PLoS)}, + year = {2011}, + month = {aug}, + pages = {e22649}, +} + +@article{LaManno2018, + author = {La Manno, Gioele and Soldatov, Ruslan and Zeisel, Amit and Braun, Emelie and Hochgerner, Hannah and Petukhov, Viktor and Lidschreiber, Katja and Kastriti, Maria E. and Lönnerberg, Peter and Furlan, Alessandro and Fan, Jean and Borm, Lars E. 
and Liu, Zehua and van Bruggen, David and Guo, Jimin and He, Xiaoling and Barker, Roger and Sundström, Erik and Castelo-Branco, Gonçalo and Cramer, Patrick and Adameyko, Igor and Linnarsson, Sten and Kharchenko, Peter V.}, + title = {RNA velocity of single cells}, + volume = {560}, + issn = {1476-4687}, + url = {https://doi.org/10.1038/s41586-018-0414-6}, + doi = {10.1038/s41586-018-0414-6}, + number = {7719}, + journal = {Nature}, + publisher = {Springer Science and Business Media LLC}, + year = {2018}, + month = {aug}, + pages = {494--498}, +} + +@article{Lambiotte2014, + author = {Lambiotte, Renaud and Delvenne, Jean-Charles and Barahona, Mauricio}, + title = {Random Walks, Markov Processes and the Multiscale Modular Organization of Complex Networks}, + volume = {1}, + issn = {2327-4697}, + url = {https://doi.org/10.1109/TNSE.2015.2391998}, + doi = {10.1109/tnse.2015.2391998}, + number = {2}, + journal = {IEEE Transactions on Network Science and Engineering}, + publisher = {Institute of Electrical and Electronics Engineers (IEEE)}, + year = {2014}, + month = {jul}, + pages = {76--90}, +} + +@article{Lause2021, + author = {Lause, Jan and Berens, Philipp and Kobak, Dmitry}, + title = {Analytic Pearson residuals for normalization of single-cell RNA-seq UMI data}, + volume = {22}, + issn = {1474-760X}, + url = {https://doi.org/10.1186/s13059-021-02451-7}, + doi = {10.1186/s13059-021-02451-7}, + number = {1}, + journal = {Genome Biology}, + publisher = {Springer Science and Business Media LLC}, + year = {2021}, + month = {sep}, +} + +@misc{Leek2012, + author = {Leek, Jeffrey T. and Johnson, W. Evan and Parker, Hilary S. and J.Fertig, Elana and Jaffe, Andrew E. and Storey, John D. and Zhang, Yuqing and Torres, Leonardo Collado}, + doi = {10.18129/B9.BIOC.SVA}, + url = {https://bioconductor.org/packages/sva}, + title = {sva}, + publisher = {Bioconductor}, + year = {2017}, +} + +@article{Levine2015, + author = {Levine, Jacob H. and Simonds, Erin F. and Bendall, Sean C. and Davis, Kara L. and Amir, El-ad D. and Tadmor, Michelle D. and Litvin, Oren and Fienberg, Harris G. and Jager, Astraea and Zunder, Eli R. and Finck, Rachel and Gedman, Amanda L. and Radtke, Ina and Downing, James R. and Pe’er, Dana and Nolan, Garry P.}, + title = {Data-Driven Phenotypic Dissection of AML Reveals Progenitor-like Cells that Correlate with Prognosis}, + volume = {162}, + issn = {0092-8674}, + url = {https://doi.org/10.1016/j.cell.2015.05.047}, + doi = {10.1016/j.cell.2015.05.047}, + number = {1}, + journal = {Cell}, + publisher = {Elsevier BV}, + year = {2015}, + month = {jul}, + pages = {184--197}, +} + +@article{Lotfollahi2019, + author = {Lotfollahi, Mohammad and Wolf, F. 
Alexander and Theis, Fabian J.}, + title = {scGen predicts single-cell perturbation responses}, + volume = {16}, + issn = {1548-7105}, + url = {https://doi.org/10.1038/s41592-019-0494-8}, + doi = {10.1038/s41592-019-0494-8}, + number = {8}, + journal = {Nature Methods}, + publisher = {Springer Science and Business Media LLC}, + year = {2019}, + month = {jul}, + pages = {715--721}, +} + +@inproceedings{Luecken2021, + author = {Luecken, Malte and Burkhardt, Daniel and Cannoodt, Robrecht and Lance, Christopher and Agrawal, Aditi and Aliee, Hananeh and Chen, Ann and Deconinck, Louise and Detweiler, Angela and Granados, Alejandro and Huynh, Shelly and Isacco, Laura and Kim, Yang and Klein, Dominik and De Kumar, Bony and Kuppasani, Sunil and Lickert, Heiko and McGeever, Aaron and Melgarejo, Joaquin and Mekonen, Honey and Morri, Maurizio and Müller, Michaela and Neff, Norma and Paul, Sheryl and Rieck, Bastian and Schneider, Kaylie and Steelman, Scott and Sterr, Michael and Treacy, Daniel and Tong, Alexander and Villani, Alexandra-Chloe and Wang, Guilin and Yan, Jia and Zhang, Ce and Pisco, Angela and Krishnaswamy, Smita and Theis, Fabian and Bloom, Jonathan M}, + editor = {Vanschoren, J. and Yeung, S.}, + booktitle = {Proceedings of the Neural Information Processing Systems Track on Datasets and Benchmarks}, + pages = {}, + publisher = {Curran}, + title = {A sandbox for prediction and integration of DNA, RNA, and proteins in single cells}, + url = {https://datasets-benchmarks-proceedings.neurips.cc/paper_files/paper/2021/file/158f3069a435b314a80bdcb024f8e422-Paper-round2.pdf}, + volume = {1}, + year = {2021}, +} + +@article{McCarthy2017, + author = {McCarthy, Davis J and Campbell, Kieran R and Lun, Aaron T L and Wills, Quin F}, + editor = {Hofacker, Ivo}, + doi = {10.1093/bioinformatics/btw777}, + url = {https://doi.org/10.1093/bioinformatics/btw777}, + year = {2017}, + month = {jan}, + publisher = {Oxford University Press ({OUP})}, + volume = {33}, + number = {8}, + pages = {1179--1186}, + title = {Scater: pre-processing, quality control, normalization and visualization of single-cell {RNA}-seq data in R}, + journal = {Bioinformatics}, +} + +@misc{McInnes2018, + author = {McInnes, Leland and Healy, John and Melville, James}, + doi = {10.48550/ARXIV.1802.03426}, + url = {https://arxiv.org/abs/1802.03426}, + keywords = {Machine Learning (stat.ML), Computational Geometry (cs.CG), Machine Learning (cs.LG), FOS: Computer and information sciences, FOS: Computer and information sciences}, + title = {UMAP: Uniform Manifold Approximation and Projection for Dimension Reduction}, + publisher = {arXiv}, + year = {2018}, + copyright = {arXiv.org perpetual, non-exclusive license}, +} + +@article{Moignard2015, + author = {Moignard, Victoria and Woodhouse, Steven and Haghverdi, Laleh and Lilly, Andrew J and Tanaka, Yosuke and Wilkinson, Adam C and Buettner, Florian and Macaulay, Iain C and Jawaid, Wajid and Diamanti, Evangelia and Nishikawa, Shin-Ichi and Piterman, Nir and Kouskoff, Valerie and Theis, Fabian J and Fisher, Jasmin and Göttgens, Berthold}, + title = {Decoding the regulatory network of early blood development from single-cell gene expression measurements}, + volume = {33}, + issn = {1546-1696}, + url = {https://doi.org/10.1038/nbt.3154}, + doi = {10.1038/nbt.3154}, + number = {3}, + journal = {Nature Biotechnology}, + publisher = {Springer Science and Business Media LLC}, + year = {2015}, + month = {feb}, + pages = {269--276}, +} + +@article{Moon2019, + author = {Moon, Kevin R. 
and van Dijk, David and Wang, Zheng and Gigante, Scott and Burkhardt, Daniel B. and Chen, William S. and Yim, Kristina and Elzen, Antonia van den and Hirn, Matthew J. and Coifman, Ronald R. and Ivanova, Natalia B. and Wolf, Guy and Krishnaswamy, Smita}, + title = {Visualizing structure and transitions in high-dimensional biological data}, + volume = {37}, + issn = {1546-1696}, + url = {https://doi.org/10.1038/s41587-019-0336-3}, + doi = {10.1038/s41587-019-0336-3}, + number = {12}, + journal = {Nature Biotechnology}, + publisher = {Springer Science and Business Media LLC}, + year = {2019}, + month = {dec}, + pages = {1482--1492}, +} + +@article{Muraro2016, + author = {Muraro, Mauro J. and Dharmadhikari, Gitanjali and Gr\"{u}n, Dominic and Groen, Nathalie and Dielen, Tim and Jansen, Erik and van Gurp, Leon and Engelse, Marten A. and Carlotti, Francoise and de Koning, Eelco J.P. and van Oudenaarden, Alexander}, + title = {A Single-Cell Transcriptome Atlas of the Human Pancreas}, + volume = {3}, + issn = {2405-4712}, + url = {https://doi.org/10.1016/j.cels.2016.09.002}, + doi = {10.1016/j.cels.2016.09.002}, + number = {4}, + journal = {Cell Systems}, + publisher = {Elsevier BV}, + year = {2016}, + month = {oct}, + pages = {385--394.e3}, +} + +@article{Nowotschin2019, + author = {Nowotschin, Sonja and Setty, Manu and Kuo, Ying-Yi and Liu, Vincent and Garg, Vidur and Sharma, Roshan and Simon, Claire S. and Saiz, Nestor and Gardner, Rui and Boutet, Stéphane C. and Church, Deanna M. and Hoodless, Pamela A. and Hadjantonakis, Anna-Katerina and Pe’er, Dana}, + title = {The emergent landscape of the mouse gut endoderm at single-cell resolution}, + volume = {569}, + issn = {1476-4687}, + url = {https://doi.org/10.1038/s41586-019-1127-1}, + doi = {10.1038/s41586-019-1127-1}, + number = {7756}, + journal = {Nature}, + publisher = {Springer Science and Business Media LLC}, + year = {2019}, + month = {apr}, + pages = {361--367}, +} + +@article{Ntranos2019, + author = {Ntranos, Vasilis and Yi, Lynn and Melsted, Páll and Pachter, Lior}, + title = {A discriminative learning approach to differential expression analysis for single-cell RNA-seq}, + volume = {16}, + issn = {1548-7105}, + url = {https://doi.org/10.1038/s41592-018-0303-9}, + doi = {10.1038/s41592-018-0303-9}, + number = {2}, + journal = {Nature Methods}, + publisher = {Springer Science and Business Media LLC}, + year = {2019}, + month = {jan}, + pages = {163--166}, +} + +@article{Paul2015, + author = {Paul, Franziska and Arkin, Ya’ara and Giladi, Amir and Jaitin, Diego Adhemar and Kenigsberg, Ephraim and Keren-Shaul, Hadas and Winter, Deborah and Lara-Astiaso, David and Gury, Meital and Weiner, Assaf and David, Eyal and Cohen, Nadav and Lauridsen, Felicia Kathrine Bratt and Haas, Simon and Schlitzer, Andreas and Mildner, Alexander and Ginhoux, Florent and Jung, Steffen and Trumpp, Andreas and Porse, Bo Torben and Tanay, Amos and Amit, Ido}, + title = {Transcriptional Heterogeneity and Lineage Commitment in Myeloid Progenitors}, + volume = {163}, + issn = {0092-8674}, + url = {https://doi.org/10.1016/j.cell.2015.11.013}, + doi = {10.1016/j.cell.2015.11.013}, + number = {7}, + journal = {Cell}, + publisher = {Elsevier BV}, + year = {2015}, + month = {dec}, + pages = {1663--1677}, +} + +@misc{Pedersen2012, + author = {Pedersen, Brent}, + title = {combat.py}, + year = {2012}, + publisher = {GitHub}, + journal = {GitHub repository}, + url = {https://github.com/brentp/combat.py}, + howpublished = {\url{https://github.com/brentp/combat.py}}, +} + 
+@article{Pedregosa2011, + author = {Pedregosa, Fabian and Varoquaux, Gaël and Gramfort, Alexandre and Michel, Vincent and Thirion, Bertrand and Grisel, Olivier and Blondel, Mathieu and Prettenhofer, Peter and Weiss, Ron and Dubourg, Vincent and Vanderplas, Jake and Passos, Alexandre and Cournapeau, David and Brucher, Matthieu and Perrot, Matthieu and Duchesnay, Édouard}, + title = {Scikit-learn: Machine Learning in Python}, + year = {2011}, + publisher = {JMLR.org}, + volume = {12}, + number = {null}, + issn = {1532-4435}, + journal = {Journal of Machine Learning Research}, + month = {nov}, + pages = {2825--2830}, + numpages = {6}, +} + +@article{Plass2018, + author = {Plass, Mireya and Solana, Jordi and Wolf, F. Alexander and Ayoub, Salah and Misios, Aristotelis and Glažar, Petar and Obermayer, Benedikt and Theis, Fabian J. and Kocks, Christine and Rajewsky, Nikolaus}, + title = {Cell type atlas and lineage tree of a whole complex animal by single-cell transcriptomics}, + volume = {360}, + issn = {1095-9203}, + url = {https://doi.org/10.1126/science.aaq1723}, + doi = {10.1126/science.aaq1723}, + number = {6391}, + journal = {Science}, + publisher = {American Association for the Advancement of Science (AAAS)}, + year = {2018}, + month = {may}, +} + +@article{Polanski2019, + author = {Polański, Krzysztof and Young, Matthew D and Miao, Zhichao and Meyer, Kerstin B and Teichmann, Sarah A and Park, Jong-Eun}, + editor = {Berger, Bonnie}, + title = {BBKNN: fast batch alignment of single cell transcriptomes}, + volume = {36}, + issn = {1367-4811}, + url = {https://doi.org/10.1093/bioinformatics/btz625}, + doi = {10.1093/bioinformatics/btz625}, + number = {3}, + journal = {Bioinformatics}, + publisher = {Oxford University Press (OUP)}, + year = {2019}, + month = {aug}, + pages = {964--965}, +} + +@article{Satija2015, + author = {Satija, Rahul and Farrell, Jeffrey A and Gennert, David and Schier, Alexander F and Regev, Aviv}, + doi = {10.1038/nbt.3192}, + url = {https://doi.org/10.1038/nbt.3192}, + year = {2015}, + month = {apr}, + publisher = {Springer Science and Business Media {LLC}}, + volume = {33}, + number = {5}, + pages = {495--502}, + title = {Spatial reconstruction of single-cell gene expression data}, + journal = {Nature Biotechnology}, +} + +@article{Scialdone2015, + author = {Scialdone, Antonio and Natarajan, Kedar N. and Saraiva, Luis R. and Proserpio, Valentina and Teichmann, Sarah A. and Stegle, Oliver and Marioni, John C. and Buettner, Florian}, + title = {Computational assignment of cell-cycle stage from single-cell transcriptome data}, + volume = {85}, + issn = {1046-2023}, + url = {https://doi.org/10.1016/j.ymeth.2015.06.021}, + doi = {10.1016/j.ymeth.2015.06.021}, + journal = {Methods}, + publisher = {Elsevier BV}, + year = {2015}, + month = {sep}, + pages = {54--61}, +} + +@article{Segerstolpe2016, + author = {Segerstolpe, Åsa and Palasantza, Athanasia and Eliasson, Pernilla and Andersson, Eva-Marie and Andréasson, Anne-Christine and Sun, Xiaoyan and Picelli, Simone and Sabirsh, Alan and Clausen, Maryam and Bjursell, Magnus K. and Smith, David M. 
and Kasper, Maria and Ämmälä, Carina and Sandberg, Rickard}, + title = {Single-Cell Transcriptome Profiling of Human Pancreatic Islets in Health and Type 2 Diabetes}, + volume = {24}, + issn = {1550-4131}, + url = {https://doi.org/10.1016/j.cmet.2016.08.020}, + doi = {10.1016/j.cmet.2016.08.020}, + number = {4}, + journal = {Cell Metabolism}, + publisher = {Elsevier BV}, + year = {2016}, + month = {oct}, + pages = {593--607}, +} + +@article{Setty2016, + author = {Setty, Manu and Tadmor, Michelle D and Reich-Zeliger, Shlomit and Angel, Omer and Salame, Tomer Meir and Kathail, Pooja and Choi, Kristy and Bendall, Sean and Friedman, Nir and Pe’er, Dana}, + title = {Wishbone identifies bifurcating developmental trajectories from single-cell data}, + volume = {34}, + issn = {1546-1696}, + url = {https://doi.org/10.1038/nbt.3569}, + doi = {10.1038/nbt.3569}, + number = {6}, + journal = {Nature Biotechnology}, + publisher = {Springer Science and Business Media LLC}, + year = {2016}, + month = {may}, + pages = {637--645}, +} + +@article{Setty2019, + author = {Setty, Manu and Kiseliovas, Vaidotas and Levine, Jacob and Gayoso, Adam and Mazutis, Linas and Pe’er, Dana}, + title = {Characterization of cell fate probabilities in single-cell data with Palantir}, + volume = {37}, + issn = {1546-1696}, + url = {https://doi.org/10.1038/s41587-019-0068-4}, + doi = {10.1038/s41587-019-0068-4}, + number = {4}, + journal = {Nature Biotechnology}, + publisher = {Springer Science and Business Media LLC}, + year = {2019}, + month = {mar}, + pages = {451--460}, +} + +@article{Stuart2019, + author = {Stuart, Tim and Butler, Andrew and Hoffman, Paul and Hafemeister, Christoph and Papalexi, Efthymia and Mauck, William M. and Hao, Yuhan and Stoeckius, Marlon and Smibert, Peter and Satija, Rahul}, + title = {Comprehensive Integration of Single-Cell Data}, + volume = {177}, + issn = {0092-8674}, + url = {https://doi.org/10.1016/j.cell.2019.05.031}, + doi = {10.1016/j.cell.2019.05.031}, + number = {7}, + journal = {Cell}, + publisher = {Elsevier BV}, + year = {2019}, + month = {jun}, + pages = {1888--1902.e21}, +} + +@article{Tarashansky2019, + author = {Tarashansky, Alexander J and Xue, Yuan and Li, Pengyang and Quake, Stephen R and Wang, Bo}, + title = {Self-assembling manifolds in single-cell RNA sequencing data}, + volume = {8}, + issn = {2050-084X}, + url = {https://doi.org/10.7554/eLife.48994}, + doi = {10.7554/elife.48994}, + journal = {eLife}, + publisher = {eLife Sciences Publications, Ltd}, + year = {2019}, + month = {sep}, +} + +@misc{Traag2017, + author = {Traag, Vincent}, + doi = {10.5281/ZENODO.35117}, + url = {https://zenodo.org/record/35117}, + title = {louvain-igraph: v0.5.3}, + publisher = {Zenodo}, + year = {2015}, + copyright = {Open Access}, +} + +@article{Traag2019, + author = {Traag, V. A. and Waltman, L. and van Eck, N. J.}, + title = {From Louvain to Leiden: guaranteeing well-connected communities}, + volume = {9}, + issn = {2045-2322}, + url = {https://doi.org/10.1038/s41598-019-41695-z}, + doi = {10.1038/s41598-019-41695-z}, + number = {1}, + journal = {Scientific Reports}, + publisher = {Springer Science and Business Media LLC}, + year = {2019}, + month = {mar}, +} + +@misc{Ulyanov2016, + author = {Ulyanov, Dmitry}, + title = {Multicore-TSNE}, + year = {2016}, + publisher = {GitHub}, + journal = {GitHub repository}, + url = {https://github.com/DmitryUlyanov/Multicore-TSNE}, + howpublished = {\url{https://github.com/DmitryUlyanov/Multicore-TSNE}}, +} + +@article{Wang2016, + author = {Wang, Yue J. 
and Golson, Maria L. and Schug, Jonathan and Traum, Daniel and Liu, Chengyang and Vivek, Kumar and Dorrell, Craig and Naji, Ali and Powers, Alvin C. and Chang, Kyong-Mi and Grompe, Markus and Kaestner, Klaus H.}, + title = {Single-Cell Mass Cytometry Analysis of the Human Endocrine Pancreas}, + volume = {24}, + issn = {1550-4131}, + url = {https://doi.org/10.1016/j.cmet.2016.09.007}, + doi = {10.1016/j.cmet.2016.09.007}, + number = {4}, + journal = {Cell Metabolism}, + publisher = {Elsevier BV}, + year = {2016}, + month = {oct}, + pages = {616--626}, +} + +@misc{Waskom2016, + author = {Waskom, Michael and Botvinnik, Olga and {Drewokane} and Hobson, Paul and {, David} and Halchenko, Yaroslav and Lukauskas, Saulius and Cole, John B. and Warmenhoven, Jordi and De Ruiter, Julian and Hoyer, Stephan and Vanderplas, Jake and Villalba, Santi and Kunter, Gero and Quintero, Eric and Martin, Marcel and Miles, Alistair and Meyer, Kyle and Augspurger, Tom and Yarkoni, Tal and Bachant, Pete and Williams, Mike and Evans, Constantine and Fitzgerald, Clark and {, Brian} and Wehner, Daniel and Hitz, Gregory and Ziegler, Erik and Qalieh, Adel and Lee, Antony}, + doi = {10.5281/ZENODO.54844}, + url = {https://zenodo.org/record/54844}, + title = {seaborn: v0.7.1 (June 2016)}, + publisher = {Zenodo}, + year = {2016}, + copyright = {Open Access}, +} + +@article{Weinreb2017, + author = {Weinreb, Caleb and Wolock, Samuel and Klein, Allon M}, + editor = {Berger, Bonnie}, + title = {SPRING: a kinetic interface for visualizing high dimensional single-cell expression data}, + volume = {34}, + issn = {1367-4811}, + url = {https://doi.org/10.1093/bioinformatics/btx792}, + doi = {10.1093/bioinformatics/btx792}, + number = {7}, + journal = {Bioinformatics}, + publisher = {Oxford University Press (OUP)}, + year = {2017}, + month = {dec}, + pages = {1246--1248}, +} + +@article{Wittmann2009, + author = {Wittmann, Dominik M and Krumsiek, Jan and Saez-Rodriguez, Julio and Lauffenburger, Douglas A and Klamt, Steffen and Theis, Fabian J}, + title = {Transforming Boolean models to continuous models: methodology and application to T-cell receptor signaling}, + volume = {3}, + issn = {1752-0509}, + url = {https://doi.org/10.1186/1752-0509-3-98}, + doi = {10.1186/1752-0509-3-98}, + number = {1}, + journal = {BMC Systems Biology}, + publisher = {Springer Science and Business Media LLC}, + year = {2009}, + month = {sep}, +} + +@article{Wolf2018, + author = {Wolf, F. Alexander and Angerer, Philipp and Theis, Fabian J.}, + title = {SCANPY: large-scale single-cell gene expression data analysis}, + journal = {Genome Biology}, + year = {2018}, + month = {feb}, + day = {06}, + volume = {19}, + number = {1}, + pages = {15}, + issn = {1474-760X}, + doi = {10.1186/s13059-017-1382-0}, + url = {https://doi.org/10.1186/s13059-017-1382-0}, +} + +@article{Wolf2019, + author = {Wolf, F. Alexander and Hamey, Fiona K. and Plass, Mireya and Solana, Jordi and Dahlin, Joakim S. and Göttgens, Berthold and Rajewsky, Nikolaus and Simon, Lukas and Theis, Fabian J.}, + title = {PAGA: graph abstraction reconciles clustering with trajectory inference through a topology preserving map of single cells}, + volume = {20}, + issn = {1474-760X}, + url = {https://doi.org/10.1186/s13059-019-1663-x}, + doi = {10.1186/s13059-019-1663-x}, + number = {1}, + journal = {Genome Biology}, + publisher = {Springer Science and Business Media LLC}, + year = {2019}, + month = {mar}, +} + +@article{Wolock2019, + author = {Wolock, Samuel L. 
and Lopez, Romain and Klein, Allon M.}, + doi = {10.1016/j.cels.2018.11.005}, + url = {https://doi.org/10.1016/j.cels.2018.11.005}, + year = {2019}, + month = {apr}, + publisher = {Elsevier {BV}}, + volume = {8}, + number = {4}, + pages = {281--291.e9}, + title = {Scrublet: Computational Identification of Cell Doublets in Single-Cell Transcriptomic Data}, + journal = {Cell Systems}, +} + +@article{Zheng2017, + author = {Zheng, Grace X. Y. and Terry, Jessica M. and Belgrader, Phillip and Ryvkin, Paul and Bent, Zachary W. and Wilson, Ryan and Ziraldo, Solongo B. and Wheeler, Tobias D. and McDermott, Geoff P. and Zhu, Junjie and Gregory, Mark T. and Shuga, Joe and Montesclaros, Luz and Underwood, Jason G. and Masquelier, Donald A. and Nishimura, Stefanie Y. and Schnall-Levin, Michael and Wyatt, Paul W. and Hindson, Christopher M. and Bharadwaj, Rajiv and Wong, Alexander and Ness, Kevin D. and Beppu, Lan W. and Deeg, H. Joachim and McFarland, Christopher and Loeb, Keith R. and Valente, William J. and Ericson, Nolan G. and Stevens, Emily A. and Radich, Jerald P. and Mikkelsen, Tarjei S. and Hindson, Benjamin J. and Bielas, Jason H.}, + doi = {10.1038/ncomms14049}, + url = {https://doi.org/10.1038/ncomms14049}, + year = {2017}, + month = {jan}, + publisher = {Springer Science and Business Media {LLC}}, + volume = {8}, + number = {1}, + title = {Massively parallel digital transcriptional profiling of single cells}, + journal = {Nature Communications}, +} + +@article{Zunder2015, + author = {Zunder, Eli R. and Lujan, Ernesto and Goltsev, Yury and Wernig, Marius and Nolan, Garry P.}, + title = {A Continuous Molecular Roadmap to iPSC Reprogramming through Progression Analysis of Single-Cell Mass Cytometry}, + volume = {16}, + issn = {1934-5909}, + url = {https://doi.org/10.1016/j.stem.2015.01.015}, + doi = {10.1016/j.stem.2015.01.015}, + number = {3}, + journal = {Cell Stem Cell}, + publisher = {Elsevier BV}, + year = {2015}, + month = {mar}, + pages = {323--337}, +} + +@article{vanDerMaaten2008, + author = {van der Maaten, Laurens and Hinton, Geoffrey}, + title = {Visualizing Data using t-SNE}, + journal = {Journal of Machine Learning Research}, + year = {2008}, + volume = {9}, + number = {86}, + pages = {2579--2605}, + url = {http://jmlr.org/papers/v9/vandermaaten08a.html}, +} + +@article{vanDijk2018, + author = {van Dijk, David and Sharma, Roshan and Nainys, Juozas and Yim, Kristina and Kathail, Pooja and Carr, Ambrose J. and Burdziak, Cassandra and Moon, Kevin R. and Chaffer, Christine L. 
and Pattabiraman, Diwakar and Bierie, Brian and Mazutis, Linas and Wolf, Guy and Krishnaswamy, Smita and Pe’er, Dana},
+    title = {Recovering Gene Interactions from Single-Cell Data Using Data Diffusion},
+    volume = {174},
+    issn = {0092-8674},
+    url = {https://doi.org/10.1016/j.cell.2018.05.061},
+    doi = {10.1016/j.cell.2018.05.061},
+    number = {3},
+    journal = {Cell},
+    publisher = {Elsevier BV},
+    year = {2018},
+    month = {jul},
+    pages = {716--729.e27},
+}
diff --git a/data/docs/references.md b/data/docs/references.md
new file mode 100644
index 0000000000000000000000000000000000000000..00ad6a6ea9418df96256615a564065e38c8332ea
--- /dev/null
+++ b/data/docs/references.md
@@ -0,0 +1,5 @@
+# References
+
+```{bibliography}
+:cited:
+```
diff --git a/data/docs/tutorials/index.md b/data/docs/tutorials/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..4a5667d5ef4e465782418e59092d5fdd46997dea
--- /dev/null
+++ b/data/docs/tutorials/index.md
@@ -0,0 +1,56 @@
+---
+orphan: true
+---
+
+# Tutorials
+
+The easiest way to get familiar with ehrapy is to follow along with our tutorials.
+Many are also designed to work seamlessly in Binder, a free cloud computing platform.
+
+:::{note}
+For questions about the usage of ehrapy, use the [zulip forum](https://scverse.zulipchat.com/#narrow/channel/465075-ehrapy).
+:::
+
+## Quick start
+
+```{eval-rst}
+.. nbgallery::
+
+    notebooks/ehrapy_introduction
+    notebooks/mimic_2_introduction
+    notebooks/mimic_2_fate
+    notebooks/mimic_2_survival_analysis
+    notebooks/mimic_2_causal_inference
+    notebooks/medcat
+    notebooks/ml_usecases
+    notebooks/ontology_mapping
+    notebooks/fhir
+    notebooks/cohort_tracking
+    notebooks/bias
+    notebooks/out_of_core
+    notebooks/patient_trajectory
+
+```
+
+### Glossary
+
+```{eval-rst}
+.. tab-set::
+
+    .. tab-item:: AnnData
+
+        `AnnData <https://anndata.readthedocs.io>`_ is short for Annotated Data and is the primary data structure that ehrapy uses.
+        It is based on the principle of a single NumPy matrix X flanked by two Pandas DataFrames.
+        Rows are called observations (in our case patients, patient visits, or similar) and columns
+        are known as variables (any feature such as age or a B12 level).
+        For a more in-depth introduction please read the `AnnData paper <https://doi.org/10.1101/2021.12.16.473007>`_.
+
+
+    .. tab-item:: scanpy
+
+        The implementation of ehrapy is based on `scanpy <https://scanpy.readthedocs.io>`_, a framework for analyzing single-cell sequencing data.
+        ehrapy reuses the algorithms implemented in scanpy and wraps them for simple access.
+        For a more in-depth introduction please read the `Scanpy paper <https://doi.org/10.1186/s13059-017-1382-0>`_.
+```
+
+[zulip forum]: https://scverse.zulipchat.com/#narrow/channel/465075-ehrapy
diff --git a/data/docs/usage/usage.md b/data/docs/usage/usage.md
new file mode 100644
index 0000000000000000000000000000000000000000..6f3f2366ad03b61f7b55155d4aa0f39e21e6fb52
--- /dev/null
+++ b/data/docs/usage/usage.md
@@ -0,0 +1,431 @@
+# API
+
+Import the ehrapy API as follows:
+
+```python
+import ehrapy as ep
+```
+
+You can then access the respective modules like:
+
+```python
+ep.pl.cool_fancy_plot()
+```
+
+```{eval-rst}
+.. currentmodule:: ehrapy
+```
+
+## Reading and writing
+
+```{eval-rst}
+.. module:: ehrapy
+```
+
+```{eval-rst}
+.. autosummary::
+    :toctree: io
+    :nosignatures:
+
+    io.read_csv
+    io.read_h5ad
+    io.read_fhir
+    io.write
+```
+
+## Data
+
+```{eval-rst}
+.. 
autosummary:: + :toctree: data + :nosignatures: + + data.mimic_2 + data.mimic_2_preprocessed + data.mimic_3_demo + data.diabetes_130_raw + data.diabetes_130_fairlearn + data.heart_failure + data.chronic_kidney_disease + data.breast_tissue + data.cervical_cancer_risk_factors + data.dermatology + data.echocardiogram + data.heart_disease + data.hepatitis + data.statlog_heart + data.thyroid + data.breast_cancer_coimbra + data.parkinson_dataset_with_replicated_acoustic_features + data.parkinsons + data.parkinsons_disease_classification + data.parkinsons_telemonitoring +``` + +## Preprocessing + +Any transformation of the data matrix that is not a tool. +Other than tools, preprocessing steps usually don’t return an easily interpretable annotation, but perform a basic transformation on the data matrix. + +### Basic preprocessing + +```{eval-rst} +.. autosummary:: + :toctree: preprocessing + :nosignatures: + + preprocessing.encode + preprocessing.pca + preprocessing.regress_out + preprocessing.subsample + preprocessing.balanced_sample + preprocessing.highly_variable_features + preprocessing.winsorize + preprocessing.clip_quantile + preprocessing.summarize_measurements +``` + +### Quality control + +```{eval-rst} +.. autosummary:: + :toctree: preprocessing + :nosignatures: + + preprocessing.qc_metrics + preprocessing.qc_lab_measurements + preprocessing.mcar_test + preprocessing.detect_bias +``` + +### Imputation + +```{eval-rst} +.. autosummary:: + :toctree: preprocessing + :nosignatures: + + preprocessing.explicit_impute + preprocessing.simple_impute + preprocessing.knn_impute + preprocessing.miss_forest_impute + preprocessing.mice_forest_impute +``` + +### Normalization + +```{eval-rst} +.. autosummary:: + :toctree: preprocessing + :nosignatures: + + preprocessing.log_norm + preprocessing.maxabs_norm + preprocessing.minmax_norm + preprocessing.power_norm + preprocessing.quantile_norm + preprocessing.robust_scale_norm + preprocessing.scale_norm + preprocessing.offset_negative_values +``` + +### Dataset Shift Correction + +Partially overlaps with dataset integration. Note that a simple batch correction method is available via `pp.regress_out()`. + +```{eval-rst} +.. autosummary:: + :toctree: preprocessing + :nosignatures: + + preprocessing.combat +``` + +### Neighbors + +```{eval-rst} +.. autosummary:: + :toctree: preprocessing + :nosignatures: + + preprocessing.neighbors +``` + +## Tools + +Any transformation of the data matrix that is not preprocessing. +In contrast to a preprocessing function, a tool usually adds an easily interpretable annotation to the data matrix, which can then be visualized with a corresponding plotting function. + +### Embeddings + +```{eval-rst} +.. autosummary:: + :toctree: tools + :nosignatures: + + tools.tsne + tools.umap + tools.draw_graph + tools.diffmap + tools.embedding_density +``` + +### Clustering and trajectory inference + +```{eval-rst} +.. autosummary:: + :toctree: tools + :nosignatures: + + tools.leiden + tools.dendrogram + tools.dpt + tools.paga +``` + +### Feature Ranking + +```{eval-rst} +.. autosummary:: + :toctree: tools + :nosignatures: + + tools.rank_features_groups + tools.filter_rank_features_groups + tools.rank_features_supervised +``` + +### Dataset integration + +```{eval-rst} +.. autosummary:: + :toctree: tools + :nosignatures: + + tools.ingest +``` + +### Natural language processing + +```{eval-rst} +.. 
autosummary:: + :toctree: tools + :nosignatures: + + tools.annotate_text + tools.get_medcat_annotation_overview + tools.add_medcat_annotation_to_obs +``` + +### Survival Analysis + +```{eval-rst} +.. autosummary:: + :toctree: tools + :nosignatures: + + tools.ols + tools.glm + tools.kaplan_meier + tools.test_kmf_logrank + tools.test_nested_f_statistic + tools.cox_ph + tools.weibull_aft + tools.log_logistic_aft + tools.nelson_aalen + tools.weibull + +``` + +### Causal Inference + +```{eval-rst} +.. autosummary:: + :toctree: tools + :nosignatures: + + tools.causal_inference +``` + +### Cohort Tracking + +```{eval-rst} +.. autosummary:: + :toctree: tools + :nosignatures: + + tools.CohortTracker +``` + +## Plotting + +The plotting module `ehrapy.pl.\*` largely parallels the `tl.\*` and a few of the `pp.\*` functions. +For most tools and for some preprocessing functions, you will find a plotting function with the same name. + +### Generic + +```{eval-rst} +.. autosummary:: + :toctree: plot + :nosignatures: + + plot.scatter + plot.heatmap + plot.dotplot + plot.tracksplot + plot.violin + plot.stacked_violin + plot.matrixplot + plot.clustermap + plot.ranking + plot.dendrogram + plot.catplot +``` + +### Quality Control and missing values + +```{eval-rst} +.. autosummary:: + :toctree: plot + :nosignatures: + + plot.missing_values_matrix + plot.missing_values_barplot + plot.missing_values_heatmap + plot.missing_values_dendrogram +``` + +### Classes + +Please refer to [Scanpy's plotting classes documentation](https://scanpy.readthedocs.io/en/stable/api.html#classes). + +### Tools + +Methods that extract and visualize tool-specific annotation in an AnnData object. For any method in module `tl`, there is a method with the same name in `pl`. + +```{eval-rst} +.. autosummary:: + :toctree: plot + :nosignatures: + + plot.pca + plot.pca_loadings + plot.pca_variance_ratio + plot.pca_overview +``` + +### Embeddings + +```{eval-rst} +.. autosummary:: + :toctree: plot + :nosignatures: + + plot.tsne + plot.umap + plot.diffmap + plot.draw_graph + plot.embedding + plot.embedding_density +``` + +### Branching trajectories and pseudotime + +```{eval-rst} +.. autosummary:: + :toctree: plot + :nosignatures: + + plot.dpt_groups_pseudotime + plot.dpt_timeseries + plot.paga + plot.paga_path + plot.paga_compare +``` + +### Feature Ranking + +```{eval-rst} +.. autosummary:: + :toctree: plot + :nosignatures: + + plot.rank_features_groups + plot.rank_features_groups_violin + plot.rank_features_groups_stacked_violin + plot.rank_features_groups_heatmap + plot.rank_features_groups_dotplot + plot.rank_features_groups_matrixplot + plot.rank_features_groups_tracksplot + plot.rank_features_supervised +``` + +### Survival Analysis + +```{eval-rst} +.. autosummary:: + :toctree: plot + :nosignatures: + + plot.ols + plot.kaplan_meier +``` + +### Causal Inference + +```{eval-rst} +.. autosummary:: + :toctree: plot + :nosignatures: + + plot.causal_effect +``` + +## AnnData utilities + +```{eval-rst} +.. autosummary:: + :toctree: anndata + :nosignatures: + + anndata.infer_feature_types + anndata.feature_type_overview + anndata.replace_feature_types + anndata.df_to_anndata + anndata.anndata_to_df + anndata.move_to_obs + anndata.delete_from_obs + anndata.move_to_x + anndata.get_obs_df + anndata.get_var_df + anndata.get_rank_features_df +``` + +## Settings + +A convenience object for setting some default {obj}`matplotlib.rcParams` and a +high-resolution jupyter display backend useful for use in notebooks. 
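+
+A few common defaults can be adjusted directly on `ep.settings`. A minimal, illustrative sketch
+(the attribute names are taken from the `EhrapyConfig` class defined in `ehrapy/_settings.py`; this
+is not an exhaustive list of options):
+
+```python
+import ehrapy as ep
+
+ep.settings.verbosity = "info"  # one of 'error', 'warning', 'success', 'info', 'hint', 'debug'
+ep.settings.autosave = True  # automatically save figures to ep.settings.figdir
+ep.settings.figdir = "./figures/"  # directory used for saved figures
+```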
+
+This `ehrapy.settings` object is an instance of the {class}`~ehrapy._settings.EhrapyConfig` class and allows configuring ehrapy:
+
+```python
+import ehrapy as ep
+
+ep.settings.set_figure_params(dpi=150)
+```
+
+Please refer to the [Scanpy settings documentation](https://scanpy.readthedocs.io/en/stable/api.html#settings)
+for further configuration options; ehrapy will adopt these options and update this documentation over time.
+
+## Dependency Versions
+
+ehrapy is complex software with many dependencies. To ensure a consistent runtime environment, you should record the
+versions of all dependencies used for an analysis. This comes in handy when diagnosing issues and reproducing results.
+
+Call the function via:
+
+```python
+import ehrapy as ep
+
+ep.print_versions()
+```
diff --git a/data/docs/utils.py b/data/docs/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/data/ehrapy/SCANPY_LICENSE b/data/ehrapy/SCANPY_LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..6f78d4a9fde60589bfd1b04125badcbcbd5f8a86
--- /dev/null
+++ b/data/ehrapy/SCANPY_LICENSE
@@ -0,0 +1,29 @@
+BSD 3-Clause License
+
+Copyright (c) 2017 F. Alexander Wolf, P. Angerer, Theis Lab
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+  this list of conditions and the following disclaimer in the documentation
+  and/or other materials provided with the distribution.
+
+* Neither the name of the copyright holder nor the names of its
+  contributors may be used to endorse or promote products derived from
+  this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/data/ehrapy/__init__.py b/data/ehrapy/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..81bdecb382d7be5a93c9f8026fc5c0de59e50020 --- /dev/null +++ b/data/ehrapy/__init__.py @@ -0,0 +1,22 @@ +"""Top-level package for ehrapy.""" + +__author__ = "Lukas Heumos" +__email__ = "lukas.heumos@posteo.net" +__version__ = "0.12.0" + +import os + +# https://docs.scipy.org/doc/scipy/dev/api-dev/array_api.html +os.environ["SCIPY_ARRAY_API"] = "1" + +from ehrapy._settings import EhrapyConfig, ehrapy_settings + +settings: EhrapyConfig = ehrapy_settings + +from ehrapy import anndata as ad +from ehrapy import data as dt +from ehrapy import io +from ehrapy import plot as pl +from ehrapy import preprocessing as pp +from ehrapy import tools as tl +from ehrapy.core.meta_information import print_versions diff --git a/data/ehrapy/_compat.py b/data/ehrapy/_compat.py new file mode 100644 index 0000000000000000000000000000000000000000..dc94a6d3be7499f52986249b20ecb0d87d3b59b0 --- /dev/null +++ b/data/ehrapy/_compat.py @@ -0,0 +1,23 @@ +# Since we might check whether an object is an instance of dask.array.Array +# without requiring dask installed in the environment. +from collections.abc import Callable + +try: + import dask.array as da + + DASK_AVAILABLE = True +except ImportError: + DASK_AVAILABLE = False + + +def _raise_array_type_not_implemented(func: Callable, type_: type) -> NotImplementedError: + raise NotImplementedError( + f"{func.__name__} does not support array type {type_}. Must be of type {func.registry.keys()}." # type: ignore + ) + + +def is_dask_array(array): + if DASK_AVAILABLE: + return isinstance(array, da.Array) + else: + return False diff --git a/data/ehrapy/_settings.py b/data/ehrapy/_settings.py new file mode 100644 index 0000000000000000000000000000000000000000..f733c059436fadcf95907e119a4a755ddf58f576 --- /dev/null +++ b/data/ehrapy/_settings.py @@ -0,0 +1,366 @@ +from __future__ import annotations + +import inspect +from pathlib import Path +from time import time +from typing import TYPE_CHECKING, Any, Literal + +from lamin_utils._logger import logger +from matplotlib import pyplot as plt +from scanpy.plotting import set_rcParams_scanpy + +if TYPE_CHECKING: + from collections.abc import Iterable + +VERBOSITY_TO_INT = { + "error": 0, # 40 + "warning": 1, # 30 + "success": 2, # 25 + "info": 3, # 20 + "hint": 4, # 15 + "debug": 5, # 10 +} +VERBOSITY_TO_STR: dict[int, str] = dict( + [reversed(i) for i in VERBOSITY_TO_INT.items()] # type: ignore +) + + +def _type_check(var: Any, varname: str, types: type | tuple[type, ...]): # pragma: no cover + if isinstance(var, types): + return + if isinstance(types, type): + possible_types_str = types.__name__ + else: + type_names = [t.__name__ for t in types] + possible_types_str = "{} or {}".format(", ".join(type_names[:-1]), type_names[-1]) + raise TypeError(f"{varname} must be of type {possible_types_str}") + + +class EhrapyConfig: # pragma: no cover + """Configuration manager for ehrapy.""" + + def __init__( + self, + *, + plot_suffix: str = "", + file_format_data: str = "h5ad", + file_format_figs: str = "pdf", + autosave: bool = False, + autoshow: bool = True, + writedir: str | Path = "./ehrapy_write/", + cachedir: str | Path = "./ehrapy_cache/", + datasetdir: str | Path = "./ehrapy_data/", + figdir: str | Path = "./figures/", + cache_compression: str | None = "lzf", + max_memory=15, + n_jobs: int = -1, + logfile: str | Path | None = None, + categories_to_ignore: Iterable[str] = ("N/A", "dontknow", "no_gate", 
"?"), + _frameon: bool = True, + _vector_friendly: bool = False, + _low_resolution_warning: bool = True, + n_pcs=50, + ): + # logging + self._verbosity_int: int = 1 # warning-level logging + logger.set_verbosity(self._verbosity_int) + # rest + self.plot_suffix = plot_suffix + self.file_format_data = file_format_data + self.file_format_figs = file_format_figs + self.autosave = autosave + self.autoshow = autoshow + self.writedir = writedir # type: ignore + self.cachedir = cachedir # type: ignore + self.datasetdir = datasetdir # type: ignore + self.figdir = figdir # type: ignore + self.cache_compression = cache_compression + self.max_memory = max_memory + self.n_jobs = n_jobs + self.categories_to_ignore = categories_to_ignore # type: ignore + self._frameon = _frameon + """bool: See set_figure_params.""" + + self._vector_friendly = _vector_friendly + """Set to true if you want to include pngs in svgs and pdfs.""" + + self._low_resolution_warning = _low_resolution_warning + """Print warning when saving a figure with low resolution.""" + + self._start = time() + """Time when the settings module is first imported.""" + + self._previous_time = self._start + """Variable for timing program parts.""" + + self._previous_memory_usage = -1 + """Stores the previous memory usage.""" + + self.N_PCS = n_pcs + """Default number of principal components to use.""" + + @property + def verbosity(self) -> str: + """Logger verbosity (default 'warning'). + + - 'error': ❌ only show error messages + - 'warning': ❗ also show warning messages + - 'success': ✅ also show success and save messages + - 'info': 💡 also show info messages + - 'hint': 💡 also show hint messages + - 'debug': 🐛 also show detailed debug messages + """ + return VERBOSITY_TO_STR[self._verbosity_int] + + @verbosity.setter + def verbosity(self, verbosity: str | int): + if isinstance(verbosity, str): + verbosity_int = VERBOSITY_TO_INT[verbosity] + else: + verbosity_int = verbosity + self._verbosity_int = verbosity_int + logger.set_verbosity(verbosity_int) + + @property + def plot_suffix(self) -> str: + """Global suffix that is appended to figure filenames.""" + return self._plot_suffix + + @plot_suffix.setter + def plot_suffix(self, plot_suffix: str): + _type_check(plot_suffix, "plot_suffix", str) + self._plot_suffix = plot_suffix + + @property + def file_format_data(self) -> str: + """File format for saving AnnData objects. + + Allowed are 'txt', 'csv' (comma separated value file) for exporting and 'h5ad' (hdf5) for lossless saving. + """ + return self._file_format_data + + @file_format_data.setter + def file_format_data(self, file_format: str): + _type_check(file_format, "file_format_data", str) + file_format_options = {"csv", "h5ad"} + if file_format not in file_format_options: + raise ValueError(f"Cannot set file_format_data to {file_format}. " f"Must be one of {file_format_options}") + self._file_format_data = file_format + + @property + def file_format_figs(self) -> str: + """File format for saving figures. + + For example 'png', 'pdf' or 'svg'. Many other formats work as well (see `matplotlib.pyplot.savefig`). + """ + return self._file_format_figs + + @file_format_figs.setter + def file_format_figs(self, figure_format: str): + _type_check(figure_format, "figure_format_data", str) + self._file_format_figs = figure_format + + @property + def autosave(self) -> bool: + """Automatically save figures in :attr:`~scanpy._settings.ScanpyConfig.figdir` (default `False`). + + Do not show plots/figures interactively. 
+ """ + return self._autosave + + @autosave.setter + def autosave(self, autosave: bool): + _type_check(autosave, "autosave", bool) + self._autosave = autosave + + @property + def autoshow(self) -> bool: + """Automatically show figures if `autosave == False` (default `True`). + + There is no need to call the matplotlib pl.show() in this case. + """ + return self._autoshow + + @autoshow.setter + def autoshow(self, autoshow: bool): + _type_check(autoshow, "autoshow", bool) + self._autoshow = autoshow + + @property + def writedir(self) -> Path: + """Directory where the function scanpy.write writes to by default.""" + return self._writedir + + @writedir.setter + def writedir(self, writedir: str | Path): + _type_check(writedir, "writedir", (str, Path)) + self._writedir = Path(writedir) + + @property + def cachedir(self) -> Path: + """Directory for cache files (default `'./cache/'`).""" + return self._cachedir + + @cachedir.setter + def cachedir(self, cachedir: str | Path): + _type_check(cachedir, "cachedir", (str, Path)) + self._cachedir = Path(cachedir) + + @property + def datasetdir(self) -> Path: + """Directory for example :mod:`~scanpy.datasets` (default `'./data/'`).""" + return self._datasetdir + + @datasetdir.setter + def datasetdir(self, datasetdir: str | Path): + _type_check(datasetdir, "datasetdir", (str, Path)) + self._datasetdir = Path(datasetdir).resolve() + + @property + def figdir(self) -> Path: + """Directory for saving figures (default `'./figures/'`).""" + return self._figdir + + @figdir.setter + def figdir(self, figdir: str | Path): + _type_check(figdir, "figdir", (str, Path)) + self._figdir = Path(figdir) + + @property + def cache_compression(self) -> str | None: + """Compression for `sc.read(..., cache=True)` (default `'lzf'`). + + May be `'lzf'`, `'gzip'`, or `None`. + """ + return self._cache_compression + + @cache_compression.setter + def cache_compression(self, cache_compression: str | None): + if cache_compression not in {"lzf", "gzip", None}: + raise ValueError(f"`cache_compression` ({cache_compression}) " "must be in {'lzf', 'gzip', None}") + self._cache_compression = cache_compression + + @property + def max_memory(self) -> int | float: + """Maximal memory usage in Gigabyte. + + Is currently not well respected.... 
+ """ + return self._max_memory + + @max_memory.setter + def max_memory(self, max_memory: int | float): + _type_check(max_memory, "max_memory", (int, float)) + self._max_memory = max_memory + + @property + def n_jobs(self) -> int: + """Default number of jobs/ CPUs to use for parallel computing.""" + return self._n_jobs + + @n_jobs.setter + def n_jobs(self, n_jobs: int): + _type_check(n_jobs, "n_jobs", int) + self._n_jobs = n_jobs + + @property + def categories_to_ignore(self) -> list[str]: + """Categories that are omitted in plotting etc.""" + return self._categories_to_ignore + + @categories_to_ignore.setter + def categories_to_ignore(self, categories_to_ignore: Iterable[str]): + categories_to_ignore = list(categories_to_ignore) + for i, cat in enumerate(categories_to_ignore): + _type_check(cat, f"categories_to_ignore[{i}]", str) + self._categories_to_ignore = categories_to_ignore + + # -------------------------------------------------------------------------------- + # Functions + # -------------------------------------------------------------------------------- + + # Collected from the print_* functions in matplotlib.backends + # fmt: off + _Format = Literal[ + 'png', 'jpg', 'tif', 'tiff', + 'pdf', 'ps', 'eps', 'svg', 'svgz', 'pgf', + 'raw', 'rgba', + ] + + # fmt: on + + def set_figure_params( + self, + scanpy: bool = True, + dpi: int = 80, + dpi_save: int = 150, + frameon: bool = True, + vector_friendly: bool = True, + fontsize: int = 14, + figsize: int | None = None, + color_map: str | None = None, + format: _Format = "pdf", + facecolor: str | None = None, + transparent: bool = False, + ipython_format: str = "png2x", + dark: bool = False, + ): + """Set resolution/size, styling and format of figures. + + Args: + scanpy: Init default values for :obj:`matplotlib.rcParams` based on Scanpy's. + dpi: Resolution of rendered figures – this influences the size of figures in notebooks. + dpi_save: Resolution of saved figures. This should typically be higher to achieve publication quality. + frameon: Add frames and axes labels to scatter plots. + vector_friendly: Plot scatter plots using `png` backend even when exporting as `pdf` or `svg`. + fontsize: Set the fontsize for several `rcParams` entries. Ignored if `scanpy=False`. + figsize: Set plt.rcParams['figure.figsize']. + color_map: Convenience method for setting the default color map. Ignored if `scanpy=False`. + format: This sets the default format for saving figures: `file_format_figs`. + facecolor: Sets backgrounds via `rcParams['figure.facecolor'] = facecolor` and `rcParams['axes.facecolor'] = facecolor`. + transparent: Save figures with transparent back ground. Sets `rcParams['savefig.transparent']`. + ipython_format: Only concerns the notebook/IPython environment; see :func:`~IPython.display.set_matplotlib_formats` for details. + dark: Whether to enable Matplotlibs dark styled. Inverts all colors. 
+ """ + if self._is_run_from_ipython(): + if isinstance(ipython_format, str): + ipython_format = [ipython_format] # type: ignore + from matplotlib_inline.backend_inline import set_matplotlib_formats + + set_matplotlib_formats(*ipython_format) + + from matplotlib import rcParams + + self._vector_friendly = vector_friendly + self.file_format_figs = format + if dpi is not None: + rcParams["figure.dpi"] = dpi + if dpi_save is not None: + rcParams["savefig.dpi"] = dpi_save + if transparent is not None: + rcParams["savefig.transparent"] = transparent + if facecolor is not None: + rcParams["figure.facecolor"] = facecolor + rcParams["axes.facecolor"] = facecolor + if scanpy: + set_rcParams_scanpy(fontsize=fontsize, color_map=color_map) + if figsize is not None: + rcParams["figure.figsize"] = figsize + if dark: + plt.style.use("dark_background") + self._frameon = frameon + + @staticmethod + def _is_run_from_ipython() -> bool: + """Determines whether we are currently in IPython.""" + import builtins + + return getattr(builtins, "__IPYTHON__", False) + + def __str__(self) -> str: + return "\n".join( + f"{k} = {v!r}" for k, v in inspect.getmembers(self) if not k.startswith("_") and not k == "getdoc" + ) + + +ehrapy_settings = EhrapyConfig() diff --git a/data/ehrapy/_utils_available.py b/data/ehrapy/_utils_available.py new file mode 100644 index 0000000000000000000000000000000000000000..7b11668160ed5c88399b403e816f5fd754a4f6d5 --- /dev/null +++ b/data/ehrapy/_utils_available.py @@ -0,0 +1,36 @@ +from __future__ import annotations + +import importlib.util +from subprocess import PIPE, Popen + + +def _check_module_importable(package: str) -> bool: + """Checks whether a module is installed and can be loaded. + + Args: + package: The package to check. + + Returns: + True if the package is installed, False otherwise. + """ + module_information = importlib.util.find_spec(package) + module_available = module_information is not None + + return module_available + + +def _shell_command_accessible(command: list[str]) -> bool: + """Checks whether the provided command is accessible in the current shell. + + Args: + command: The command to check. Spaces are separated as list elements. + + Returns: + True if the command is accessible, False otherwise. 
+    """
+    try:
+        # `command` is a list of arguments, so the shell must not be involved:
+        # with `shell=True`, only the first list element would actually be executed.
+        command_accessible = Popen(command, stdout=PIPE, stderr=PIPE, universal_newlines=True)
+        command_accessible.communicate()
+    except FileNotFoundError:
+        return False
+
+    return command_accessible.returncode == 0
diff --git a/data/ehrapy/_utils_doc.py b/data/ehrapy/_utils_doc.py
new file mode 100644
index 0000000000000000000000000000000000000000..3d6b07f7168d28ec02db144247452d958a284e77
--- /dev/null
+++ b/data/ehrapy/_utils_doc.py
@@ -0,0 +1,231 @@
+import inspect
+from collections.abc import Callable
+from textwrap import dedent
+
+
+def getdoc(c_or_f: Callable | type) -> str | None:  # pragma: no cover
+    if getattr(c_or_f, "__doc__", None) is None:
+        return None
+    doc = inspect.getdoc(c_or_f)
+    if isinstance(c_or_f, type) and hasattr(c_or_f, "__init__"):
+        sig = inspect.signature(c_or_f.__init__)  # type: ignore
+    else:
+        sig = inspect.signature(c_or_f)
+
+    def type_doc(name: str):
+        param: inspect.Parameter = sig.parameters[name]
+        cls = getattr(param.annotation, "__qualname__", repr(param.annotation))
+        if param.default is not param.empty:
+            return f"{cls}, optional (default: {param.default!r})"
+        else:
+            return cls
+
+    return "\n".join(
+        f"{line} : {type_doc(line)}" if line.strip() in sig.parameters else line for line in doc.split("\n")
+    )
+
+
+def _doc_params(**kwds):  # pragma: no cover
+    """\
+    Docstrings should start with "\" in the first line for proper formatting.
+    """
+
+    def dec(obj):
+        obj.__orig_doc__ = obj.__doc__
+        obj.__doc__ = dedent(obj.__doc__).format_map(kwds)
+        return obj
+
+    return dec
+
+
+"""\
+Shared docstrings for plotting function parameters.
+"""
+
+
+doc_adata_color_etc = """\
+adata: :class:`~anndata.AnnData` object containing all observations.
+    color: Keys for annotations of observations/patients or features, e.g., `'ann1'` or `['ann1', 'ann2']`.
+    feature_symbols: Column name in `.var` DataFrame that stores feature symbols. By default `var_names`
+        refer to the index column of the `.var` DataFrame. Setting this option allows alternative names to be used.
+    use_raw: Use `.raw` attribute of `adata` for coloring with feature values. If `None`,
+        defaults to `True` if `layer` isn't provided and `adata.raw` is present.
+    layer: Name of the AnnData object layer to plot. By default
+        adata.raw.X is plotted. If `use_raw=False` is set, then `adata.X` is plotted.
+        If `layer` is set to a valid layer name, then the layer is plotted. `layer` takes precedence over `use_raw`.\
+"""
+
+doc_edges_arrows = """\
+edges: Show edges.
+    edges_width: Width of edges.
+    edges_color: Color of edges. See :func:`~networkx.drawing.nx_pylab.draw_networkx_edges`.
+    neighbors_key: Where to look for neighbors connectivities.
+        If not specified, this looks at `.obsp['connectivities']` for connectivities
+        (default storage place for pp.neighbors). If specified, this looks at
+        `.obsp[.uns[neighbors_key]['connectivities_key']]` for connectivities.
+    arrows: Show arrows (deprecated in favour of `scvelo.pl.velocity_embedding`).
+    arrows_kwds: Passed to :meth:`~matplotlib.axes.Axes.quiver`\
+"""
+
+# Docs for pl.scatter
+doc_scatter_basic = """\
+sort_order: For continuous annotations used as color parameter, plot data points with higher values on top of others.
+    groups: Restrict to a few categories in categorical observation annotation.
+        The default is not to restrict to any groups.
+    components: For instance, `['1,2', '2,3']`. To plot all available components use `components='all'`.
+    projection: Projection of plot (default: `'2d'`).
+    legend_loc: Location of legend, either `'on data'`, `'right margin'` or a valid keyword for the `loc` parameter of :class:`~matplotlib.legend.Legend`.
+    legend_fontsize: Numeric size in pt or string describing the size. See :meth:`~matplotlib.text.Text.set_fontsize`.
+    legend_fontweight: Legend font weight. A numeric value in range 0-1000 or a string.
+        Defaults to `'bold'` if `legend_loc == 'on data'`, otherwise to `'normal'`.
+        See :meth:`~matplotlib.text.Text.set_fontweight`.
+    legend_fontoutline: Line width of the legend font outline in pt. Draws a white outline using the path effect :class:`~matplotlib.patheffects.withStroke`.
+    size: Point size. If `None`, is automatically computed as 120000 / n_features.
+        Can be a sequence containing the size for each observation. The order should be the same as in adata.obs.
+    color_map: Color map to use for continuous variables. Can be a name or a
+        :class:`~matplotlib.colors.Colormap` instance (e.g. `"magma"`, `"viridis"`
+        or `mpl.cm.cividis`), see :func:`~matplotlib.cm.get_cmap`.
+        If `None`, the value of `mpl.rcParams["image.cmap"]` is used.
+        The default `color_map` can be set using :func:`~scanpy.set_figure_params`.
+    palette: Colors to use for plotting categorical annotation groups.
+        The palette can be a valid :class:`~matplotlib.colors.ListedColormap` name
+        (`'Set2'`, `'tab20'`, …), a :class:`~cycler.Cycler` object, a dict mapping
+        categories to colors, or a sequence of colors. Colors must be valid
+        matplotlib colors (see :func:`~matplotlib.colors.is_color_like`).
+        If `None`, `mpl.rcParams["axes.prop_cycle"]` is used unless the categorical
+        variable already has colors stored in `adata.uns["{var}_colors"]`.
+        If provided, values of `adata.uns["{var}_colors"]` will be set.
+    na_color: Color to use for null or masked values. Can be anything matplotlib accepts as a color.
+        Used for all points if `color=None`.
+    na_in_legend: If there are missing values, whether they get an entry in the legend.
+        Currently only implemented for categorical legends.
+    frameon: Draw a frame around the scatter plot. Defaults to value set in :func:`~scanpy.set_figure_params` (default: True).
+    title: Provide title for panels either as string or list of strings, e.g. `['title1', 'title2', ...]`.\
+"""
+
+doc_vbound_percentile = """\
+    vmin: The value representing the lower limit of the color scale. Values smaller than vmin are plotted
+        with the same color as vmin. vmin can be a number, a string, a function or `None`. If
+        vmin is a string and has the format `pN`, it is interpreted as vmin=percentile(N).
+        For example, vmin='p1.5' is interpreted as the 1.5 percentile. If vmin is a function, then
+        vmin is interpreted as the return value of the function over the list of values to plot.
+        For example, to set vmin to the mean of the values to plot, define `def my_vmin(values): return
+        np.mean(values)` and then set `vmin=my_vmin`. If vmin is None (default), an automatic
+        minimum value is used as defined by the matplotlib `scatter` function. When making multiple
+        plots, vmin can be a list of values, one for each plot. For example `vmin=[0.1, 'p1', None, my_vmin]`
+    vmax: The value representing the upper limit of the color scale. The format is the same as for `vmin`.
+    vcenter: The value representing the center of the color scale. Useful for diverging colormaps.
+        The format is the same as for `vmin`.
+        Example: sc.pl.umap(adata, color='TREM2', vcenter='p50', cmap='RdBu_r')\
+"""
+
+doc_vboundnorm = """\
+    vmin: The value representing the lower limit of the color scale. Values smaller than vmin are plotted with the same color as vmin.
+    vmax: The value representing the upper limit of the color scale. Values larger than vmax are plotted with the same color as vmax.
+    vcenter: The value representing the center of the color scale. Useful for diverging colormaps.
+    norm: Custom color normalization object from matplotlib. See `https://matplotlib.org/stable/tutorials/colors/colormapnorms.html` for details.\
+"""
+
+doc_outline = """\
+    add_outline: If set to True, this will add a thin border around groups of dots. In some situations
+        this can enhance the aesthetics of the resulting image.
+    outline_color: Tuple with two valid color names used to adjust the add_outline. The first color is the
+        border color (default: black), while the second color is a gap color between the
+        border color and the scatter dot (default: white).
+    outline_width: Tuple with two width numbers used to adjust the outline. The first value is the width
+        of the border color as a fraction of the scatter dot size (default: 0.3). The second value is
+        the width of the gap color (default: 0.05).\
+"""
+
+doc_panels = """\
+    ncols: Number of panels per row.
+    wspace: Adjust the width of the space between multiple panels.
+    hspace: Adjust the height of the space between multiple panels.
+    return_fig: Return the matplotlib figure.\
+"""
+
+# Docs for pl.pca, pl.tsne, … (everything in _tools.scatterplots)
+doc_scatter_embedding = f"""\
+{doc_scatter_basic}
+{doc_vbound_percentile}
+{doc_outline}
+{doc_panels}
+    kwargs: Arguments to pass to :func:`matplotlib.pyplot.scatter`,
+        for instance: the maximum and minimum values (e.g. `vmin=-2, vmax=5`).\
+"""
+
+
+doc_show_save_ax = """\
+show: Whether to display the figure or return axis.
+    save: If `True` or a `str`, save the figure.
+        A string is appended to the default filename.
+        Infer the filetype if ending on {`'.pdf'`, `'.png'`, `'.svg'`}.
+    ax: A matplotlib axes object. Only works if plotting a single component.\
+"""
+
+doc_common_plot_args = """\
+adata: Annotated data matrix.
+    var_names: `var_names` should be a valid subset of `adata.var_names`.
+        If `var_names` is a mapping, then the key is used as label
+        to group the values (see `var_group_labels`). The mapping values
+        should be sequences of valid `adata.var_names`. In this
+        case either coloring or 'brackets' are used for the grouping
+        of var names depending on the plot. When `var_names` is a mapping,
+        then the `var_group_labels` and `var_group_positions` are set.
+    groupby: The key of the observation grouping to consider.
+    use_raw: Use `raw` attribute of `adata` if present.
+    log: Plot on logarithmic axis.
+    num_categories: Only used if groupby observation is not categorical. This value
+        determines the number of groups into which the groupby observation should be subdivided.
+    categories_order: Order in which to show the categories. Note: add_dendrogram or add_totals
+        can change the categories order.
+    figsize: Figure size when `multi_panel=True`. Otherwise the `rcParams['figure.figsize']` value is used.
+        Format is (width, height).
+    dendrogram: If True or a valid dendrogram key, a dendrogram based on the hierarchical
+        clustering between the `groupby` categories is added.
+        The dendrogram information is computed using :func:`scanpy.tl.dendrogram`.
+        If `tl.dendrogram` has not been called previously, the function is called with default parameters.
+    feature_symbols: Column name in `.var` DataFrame that stores feature symbols.
+        By default `var_names` refer to the index column of the `.var` DataFrame.
+        Setting this option allows alternative names to be used.
+    var_group_positions: Use this parameter to highlight groups of `var_names`.
+        This will draw a 'bracket' or a color block between the given start and end
+        positions. If the parameter `var_group_labels` is set, the corresponding
+        labels are added on top/left. E.g. `var_group_positions=[(4,10)]`
+        will add a bracket between the fourth `var_name` and the tenth `var_name`.
+        By giving more positions, more brackets/color blocks are drawn.
+    var_group_labels: Labels for each of the `var_group_positions` to be highlighted.
+    var_group_rotation: Label rotation degrees. By default, labels larger than 4 characters are rotated 90 degrees.
+    layer: Name of the AnnData object layer to plot. By default adata.raw.X is plotted.
+        If `use_raw=False` is set, then `adata.X` is plotted. If `layer` is set to a valid layer name,
+        then the layer is plotted. `layer` takes precedence over `use_raw`.\
+"""
+
+doc_scatter_spatial = """\
+library_id: library_id for Visium data, e.g. key in `adata.uns["spatial"]`.
+    img_key: Key for image data, used to get `img` and `scale_factor` from `"images"`
+        and `"scalefactors"` entries for this library. To use spatial coordinates,
+        but not plot an image, pass `img_key=None`.
+    img: Image data to plot, overrides `img_key`.
+    scale_factor: Scaling factor used to map from coordinate space to pixel space.
+        Found by default if `library_id` and `img_key` can be resolved. Otherwise defaults to `1.`.
+    spot_size: Diameter of spot (in coordinate space) for each point. Diameter
+        in pixels of the spots will be `size * spot_size * scale_factor`.
+        This argument is required if it cannot be resolved from library info.
+    crop_coord: Coordinates to use for cropping the image (left, right, top, bottom).
+        These coordinates are expected to be in pixel space (same as `basis`) and will be transformed by `scale_factor`.
+        If not provided, the image is automatically cropped to the bounds of `basis`, plus a border.
+    alpha_img: Alpha value for the image.
+    bw: Plot image data in grayscale.\
+"""
+
+doc_common_groupby_plot_args = """\
+title: Title for the figure.
+    colorbar_title: Title for the color bar. The newline character (\\n) can be used.
+    cmap: String denoting a matplotlib color map.
+    standard_scale: Whether or not to standardize the given dimension between 0 and 1, meaning for
+        each variable or group, subtract the minimum and divide each by its maximum.
+    swap_axes: By default, the x axis contains `var_names` (e.g. genes) and the y axis
+        the `groupby` categories. By setting `swap_axes` then x are the `groupby` categories and y the `var_names`.
+    return_fig: Returns :class:`DotPlot` object. Useful for fine-tuning the plot. 
Takes precedence over `show=False`.\ +""" diff --git a/data/ehrapy/_utils_rendering.py b/data/ehrapy/_utils_rendering.py new file mode 100644 index 0000000000000000000000000000000000000000..43596c54858c2862ea1da88d0b228b512852dca1 --- /dev/null +++ b/data/ehrapy/_utils_rendering.py @@ -0,0 +1,21 @@ +import functools + +from rich.progress import Progress, SpinnerColumn + + +def spinner(message: str = "Running task"): + def wrap(func): + @functools.wraps(func) + def wrapped_f(*args, **kwargs): + with Progress( + "[progress.description]{task.description}", + SpinnerColumn(), + refresh_per_second=1500, + ) as progress: + progress.add_task(f"[blue]{message}", total=1) + result = func(*args, **kwargs) + return result + + return wrapped_f + + return wrap diff --git a/data/ehrapy/anndata/__init__.py b/data/ehrapy/anndata/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..70f1e09c43bd8f7a6b50f3d0247fc017411c3b81 --- /dev/null +++ b/data/ehrapy/anndata/__init__.py @@ -0,0 +1,35 @@ +from ehrapy.anndata._feature_specifications import ( + check_feature_types, + feature_type_overview, + infer_feature_types, + replace_feature_types, +) +from ehrapy.anndata.anndata_ext import ( + anndata_to_df, + delete_from_obs, + df_to_anndata, + generate_anndata, + get_obs_df, + get_rank_features_df, + get_var_df, + move_to_obs, + move_to_x, + rank_genes_groups_df, +) + +__all__ = [ + "check_feature_types", + "replace_feature_types", + "feature_type_overview", + "infer_feature_types", + "anndata_to_df", + "delete_from_obs", + "df_to_anndata", + "generate_anndata", + "get_obs_df", + "get_rank_features_df", + "get_var_df", + "move_to_obs", + "move_to_x", + "rank_genes_groups_df", +] diff --git a/data/ehrapy/anndata/_constants.py b/data/ehrapy/anndata/_constants.py new file mode 100644 index 0000000000000000000000000000000000000000..68ae69432c34c8813a0928ab7cb016f841feb362 --- /dev/null +++ b/data/ehrapy/anndata/_constants.py @@ -0,0 +1,8 @@ +# Typing Column +# ----------------------- +# The column name and used values in adata.var for column types. + +FEATURE_TYPE_KEY = "feature_type" +NUMERIC_TAG = "numeric" +CATEGORICAL_TAG = "categorical" +DATE_TAG = "date" diff --git a/data/ehrapy/anndata/_feature_specifications.py b/data/ehrapy/anndata/_feature_specifications.py new file mode 100644 index 0000000000000000000000000000000000000000..fdebb9ffd33f61583cf0aa15afb57bdc4d3d7473 --- /dev/null +++ b/data/ehrapy/anndata/_feature_specifications.py @@ -0,0 +1,242 @@ +from __future__ import annotations + +from functools import wraps +from typing import TYPE_CHECKING, Literal + +import numpy as np +import pandas as pd +from anndata import AnnData +from dateutil.parser import isoparse # type: ignore +from lamin_utils import logger +from rich import print +from rich.tree import Tree + +from ehrapy.anndata._constants import CATEGORICAL_TAG, DATE_TAG, FEATURE_TYPE_KEY, NUMERIC_TAG + +if TYPE_CHECKING: + from collections.abc import Iterable + + +def _detect_feature_type(col: pd.Series) -> tuple[Literal["date", "categorical", "numeric"], bool]: + """Detect the feature type of a column in a pandas DataFrame. + + Args: + col: The column to detect the feature type for. + verbose: Whether to print warnings for uncertain feature types. + + Returns: + The detected feature type (one of 'date', 'categorical', or 'numeric') and a boolean, which is True if the feature type is uncertain. 
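+
+    Examples:
+        Illustrative sketch of the detection rules above:
+
+        >>> _detect_feature_type(pd.Series([0, 1, 2, 1]))  # gapless integer codes
+        ('categorical', True)
+        >>> _detect_feature_type(pd.Series([0.5, 1.2, 3.3]))
+        ('numeric', False)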
+    """
+    n_elements = len(col)
+    col = col.dropna()
+    if len(col) == 0:
+        raise ValueError(
+            f"Feature '{col.name}' contains only NaN values. Please drop this feature to infer the feature type."
+        )
+    majority_type = col.apply(type).value_counts().idxmax()
+
+    if majority_type == pd.Timestamp:
+        return DATE_TAG, False  # type: ignore
+
+    if majority_type is str:
+        try:
+            col.apply(isoparse)
+            return DATE_TAG, False  # type: ignore
+        except ValueError:
+            try:
+                col = pd.to_numeric(col, errors="raise")  # Could be an encoded categorical or a numeric feature
+                majority_type = float
+            except ValueError:
+                # Features stored as strings that cannot be converted to float are assumed to be categorical
+                return CATEGORICAL_TAG, False  # type: ignore
+
+    if majority_type not in [int, float]:
+        return CATEGORICAL_TAG, False  # type: ignore
+
+    # Guess categorical if the feature is integer-valued and the unique values run from 0 to n-1 (or 1 to n) with no gaps
+    if (
+        # use the builtin all() here: np.all() applied to a generator always evaluates to True
+        (majority_type is int or all(float(i).is_integer() for i in col))
+        and (n_elements != col.nunique())
+        and (
+            (col.min() == 0 and np.all(np.sort(col.unique()) == np.arange(col.nunique())))
+            or (col.min() == 1 and np.all(np.sort(col.unique()) == np.arange(1, col.nunique() + 1)))
+        )
+    ):
+        return CATEGORICAL_TAG, True  # type: ignore
+
+    return NUMERIC_TAG, False  # type: ignore
+
+
+def infer_feature_types(
+    adata: AnnData, layer: str | None = None, output: Literal["tree", "dataframe"] | None = "tree", verbose: bool = True
+):
+    """Infer feature types from an AnnData object.
+
+    For each feature in adata.var_names, the method infers one of the following types: 'date', 'categorical', or 'numeric'.
+    The inferred types are stored in adata.var['feature_type']. Please check the inferred types and adjust if necessary using
+    adata.var['feature_type']['feature1']='corrected_type'.
+    Be aware that not all features stored numerically are of 'numeric' type, as categorical features might be stored in a numerically encoded format.
+    For example, a feature with values [0, 1, 2] might be a categorical feature with three categories. This is accounted for in the method, but it is
+    recommended to check the inferred types.
+
+    Args:
+        adata: :class:`~anndata.AnnData` object storing the EHR data.
+        layer: The layer to use from the AnnData object. If None, the X layer is used.
+        output: The output format. Choose between 'tree', 'dataframe', or None. If 'tree', the feature types will be printed to the console in a tree format.
+            If 'dataframe', a pandas DataFrame with the feature types will be returned. If None, nothing will be returned.
+        verbose: Whether to print warnings for uncertain feature types.
+
+    Examples:
+        >>> import ehrapy as ep
+        >>> adata = ep.dt.mimic_2(encoded=False)
+        >>> ep.ad.infer_feature_types(adata)
+    """
+    from ehrapy.anndata.anndata_ext import anndata_to_df
+
+    feature_types = {}
+    uncertain_features = []
+
+    df = anndata_to_df(adata, layer=layer)
+    for feature in adata.var_names:
+        if (
+            FEATURE_TYPE_KEY in adata.var.keys()
+            and adata.var[FEATURE_TYPE_KEY][feature] is not None
+            and not pd.isna(adata.var[FEATURE_TYPE_KEY][feature])
+        ):
+            feature_types[feature] = adata.var[FEATURE_TYPE_KEY][feature]
+        else:
+            feature_types[feature], raise_warning = _detect_feature_type(df[feature])
+            if raise_warning:
+                uncertain_features.append(feature)
+
+    adata.var[FEATURE_TYPE_KEY] = pd.Series(feature_types)[adata.var_names]
+
+    # only warn if there actually are uncertain features
+    if verbose and uncertain_features:
+        logger.warning(
+            f"{'Features' if len(uncertain_features) > 1 else 'Feature'} {str(uncertain_features)[1:-1]} {'were' if len(uncertain_features) > 1 else 'was'} detected as categorical features stored numerically. "
+            f"Please verify and correct using `ep.ad.replace_feature_types` if necessary."
+        )
+
+    logger.info(
+        f"Stored feature types in adata.var['{FEATURE_TYPE_KEY}']."
+        f" Please verify and adjust if necessary using `ep.ad.replace_feature_types`."
+    )
+
+    if output == "tree":
+        feature_type_overview(adata)
+    elif output == "dataframe":
+        return adata.var[FEATURE_TYPE_KEY]
+    elif output is not None:
+        raise ValueError(f"Output format {output} not recognized. Choose between 'tree', 'dataframe', or None.")
+
+
+def check_feature_types(func):
+    @wraps(func)
+    def wrapper(adata, *args, **kwargs):
+        # Account for class methods that pass self as first argument
+        _self = None
+        if not isinstance(adata, AnnData) and len(args) > 0 and isinstance(args[0], AnnData):
+            _self = adata
+            adata = args[0]
+            args = args[1:]
+
+        if FEATURE_TYPE_KEY not in adata.var.keys():
+            infer_feature_types(adata, output=None)
+            logger.warning(
+                f"Feature types were inferred and stored in adata.var['{FEATURE_TYPE_KEY}']. Please verify using `ep.ad.feature_type_overview` and adjust if necessary using `ep.ad.replace_feature_types`."
+            )
+
+        for feature in adata.var_names:
+            feature_type = adata.var[FEATURE_TYPE_KEY][feature]
+            if (
+                feature_type is not None
+                and (not pd.isna(feature_type))
+                and feature_type not in [CATEGORICAL_TAG, NUMERIC_TAG, DATE_TAG]
+            ):
+                logger.warning(
+                    f"Feature '{feature}' has an invalid feature type '{feature_type}'. Please correct using `ep.ad.replace_feature_types`."
+                )
+
+        if _self is not None:
+            return func(_self, adata, *args, **kwargs)
+        return func(adata, *args, **kwargs)
+
+    return wrapper
+
+
+@check_feature_types
+def feature_type_overview(adata: AnnData):
+    """Print an overview of the feature types and encoding modes in the AnnData object.
+
+    Args:
+        adata: The AnnData object storing the EHR data.
+ + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.mimic_2(encoded=True) + >>> ep.ad.feature_type_overview(adata) + """ + from ehrapy.anndata.anndata_ext import anndata_to_df + + tree = Tree( + f"[b] Detected feature types for AnnData object with {len(adata.obs_names)} obs and {len(adata.var_names)} vars", + guide_style="underline2", + ) + + branch = tree.add("📅[b] Date features") + for date in sorted(adata.var_names[adata.var[FEATURE_TYPE_KEY] == DATE_TAG]): + branch.add(date) + + branch = tree.add("📐[b] Numerical features") + for numeric in sorted(adata.var_names[adata.var[FEATURE_TYPE_KEY] == NUMERIC_TAG]): + branch.add(numeric) + + branch = tree.add("🗂️[b] Categorical features") + cat_features = adata.var_names[adata.var[FEATURE_TYPE_KEY] == CATEGORICAL_TAG] + df = anndata_to_df(adata[:, cat_features]) + + if "encoding_mode" in adata.var.keys(): + unencoded_vars = adata.var.loc[cat_features, "unencoded_var_names"].unique().tolist() + + for unencoded in sorted(unencoded_vars): + if unencoded in adata.var_names: + branch.add(f"{unencoded} ({df.loc[:, unencoded].nunique()} categories)") + else: + enc_mode = adata.var.loc[adata.var["unencoded_var_names"] == unencoded, "encoding_mode"].values[0] + branch.add(f"{unencoded} ({adata.obs[unencoded].nunique()} categories); {enc_mode} encoded") + + else: + for categorical in sorted(cat_features): + branch.add(f"{categorical} ({df.loc[:, categorical].nunique()} categories)") + + print(tree) + + +def replace_feature_types(adata, features: Iterable[str], corrected_type: str): + """Correct the feature types for a list of features inplace. + + Args: + adata: :class:`~anndata.AnnData` object storing the EHR data. + features: The features to correct. + corrected_type: The corrected feature type. One of 'date', 'categorical', or 'numeric'. + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.diabetes_130_fairlearn() + >>> ep.ad.infer_feature_types(adata) + >>> ep.ad.replace_feature_types(adata, ["time_in_hospital", "number_diagnoses", "num_procedures"], "numeric") + """ + if corrected_type not in [CATEGORICAL_TAG, NUMERIC_TAG, DATE_TAG]: + raise ValueError( + f"Corrected type {corrected_type} not recognized. Choose between '{DATE_TAG}', '{CATEGORICAL_TAG}', or '{NUMERIC_TAG}'." + ) + + if FEATURE_TYPE_KEY not in adata.var.keys(): + raise ValueError( + "Feature types were not inferred. Please infer feature types using 'infer_feature_types' before correcting." 
+ ) + + if isinstance(features, str): + features = [features] + + adata.var.loc[features, FEATURE_TYPE_KEY] = corrected_type diff --git a/data/ehrapy/anndata/anndata_ext.py b/data/ehrapy/anndata/anndata_ext.py new file mode 100644 index 0000000000000000000000000000000000000000..85e53f6c45dcb2885b4b37d092b2b120597654d1 --- /dev/null +++ b/data/ehrapy/anndata/anndata_ext.py @@ -0,0 +1,717 @@ +from __future__ import annotations + +import random +from collections import OrderedDict +from string import ascii_letters +from typing import TYPE_CHECKING, Any, NamedTuple + +import numpy as np +import pandas as pd +from anndata import AnnData, concat +from lamin_utils import logger +from scanpy.get import obs_df, rank_genes_groups_df, var_df +from scipy import sparse +from scipy.sparse import issparse + +from ehrapy.anndata import check_feature_types +from ehrapy.anndata._constants import FEATURE_TYPE_KEY, NUMERIC_TAG + +if TYPE_CHECKING: + from collections.abc import Collection, Iterable, Sequence + + +class BaseDataframes(NamedTuple): + obs: pd.DataFrame + df: pd.DataFrame + + +def df_to_anndata( + df: pd.DataFrame, columns_obs_only: list[str] | None = None, index_column: str | None = None +) -> AnnData: + """Transform a given Pandas DataFrame into an AnnData object. + + Note that columns containing boolean values (either 0/1 or T(t)rue/F(f)alse) + will be stored as boolean columns whereas the other non-numerical columns will be stored as categorical values. + + Args: + df: The pandas dataframe to be transformed. + columns_obs_only: An optional list of column names that should belong to obs only and not X. + index_column: The index column of obs. This can be either a column name (or its numerical index in the DataFrame) or the index of the dataframe. + + Returns: + An AnnData object created from the given Pandas DataFrame. + + Examples: + >>> import ehrapy as ep + >>> import pandas as pd + >>> df = pd.DataFrame( + ... { + ... "patient_id": ["0", "1", "2", "3", "4"], + ... "age": [65, 72, 58, 78, 82], + ... "sex": ["M", "F", "F", "M", "F"], + ... } + ... 
) + >>> adata = ep.ad.df_to_anndata(df, index_column="patient_id") + """ + # Check and handle the overlap of index_column in columns_obs_only + if index_column is not None: + if isinstance(index_column, int): + if index_column >= len(df.columns): + raise IndexError("index_column integer index is out of bounds.") + index_column = df.columns[index_column] + if not df.index.name or df.index.name != index_column: + if index_column in df.columns: + df.set_index(index_column, inplace=True) + else: + raise ValueError(f"Column {index_column} not found in DataFrame.") + + # Now handle columns_obs_only with consideration of the new index + if columns_obs_only: + if index_column in columns_obs_only: + columns_obs_only.remove(index_column) + missing_cols = [col for col in columns_obs_only if col not in df.columns] + if missing_cols: + raise ValueError(f"Columns {missing_cols} specified in columns_obs_only are not in the DataFrame.") + obs = df.loc[:, columns_obs_only].copy() + df.drop(columns=columns_obs_only, inplace=True, errors="ignore") + else: + obs = pd.DataFrame(index=df.index) + + for col in obs.columns: + if obs[col].dtype == "bool": + obs[col] = obs[col].astype(bool) + elif obs[col].dtype == "object": + obs[col] = obs[col].astype("category") + + # Prepare the AnnData object + X = df.to_numpy(copy=True) + obs.index = obs.index.astype(str) + var = pd.DataFrame(index=df.columns) + var.index = var.index.astype(str) + uns = OrderedDict() # type: ignore + + # Handle dtype of X based on presence of numerical columns only + all_numeric = df.select_dtypes(include=[np.number]).shape[1] == df.shape[1] + X = X.astype(np.float32 if all_numeric else object) + + adata = AnnData(X=X, obs=obs, var=var, uns=uns, layers={"original": X.copy()}) + adata.obs_names = adata.obs_names.astype(str) + adata.var_names = adata.var_names.astype(str) + + return adata + + +def anndata_to_df( + adata: AnnData, + layer: str = None, + obs_cols: Iterable[str] | str | None = None, + var_cols: Iterable[str] | str | None = None, +) -> pd.DataFrame: + """Transform an AnnData object to a Pandas DataFrame. + + Args: + adata: The AnnData object to be transformed into a pandas DataFrame + layer: The layer to access the values of. If not specified, it uses the `X` matrix. + obs_cols: The columns of `obs` to add to the DataFrame. + var_cols: The columns of `var` to fetch values from. 
+ + Returns: + The AnnData object as a pandas DataFrame + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.mimic_2(encoded=True) + >>> df = ep.ad.anndata_to_df(adata) + """ + if layer is not None: + X = adata.layers[layer] + else: + X = adata.X + if issparse(X): # pragma: no cover + X = X.toarray() + + df = pd.DataFrame(X, columns=list(adata.var_names)) + if obs_cols: + if len(adata.obs.columns) == 0: + raise ValueError("Cannot slice columns from empty obs!") + if isinstance(obs_cols, str): + obs_cols = list(obs_cols) + if isinstance(obs_cols, list): # pragma: no cover + obs_slice = adata.obs[obs_cols] + # reset index needed since we slice all or at least some columns from obs DataFrame + obs_slice = obs_slice.reset_index(drop=True) + df = pd.concat([df, obs_slice], axis=1) + if var_cols: + if len(adata.var.columns) == 0: + raise ValueError("Cannot slice columns from empty var!") + if isinstance(var_cols, str): + var_cols = list(var_cols) + if isinstance(var_cols, list): + var_slice = adata.var[var_cols] + # reset index needed since we slice all or at least some columns from var DataFrame + var_slice = var_slice.reset_index(drop=True) + df = pd.concat([df, var_slice], axis=1) + + return df + + +def move_to_obs(adata: AnnData, to_obs: list[str] | str, copy_obs: bool = False) -> AnnData: + """Move inplace or copy features from X to obs. + + Note that columns containing boolean values (either 0/1 or True(true)/False(false)) + will be stored as boolean columns whereas the other non-numerical columns will be stored as categorical. + + Args: + adata: The AnnData object + to_obs: The columns to move to obs + copy_obs: The values are copied to obs (and therefore kept in X) instead of moved completely + + Returns: + The original AnnData object with moved or copied columns from X to obs + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.mimic_2(encoded=True) + >>> ep.ad.move_to_obs(adata, ["age"], copy_obs=False) + """ + if isinstance(to_obs, str): # pragma: no cover + to_obs = [to_obs] + + # don't allow moving encoded columns as this could lead to inconsistent data in X and obs + if any(column.startswith("ehrapycat") for column in to_obs): + raise ValueError( + "Cannot move encoded columns from X to obs. Either undo encoding or remove them from the list!" + ) + + if not all(elem in adata.var_names.values for elem in to_obs): + raise ValueError( + f"Columns `{[col for col in to_obs if col not in adata.var_names.values]}` are not in var_names." + ) + + cols_to_obs_indices = adata.var_names.isin(to_obs) + + num_set = _get_var_indices_for_type(adata, NUMERIC_TAG) + var_num = list(set(to_obs) & set(num_set)) + + if copy_obs: + cols_to_obs = adata[:, cols_to_obs_indices].to_df() + adata.obs = adata.obs.join(cols_to_obs) + adata.obs[var_num] = adata.obs[var_num].apply(pd.to_numeric, downcast="float") + + adata.obs = _cast_obs_columns(adata.obs) + else: + df = adata[:, cols_to_obs_indices].to_df() + adata._inplace_subset_var(~cols_to_obs_indices) + adata.obs = adata.obs.join(df) + adata.obs[var_num] = adata.obs[var_num].apply(pd.to_numeric, downcast="float") + adata.obs = _cast_obs_columns(adata.obs) + + return adata + + +@check_feature_types +def _get_var_indices_for_type(adata: AnnData, tag: str) -> list[str]: + """Get indices of columns in var for a given tag. 
+ + def move_to_obs(adata: AnnData, to_obs: list[str] | str, copy_obs: bool = False) -> AnnData: + """Move inplace or copy features from X to obs. + + Note that columns containing boolean values (either 0/1 or True/False in any capitalization) + are stored as boolean columns, whereas all other non-numerical columns are stored as categorical. + + Args: + adata: The AnnData object + to_obs: The columns to move to obs + copy_obs: The values are copied to obs (and therefore kept in X) instead of moved completely + + Returns: + The original AnnData object with moved or copied columns from X to obs + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.mimic_2(encoded=True) + >>> ep.ad.move_to_obs(adata, ["age"], copy_obs=False) + """ + if isinstance(to_obs, str): # pragma: no cover + to_obs = [to_obs] + + # don't allow moving encoded columns as this could lead to inconsistent data in X and obs + if any(column.startswith("ehrapycat") for column in to_obs): + raise ValueError( + "Cannot move encoded columns from X to obs. Either undo encoding or remove them from the list!" + ) + + if not all(elem in adata.var_names.values for elem in to_obs): + raise ValueError( + f"Columns `{[col for col in to_obs if col not in adata.var_names.values]}` are not in var_names." + ) + + cols_to_obs_indices = adata.var_names.isin(to_obs) + + num_set = _get_var_indices_for_type(adata, NUMERIC_TAG) + var_num = list(set(to_obs) & set(num_set)) + + if copy_obs: + cols_to_obs = adata[:, cols_to_obs_indices].to_df() + adata.obs = adata.obs.join(cols_to_obs) + adata.obs[var_num] = adata.obs[var_num].apply(pd.to_numeric, downcast="float") + + adata.obs = _cast_obs_columns(adata.obs) + else: + df = adata[:, cols_to_obs_indices].to_df() + adata._inplace_subset_var(~cols_to_obs_indices) + adata.obs = adata.obs.join(df) + adata.obs[var_num] = adata.obs[var_num].apply(pd.to_numeric, downcast="float") + adata.obs = _cast_obs_columns(adata.obs) + + return adata
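+ + # Illustrative sketch (mirrors the docstring example above): + # >>> adata = ep.dt.mimic_2(encoded=True) + # >>> ep.ad.move_to_obs(adata, ["age"], copy_obs=True) # "age" is now in obs and kept in X +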
+ """ + col_names = [col_names] if isinstance(col_names, str) else col_names + mask = np.isin(adata.var_names, col_names) + indices = np.where(mask)[0].tolist() + + return indices + + +def _assert_encoded(adata: AnnData): + try: + assert np.issubdtype(adata.X.dtype, np.number) + except AssertionError: + raise NotEncodedError("The AnnData object has not yet been encoded.") from AssertionError + + +@check_feature_types +def get_numeric_vars(adata: AnnData) -> list[str]: + """Fetches the column names for numeric variables in X. + + Args: + adata: :class:`~anndata.AnnData` object + + Returns: + List of column numeric column names + """ + _assert_encoded(adata) + + return _get_var_indices_for_type(adata, NUMERIC_TAG) + + +def assert_numeric_vars(adata: AnnData, vars: Sequence[str]): + num_vars = get_numeric_vars(adata) + + try: + assert set(vars) <= set(num_vars) + except AssertionError: + raise ValueError("Some selected vars are not numeric") from None + + +def set_numeric_vars( + adata: AnnData, values: np.ndarray, vars: Sequence[str] | None = None, copy: bool = False +) -> AnnData | None: + """Sets the numeric values in given column names in X. + + Args: + adata: :class:`~anndata.AnnData` object + values: Matrix containing the replacement values + vars: List of names of the numeric variables to replace. If `None` they will be detected using :func:`~ehrapy.preprocessing.get_numeric_vars`. + copy: Whether to return a copy with the normalized data. + + Returns: + :class:`~anndata.AnnData` object with updated X + """ + _assert_encoded(adata) + + if vars is None: + vars = get_numeric_vars(adata) + else: + assert_numeric_vars(adata, vars) + + if not np.issubdtype(values.dtype, np.number): + raise TypeError(f"Values must be numeric (current dtype is {values.dtype})") + + n_values = values.shape[1] + + if n_values != len(vars): + raise ValueError(f"Number of values ({n_values}) does not match number of vars ({len(vars)})") + + if copy: + adata = adata.copy() + + vars_idx = get_column_indices(adata, vars) + + # if e.g. adata.X is of type int64, and values of dtype float64, the floats will be casted to int + adata.X = adata.X.astype(values.dtype) + + adata.X[:, vars_idx] = values + + return adata + + +def _detect_binary_columns(df: pd.DataFrame, numerical_columns: list[str]) -> list[str]: + """Detect all columns that contain only 0 and 1 (besides NaNs). + + Args: + df: The dataframe to check. + numerical_columns: All numerical columns of the dataframe. + + Returns: + List of column names that are binary (containing only 0 and 1 (+NaNs)) + """ + binary_columns = [] + for column in numerical_columns: + # checking for float and int as well as NaNs (this is safe since checked columns are numericals only) + # only columns that contain at least one 0 and one 1 are counted as binary (or 0.0/1.0) + if df[column].isin([0.0, 1.0, np.nan, 0, 1]).all() and df[column].nunique() == 2: + binary_columns.append(column) + + return binary_columns + + +def _cast_obs_columns(obs: pd.DataFrame) -> pd.DataFrame: + """Cast non numerical obs columns to either category or bool. + Args: + obs: Obs of an AnnData object + + Returns: + The type casted obs. 
+ """ + # only cast non numerical columns + object_columns = list(obs.select_dtypes(exclude=["number", "category", "bool"]).columns) + # type cast each non-numerical column to either bool (if possible) or category else + obs[object_columns] = obs[object_columns].apply( + lambda obs_name: obs_name.astype("category") + if not set(pd.unique(obs_name)).issubset({False, True, np.nan}) + else obs_name.astype("bool"), + axis=0, + ) + return obs + + +def generate_anndata( # pragma: no cover + shape: tuple[int, int], + X_type=sparse.csr_matrix, + X_dtype=np.float32, + obsm_types: Collection = (sparse.csr_matrix, np.ndarray, pd.DataFrame), + varm_types: Collection = (sparse.csr_matrix, np.ndarray, pd.DataFrame), + layers_types: Collection = (sparse.csr_matrix, np.ndarray, pd.DataFrame), + include_nlp: bool = False, +) -> AnnData: + """Generates a predefined AnnData with random values. + + Args: + shape: Shape of the X matrix. + X_type: Type of the X matrix. + X_dtype: Data type of the X matrix. + obsm_types: Types of the obsm matrices. + varm_types: Types of the varm matrices. + layers_types: Types of additional layers. + include_nlp: Whether to include diseases for NLP in all of X, obs and var. + Sets the X_dtype to object by default and overwrites the passed X_dtype. + + Returns: + A specified AnnData object. + + Examples: + >>> import ehrapy as ep + >>> adata = ep.ad.generate_anndata((2, 2), include_nlp=True) + """ + example_diseases: list[str] = ["diabetes melitus", "breast cancer", "dementia", "pneumonia"] + + M, N = shape + obs_names = pd.Index(f"patient{i}" for i in range(shape[0])) + var_names = pd.Index(f"feature{i}" for i in range(shape[1])) + + def _generate_typed_df(n_values, index=None, nlp: bool = False) -> pd.DataFrame: + """Generates a typed DataFrame with categoricals and numericals. + + Args: + n_values: Number of values to generate per type. + index: Name of the index column. + nlp: Whether to include disease names. + + Returns: + Pandas DataFrame with the specified number of values. 
+ """ + letters = np.fromiter(iter(ascii_letters), "U1") + if n_values > len(letters): + letters = letters[: n_values // 2] # Make sure categories are repeated + df = pd.DataFrame( + { + "cat": pd.Categorical(np.random.choice(letters, n_values)), + "cat_ordered": pd.Categorical(np.random.choice(letters, n_values), ordered=True), + "int64": np.random.randint(-50, 50, n_values), + "float64": np.random.random(n_values), + "uint8": np.random.randint(255, size=n_values, dtype="uint8"), + }, + index=index, + ) + + if nlp: + df["nlp"] = random.sample(example_diseases, k=n_values) + + return df + + obs = _generate_typed_df(M, obs_names, nlp=include_nlp) + var = _generate_typed_df(N, var_names, nlp=include_nlp) + + obs.rename(columns={"cat": "obs_cat"}, inplace=True) + var.rename(columns={"cat": "var_cat"}, inplace=True) + + if X_type is None: + X = None + else: + if include_nlp: + X_np_array = np.random.binomial(100, 0.005, (M, N - 1)).astype(object) + X = np.append(X_np_array, [[el] for el in random.sample(example_diseases, k=M)], axis=1) + else: + X_np_array = np.random.binomial(100, 0.005, (M, N)) + X = X_type(X_np_array).astype(X_dtype) + + obsm = { + "array": np.random.random((M, 50)), + "sparse": sparse.random(M, 100, format="csr"), + "df": _generate_typed_df(M, obs_names), + } + obsm = {k: v for k, v in obsm.items() if type(v) in obsm_types} + varm = { + "array": np.random.random((N, 50)), + "sparse": sparse.random(N, 100, format="csr"), + "df": _generate_typed_df(N, var_names), + } + varm = {k: v for k, v in varm.items() if type(v) in varm_types} + layers = {"array": np.random.random((M, N)), "sparse": sparse.random(M, N, format="csr")} + layers = {k: v for k, v in layers.items() if type(v) in layers_types} + obsp = {"array": np.random.random((M, M)), "sparse": sparse.random(M, M, format="csr")} + varp = {"array": np.random.random((N, N)), "sparse": sparse.random(N, N, format="csr")} + + def _generate_vstr_recarray(m, n, dtype=None): + size = m * n + lengths = np.random.randint(3, 5, size) + letters = np.array(list(ascii_letters)) + gen_word = lambda w: "".join(np.random.choice(letters, w)) + arr = np.array([gen_word(length) for length in lengths]).reshape(m, n) + + return pd.DataFrame(arr, columns=[gen_word(5) for _ in range(n)]).to_records(index=False, column_dtypes=dtype) + + uns = { + "O_recarray": _generate_vstr_recarray(N, 5), + "nested": { + "scalar_str": "str", + "scalar_int": 42, + "scalar_float": 3.0, + "nested_further": {"array": np.arange(5)}, + }, + } + + if include_nlp: + X_dtype = np.dtype(object) + + adata = AnnData( + X=X, + obs=obs, + var=var, + obsm=obsm, + varm=varm, + layers=layers, + obsp=obsp, + varp=varp, + uns=uns, + ) + + return adata + + +def get_obs_df( # pragma: no cover + adata: AnnData, + keys: Iterable[str] = (), + obsm_keys: Iterable[tuple[str, int]] = (), + *, + layer: str = None, + features: str = None, +): + """Return values for observations in adata. + + Args: + adata: AnnData object to get values from. + keys: Keys from either `.var_names`, `.var[gene_symbols]`, or `.obs.columns`. + obsm_keys: Tuple of `(key from obsm, column index of obsm[key])`. + layer: Layer of `adata`. + features: Column of `adata.var` to search for `keys` in. + + Returns: + A dataframe with `adata.obs_names` as index, and values specified by `keys` and `obsm_keys`. 
+ + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.mimic_2(encoded=True) + >>> ages = ep.ad.get_obs_df(adata, keys=["age"]) + """ + return obs_df(adata=adata, keys=keys, obsm_keys=obsm_keys, layer=layer, gene_symbols=features) + + + def get_var_df( # pragma: no cover + adata: AnnData, + keys: Iterable[str] = (), + varm_keys: Iterable[tuple[str, int]] = (), + *, + layer: str | None = None, + ): + """Return values for variables in adata. + + Args: + adata: AnnData object to get values from. + keys: Keys from either `.obs_names`, or `.var.columns`. + varm_keys: Tuple of `(key from varm, column index of varm[key])`. + layer: Layer of `adata`. + + Returns: + A dataframe with `adata.var_names` as index, and values specified by `keys` and `varm_keys`. + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.mimic_2(encoded=True) + >>> four_patients = ep.ad.get_var_df(adata, keys=["0", "1", "2", "3"]) + """ + return var_df(adata=adata, keys=keys, varm_keys=varm_keys, layer=layer) + + + def get_rank_features_df( + adata: AnnData, + group: str | Iterable[str], + *, + key: str = "rank_features_groups", + pval_cutoff: float | None = None, + log2fc_min: float | None = None, + log2fc_max: float | None = None, + features: str | None = None, + ): + """:func:`ehrapy.tl.rank_features_groups` results in the form of a :class:`~pandas.DataFrame`. + + Args: + adata: AnnData object to get values from. + group: Which group (as in :func:`ehrapy.tl.rank_features_groups`'s `groupby` argument) + to return results from. Can be a list. All groups are returned if `group` is `None`. + key: Key differential groups were stored under. + pval_cutoff: Return only adjusted p-values below the cutoff. + log2fc_min: Minimum logfc to return. + log2fc_max: Maximum logfc to return. + features: Column name in `.var` DataFrame that stores feature names. + Specifying this will add that column to the returned DataFrame. + + Returns: + A Pandas DataFrame of all rank features groups results. + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.mimic_2(encoded=True) + >>> ep.tl.rank_features_groups(adata, "service_unit") + >>> df = ep.ad.get_rank_features_df(adata, group="FICU") + """ + return rank_genes_groups_df( + adata=adata, + group=group, + key=key, + pval_cutoff=pval_cutoff, + log2fc_min=log2fc_min, + log2fc_max=log2fc_max, + gene_symbols=features, + ) + + + class NotEncodedError(AssertionError): + pass + + + def _are_ndarrays_equal(arr1: np.ndarray, arr2: np.ndarray) -> np.bool_: + """Check if two arrays are equal member-wise. + + Note: Two NaN are considered equal. + + Args: + arr1: First array to compare + arr2: Second array to compare + + Returns: + True if the two arrays are equal member-wise + """ + return np.all(np.equal(arr1, arr2, dtype=object) | ((arr1 != arr1) & (arr2 != arr2))) + + + def _is_val_missing(data: np.ndarray) -> np.ndarray[Any, np.dtype[np.bool_]]: + """Check if values in an AnnData matrix are missing. + + Args: + data: The AnnData matrix to check + + Returns: + An array of bool representing the missingness of the original data, with the same shape. + """ + return np.isin(data, [None, ""]) | (data != data) + + + def _to_dense_matrix(adata: AnnData, layer: str | None = None) -> np.ndarray: # pragma: no cover + """Extract a layer from an AnnData object and convert it to a dense matrix if required. + + Args: + adata: The AnnData object to extract the layer from. + layer: Name of the layer to extract. If omitted, X is considered. + + Returns: + The layer as a dense matrix. If a conversion was required, this function returns a copy of the original layer, + otherwise this function returns a reference. + """ + from scipy.sparse import issparse + + if layer is None: + return adata.X.toarray() if issparse(adata.X) else adata.X + else: + return adata.layers[layer].toarray() if issparse(adata.layers[layer]) else adata.layers[layer]
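+ + # Illustrative sketch of the NaN-aware helpers above (internal API): + # >>> a = np.array([1.0, np.nan], dtype=object) + # >>> bool(_are_ndarrays_equal(a, a.copy())) # -> True; two NaNs count as equal here + # >>> _is_val_missing(np.array([[1.0, np.nan], [None, 2.0]], dtype=object)) # -> [[False, True], [True, False]] +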
diff --git a/data/ehrapy/core/__init__.py b/data/ehrapy/core/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/data/ehrapy/core/meta_information.py b/data/ehrapy/core/meta_information.py new file mode 100644 index 0000000000000000000000000000000000000000..b08ba1eafa7a97a89669147e006f91c41f72fec2 --- /dev/null +++ b/data/ehrapy/core/meta_information.py @@ -0,0 +1,41 @@ +from __future__ import annotations + +import sys +from datetime import datetime + +from rich import print + +from ehrapy import __version__ + + +def print_versions(): # pragma: no cover + """Print versions of imported packages. + + Examples: + >>> import ehrapy as ep + >>> ep.print_versions() + """ + print_header() + + +def print_version_and_date(*, file=None): # pragma: no cover + """Useful for starting a notebook so you see when you started working.""" + if file is None: + file = sys.stdout + print( + f"Running ehrapy {__version__}, on {datetime.now():%Y-%m-%d %H:%M}.", + file=file, + ) + + +def print_header(*, file=None): # pragma: no cover + """Versions that might influence the numerical results.""" + from session_info2 import session_info + + sinfo = session_info(os=True, cpu=True, gpu=True, dependencies=True) + + if file is not None: + print(sinfo, file=file) + return + + return sinfo diff --git a/data/ehrapy/data/__init__.py b/data/ehrapy/data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5ba5b17d40c1beec5f2f87dcfa5a197ba2d072d6 --- /dev/null +++ b/data/ehrapy/data/__init__.py @@ -0,0 +1,47 @@ +from ehrapy.data._datasets import ( + breast_cancer_coimbra, + breast_tissue, + cervical_cancer_risk_factors, + chronic_kidney_disease, + dermatology, + diabetes_130_fairlearn, + diabetes_130_raw, + echocardiogram, + heart_disease, + heart_failure, + hepatitis, + mimic_2, + mimic_2_preprocessed, + mimic_3_demo, + parkinson_dataset_with_replicated_acoustic_features, + parkinsons, + parkinsons_disease_classification, + parkinsons_telemonitoring, + statlog_heart, + synthea_1k_sample, + thyroid, +) + +__all__ = [ + "breast_cancer_coimbra", + "breast_tissue", + "cervical_cancer_risk_factors", + "chronic_kidney_disease", + "dermatology", + "diabetes_130_fairlearn", + "diabetes_130_raw", + "echocardiogram", + "heart_disease", + "heart_failure", + "hepatitis", + "mimic_2", + "mimic_2_preprocessed", + "mimic_3_demo", + "parkinson_dataset_with_replicated_acoustic_features", + "parkinsons", + "parkinsons_disease_classification", + "parkinsons_telemonitoring", + "statlog_heart", + "synthea_1k_sample", + "thyroid", +] diff --git a/data/ehrapy/data/_dataloader.py b/data/ehrapy/data/_dataloader.py new file mode 100644 index 0000000000000000000000000000000000000000..bbfabc267d09247557b9a00d81a5ed1f2701bc69 --- /dev/null +++ b/data/ehrapy/data/_dataloader.py @@ -0,0 +1,111 @@ +from __future__ import annotations + +import os +import shutil +import tempfile +from pathlib import Path +from random import choice +from string import ascii_lowercase +from typing import Literal + +import requests +from filelock import FileLock +from rich import print +from rich.progress import Progress
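+ + # Illustrative usage sketch (hypothetical URL, file name and path, shown for orientation only): + # >>> download("https://example.org/dataset.zip", output_file_name="dataset.zip", + # ... output_path="/tmp/ehrapy_data", archive_format="zip") +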
+ + def download( + url: str, + archive_format: Literal["zip", "tar", "tar.gz", "tgz"] | None = None, + output_file_name: str | None = None, + output_path: str | Path | None = None, + block_size: int = 1024, + overwrite: bool = False, + ) -> None: # pragma: no cover + """Downloads a file irrespective of format. + + Args: + url: URL to download. + archive_format: The archive format, if the downloaded file is an archive. + output_file_name: Name of the downloaded file. + output_path: Path to download/extract the files to. Defaults to the OS's tmpdir if not specified. + block_size: Block size for downloads in bytes. + overwrite: Whether to overwrite existing files. + """ + if output_file_name is None: + letters = ascii_lowercase + output_file_name = f"ehrapy_tmp_{''.join(choice(letters) for _ in range(10))}" + + if output_path is None: + output_path = tempfile.gettempdir() + + def _sanitize_file_name(file_name): + if os.name == "nt": + file_name = file_name.replace("?", "_").replace("*", "_") + return file_name + + download_to_path = Path( + _sanitize_file_name( + f"{output_path}{output_file_name}" + if str(output_path).endswith("/") + else f"{output_path}/{output_file_name}" + ) + ) + + Path(output_path).mkdir(parents=True, exist_ok=True) + lock_path = f"{download_to_path}.lock" + with FileLock(lock_path): + if download_to_path.exists(): + warning = f"[bold red]File {download_to_path} already exists!" + if not overwrite: + print(warning) + return + else: + print(f"{warning} Overwriting...") + + response = requests.get(url, stream=True) + total = int(response.headers.get("content-length", 0)) + + temp_file_name = f"{download_to_path}.part" + + with Progress(refresh_per_second=1500) as progress: + task = progress.add_task("[red]Downloading...", total=total) + with Path(temp_file_name).open("wb") as file: + for data in response.iter_content(block_size): + file.write(data) + progress.update(task, advance=block_size) + + # force the progress bar to 100% at the end + progress.update(task, completed=total, refresh=True) + + Path(temp_file_name).replace(download_to_path) + + if archive_format: + output_path = output_path or tempfile.gettempdir() + shutil.unpack_archive(download_to_path, output_path, format=archive_format) + download_to_path.unlink() + list_of_paths = [path for path in Path(output_path).resolve().glob("*/") if not path.name.startswith(".")] + latest_path = max(list_of_paths, key=lambda path: path.stat().st_ctime) + shutil.move(latest_path, latest_path.parent / remove_archive_extension(output_file_name)) # type: ignore + + Path(lock_path).unlink(missing_ok=True) + + +def remove_archive_extension(file_path): + return ( + str(Path(file_path).with_suffix("")) + if any( + Path(file_path).suffix.endswith(ext) + for ext in [ + ".zip", + ".tar", + ".tar.gz", + ".tgz", + ".tar.bz2", + ".tbz2", + ".tar.xz", + ".txz", + ] + ) + else file_path + )
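+ + # Illustrative sketch of the extension helper above: + # >>> remove_archive_extension("synthea_sample.zip") # -> "synthea_sample" + # >>> remove_archive_extension("data.csv") # -> "data.csv" (not an archive, unchanged) +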
diff --git a/data/ehrapy/data/_datasets.py b/data/ehrapy/data/_datasets.py new file mode 100644 index 0000000000000000000000000000000000000000..7957d17cf54fe288c30335fa0728b95b22aace2c --- /dev/null +++ b/data/ehrapy/data/_datasets.py @@ -0,0 +1,756 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from ehrapy import ehrapy_settings +from ehrapy.anndata import anndata_to_df, df_to_anndata, infer_feature_types, replace_feature_types +from ehrapy.anndata._constants import CATEGORICAL_TAG, DATE_TAG, FEATURE_TYPE_KEY, NUMERIC_TAG +from ehrapy.io._read import read_csv, read_fhir, read_h5ad +from ehrapy.preprocessing._encoding import encode + +if TYPE_CHECKING: + import pandas as pd + from anndata import AnnData + + +def mimic_2( + encoded: bool = False, + columns_obs_only: dict[str, list[str]] | list[str] | None = None, +) -> AnnData: + """Loads the MIMIC-II dataset. + + More details: https://physionet.org/content/mimic2-iaccd/1.0/ + + Args: + encoded: Whether to return an already encoded object + columns_obs_only: Columns to include in obs only and not X. + + Returns: + :class:`~anndata.AnnData` object of the MIMIC-II dataset + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.mimic_2(encoded=True) + """ + adata = read_csv( + dataset_path=f"{ehrapy_settings.datasetdir}/ehrapy_mimic2.csv", + download_dataset_name="ehrapy_mimic2.csv", + backup_url="https://www.physionet.org/files/mimic2-iaccd/1.0/full_cohort_data.csv?download", + columns_obs_only=columns_obs_only, + ) + if encoded: + infer_feature_types(adata, output=None, verbose=False) + replace_feature_types(adata, "hour_icu_intime", NUMERIC_TAG) + return encode(adata, autodetect=True) + + return adata + + +def mimic_2_preprocessed() -> AnnData: + """Loads the preprocessed MIMIC-II dataset. + + More details: https://physionet.org/content/mimic2-iaccd/1.0/ + + The dataset was preprocessed according to: https://github.com/theislab/ehrapy-datasets/tree/main/mimic_2 + + Returns: + :class:`~anndata.AnnData` object of the preprocessed MIMIC-II dataset + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.mimic_2_preprocessed() + """ + adata = read_h5ad( + dataset_path=f"{ehrapy_settings.datasetdir}/ehrapy_mimic_2_preprocessed.h5ad", + download_dataset_name="ehrapy_mimic_2_preprocessed.h5ad", + backup_url="https://figshare.com/ndownloader/files/39727936", + ) + + return adata + + +def mimic_3_demo( + anndata: bool = False, + columns_obs_only: dict[str, list[str]] | list[str] | None = None, +) -> dict[str, AnnData] | dict[str, pd.DataFrame]: + """Loads the MIMIC-III demo dataset as a dictionary of Pandas DataFrames. + + The MIMIC-III dataset comes in the form of 26 CSV tables. Although it is possible to return one AnnData object per + CSV table, it might be easier to start with Pandas DataFrames to aggregate the desired measurements with Pandas SQL. + https://github.com/yhat/pandasql/ might be useful. + The resulting DataFrame can then be transformed into an AnnData object with :func:`~ehrapy.anndata.df_to_anndata`. + + Args: + anndata: Whether to return one AnnData object per CSV file. + columns_obs_only: Columns to include in obs only and not X. + + Returns: + A dictionary of AnnData objects or a dictionary of Pandas DataFrames + + Examples: + >>> import ehrapy as ep + >>> dfs = ep.dt.mimic_3_demo() + """ + data = read_csv( + dataset_path=f"{ehrapy_settings.datasetdir}/ehrapy_mimic_3", + download_dataset_name="ehrapy_mimic_3", + backup_url="https://physionet.org/static/published-projects/mimiciii-demo/mimic-iii-clinical-database-demo-1.4.zip", + return_dfs=not anndata, + columns_obs_only=columns_obs_only, + archive_format="zip", + ) + + return data + + +def heart_failure(encoded: bool = False, columns_obs_only: dict[str, list[str]] | list[str] | None = None) -> AnnData: + """Loads the heart failure dataset. + + More details: http://archive.ics.uci.edu/ml/datasets/Heart+failure+clinical+records + + Preprocessing: https://github.com/theislab/ehrapy-datasets/tree/main/heart_failure + + This dataset contains only numerical features and therefore does not need any encoding. + + Args: + encoded: Whether to return an already encoded object + columns_obs_only: Columns to include in obs only and not X.
+ + Returns: + :class:`~anndata.AnnData` object of the heart failure dataset + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.heart_failure(encoded=True) + """ + adata = read_csv( + dataset_path=f"{ehrapy_settings.datasetdir}/heart_failure.csv", + download_dataset_name="heart_failure.csv", + backup_url="https://figshare.com/ndownloader/files/33952934", + columns_obs_only=columns_obs_only, + index_column="patient_id", + ) + if encoded: + infer_feature_types(adata, output=None, verbose=False) + return encode(adata, autodetect=True) + + return adata + + +def diabetes_130_raw( + encoded: bool = False, + columns_obs_only: dict[str, list[str]] | list[str] | None = None, +) -> AnnData: + """Loads the raw diabetes-130 dataset + + More details: http://archive.ics.uci.edu/ml/datasets/Diabetes+130-US+hospitals+for+years+1999-2008 [1] + + Preprocessing: None except for the data preparation outlined on the link above. + + Args: + encoded: Whether to return an already encoded object + columns_obs_only: Columns to include in obs only and not X. + + Returns: + :class:`~anndata.AnnData` object of the Diabetes 130 dataset + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.diabetes_130_raw(encoded=True) + + References: + [1] Beata Strack, Jonathan P. DeShazo, Chris Gennings, Juan L. Olmo, Sebastian Ventura, Krzysztof J. Cios, and John N. Clore, “Impact of HbA1c Measurement on Hospital Readmission Rates: Analysis of 70,000 Clinical Database Patient Records,” BioMed Research International, vol. 2014, Article ID 781670, 11 pages, 2014. + """ + + adata = read_csv( + dataset_path=f"{ehrapy_settings.datasetdir}/diabetes_130_raw.csv", + download_dataset_name="diabetes_130_raw.csv", + backup_url="https://figshare.com/ndownloader/files/45110029", + columns_obs_only=columns_obs_only, + ) + if encoded: + infer_feature_types(adata, output=None, verbose=False) + replace_feature_types( + adata, ["admission_source_id", "discharge_disposition_id", "encounter_id", "patient_nbr"], CATEGORICAL_TAG + ) + replace_feature_types(adata, ["num_procedures", "number_diagnoses", "time_in_hospital"], NUMERIC_TAG) + return encode(adata, autodetect=True) + + return adata + + +def diabetes_130_fairlearn( + encoded: bool = False, + columns_obs_only: dict[str, list[str]] | list[str] | None = None, +) -> AnnData: + """Loads the preprocessed diabetes-130 dataset by fairlearn + + This loads the dataset from the `fairlearn.datasets.fetch_diabetes_hospital` function. + + More details: http://archive.ics.uci.edu/ml/datasets/Diabetes+130-US+hospitals+for+years+1999-2008 [1] + + Preprocessing: https://fairlearn.org/v0.10/api_reference/generated/fairlearn.datasets.fetch_diabetes_hospital.html#fairlearn.datasets.fetch_diabetes_hospital [2] + + Args: + encoded: Whether to return an already encoded object + columns_obs_only: Columns to include in obs only and not X. + + Returns: + :class:`~anndata.AnnData` object of the diabetes-130 dataset processed by the fairlearn team + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.diabetes_130_fairlearn() + + References: + [1] Beata Strack, Jonathan P. DeShazo, Chris Gennings, Juan L. Olmo, Sebastian Ventura, Krzysztof J. Cios, and John N. Clore, “Impact of HbA1c Measurement on Hospital Readmission Rates: Analysis of 70,000 Clinical Database Patient Records,” BioMed Research International, vol. 2014, Article ID 781670, 11 pages, 2014. + + [2] Bird, S., Dudík, M., Edgar, R., Horn, B., Lutz, R., Milan, V., ... & Walker, K. (2020). 
Fairlearn: A toolkit for assessing and improving fairness in AI. Microsoft, Tech. Rep. MSR-TR-2020-32. + """ + adata = read_csv( + dataset_path=f"{ehrapy_settings.datasetdir}/diabetes_130_fairlearn.csv", + download_dataset_name="diabetes_130_fairlearn.csv", + backup_url="https://figshare.com/ndownloader/files/45110371", + columns_obs_only=columns_obs_only, + ) + + if encoded: + infer_feature_types(adata, output=None, verbose=False) + replace_feature_types(adata, ["time_in_hospital", "number_diagnoses", "num_procedures"], NUMERIC_TAG) + return encode(adata, autodetect=True) + + return adata + + +def chronic_kidney_disease( + encoded: bool = False, + columns_obs_only: dict[str, list[str]] | list[str] | None = None, +) -> AnnData: # pragma: no cover + """Loads the Chronic Kidney Disease dataset + + More details: https://archive.ics.uci.edu/ml/datasets/Chronic_Kidney_Disease + + Preprocessing: https://github.com/theislab/ehrapy-datasets/tree/main/chronic_kidney_disease/chronic_kidney_disease.ipynb + + Args: + encoded: Whether to return an already encoded object + columns_obs_only: Columns to include in obs only and not X. + + Returns: + :class:`~anndata.AnnData` object of the Chronic Kidney Disease dataset + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.chronic_kidney_disease(encoded=True) + """ + adata = read_csv( + dataset_path=f"{ehrapy_settings.datasetdir}/chronic_kidney_disease.csv", + download_dataset_name="chronic_kidney_disease.csv", + backup_url="https://figshare.com/ndownloader/files/33989261", + columns_obs_only=columns_obs_only, + index_column="Patient_id", + ) + if encoded: + infer_feature_types(adata, output=None, verbose=False) + return encode(adata, autodetect=True) + + return adata + + +def breast_tissue( + encoded: bool = False, + columns_obs_only: dict[str, list[str]] | list[str] | None = None, +) -> AnnData: + """Loads the Breast Tissue Data Set + + More details: http://archive.ics.uci.edu/ml/datasets/Breast+Tissue + + Preprocessing: https://github.com/theislab/ehrapy-datasets/blob/main/breast_tissue/breast_tissue.ipynb + + Args: + encoded: Whether to return an already encoded object + columns_obs_only: Columns to include in obs only and not X. + + Returns: + :class:`~anndata.AnnData` object of the Breast Tissue Data Set + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.breast_tissue(encoded=True) + """ + adata = read_csv( + dataset_path=f"{ehrapy_settings.datasetdir}/breast_tissue.csv", + download_dataset_name="breast_tissue.csv", + backup_url="https://figshare.com/ndownloader/files/34179264", + columns_obs_only=columns_obs_only, + index_column="patient_id", + ) + if encoded: + infer_feature_types(adata, output=None, verbose=False) + return encode(adata, autodetect=True) + + return adata + + +def cervical_cancer_risk_factors( + encoded: bool = False, + columns_obs_only: dict[str, list[str]] | list[str] | None = None, +) -> AnnData: + """Loads the Cervical cancer (Risk Factors) Data Set + + More details: http://archive.ics.uci.edu/ml/datasets/Cervical+cancer+%28Risk+Factors%29 + Preprocessing: https://github.com/theislab/ehrapy-datasets/blob/main/cervical_cancer_risk_factors/cervical_cancer_risk_factors.ipynb + + Args: + encoded: Whether to return an already encoded object + columns_obs_only: Columns to include in obs only and not X. 
+ + Returns: + :class:`~anndata.AnnData` object of the Cervical cancer (Risk Factors) Data Set + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.cervical_cancer_risk_factors(encoded=True) + """ + adata = read_csv( + dataset_path=f"{ehrapy_settings.datasetdir}/cervical_cancer_risk_factors.csv", + download_dataset_name="cervical_cancer_risk_factors.csv", + backup_url="https://figshare.com/ndownloader/files/34179291", + columns_obs_only=columns_obs_only, + index_column="patient_id", + ) + if encoded: + infer_feature_types(adata, output=None, verbose=False) + replace_feature_types(adata, ["STDs (number)", "STDs: Number of diagnosis"], NUMERIC_TAG) + return encode(adata, autodetect=True) + + return adata + + +def dermatology( + encoded: bool = False, + columns_obs_only: dict[str, list[str]] | list[str] | None = None, +) -> AnnData: + """Loads the Dermatology Data Set + + More details: http://archive.ics.uci.edu/ml/datasets/Dermatology + + Preprocessing: https://github.com/theislab/ehrapy-datasets/blob/main/dermatology/dermatology.ipynb + + Args: + encoded: Whether to return an already encoded object + columns_obs_only: Columns to include in obs only and not X. + + Returns: + :class:`~anndata.AnnData` object of the Dermatology Data Set + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.dermatology(encoded=True) + """ + adata = read_csv( + dataset_path=f"{ehrapy_settings.datasetdir}/dermatology.csv", + download_dataset_name="dermatology.csv", + backup_url="https://figshare.com/ndownloader/files/34179300", + columns_obs_only=columns_obs_only, + index_column="patient_id", + ) + if encoded: + infer_feature_types(adata, output=None, verbose=False) + return encode(adata, autodetect=True) + + return adata + + +def echocardiogram( + encoded: bool = False, + columns_obs_only: dict[str, list[str]] | list[str] | None = None, +) -> AnnData: + """Loads the Echocardiogram Data Set + + More details: http://archive.ics.uci.edu/ml/datasets/Echocardiogram + + Preprocessing: https://github.com/theislab/ehrapy-datasets/blob/main/echocardiogram/echocardiogram.ipynb + + Args: + encoded: Whether to return an already encoded object + columns_obs_only: Columns to include in obs only and not X. + + Returns: + :class:`~anndata.AnnData` object of the Echocardiogram Data Set + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.echocardiogram(encoded=True) + """ + adata = read_csv( + dataset_path=f"{ehrapy_settings.datasetdir}/echocardiogram.csv", + download_dataset_name="echocardiogram.csv", + backup_url="https://figshare.com/ndownloader/files/34179306", + columns_obs_only=columns_obs_only, + index_column="patient_id", + ) + if encoded: + infer_feature_types(adata, output=None, verbose=False) + return encode(adata, autodetect=True) + + return adata + + +def hepatitis( + encoded: bool = False, + columns_obs_only: dict[str, list[str]] | list[str] | None = None, +) -> AnnData: + """Loads the Hepatitis Data Set + + More details: http://archive.ics.uci.edu/ml/datasets/Hepatitis + Preprocessing: https://github.com/theislab/ehrapy-datasets/blob/main/hepatitis/hepatitis.ipynb + + Args: + encoded: Whether to return an already encoded object + columns_obs_only: Columns to include in obs only and not X. 
+ + Returns: + :class:`~anndata.AnnData` object of the Hepatitis Data Set + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.hepatitis(encoded=True) + """ + adata = read_csv( + dataset_path=f"{ehrapy_settings.datasetdir}/hepatitis.csv", + download_dataset_name="hepatitis.csv", + backup_url="https://figshare.com/ndownloader/files/34179318", + columns_obs_only=columns_obs_only, + index_column="patient_id", + ) + if encoded: + infer_feature_types(adata, output=None, verbose=False) + return encode(adata, autodetect=True) + + return adata + + +def statlog_heart( + encoded: bool = False, + columns_obs_only: dict[str, list[str]] | list[str] | None = None, +) -> AnnData: + """Loads the Statlog (Heart) Data Set + + More details: http://archive.ics.uci.edu/ml/datasets/Statlog+%28Heart%29 + + Preprocessing: https://github.com/theislab/ehrapy-datasets/blob/main/statlog_heart/statlog_heart.ipynb + + Args: + encoded: Whether to return an already encoded object + columns_obs_only: Columns to include in obs only and not X. + + Returns: + :class:`~anndata.AnnData` object of the Statlog (Heart) Data Set + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.statlog_heart(encoded=True) + """ + adata = read_csv( + dataset_path=f"{ehrapy_settings.datasetdir}/statlog_heart.csv", + download_dataset_name="statlog_heart.csv", + backup_url="https://figshare.com/ndownloader/files/34179327", + columns_obs_only=columns_obs_only, + index_column="patient_id", + ) + if encoded: + infer_feature_types(adata, output=None, verbose=False) + replace_feature_types(adata, "number of major vessels", NUMERIC_TAG) + return encode(adata, autodetect=True) + + return adata + + +def thyroid( + encoded: bool = False, + columns_obs_only: dict[str, list[str]] | list[str] | None = None, +) -> AnnData: + """Loads the Thyroid Data Set + + More details: http://archive.ics.uci.edu/ml/datasets/Thyroid+Disease + Preprocessing: https://github.com/theislab/ehrapy-datasets/blob/main/thyroid/thyroid.ipynb + + Args: + encoded: Whether to return an already encoded object. + columns_obs_only: Columns to include in obs only and not X. + + Returns: + :class:`~anndata.AnnData` object of the Thyroid Data Set + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.thyroid(encoded=True) + """ + adata: AnnData = read_csv( + dataset_path=f"{ehrapy_settings.datasetdir}/thyroid.csv", + download_dataset_name="thyroid.csv", + backup_url="https://figshare.com/ndownloader/files/34179333", + columns_obs_only=columns_obs_only, + index_column="patient_id", + ) + if encoded: + infer_feature_types(adata, output=None, verbose=False) + return encode(adata, autodetect=True) + + return adata + + +def breast_cancer_coimbra( + encoded: bool = False, + columns_obs_only: dict[str, list[str]] | list[str] | None = None, +) -> AnnData: + """Loads the Breast Cancer Coimbra Data Set + + More details: http://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Coimbra + + Preprocessing: https://github.com/theislab/ehrapy-datasets/blob/main/breast_cancer_coimbra/breast_cancer_coimbra.ipynb + + Args: + encoded: Whether to return an already encoded object. + columns_obs_only: Columns to include in obs only and not X. 
+ + Returns: + :class:`~anndata.AnnData` object of the Breast Cancer Coimbra Data Set + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.breast_cancer_coimbra(encoded=True) + """ + adata: AnnData = read_csv( + dataset_path=f"{ehrapy_settings.datasetdir}/breast_cancer_coimbra.csv", + download_dataset_name="breast_cancer_coimbra.csv", + backup_url="https://figshare.com/ndownloader/files/34439681", + columns_obs_only=columns_obs_only, + index_column="patient_id", + ) + if encoded: + infer_feature_types(adata, output=None, verbose=False) + return encode(adata, autodetect=True) + + return adata + + +def parkinsons( + encoded: bool = False, + columns_obs_only: dict[str, list[str]] | list[str] | None = None, +) -> AnnData: + """Loads the Parkinsons Data Set + + More details: http://archive.ics.uci.edu/ml/datasets/Parkinsons + + Preprocessing: https://github.com/theislab/ehrapy-datasets/blob/main/parkinsons/parkinsons.ipynb + + Args: + encoded: Whether to return an already encoded object. + columns_obs_only: Columns to include in obs only and not X. + + Returns: + :class:`~anndata.AnnData` object of the Parkinsons Data Set + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.parkinsons(columns_obs_only=["name"], encoded=True) + """ + adata: AnnData = read_csv( + dataset_path=f"{ehrapy_settings.datasetdir}/parkinsons.csv", + download_dataset_name="parkinsons.csv", + backup_url="https://figshare.com/ndownloader/files/34439684", + columns_obs_only=columns_obs_only, + index_column="measurement_id", + ) + if encoded: + infer_feature_types(adata, output=None, verbose=False) + return encode(adata, autodetect=True) + + return adata + + +def parkinsons_telemonitoring( + encoded: bool = False, + columns_obs_only: dict[str, list[str]] | list[str] | None = None, +) -> AnnData: + """Loads the Parkinsons Telemonitoring Data Set + + More details: http://archive.ics.uci.edu/ml/datasets/Parkinsons+Telemonitoring + Preprocessing: https://github.com/theislab/ehrapy-datasets/blob/main/parkinsons_telemonitoring/parkinsons_telemonitoring.ipynb + + Args: + encoded: Whether to return an already encoded object. + columns_obs_only: Columns to include in obs only and not X. + + Returns: + :class:`~anndata.AnnData` object of the Parkinsons Telemonitoring Data Set + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.parkinsons_telemonitoring(encoded=True) + """ + adata: AnnData = read_csv( + dataset_path=f"{ehrapy_settings.datasetdir}/parkinsons_telemonitoring.csv", + download_dataset_name="parkinsons_telemonitoring.csv", + backup_url="https://figshare.com/ndownloader/files/34439708", + columns_obs_only=columns_obs_only, + index_column="measurement_id", + ) + if encoded: + infer_feature_types(adata, output=None, verbose=False) + return encode(adata, autodetect=True) + + return adata + + +def parkinsons_disease_classification( + encoded: bool = False, + columns_obs_only: dict[str, list[str]] | list[str] | None = None, +) -> AnnData: + """Loads the Parkinson's Disease Classification Data Set + + More details: http://archive.ics.uci.edu/ml/datasets/Parkinson%27s+Disease+Classification + + Preprocessing: https://github.com/theislab/ehrapy-datasets/blob/main/parkinson's_disease_classification/parkinson's_disease_classification.ipynb + + Args: + encoded: Whether to return an already encoded object. + columns_obs_only: Columns to include in obs only and not X. 
+ + Returns: + :class:`~anndata.AnnData` object of the Parkinson's Disease Classification Data Set + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.parkinsons_disease_classification(encoded=True) + """ + adata: AnnData = read_csv( + dataset_path=f"{ehrapy_settings.datasetdir}/parkinson's_disease_classification_prepared.csv", + download_dataset_name="parkinson's_disease_classification_prepared.csv", + backup_url="https://figshare.com/ndownloader/files/34439714", + columns_obs_only=columns_obs_only, + index_column="measurement_id", + ) + if encoded: + infer_feature_types(adata, output=None, verbose=False) + return encode(adata, autodetect=True) + + return adata + + +def parkinson_dataset_with_replicated_acoustic_features( + encoded: bool = False, + columns_obs_only: dict[str, list[str]] | list[str] | None = None, +) -> AnnData: + """Loads the Parkinson Dataset with replicated acoustic features Data Set + + More details: http://archive.ics.uci.edu/ml/datasets/Parkinson+Dataset+with+replicated+acoustic+features+ + + Preprocessing: https://github.com/theislab/ehrapy-datasets/blob/main/parkinson_dataset_with_replicated_acoustic_features/parkinson_dataset_with_replicated_acoustic_features.ipynb + + Args: + encoded: Whether to return an already encoded object. + columns_obs_only: Columns to include in obs only and not X. + + Returns: + :class:`~anndata.AnnData` object of the Parkinson Dataset with replicated acoustic features Data Set + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.parkinson_dataset_with_replicated_acoustic_features(columns_obs_only=["ID"], encoded=True) + """ + adata: AnnData = read_csv( + dataset_path=f"{ehrapy_settings.datasetdir}/parkinson_dataset_with_replicated_acoustic_features.csv", + download_dataset_name="parkinson_dataset_with_replicated_acoustic_features.csv", + backup_url="https://figshare.com/ndownloader/files/34439801", + columns_obs_only=columns_obs_only, + index_column="measurement_id", + ) + if encoded: + infer_feature_types(adata, output=None, verbose=False) + return encode(adata, autodetect=True) + + return adata + + +def heart_disease( + encoded: bool = False, + columns_obs_only: dict[str, list[str]] | list[str] | None = None, +) -> AnnData: + """Loads the Heart Disease Data Set + + More details: http://archive.ics.uci.edu/ml/datasets/Heart+Disease + + Preprocessing: https://github.com/theislab/ehrapy-datasets/blob/main/heart_disease/heart_disease.ipynb + + Args: + encoded: Whether to return an already encoded object. + columns_obs_only: Columns to include in obs only and not X. + + Returns: + :class:`~anndata.AnnData` object of the Heart Disease Data Set + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.heart_disease(encoded=True) + """ + adata: AnnData = read_csv( + dataset_path=f"{ehrapy_settings.datasetdir}/processed_heart_disease.csv", + download_dataset_name="processed_heart_disease.csv", + backup_url="https://figshare.com/ndownloader/files/34906647", + columns_obs_only=columns_obs_only, + index_column="patient_id", + ) + if encoded: + infer_feature_types(adata, output=None, verbose=False) + replace_feature_types(adata, ["num"], NUMERIC_TAG) + replace_feature_types(adata, ["thal"], CATEGORICAL_TAG) + return encode(adata, autodetect=True) + + return adata + + +def synthea_1k_sample( + encoded: bool = False, + columns_obs_only: list[str] | None = None, +) -> AnnData: + """Loads the 1K Sample Synthetic Patient Records Data Set. 
+ + More details: https://synthea.mitre.org/downloads + Preprocessing: TODO: add preprocessing link + + Args: + encoded: Whether to return an already encoded object. + columns_obs_only: Columns to include in obs only and not X. + + Returns: + :class:`~anndata.AnnData` object of the 1K Sample Synthetic Patient Records Data Set + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.synthea_1k_sample(encoded=True) + """ + adata: AnnData = read_fhir( + dataset_path=f"{ehrapy_settings.datasetdir}/synthea_sample", + download_dataset_name="synthea_sample", + backup_url="https://synthetichealth.github.io/synthea-sample-data/downloads/synthea_sample_data_fhir_dstu2_sep2019.zip", + columns_obs_only=columns_obs_only, + index_column="id", + archive_format="zip", + ) + + df = anndata_to_df(adata) + df.drop( + columns=[col for col in df.columns if any(isinstance(x, list | dict) for x in df[col].dropna())], inplace=True + ) + df.drop(columns=df.columns[df.isna().all()], inplace=True) + adata = df_to_anndata(df, index_column="id") + + if encoded: + infer_feature_types(adata, output=None, verbose=False) + replace_feature_types(adata, ["resource.multipleBirthInteger", "resource.numberOfSeries"], NUMERIC_TAG) + return encode(adata, autodetect=True) + + return adata diff --git a/data/ehrapy/io/__init__.py b/data/ehrapy/io/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1497d8086608459d9470381b0a83266979628799 --- /dev/null +++ b/data/ehrapy/io/__init__.py @@ -0,0 +1,10 @@ +from ehrapy.io._read import df_to_anndata, read_csv, read_fhir, read_h5ad +from ehrapy.io._write import write + +__all__ = [ + "df_to_anndata", + "read_csv", + "read_fhir", + "read_h5ad", + "write", +] diff --git a/data/ehrapy/io/_read.py b/data/ehrapy/io/_read.py new file mode 100644 index 0000000000000000000000000000000000000000..219958fc88af27f6662289056b174ab16f1917f9 --- /dev/null +++ b/data/ehrapy/io/_read.py @@ -0,0 +1,745 @@ +from __future__ import annotations + +from collections import OrderedDict +from pathlib import Path +from typing import TYPE_CHECKING, Literal + +import fhiry.parallel as fp +import numpy as np +import pandas as pd +from anndata import AnnData +from anndata import read as read_h5 +from lamin_utils import logger +from rich import print + +from ehrapy import ehrapy_settings, settings +from ehrapy.anndata.anndata_ext import df_to_anndata +from ehrapy.data._dataloader import download, remove_archive_extension +from ehrapy.preprocessing._encoding import encode + +if TYPE_CHECKING: + from collections.abc import Iterator + + +def read_csv( + dataset_path: Path | str, + sep: str = ",", + index_column: dict[str, str | int] | str | int | None = None, + columns_obs_only: dict[str, list[str]] | list[str] | None = None, + columns_x_only: dict[str, list[str]] | list[str] | None = None, + return_dfs: bool = False, + cache: bool = False, + download_dataset_name: str | None = None, + backup_url: str | None = None, + archive_format: Literal["zip", "tar", "tar.gz", "tgz"] | None = None, + **kwargs, +) -> AnnData | dict[str, AnnData]: + """Reads or downloads a desired directory of csv/tsv files or a single csv/tsv file. + + Args: + dataset_path: Path to the file or directory to read. + sep: Separator in the file. Delegates to pandas.read_csv(). + index_column: The index column of obs. Usually the patient visit ID or the patient ID. + columns_obs_only: These columns will be added to obs only and not X. + columns_x_only: These columns will be added to X only and all remaining columns to obs. + Note that datetime columns will always be added to .obs though. + return_dfs: Whether to return one or several Pandas DataFrames. + cache: Whether to write to cache when reading or not. + download_dataset_name: Name of the file or directory after download. + backup_url: URL to download the data file(s) from, if the dataset is not yet on disk. + archive_format: The archive format, if the downloaded file is an archive. + + Returns: + An :class:`~anndata.AnnData` object or a dict with an identifier (the filename, without extension) + for each :class:`~anndata.AnnData` object in the dict. + + Examples: + >>> import ehrapy as ep + >>> adata = ep.io.read_csv("myfile.csv") + """ + _check_columns_only_params(columns_obs_only, columns_x_only) + dataset_path = Path(dataset_path) + if not dataset_path.exists(): + dataset_path = _get_non_existing_files(dataset_path, download_dataset_name, backup_url, archive_format) + + adata = _read_csv( + file_path=dataset_path, + sep=sep, + index_column=index_column, + columns_obs_only=columns_obs_only, + columns_x_only=columns_x_only, + return_dfs=return_dfs, + cache=cache, + **kwargs, + ) + return adata
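+ + # Illustrative sketch (mirrors the docstring example above; "myfile.csv", "patient_id" and + # "free_text" are hypothetical names used only to show the parameters): + # >>> adata = ep.io.read_csv("myfile.csv", index_column="patient_id", columns_obs_only=["free_text"]) +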
+ + def _read_csv( + file_path: Path, + sep: str, + index_column: dict[str, str | int] | str | int | None, + columns_obs_only: dict[str, list[str]] | list[str] | None, + columns_x_only: dict[str, list[str]] | list[str] | None, + return_dfs: bool = False, + cache: bool = False, + **kwargs, + ) -> AnnData | dict[str, AnnData]: + """Internal interface of the read_csv method.""" + if cache and return_dfs: + raise CachingNotSupported("Caching is currently not supported for Pandas DataFrame objects.") + if return_dfs and (columns_x_only or columns_obs_only): + raise Warning( + "Parameters columns_x_only and columns_obs_only are not supported when returning Pandas DataFrames." + ) + + path_cache = settings.cachedir / file_path + # reading from a cache file is handled by the read_h5ad function + if cache and (path_cache.is_dir() or path_cache.is_file()): + raise CacheExistsException( + f"{path_cache} already exists. Use the read_h5ad function instead to read from cache!" + ) + + # If the file path is a directory, assume it is a dataset with multiple files + elif file_path.is_dir(): + return _read_from_directory( + file_path, + cache, + path_cache, + extension=sep, + index_column=index_column, + columns_obs_only=columns_obs_only, + columns_x_only=columns_x_only, + return_dfs=return_dfs, + ) + # input is a single file + else: + adata, columns_obs_only = _do_read_csv( + file_path, + sep, + index_column, # type: ignore + columns_obs_only, # type: ignore + columns_x_only, # type: ignore + cache, + **kwargs, + ) + # cache results if desired + if cache: + if not path_cache.parent.is_dir(): + path_cache.parent.mkdir(parents=True) + return _write_cache(adata, path_cache, columns_obs_only) # type: ignore + return adata
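+ + # Caching contract sketch: a first read with cache=True writes a cache file below + # settings.cachedir; reading the same path with cache=True again raises CacheExistsException, + # and the cached object is meant to be loaded with read_h5ad instead (see below). +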
+ + def read_h5ad( + dataset_path: Path | str, + backup_url: str | None = None, + download_dataset_name: str | None = None, + archive_format: Literal["zip", "tar", "tar.gz", "tgz"] | None = None, + ) -> AnnData | dict[str, AnnData]: + """Reads or downloads a desired directory of h5ad files or a single h5ad file. + + Args: + dataset_path: Path to the file or directory to read. + backup_url: URL to download the data file(s) from if not yet existing. + download_dataset_name: Name of the file or directory in case the dataset is downloaded. + archive_format: The archive format, if the downloaded file is an archive. + + Returns: + An :class:`~anndata.AnnData` object or a dict with an identifier (the filename, without extension) + for each :class:`~anndata.AnnData` object in the dict. + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.mimic_2(encoded=True) + >>> ep.io.write("mimic_2.h5ad", adata) + >>> adata_2 = ep.io.read_h5ad("mimic_2.h5ad") + """ + file_path: Path = Path(dataset_path) + if not file_path.exists(): + file_path = _get_non_existing_files(file_path, download_dataset_name, backup_url, archive_format=archive_format) + + if file_path.is_dir(): + adata = _read_from_directory(file_path, False, None, "h5ad") + else: + adata = _do_read_h5ad(file_path) + + return adata + + + def _read_from_directory( + file_path: Path, + cache: bool, + path_cache_dir: Path | None, + extension: str, + index_column: dict[str, str | int] | str | int | None = None, + columns_obs_only: dict[str, list[str]] | list[str] | None = None, + columns_x_only: dict[str, list[str]] | list[str] | None = None, + return_dfs: bool = False, + ) -> dict[str, AnnData] | dict[str, pd.DataFrame]: + """Parse AnnData objects or Pandas DataFrames from a directory containing the data files.""" + if return_dfs: + dfs = _read_multiple_csv(file_path, sep=extension, return_dfs=True) + return dfs # type: ignore + if extension in {",", "\t"}: + adata_objects, columns_obs_only = _read_multiple_csv( # type: ignore + file_path, + sep=extension, + index_column=index_column, + columns_obs_only=columns_obs_only, + columns_x_only=columns_x_only, + return_dfs=False, + ) + # cache results + if cache: + if not path_cache_dir.parent.is_dir(): + path_cache_dir.parent.mkdir(parents=True) + path_cache_dir.mkdir() + return _write_cache_dir(adata_objects, path_cache_dir, columns_obs_only, index_column) # type: ignore + return adata_objects # type: ignore + elif extension == "h5ad": + return _read_multiple_h5ad(file_path) + else: + raise NotImplementedError(f"Reading from directory with .{extension} files is not implemented yet!")
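+ + # Directory-read sketch: pointing the readers at a folder yields one object per file, keyed + # by file name without its extension (this is how ep.dt.mimic_3_demo() exposes its 26 tables): + # >>> tables = ep.io.read_csv("/path/to/mimic_3_csvs", return_dfs=True) # hypothetical folder +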
+ """ + obs_only_all = {} + if return_dfs: + df_dict: dict[str, pd.DataFrame] = {} + else: + anndata_dict = {} + + for file in file_path.iterdir(): + if file.is_file() and file.suffix in {".csv", ".tsv"}: + # slice off the file suffix .csv or .tsv for a clean file name + file_identifier = file.name[:-4] + if return_dfs: + df = pd.read_csv(file, sep=sep, **kwargs) + df_dict[file_identifier] = df + continue + + index_col, col_obs_only, col_x_only = _extract_index_and_columns_obs_only( + file_identifier, index_column, columns_obs_only, columns_x_only + ) + adata, single_adata_obs_only = _do_read_csv(file, sep, index_col, col_obs_only, col_x_only, cache=cache) + obs_only_all[file_identifier] = single_adata_obs_only + # obs indices have to be unique otherwise updating and working with the object will fail + if index_col: + adata.obs_names_make_unique() + + anndata_dict[file_identifier] = adata + if return_dfs: + return df_dict + else: + return anndata_dict, obs_only_all + + +def _do_read_csv( + file_path: Path | Iterator[str], + sep: str | None = ",", + index_column: str | int | None = None, + columns_obs_only: list[str] | None = None, + columns_x_only: list[str] | None = None, + cache: bool = False, + **kwargs, +) -> tuple[AnnData, list[str] | None]: + """Read `.csv` and `.tsv` file. + + Args: + file_path: File path to the csv file. + sep: Separator in the file. Delegates to pandas.read_csv(). + index_column: Index or column name of the index column (obs) + columns_obs_only: List of columns which only be stored in .obs, but not in X. Useful for free text annotations. + columns_x_only: List of columns which only be stored in X, but not in .obs. + + cache: Whether the data should be written to cache or not. + + Returns: + An :class:`~anndata.AnnData` object and the column obs only for the object. + """ + try: + if index_column and columns_obs_only and index_column in columns_obs_only: + logger.warning( + f"Index column '{index_column}' is also used as a column " + f"for obs only. Using default indices instead and moving {index_column} to column_obs_only." + ) + index_column = None + initial_df = pd.read_csv(file_path, sep=sep, index_col=index_column, **kwargs) + # in case the index column is misspelled or does not exist + except ValueError: + raise IndexNotFoundError( + f"Could not create AnnData object while reading file {file_path} . Does index_column named {index_column} " + f"exist in {file_path}?" + ) from None + + initial_df, columns_obs_only = _prepare_dataframe(initial_df, columns_obs_only, columns_x_only, cache) + + return df_to_anndata(initial_df, columns_obs_only), columns_obs_only + + +def _read_multiple_h5ad( + file_path: Path, +) -> dict[str, AnnData]: + """Read a dataset containing multiple .h5ad files. + + Args: + file_path: File path to the directory containing multiple .csv/.tsv files. + + Returns: + A dict mapping the filename (object name) to the corresponding :class:`~anndata.AnnData` object + """ + anndata_dict = {} + for file in file_path.iterdir(): + if file.is_file() and file.suffix == ".h5ad": + # slice off the file suffix .h5ad + adata_identifier = file.name[:-5] + adata = _do_read_h5ad(file) + anndata_dict[adata_identifier] = adata + return anndata_dict + + +def _do_read_h5ad(file_path: Path | Iterator[str]) -> AnnData: + """Read from a h5ad file. + Args: + file_path: Path to the h5ad file. + + Returns: + An AnnData object. 
+ """ + import anndata as ad + + adata = ad.read_h5ad(file_path) + if "ehrapy_dummy_encoding" in adata.uns.keys(): + # if dummy encoding was needed, the original dtype of X could not be numerical, so cast it to object + adata.X = adata.X.astype("object") + decoded_adata = _decode_cached_adata(adata, list(adata.uns["columns_obs_only"])) + return decoded_adata + return adata + + +def read_fhir( + dataset_path: str, + format: Literal["json", "ndjson"] = "json", + columns_obs_only: list[str] | None = None, + columns_x_only: list[str] | None = None, + return_df: bool = False, + cache: bool = False, + backup_url: str | None = None, + index_column: str | int | None = None, + download_dataset_name: str | None = None, + archive_format: Literal["zip", "tar", "tar.gz", "tgz"] = None, +) -> pd.DataFrame | AnnData: + """Reads one or multiple FHIR files using fhiry. + + Uses https://github.com/dermatologist/fhiry to read the FHIR file into a Pandas DataFrame + which is subsequently transformed into an AnnData object. + + Be aware that FHIR data can be nested and return lists or dictionaries as values. + In such cases, one can either: + 1. Transform the data into an awkward array and flatten it when needed. + 2. Extract values from all lists and dictionaries to store single values in the fields. + 3. Remove all lists and dictionaries. Only do this if the information is not relevant to you. + + Args: + dataset_path: Path to one or multiple FHIR files. + format: The file format of the FHIR data. One of 'json' or 'ndjson'. + columns_obs_only: These columns will be added to obs only and not X. + columns_x_only: These columns will be added to X only and all remaining columns to obs. + Note that datetime columns will always be added to .obs though. + return_df: Whether to return one or several Pandas DataFrames. + cache: Whether to write to cache when reading or not. + backup_url: URL to download the data file(s) from if not yet existing. + index_column: The index column for the generated object. Usually the patient or visit ID. + download_dataset_name: Name of the file or directory in case the dataset is downloaded. + archive_format: Whether the downloaded file is an archive. + + Returns: + A Pandas DataFrame or AnnData object of the read in FHIR file(s). + + Examples: + >>> import ehrapy as ep + >>> adata = ep.io.read_fhir("/path/to/fhir/resources") + + Be aware that most FHIR datasets have nested data that might need to be removed. + In such cases consider working with DataFrames. + + >>> df = ep.io.read_fhir("/path/to/fhir/resources", return_df=True) + >>> df.drop( + ... columns=[col for col in df.columns if any(isinstance(x, (list, dict)) for x in df[col].dropna())], + ... inplace=True, + ... 
+
+    """
+    _check_columns_only_params(columns_obs_only, columns_x_only)
+    file_path: Path = Path(dataset_path)
+    if not file_path.exists():
+        file_path = _get_non_existing_files(file_path, download_dataset_name, backup_url, archive_format)
+
+    adata = _read_fhir(
+        file_path=str(file_path.resolve()),
+        format=format,
+        index_column=index_column,
+        columns_obs_only=columns_obs_only,
+        columns_x_only=columns_x_only,
+        return_df=return_df,
+        cache=cache,
+    )
+    return adata
+
+
+def _read_fhir(
+    file_path: str,
+    format: Literal["json", "ndjson"],
+    index_column: dict[str, str | int] | str | int | None,
+    columns_obs_only: list[str] | None,
+    columns_x_only: list[str] | None,
+    return_df: bool = False,
+    cache: bool = False,
+) -> pd.DataFrame | AnnData:
+    """Internal interface of the read_fhir method."""
+    if cache and return_df:
+        raise CachingNotSupported("Caching is currently not supported for Pandas DataFrame objects.")
+    if return_df and (columns_x_only or columns_obs_only):
+        raise ValueError(
+            "Parameters columns_x_only and columns_obs_only are not supported when returning Pandas DataFrames."
+        )
+    path_cache = settings.cachedir / file_path
+    if cache and (path_cache.is_dir() or path_cache.is_file()):
+        raise CacheExistsException(
+            f"{path_cache} already exists. Use the read_h5ad function instead to read from cache!"
+        )
+    if format == "json":
+        df = fp.process(file_path)
+    elif format == "ndjson":
+        df = fp.ndjson(file_path)
+    else:
+        raise ValueError("Only the 'json' and 'ndjson' FHIR formats are supported.")
+
+    df, columns_obs_only = _prepare_dataframe(df, columns_obs_only, columns_x_only, cache)
+    if index_column:
+        df = df.set_index(index_column)
+
+    if return_df:
+        return df
+    else:
+        adata = df_to_anndata(df, columns_obs_only)
+
+    if cache:
+        if not path_cache.parent.is_dir():
+            path_cache.parent.mkdir(parents=True)
+        return _write_cache(adata, path_cache, columns_obs_only)  # type: ignore
+
+    return adata
+
+
+def _get_non_existing_files(
+    dataset_path: Path,
+    download_dataset_name: str | None,
+    backup_url: str | None,
+    archive_format: Literal["zip", "tar", "tar.gz", "tgz"] | None = None,
+) -> Path:
+    """Handle non-existing files or directories by trying to download from a backup_url and moving them to the correct directory.
+
+    Returns:
+        The file or directory path of the downloaded content.
+    """
+    if backup_url is None and not dataset_path.exists():
+        raise ValueError(
+            f"File or directory {dataset_path} does not exist and no backup_url was provided.\n"
+            f"Please provide a backup_url or check whether the path is spelled correctly."
+        )
+    logger.info("Path or dataset does not yet exist. Attempting to download...")
+    download(
+        backup_url,
+        output_file_name=download_dataset_name,
+        output_path=ehrapy_settings.datasetdir,
+        archive_format=archive_format,
+    )
+
+    if archive_format:
+        dataset_path = remove_archive_extension(dataset_path)
+
+    return dataset_path
+
+
+def _read_from_cache_dir(cache_dir: Path) -> dict[str, AnnData]:
+    """Read AnnData objects from the cache directory."""
+    adata_objects = {}
+    # read each cache file in the cache directory and store it into a dict
+    for cache_file in cache_dir.iterdir():
+        if cache_file.name.endswith(".h5ad"):
+            adata_objects[cache_file.stem] = _read_from_cache(cache_file)
+    return adata_objects
+
+
+def _read_from_cache(path_cache: Path) -> AnnData:
+    """Read AnnData object from cached file."""
+    cached_adata = read_h5(path_cache)
+    # type cast required when dealing with non-numerical data; otherwise all values in X would be treated as strings
+    if not np.issubdtype(cached_adata.X.dtype, np.number):
+        cached_adata.X = cached_adata.X.astype("object")
+    try:
+        columns_obs_only = list(cached_adata.uns["columns_obs_only"])
+        del cached_adata.uns["columns_obs_only"]
+    # in case columns_obs_only has not been passed
+    except KeyError:
+        columns_obs_only = []
+    # recreate the original AnnData object with the index column for obs and obs only columns
+    cached_adata = _decode_cached_adata(cached_adata, columns_obs_only)
+
+    return cached_adata
+
+
+def _write_cache_dir(
+    adata_objects: dict[str, AnnData],
+    path_cache: Path,
+    columns_obs_only,
+    index_column: dict[str, str | int] | None,  # type: ignore
+) -> dict[str, AnnData]:
+    """Write multiple AnnData objects into a common cache directory keeping index column and columns_obs_only.
+
+    Args:
+        adata_objects: A dictionary with an identifier as key for each of the AnnData objects.
+        path_cache: Path to the cache directory.
+        columns_obs_only: Columns for obs only.
+        index_column: The index columns for each object (if any).
+
+    Returns:
+        A dict containing a unique identifier and an :class:`~anndata.AnnData` object for each file read.
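+
+    Example:
+        A sketch (illustrative only; assumes ``adata_objects`` was produced by ``_read_multiple_csv``):
+
+        >>> adata_objects = _write_cache_dir(adata_objects, settings.cachedir / "dataset", None, index_column=None)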
+ """ + for identifier in adata_objects: + # for each identifier (for the AnnData object), we need the index column and obs_only cols (if any) for reuse when reading cache + index_col, cols_obs_only, _ = _extract_index_and_columns_obs_only(identifier, index_column, columns_obs_only) + adata_objects[identifier] = _write_cache( + adata_objects[identifier], path_cache / (identifier + ".h5ad"), cols_obs_only + ) + return adata_objects + + +def _write_cache( + raw_anndata: AnnData, + path_cache: Path, + columns_obs_only: list[str] | None, +) -> AnnData: + """Write AnnData object to cache""" + original_x_dtype = raw_anndata.X.dtype + if not np.issubdtype(original_x_dtype, np.number): + cached_adata = encode(adata=raw_anndata, autodetect=True) + else: + cached_adata = raw_anndata + # temporary key that stores all column names that are obs only for this AnnData object + cached_adata.uns["columns_obs_only"] = columns_obs_only + cached_adata.uns["ehrapy_dummy_encoding"] = True + # append correct file suffix + if not path_cache.suffix == ".h5ad": + if path_cache.suffix in {".tsv", ".csv"}: + path_cache = Path(str(path_cache)[:-4] + ".h5ad") + else: + path_cache = Path(str(path_cache) + ".h5ad") + cached_adata.write(path_cache) + # preserve original dtype of X (either numerical or object) + cached_adata.X = cached_adata.X.astype(original_x_dtype) + cached_adata = _decode_cached_adata(cached_adata, columns_obs_only) + return cached_adata + + +def _prepare_dataframe(initial_df: pd.DataFrame, columns_obs_only, columns_x_only=None, cache=False): + """Prepares the dataframe to be casted into an AnnData object. + + Datetime columns will be detected and added to columns_obs_only. + + Returns: + The initially parsed dataframe and an updated list of columns_obs_only. + """ + # when passing columns x only, simply handle the (asymmetric) difference to be obs only and everything else is kept in X + if columns_x_only: + columns_obs_only = list(set(initial_df.columns) - set(columns_x_only)) + # get all object dtype columns + object_type_columns = [col_name for col_name in initial_df.columns if initial_df[col_name].dtype == "object"] + # if columns_obs_only is None, initialize it as datetime columns need to be included here + if not columns_obs_only: + columns_obs_only = [] + no_datetime_object_col = [] + for col in object_type_columns: + try: + pd.to_datetime(initial_df[col], format="mixed", utc=True) + # only add to column_obs_only if not present already to avoid duplicates + if col not in columns_obs_only: + columns_obs_only.append(col) + except (ValueError, TypeError): + # we only need to replace NANs on non datetime, non numerical columns since datetime are obs only by default + no_datetime_object_col.append(col) + # writing to hd5a files requires non string to be empty in non numerical columns + if cache: + # TODO remove this when anndata 0.8.0 is released + initial_df[no_datetime_object_col] = initial_df[no_datetime_object_col].fillna("") + # temporary workaround needed; see https://github.com/theislab/anndata/issues/504 and https://github.com/theislab/anndata/issues/662 + # converting booleans to strings is needed for caching as writing to .h5ad files currently does not support writing boolean values + bool_columns = { + column_name: "str" for column_name in initial_df.columns if initial_df.dtypes[column_name] == "bool" + } + initial_df = initial_df.astype(bool_columns) + return initial_df, columns_obs_only + + +def _decode_cached_adata(adata: AnnData, column_obs_only: list[str]) -> AnnData: + """Decode the 
+
+
+def _decode_cached_adata(adata: AnnData, column_obs_only: list[str]) -> AnnData:
+    """Decode the label encoding of the initial AnnData object.
+
+    Args:
+        adata: The label encoded AnnData object.
+        column_obs_only: The columns that should be kept in obs.
+
+    Returns:
+        The decoded initial AnnData object.
+    """
+    var_names = list(adata.var_names)
+    # for each encoded categorical, replace its encoded values with its original values in X
+    for idx, var_name in enumerate(var_names):
+        if not var_name.startswith("ehrapycat_"):
+            break
+        value_name = var_name[10:]
+        if value_name not in adata.obs.keys():
+            raise ValueError(f"Unencoded values for feature '{value_name}' not found in obs!")
+        original_values = adata.obs[value_name]
+        adata.X[:, idx] = original_values
+        # update var name per categorical
+        var_names[idx] = value_name
+    # drop from obs all columns that are not obs only
+    if column_obs_only:
+        adata.obs = adata.obs[column_obs_only]
+    else:
+        adata.obs = pd.DataFrame(index=adata.obs.index)
+    # set the new var names (unencoded ones)
+    adata.var.index = var_names
+    adata.layers["original"] = adata.X.copy()
+    # reset uns
+    adata.uns = OrderedDict()
+
+    return adata
+
+
+def _extract_index_and_columns_obs_only(identifier: str, index_columns, columns_obs_only, columns_x_only=None):
+    """Extract the index column (if any) and the columns for obs only (if any) from the given user input.
+
+    For each file, `index_columns` and `columns_obs_only` can provide three cases:
+    1.) The filename (thus the identifier) is not present as a key and no default key is provided or one or both dicts are empty:
+    --> No index column will be set and/or no columns are obs only (based on user input)
+
+    2.) The filename (thus the identifier) is not present as a key, but a default key is provided
+    --> The index column will be set and/or columns will be obs only according to the default key
+
+    3.) The filename is present as a key
+    --> The index column will be set and/or columns are obs only according to its value
+
+    Args:
+        identifier: The filename (without extension) identifying the AnnData object.
+        index_columns: The index column(s), either per file or a default.
+        columns_obs_only: Columns for obs only.
+
+    Returns:
+        Index column (if any) and columns obs only (if any) for this specific AnnData object.
+    """
+    _index_column = None
+    _columns_obs_only = None
+    _columns_x_only = None
+    # get index column (if any)
+    if index_columns and identifier in index_columns.keys():
+        _index_column = index_columns[identifier]
+    elif index_columns and "default" in index_columns.keys():
+        _index_column = index_columns["default"]
+
+    # get columns obs only (if any)
+    if columns_obs_only and identifier in columns_obs_only.keys():
+        _columns_obs_only = columns_obs_only[identifier]
+    elif columns_obs_only and "default" in columns_obs_only.keys():
+        _columns_obs_only = columns_obs_only["default"]
+
+    # get columns x only (if any)
+    if columns_x_only and identifier in columns_x_only.keys():
+        _columns_x_only = columns_x_only[identifier]
+    elif columns_x_only and "default" in columns_x_only.keys():
+        _columns_x_only = columns_x_only["default"]
+
+    # if the index column is also found in columns_obs_only or columns_x_only, use default indices instead and only move it to obs/X, but warn the user
+    if _index_column and (
+        (_columns_obs_only and _index_column in _columns_obs_only)
+        or (_columns_x_only and _index_column in _columns_x_only)
+    ):
+        logger.warning(
+            f"Index column '{_index_column}' for file '{identifier}' is also used as a column "
+            f"for obs or X only. Using default indices instead and moving '{_index_column}' to obs/X!"
+        )
+        _index_column = None
+
+    return _index_column, _columns_obs_only, _columns_x_only
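+
+# Example resolution (a sketch): with index_columns={"default": "id", "visits": 0}, the file
+# "visits.csv" resolves to index 0 while any other file falls back to the "default" key:
+#   _extract_index_and_columns_obs_only("visits", {"default": "id", "visits": 0}, None)  # -> (0, None, None)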
+
+
+def _check_columns_only_params(
+    obs_only: dict[str, list[str]] | list[str] | None, x_only: dict[str, list[str]] | list[str] | None
+) -> None:
+    """Check whether columns_obs_only and columns_x_only are passed exclusively.
+
+    For a single AnnData object (thus parameters being a list of strings) it's not desirable to pass both obs_only and x_only.
+    For multiple AnnData objects (thus the parameters being dicts of string keys with a list value), it is possible to pass both. But the keys
+    (unique identifiers of the AnnData objects, basically their names) should share no common identifier,
+    thus a single AnnData object is either in x_only OR obs_only, but not in both.
+    """
+    if not obs_only or not x_only:
+        return
+    if isinstance(obs_only, list) or isinstance(x_only, list):
+        raise ValueError(
+            "Cannot use columns_obs_only together with columns_x_only with a single AnnData object. "
+            "At least one has to be None!"
+        )
+    common_keys = obs_only.keys() & x_only.keys()  # type: ignore
+    if common_keys:
+        raise ValueError(
+            "Cannot use columns_obs_only together with columns_x_only for the same AnnData object. "
+            "The following AnnData identifiers were found "
+            f"in both: {','.join(key for key in common_keys)}!"
+        )
+
+
+class IndexNotFoundError(Exception):
+    pass
+
+
+class CachingNotSupported(Exception):
+    pass
+
+
+class ExtensionMissingError(Exception):
+    pass
+
+
+class CacheExistsException(Exception):
+    pass
diff --git a/data/ehrapy/io/_write.py b/data/ehrapy/io/_write.py
new file mode 100644
index 0000000000000000000000000000000000000000..c44c1f2fd8345a3689ac6e673c13e0537641edb6
--- /dev/null
+++ b/data/ehrapy/io/_write.py
@@ -0,0 +1,111 @@
+from __future__ import annotations
+
+from pathlib import Path
+from typing import TYPE_CHECKING, Literal
+
+import numpy as np
+
+from ehrapy import settings
+from ehrapy.preprocessing._encoding import encode
+
+if TYPE_CHECKING:
+    from anndata import AnnData
+
+supported_extensions = {"csv", "tsv", "h5ad"}
+
+
+def write(
+    filename: str | Path,
+    adata: AnnData,
+    extension: str | None = None,
+    compression: Literal["gzip", "lzf"] | None = "gzip",
+    compression_opts: int | None = None,
+) -> None:
+    """Write :class:`~anndata.AnnData` objects to file.
+
+    It is possible to either write an :class:`~anndata.AnnData` object to a .csv file or a .h5ad file.
+    The .h5ad file can be used as a cache to save the current state of the object and to retrieve it faster once needed.
+    This preserves the object state at the time of writing. It is possible to write both encoded and unencoded objects.
+
+    Args:
+        filename: File name or path to write the file to.
+        adata: Annotated data matrix.
+        extension: File extension. One of 'h5ad', 'csv'. Defaults to `None` which infers the extension from the filename.
+        compression: Optional file compression. One of 'gzip', 'lzf'.
+        compression_opts: See http://docs.h5py.org/en/latest/high/dataset.html.
+
+    Examples:
+        >>> import ehrapy as ep
+        >>> adata = ep.dt.mimic_2(encoded=True)
+        >>> ep.io.write("mimic_2.h5ad", adata)
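+
+        A hypothetical csv export (a sketch; with a ``.csv`` filename the object is written as a
+        directory of .csv files via :meth:`~anndata.AnnData.write_csvs`):
+
+        >>> ep.io.write("mimic_2.csv", adata)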
+    """
+    filename = Path(filename)  # allow passing strings
+    if _get_file_extension(filename):
+        _extension = _get_file_extension(filename)
+        if extension is None:
+            extension = _extension
+        elif extension != _extension:
+            raise ValueError(
+                "It suffices to provide the file type by "
+                "providing a proper extension to the filename. "
+                'One of "csv", "tsv", "h5ad".'
+            )
+    else:
+        key = filename
+        extension = settings.file_format_data if extension is None else extension
+        filename = _get_filename_from_key(key, extension)
+    if extension == "csv":
+        adata.write_csvs(filename)
+    else:
+        # dummy encoding when there is non numerical data in X
+        if not np.issubdtype(adata.X.dtype, np.number) and extension == "h5ad":
+            # flag to indicate an AnnData object has been dummy encoded to write it to a .h5ad file
+            # Case of writing an unencoded non numerical AnnData object
+            encoded_adata = encode(adata, autodetect=True)
+            encoded_adata.uns["ehrapy_dummy_encoding"] = True
+            encoded_adata.uns["columns_obs_only"] = list(adata.obs.columns)
+            encoded_adata.write(filename, compression=compression, compression_opts=compression_opts)
+        else:
+            adata.write(filename, compression=compression, compression_opts=compression_opts)
+
+
+def _get_file_extension(file_path: Path) -> str:
+    """Determine the file extension of a given file path.
+
+    Args:
+        file_path: Path to the file.
+
+    Returns:
+        File extension of the specified file.
+    """
+    ext = file_path.suffixes
+
+    if len(ext) > 2:
+        ext = ext[-2:]
+
+    if ext and ext[-1][1:] in supported_extensions:
+        return ext[-1][1:]
+    raise ValueError(
+        f"""\
+{file_path!r} does not end on a valid extension.
+Please provide one of the available extensions.
+{supported_extensions}
+"""
+    )
+
+
+def _get_filename_from_key(key, extension=None) -> Path:
+    """Get the full file name from a key.
+
+    Args:
+        key: Key to get the file name for.
+        extension: File extension.
+
+    Returns:
+        Path to the full file.
+    """
+    extension = settings.file_format_data if extension is None else extension
+    extension = "csv" if extension is None else extension
+
+    return settings.datasetdir / f"{key}.{extension}"
diff --git a/data/ehrapy/plot/__init__.py b/data/ehrapy/plot/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..0c740e95e90ad217717c3470fb6679cdf72a06ad
--- /dev/null
+++ b/data/ehrapy/plot/__init__.py
@@ -0,0 +1,7 @@
+from ehrapy.plot._catplot import catplot
+from ehrapy.plot._colormaps import *  # noqa: F403
+from ehrapy.plot._missingno_pl_api import *  # noqa: F403
+from ehrapy.plot._scanpy_pl_api import *  # noqa: F403
+from ehrapy.plot._survival_analysis import kaplan_meier, kmf, ols
+from ehrapy.plot.causal_inference._dowhy import causal_effect
+from ehrapy.plot.feature_ranking._feature_importances import rank_features_supervised
diff --git a/data/ehrapy/plot/_catplot.py b/data/ehrapy/plot/_catplot.py
new file mode 100644
index 0000000000000000000000000000000000000000..bab43b2a9198c6948c13850ca6d7c9e626a99ca3
--- /dev/null
+++ b/data/ehrapy/plot/_catplot.py
@@ -0,0 +1,49 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+import pandas as pd
+import seaborn as sns
+
+if TYPE_CHECKING:
+    from scanpy import AnnData
+    from seaborn.axisgrid import FacetGrid
+
+
+def catplot(
+    adata: AnnData, x: str | None = None, y: str | None = None, hue: str | None = None, kind: str = "strip", **kwargs
+) -> FacetGrid:
+    """Plot categorical data.
+
+    Wrapper around `seaborn.catplot <https://seaborn.pydata.org/generated/seaborn.catplot.html>`_. Typically used to show
+    the behaviour of one numerical variable with respect to one or several categorical variables.
+
+    Considers adata.obs only.
+
+    Args:
+        adata: AnnData object.
+        x: Variable to plot on the x-axis.
+        y: Variable to plot on the y-axis.
+        hue: Variable to plot as different colors.
+        kind: Kind of plot to make. Options are: "point", "bar", "strip", "swarm", "box", "violin", "boxen", or "count".
+        **kwargs: Keyword arguments for seaborn.catplot.
+
+    Returns:
+        A Seaborn FacetGrid object for further modifications.
+
+    Examples:
+        >>> import ehrapy as ep
+        >>> adata = ep.dt.diabetes_130_fairlearn()
+        >>> ep.ad.move_to_obs(adata, ["A1Cresult", "admission_source_id"], copy_obs=True)
+        >>> adata.obs["A1Cresult_measured"] = ~adata.obs["A1Cresult"].isna()
+        >>> ep.pl.catplot(
+        ...     adata=adata,
+        ...     y="A1Cresult_measured",
+        ...     x="admission_source_id",
+        ...     kind="point",
+        ...     ci=95,
+        ...     join=False,
+        ... )
+
+    .. image:: /_static/docstring_previews/catplot.png
+    """
+    return sns.catplot(data=adata.obs, x=x, y=y, hue=hue, kind=kind, **kwargs)
diff --git a/data/ehrapy/plot/_colormaps.py b/data/ehrapy/plot/_colormaps.py
new file mode 100644
index 0000000000000000000000000000000000000000..c706993017baae46844fe9e80472761528edc9b3
--- /dev/null
+++ b/data/ehrapy/plot/_colormaps.py
@@ -0,0 +1,20 @@
+from enum import Enum
+
+from matplotlib.colors import LinearSegmentedColormap
+
+
+class Colormaps(Enum):
+    """
+    Available colormaps:
+    | grey_red
+    | grey_blue
+    | grey_green
+    | grey_yellow
+    | grey_violet
+    """
+
+    grey_red = LinearSegmentedColormap.from_list("grouping", ["lightgray", "red", "darkred"], N=128)
+    grey_green = LinearSegmentedColormap.from_list("grouping", ["lightgray", "limegreen", "forestgreen"], N=128)
+    grey_yellow = LinearSegmentedColormap.from_list("grouping", ["lightgray", "yellow", "gold"], N=128)
+    grey_violet = LinearSegmentedColormap.from_list("grouping", ["lightgray", "mediumvioletred", "indigo"], N=128)
+    grey_blue = LinearSegmentedColormap.from_list("grouping", ["lightgray", "cornflowerblue", "darkblue"], N=128)
diff --git a/data/ehrapy/plot/_missingno_pl_api.py b/data/ehrapy/plot/_missingno_pl_api.py
new file mode 100644
index 0000000000000000000000000000000000000000..c5c8d52cf3a4730efa53f954b6fb0eb091f5d417
--- /dev/null
+++ b/data/ehrapy/plot/_missingno_pl_api.py
@@ -0,0 +1,316 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+import missingno as msno
+
+from ehrapy.anndata import anndata_ext as ae
+
+if TYPE_CHECKING:
+    from anndata import AnnData
+
+
+def missing_values_matrix(
+    adata: AnnData,
+    *,
+    filter: str | None = None,
+    max_cols: int = 0,
+    max_percentage: float = 0,
+    sort: str | None = None,
+    figsize: tuple = (25, 10),
+    width_ratios: tuple = (15, 1),
+    color: tuple = (0.25, 0.25, 0.25),
+    fontsize: float = 16,
+    labels: bool = True,
+    label_rotation: float = 45,
+    sparkline: bool = True,
+    categoricals: bool = False,
+):  # pragma: no cover
+    """A matrix visualization of the nullity of the given AnnData object.
+
+    Args:
+        adata: :class:`~anndata.AnnData` object containing all observations.
+        filter: The filter to apply to the matrix. Should be one of "top", "bottom", or None.
+        max_cols: The max number of columns from the AnnData object to include.
+        max_percentage: The max percentage fill of the columns from the AnnData object.
+        sort: The row sort order to apply. Can be "ascending", "descending", or None.
+        figsize: The size of the figure to display.
+        width_ratios: The ratio of the width of the matrix to the width of the sparkline.
+        color: The color of the filled columns.
+        fontsize: The figure's font size.
+        labels: Whether or not to display the column names.
+        label_rotation: What angle to rotate the text labels to.
+        sparkline: Whether or not to display the sparkline.
+        categoricals: Whether to include "ehrapycat" columns in the plot.
+
+    Returns:
+        The plot axis.
+ + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.mimic_2(encoded=True) + >>> ep.pl.missing_values_matrix(adata, filter="bottom", max_cols=15, max_percentage=0.999) + + Preview: + .. image:: /_static/docstring_previews/missingno_matrix.png + """ + df = ae.anndata_to_df(adata) + + if not categoricals: + non_categorical_columns = [col for col in df if not col.startswith("ehrapycat")] + return msno.matrix( + df[non_categorical_columns], + filter, + max_cols, + max_percentage, + sort, + figsize, + width_ratios, + color, + fontsize, + labels, + label_rotation, + sparkline, + ) + else: + return msno.matrix( + df, + filter, + max_cols, + max_percentage, + sort, + figsize, + width_ratios, + color, + fontsize, + labels, + label_rotation, + sparkline, + ) + + +def missing_values_barplot( + adata: AnnData, + *, + log: bool = False, + filter: str | None = None, + max_cols: int = 0, + max_percentage: float = 0, + sort: str | None = None, + figsize: tuple | None = None, + color: str = "dimgray", + fontsize: float = 16, + labels: str | None = None, + label_rotation: float = 45, + orientation: str | None = None, + categoricals: bool = False, +): # pragma: no cover + """A bar chart visualization of the nullity of the given AnnData object. + + Args: + adata: :class:`~anndata.AnnData` object containing all observations. + log: Whether to display a logarithmic plot. + filter: The filter to apply to the barplot. Should be one of "top", "bottom", or None. + max_cols: The max number of columns from the AnnData object to include. + max_percentage: The max percentage fill of the columns from the AnnData object. + sort: The row sort order to apply. Can be "ascending", "descending", or None. + figsize: The size of the figure to display. + color: The color of the filled columns. + fontsize: The figure's font size. + labels: Whether to display the column names. + label_rotation: What angle to rotate the text labels to. + orientation: The way the bar plot is oriented. + categoricals: Whether to include "ehrapycat" columns to the plot. + + Returns: + The plot axis. + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.mimic_2(encoded=True) + >>> ep.pl.missing_values_barplot(adata, filter="bottom", max_cols=15, max_percentage=0.999) + + Preview: + .. image:: /_static/docstring_previews/missingno_barplot.png + """ + df = ae.anndata_to_df(adata) + + if not categoricals: + non_categorical_columns = [col for col in df if not col.startswith("ehrapycat")] + return msno.bar( + df[non_categorical_columns], + figsize, + fontsize, + labels, + label_rotation, + log, + color, + filter, + max_cols, + max_percentage, + sort, + orientation, + ) + else: + return msno.bar( + df, + figsize, + fontsize, + labels, + label_rotation, + log, + color, + filter, + max_cols, + max_percentage, + sort, + orientation, + ) + + +def missing_values_heatmap( + adata: AnnData, + *, + filter: str | None = None, + max_cols: int = 0, + max_percentage: float = 0, + sort: str | None = None, + figsize: tuple = (20, 12), + fontsize: float = 16, + labels: bool = True, + label_rotation: float = 45, + cmap: str = "RdBu", + vmin: int = -1, + vmax: int = 1, + cbar: bool = True, + categoricals: bool = False, +): # pragma: no cover + """Presents a `seaborn` heatmap visualization of nullity correlation in the given AnnData object. + + Note that this visualization has no special support for large datasets. For those, try the dendrogram instead. + + Args: + adata: :class:`~anndata.AnnData` object containing all observations. 
+        filter: The filter to apply to the heatmap. Should be one of "top", "bottom", or None.
+        max_cols: The max number of columns from the AnnData object to include.
+        max_percentage: The max percentage fill of the columns from the AnnData object.
+        sort: The row sort order to apply. Can be "ascending", "descending", or None.
+        figsize: The size of the figure to display.
+        fontsize: The figure's font size.
+        labels: Whether or not to display the column names.
+        label_rotation: What angle to rotate the text labels to.
+        cmap: What `matplotlib` colormap to use.
+        vmin: The lower normalized colormap threshold.
+        vmax: The upper normalized colormap threshold.
+        cbar: Whether to draw a colorbar.
+        categoricals: Whether to include "ehrapycat" columns in the plot.
+
+    Returns:
+        The plot axis.
+
+    Examples:
+        >>> import ehrapy as ep
+        >>> adata = ep.dt.mimic_2(encoded=True)
+        >>> ep.pl.missing_values_heatmap(adata, filter="bottom", max_cols=15, max_percentage=0.999)
+
+    Preview:
+        .. image:: /_static/docstring_previews/missingno_heatmap.png
+    """
+    df = ae.anndata_to_df(adata)
+
+    if not categoricals:
+        non_categorical_columns = [col for col in df if not col.startswith("ehrapycat")]
+        return msno.heatmap(
+            df[non_categorical_columns],
+            filter,
+            max_cols,
+            max_percentage,
+            sort,
+            figsize,
+            fontsize,
+            labels,
+            label_rotation,
+            cmap,
+            vmin,
+            vmax,
+            cbar,
+        )
+    else:
+        return msno.heatmap(
+            df,
+            filter,
+            max_cols,
+            max_percentage,
+            sort,
+            figsize,
+            fontsize,
+            labels,
+            label_rotation,
+            cmap,
+            vmin,
+            vmax,
+            cbar,
+        )
+
+
+def missing_values_dendrogram(
+    adata: AnnData,
+    *,
+    method: str = "average",
+    filter: str | None = None,
+    max_cols: int = 0,
+    max_percentage: float = 0,
+    orientation: str | None = None,
+    figsize: tuple | None = None,
+    fontsize: float = 16,
+    label_rotation: float = 45,
+    categoricals: bool = False,
+):  # pragma: no cover
+    """Fits a `scipy` hierarchical clustering algorithm to the given AnnData object's var and visualizes the results as
+    a `scipy` dendrogram.
+
+    The default vertical display will fit up to 50 columns. If more than 50 columns are specified and orientation is
+    left unspecified, the dendrogram will automatically swap to a horizontal display to fit the additional variables.
+
+    Args:
+        adata: :class:`~anndata.AnnData` object containing all observations.
+        method: The distance measure being used for clustering. This parameter is passed to `scipy.cluster.hierarchy`.
+        filter: The filter to apply to the dendrogram. Should be one of "top", "bottom", or None.
+        max_cols: The max number of columns from the AnnData object to include.
+        max_percentage: The max percentage fill of the columns from the AnnData object.
+        figsize: The size of the figure to display.
+        fontsize: The figure's font size.
+        orientation: The way the dendrogram is oriented.
+        label_rotation: What angle to rotate the text labels to.
+        categoricals: Whether to include "ehrapycat" columns in the plot.
+
+    Returns:
+        The plot axis.
+
+    Example:
+        >>> import ehrapy as ep
+        >>> adata = ep.dt.mimic_2(encoded=True)
+        >>> ep.pl.missing_values_dendrogram(adata, filter="bottom", max_cols=15, max_percentage=0.999)
+
+    Preview:
+        ..
image:: /_static/docstring_previews/missingno_dendrogram.png + """ + df = ae.anndata_to_df(adata) + + if not categoricals: + non_categorical_columns = [col for col in df if not col.startswith("ehrapycat")] + return msno.dendrogram( + df[non_categorical_columns], + method, + filter, + max_cols, + max_percentage, + orientation, + figsize, + fontsize, + label_rotation, + ) + else: + return msno.dendrogram( + df, method, filter, max_cols, max_percentage, orientation, figsize, fontsize, label_rotation + ) diff --git a/data/ehrapy/plot/_scanpy_pl_api.py b/data/ehrapy/plot/_scanpy_pl_api.py new file mode 100644 index 0000000000000000000000000000000000000000..81791036514ffe613f748f015821b4f82f563b4c --- /dev/null +++ b/data/ehrapy/plot/_scanpy_pl_api.py @@ -0,0 +1,2454 @@ +from __future__ import annotations + +from collections.abc import Callable, Collection, Iterable, Mapping, Sequence +from enum import Enum +from functools import partial +from types import MappingProxyType +from typing import TYPE_CHECKING, Any, Literal + +import scanpy as sc +from scanpy.plotting import DotPlot, MatrixPlot, StackedViolin + +from ehrapy._utils_doc import ( + _doc_params, + doc_adata_color_etc, + doc_common_groupby_plot_args, + doc_common_plot_args, + doc_edges_arrows, + doc_panels, + doc_scatter_basic, + doc_scatter_embedding, + doc_show_save_ax, + doc_vbound_percentile, + doc_vboundnorm, +) + +if TYPE_CHECKING: + from pathlib import Path + + import numpy as np + import pandas as pd + from anndata import AnnData + from cycler import Cycler + from matplotlib.axes import Axes + from matplotlib.colors import Colormap, ListedColormap, Normalize + from matplotlib.figure import Figure + from scanpy.plotting._utils import _AxesSubplot + +_Basis = Literal["pca", "tsne", "umap", "diffmap", "draw_graph_fr"] +_VarNames = str | Sequence[str] +ColorLike = str | tuple[float, ...] +_IGraphLayout = Literal["fa", "fr", "rt", "rt_circular", "drl", "eq_tree", ...] # type: ignore +_FontWeight = Literal["light", "normal", "medium", "semibold", "bold", "heavy", "black"] +_FontSize = Literal["xx-small", "x-small", "small", "medium", "large", "x-large", "xx-large"] +VBound = str | float | Callable[[Sequence[float]], float] + + +@_doc_params(scatter_temp=doc_scatter_basic, show_save_ax=doc_show_save_ax) +def scatter( + adata: AnnData, + x: str | None = None, + y: str | None = None, + color: str = None, + use_raw: bool | None = None, + layers: str | Collection[str] = None, + sort_order: bool = True, + alpha: float | None = None, + basis: _Basis | None = None, + groups: str | Iterable[str] = None, + components: str | Collection[str] = None, + projection: Literal["2d", "3d"] = "2d", + legend_loc: str = "right margin", + legend_fontsize: int | float | _FontSize | None = None, + legend_fontweight: int | _FontWeight | None = None, + legend_fontoutline: float = None, + color_map: str | Colormap = None, + palette: Cycler | ListedColormap | ColorLike | Sequence[ColorLike] = None, + frameon: bool | None = None, + right_margin: float | None = None, + left_margin: float | None = None, + size: int | float | None = None, + title: str | None = None, + show: bool | None = None, + save: str | bool | None = None, + ax: Axes | None = None, +): # pragma: no cover + """Scatter plot along observations or variables axes. + + Color the plot using annotations of observations (`.obs`), variables (`.var`) or features (`.var_names`). + + Args: + adata: :class:`~anndata.AnnData` object object containing all observations. 
+ x: x coordinate (MedCat entities currently not supported) + y: y coordinate (MedCat entities currently not supported) + color: Keys for annotations of observations/patients or features, or a hex color specification, e.g., + `'ann1'`, `'#fe57a1'`, or `['ann1', 'ann2']` or extracted entities from ehrapy's MedCat tool. + use_raw: Whether to use `raw` attribute of `adata`. Defaults to `True` if `.raw` is present. + layers: Use the `layers` attribute of `adata` if present: specify the layer for `x`, `y` and `color`. + If `layers` is a string, then it is expanded to `(layers, layers, layers)`. + basis: String that denotes a plotting tool that computed coordinates. + {scatter_temp} + {show_save_ax} + + Returns: + If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it. + + Example: + .. code-block:: python + + import ehrapy as ep + + adata = ep.dt.mimic_2(encoded=True) + ep.pp.knn_impute(adata) + ep.pp.log_norm(adata, offset=1) + ep.pp.neighbors(adata) + ep.pl.scatter(adata, x="age", y="icu_los_day", color="icu_los_day") + + Preview: + .. image:: /_static/docstring_previews/scatter.png + """ + scatter_partial = partial( + sc.pl.scatter, + x=x, + y=y, + use_raw=use_raw, + layers=layers, + sort_order=sort_order, + alpha=alpha, + basis=basis, + groups=groups, + components=components, + projection=projection, + legend_loc=legend_loc, + legend_fontsize=legend_fontsize, + legend_fontweight=legend_fontweight, + legend_fontoutline=legend_fontoutline, + color_map=color_map, + palette=palette, + frameon=frameon, + right_margin=right_margin, + left_margin=left_margin, + size=size, + title=title, + show=show, + save=save, + ax=ax, + ) + + return scatter_partial(adata=adata, color=color) + + +@_doc_params( + vminmax=doc_vboundnorm, + show_save_ax=doc_show_save_ax, + common_plot_args=doc_common_plot_args, +) +def heatmap( + adata: AnnData, + var_names: _VarNames | Mapping[str, _VarNames], + groupby: str | Sequence[str], + use_raw: bool | None = None, + log: bool = False, + num_categories: int = 7, + dendrogram: bool | str = False, + feature_symbols: str | None = None, + var_group_positions: Sequence[tuple[int, int]] | None = None, + var_group_labels: Sequence[str] | None = None, + var_group_rotation: float | None = None, + layer: str | None = None, + standard_scale: Literal["var", "obs"] | None = None, + swap_axes: bool = False, + show_feature_labels: bool | None = None, + show: bool | None = None, + save: str | bool | None = None, + figsize: tuple[float, float] | None = None, + vmin: float | None = None, + vmax: float | None = None, + vcenter: float | None = None, + norm: Normalize | None = None, + **kwds, +): # pragma: no cover + """Heatmap of the feature values. + + If `groupby` is given, the heatmap is ordered by the respective group. + If the `groupby` observation annotation is not categorical the observation + annotation is turned into a categorical by binning the data into the number specified in `num_categories`. + + Args: + {common_plot_args} + standard_scale: Whether or not to standardize that dimension between 0 and 1, meaning for each variable or observation, + subtract the minimum and divide each by its maximum. + swap_axes: By default, the x axis contains `var_names` (e.g. features) and the y axis the `groupby` + categories (if any). By setting `swap_axes` then x are the `groupby` categories and y the `var_names`. + show_feature_labels: By default feature labels are shown when there are 50 or less features. Otherwise the labels are removed. 
+ {show_save_ax} + {vminmax} + **kwds: + Are passed to :func:`matplotlib.pyplot.imshow`. + + Returns: + List of :class:`~matplotlib.axes.Axes` + + Example: + .. code-block:: python + + import ehrapy as ep + + adata = ep.dt.mimic_2(encoded=True) + ep.pp.knn_impute(adata) + ep.pp.log_norm(adata, offset=1) + ep.pp.neighbors(adata) + ep.tl.leiden(adata, resolution=0.5, key_added="leiden_0_5") + ep.pl.heatmap( + adata, + var_names=[ + "map_1st", + "hr_1st", + "temp_1st", + "spo2_1st", + "abg_count", + "wbc_first", + "hgb_first", + "platelet_first", + "sodium_first", + "potassium_first", + "tco2_first", + "chloride_first", + "bun_first", + "creatinine_first", + "po2_first", + "pco2_first", + "iv_day_1", + ], + groupby="leiden_0_5", + ) + + Preview: + .. image:: /_static/docstring_previews/heatmap.png + """ + heatmap_partial = partial( + sc.pl.heatmap, + var_names=var_names, + use_raw=use_raw, + log=log, + num_categories=num_categories, + dendrogram=dendrogram, + gene_symbols=feature_symbols, + var_group_positions=var_group_positions, + var_group_labels=var_group_labels, + var_group_rotation=var_group_rotation, + layer=layer, + standard_scale=standard_scale, + swap_axes=swap_axes, + show_gene_labels=show_feature_labels, + show=show, + save=save, + figsize=figsize, + vmin=vmin, + vmax=vmax, + vcenter=vcenter, + norm=norm, + **kwds, + ) + + return heatmap_partial(adata=adata, groupby=groupby) + + +@_doc_params( + show_save_ax=doc_show_save_ax, + common_plot_args=doc_common_plot_args, + groupby_plots_args=doc_common_groupby_plot_args, + vminmax=doc_vboundnorm, +) +def dotplot( + adata: AnnData, + var_names: _VarNames | Mapping[str, _VarNames], + groupby: str, + use_raw: bool | None = None, + log: bool = False, + num_categories: int = 7, + feature_cutoff: float = 0.0, + mean_only_counts: bool = False, + cmap: str = "Reds", + dot_max: float | None = DotPlot.DEFAULT_DOT_MAX, + dot_min: float | None = DotPlot.DEFAULT_DOT_MIN, + standard_scale: Literal["var", "group"] | None = None, + smallest_dot: float | None = DotPlot.DEFAULT_SMALLEST_DOT, + title: str | None = None, + colorbar_title: str | None = "Mean value in group", + size_title: str | None = DotPlot.DEFAULT_SIZE_LEGEND_TITLE, + figsize: tuple[float, float] | None = None, + dendrogram: bool | str = False, + feature_symbols: str | None = None, + var_group_positions: Sequence[tuple[int, int]] | None = None, + var_group_labels: Sequence[str] | None = None, + var_group_rotation: float | None = None, + layer: str | None = None, + swap_axes: bool | None = False, + dot_color_df: pd.DataFrame | None = None, + show: bool | None = None, + save: str | bool | None = None, + ax: _AxesSubplot | None = None, + return_fig: bool | None = False, + vmin: float | None = None, + vmax: float | None = None, + vcenter: float | None = None, + norm: Normalize | None = None, + **kwds, +) -> DotPlot | dict | None: # pragma: no cover + """Makes a *dot plot* of the count values of `var_names`. + + For each var_name and each `groupby` category a dot is plotted. + Each dot represents two values: mean expression within each category + (visualized by color) and fraction of observations expressing the `var_name` in the + category (visualized by the size of the dot). If `groupby` is not given, + the dotplot assumes that all data belongs to a single category. + + .. note:: + A count is used if it is above the specified threshold which is zero by default. + + Args: + {common_plot_args} + {groupby_plots_args} + size_title: Title for the size legend. 
New line character (\\n) can be used. + feature_cutoff: Count cutoff that is used for binarizing the counts and + determining the fraction of patients having the feature. + A feature is only used if its counts are greater than this threshold. + mean_only_counts: If True, counts are averaged only over the patients having the provided feature. + dot_max: If none, the maximum dot size is set to the maximum fraction value found + (e.g. 0.6). If given, the value should be a number between 0 and 1. + All fractions larger than dot_max are clipped to this value. + dot_min: If none, the minimum dot size is set to 0. If given, + the value should be a number between 0 and 1. + All fractions smaller than dot_min are clipped to this value. + smallest_dot: If none, the smallest dot has size 0. All counts with `dot_min` are plotted with this size. + {show_save_ax} + {vminmax} + kwds: + Are passed to :func:`matplotlib.pyplot.scatter`. + + Returns: + If `return_fig` is `True`, returns a :class:`~scanpy.pl.DotPlot` object, else if `show` is false, return axes dict + + Example: + .. code-block:: python + + import ehrapy as ep + + adata = ep.dt.mimic_2(encoded=True) + ep.pp.knn_impute(adata) + ep.pp.neighbors(adata) + ep.tl.leiden(adata, resolution=0.5, key_added="leiden_0_5") + ep.pl.dotplot( + adata, + var_names=[ + "age", + "gender_num", + "weight_first", + "bmi", + "wbc_first", + "hgb_first", + "platelet_first", + "sodium_first", + "potassium_first", + "tco2_first", + "chloride_first", + "bun_first", + "creatinine_first", + "po2_first", + "pco2_first", + ], + groupby="leiden_0_5", + ) + + Preview: + .. image:: /_static/docstring_previews/dotplot.png + """ + dotplot_partial = partial( + sc.pl.dotplot, + var_names=var_names, + use_raw=use_raw, + log=log, + num_categories=num_categories, + expression_cutoff=feature_cutoff, + mean_only_expressed=mean_only_counts, + cmap=cmap, + dot_max=dot_max, + dot_min=dot_min, + standard_scale=standard_scale, + smallest_dot=smallest_dot, + title=title, + colorbar_title=colorbar_title, + size_title=size_title, + figsize=figsize, + dendrogram=dendrogram, + gene_symbols=feature_symbols, + var_group_positions=var_group_positions, + var_group_labels=var_group_labels, + var_group_rotation=var_group_rotation, + layer=layer, + swap_axes=swap_axes, + dot_color_df=dot_color_df, + show=show, + save=save, + ax=ax, + return_fig=return_fig, + vmin=vmin, + vmax=vmax, + vcenter=vcenter, + norm=norm, + **kwds, + ) + + return dotplot_partial(adata=adata, groupby=groupby) + + +@_doc_params(show_save_ax=doc_show_save_ax, common_plot_args=doc_common_plot_args) +def tracksplot( + adata: AnnData, + var_names: _VarNames | Mapping[str, _VarNames], + groupby: str, + use_raw: bool | None = None, + log: bool = False, + dendrogram: bool | str = False, + feature_symbols: str | None = None, + var_group_positions: Sequence[tuple[int, int]] | None = None, + var_group_labels: Sequence[str] | None = None, + layer: str | None = None, + show: bool | None = None, + save: str | bool | None = None, + figsize: tuple[float, float] | None = None, + **kwds, +) -> dict[str, list] | None: # pragma: no cover + """Plots a filled line plot. + + In this type of plot each var_name is plotted as a filled line plot where the + y values correspond to the var_name values and x is each of the observations. Best results + are obtained when using raw counts that are not log. + `groupby` is required to sort and order the values using the respective group and should be a categorical value. 
+
+    Args:
+        {common_plot_args}
+        {show_save_ax}
+        **kwds:
+            Are passed to :func:`~seaborn.heatmap`.
+
+    Returns:
+        A list of :class:`~matplotlib.axes.Axes`.
+
+    Example:
+        .. code-block:: python
+
+            import ehrapy as ep
+
+            adata = ep.dt.mimic_2(encoded=True)
+            ep.pp.knn_impute(adata)
+            ep.pp.neighbors(adata)
+            ep.tl.leiden(adata, resolution=0.5, key_added="leiden_0_5")
+            ep.pl.tracksplot(
+                adata,
+                var_names=[
+                    "age",
+                    "gender_num",
+                    "weight_first",
+                    "bmi",
+                    "sapsi_first",
+                    "sofa_first",
+                    "service_num",
+                    "day_icu_intime_num",
+                    "hour_icu_intime",
+                ],
+                groupby="leiden_0_5",
+            )
+
+    Preview:
+        .. image:: /_static/docstring_previews/tracksplot.png
+    """
+    tracksplot_partial = partial(
+        sc.pl.tracksplot,
+        var_names=var_names,
+        use_raw=use_raw,
+        log=log,
+        dendrogram=dendrogram,
+        gene_symbols=feature_symbols,
+        var_group_positions=var_group_positions,
+        var_group_labels=var_group_labels,
+        layer=layer,
+        show=show,
+        save=save,
+        figsize=figsize,
+        **kwds,
+    )
+
+    return tracksplot_partial(adata=adata, groupby=groupby)
+
+
+def violin(
+    adata: AnnData,
+    keys: str | Sequence[str],
+    groupby: str | None = None,
+    log: bool = False,
+    use_raw: bool | None = None,
+    stripplot: bool = True,
+    jitter: float | bool = True,
+    size: int = 1,
+    layer: str | None = None,
+    scale: Literal["area", "count", "width"] = "width",
+    order: Sequence[str] | None = None,
+    multi_panel: bool | None = None,
+    xlabel: str = "",
+    ylabel: str | Sequence[str] | None = None,
+    rotation: float | None = None,
+    show: bool | None = None,
+    save: bool | str | None = None,
+    ax: Axes | None = None,
+    **kwds,
+):  # pragma: no cover
+    """Violin plot.
+
+    Wraps :func:`seaborn.violinplot` for :class:`~anndata.AnnData`.
+
+    Args:
+        adata: :class:`~anndata.AnnData` object containing all observations.
+        keys: Keys for accessing variables of `.var_names` or fields of `.obs`.
+        groupby: The key of the observation grouping to consider.
+        log: Plot on logarithmic axis.
+        use_raw: Whether to use `raw` attribute of `adata`. Defaults to `True` if `.raw` is present.
+        stripplot: Add a stripplot on top of the violin plot. See :func:`~seaborn.stripplot`.
+        jitter: Add jitter to the stripplot (only when stripplot is True). See :func:`~seaborn.stripplot`.
+        size: Size of the jitter points.
+        layer: Name of the AnnData object layer to be plotted. By
+            default adata.raw.X is plotted. If `use_raw=False` is set,
+            then `adata.X` is plotted. If `layer` is set to a valid layer name,
+            then the layer is plotted. `layer` takes precedence over `use_raw`.
+        scale: The method used to scale the width of each violin.
+            If 'width' (the default), each violin will have the same width.
+            If 'area', each violin will have the same area.
+            If 'count', a violin’s width corresponds to the number of observations.
+        order: Order in which to show the categories.
+        multi_panel: Display keys in multiple panels also when `groupby is not None`.
+        xlabel: Label of the x axis. Defaults to `groupby` if `rotation` is `None`, otherwise, no label is shown.
+        ylabel: Label of the y axis. If `None` and `groupby` is `None`, defaults to `'value'`.
+            If `None` and `groupby` is not `None`, defaults to `keys`.
+        rotation: Rotation of xtick labels.
+        {show_save_ax}
+        **kwds:
+            Are passed to :func:`~seaborn.violinplot`.
+
+    Returns:
+        A :class:`~matplotlib.axes.Axes` object if `ax` is `None` else `None`.
+
+    Example:
+        ..
code-block:: python + + import ehrapy as ep + + adata = ep.dt.mimic_2(encoded=True) + ep.pp.knn_impute(adata) + ep.pp.log_norm(adata, offset=1) + ep.pp.neighbors(adata) + ep.tl.leiden(adata, resolution=0.5, key_added="leiden_0_5") + ep.pl.violin(adata, keys=["age"], groupby="leiden_0_5") + + Preview: + .. image:: /_static/docstring_previews/violin.png + """ + violin_partial = partial( + sc.pl.violin, + keys=keys, + log=log, + use_raw=use_raw, + stripplot=stripplot, + jitter=jitter, + size=size, + layer=layer, + scale=scale, + order=order, + multi_panel=multi_panel, + xlabel=xlabel, + ylabel=ylabel, + rotation=rotation, + show=show, + save=save, + ax=ax, + **kwds, + ) + + return violin_partial(adata=adata, groupby=groupby) + + +@_doc_params( + show_save_ax=doc_show_save_ax, + common_plot_args=doc_common_plot_args, + groupby_plots_args=doc_common_groupby_plot_args, + vminmax=doc_vboundnorm, +) +def stacked_violin( + adata: AnnData, + var_names: _VarNames | Mapping[str, _VarNames], + groupby: str | Sequence[str], + log: bool = False, + use_raw: bool | None = None, + num_categories: int = 7, + title: str | None = None, + colorbar_title: str | None = "Median value\n in group", + figsize: tuple[float, float] | None = None, + dendrogram: bool | str = False, + gene_symbols: str | None = None, + var_group_positions: Sequence[tuple[int, int]] | None = None, + var_group_labels: Sequence[str] | None = None, + standard_scale: Literal["var", "obs"] | None = None, + var_group_rotation: float | None = None, + layer: str | None = None, + stripplot: bool = StackedViolin.DEFAULT_STRIPPLOT, + jitter: float | bool = StackedViolin.DEFAULT_JITTER, + size: int = StackedViolin.DEFAULT_JITTER_SIZE, + scale: Literal[ + "area", "count", "width" + ] = "width", # TODO This should be StackedViolin.DEFAULT_DENSITY_NORM -> wait for next release + yticklabels: bool | None = StackedViolin.DEFAULT_PLOT_YTICKLABELS, + order: Sequence[str] | None = None, + swap_axes: bool = False, + show: bool | None = None, + save: bool | str | None = None, + return_fig: bool | None = False, + row_palette: str | None = StackedViolin.DEFAULT_ROW_PALETTE, + cmap: str | None = StackedViolin.DEFAULT_COLORMAP, + ax: _AxesSubplot | None = None, + vmin: float | None = None, + vmax: float | None = None, + vcenter: float | None = None, + norm: Normalize | None = None, + **kwds, +) -> StackedViolin | dict | None: # pragma: no cover + """Stacked violin plots. + + Makes a compact image composed of individual violin plots (from :func:`~seaborn.violinplot`) stacked on top of each other. + + This function provides a convenient interface to the :class:`~scanpy.pl.StackedViolin` class. + If you need more flexibility, you should use :class:`~scanpy.pl.StackedViolin` directly. + + + Args: + {common_plot_args} + {groupby_plots_args} + stripplot: Add a stripplot on top of the violin plot. See :func:`~seaborn.stripplot`. + jitter: Add jitter to the stripplot (only when stripplot is True) See :func:`~seaborn.stripplot`. + size: Size of the jitter points. + yticklabels: Set to true to view the y tick labels + order: Order in which to show the categories. Note: if `dendrogram=True` + the categories order will be given by the dendrogram and `order` will be ignored. + scale: The method used to scale the width of each violin. + If 'width' (the default), each violin will have the same width. + If 'area', each violin will have the same area. + If 'count', a violin’s width corresponds to the number of observations. 
+        row_palette: By default, median values are mapped to the violin color using a
+            color map (see `cmap` argument). Alternatively, a `row_palette` can
+            be given to color each violin plot row using different colors.
+            The value should be a valid seaborn or matplotlib palette name (see :func:`~seaborn.color_palette`).
+            Alternatively, a single color name or hex value can be passed, e.g. `'red'` or `'#cc33ff'`.
+        {show_save_ax}
+        {vminmax}
+        kwds:
+            Are passed to :func:`~seaborn.violinplot`.
+
+    Returns:
+        If `return_fig` is `True`, returns a :class:`~scanpy.pl.StackedViolin` object; otherwise, if `show` is `False`, returns a dict of axes.
+
+    Example:
+        .. code-block:: python
+
+            import ehrapy as ep
+
+            adata = ep.dt.mimic_2(encoded=True)
+            ep.pp.knn_impute(adata)
+            ep.pp.log_norm(adata, offset=1)
+            ep.pp.neighbors(adata)
+            ep.tl.leiden(adata, resolution=0.5, key_added="leiden_0_5")
+            ep.pl.stacked_violin(
+                adata,
+                var_names=[
+                    "icu_los_day",
+                    "hospital_los_day",
+                    "age",
+                    "gender_num",
+                    "weight_first",
+                    "bmi",
+                    "sapsi_first",
+                    "sofa_first",
+                    "service_num",
+                    "day_icu_intime_num",
+                    "hour_icu_intime",
+                ],
+                groupby="leiden_0_5",
+            )
+
+    Preview:
+        .. image:: /_static/docstring_previews/stacked_violin.png
+    """
+    stacked_vio_partial = partial(
+        sc.pl.stacked_violin,
+        var_names=var_names,
+        log=log,
+        use_raw=use_raw,
+        num_categories=num_categories,
+        title=title,
+        colorbar_title=colorbar_title,
+        figsize=figsize,
+        dendrogram=dendrogram,
+        gene_symbols=gene_symbols,
+        var_group_positions=var_group_positions,
+        var_group_labels=var_group_labels,
+        standard_scale=standard_scale,
+        var_group_rotation=var_group_rotation,
+        layer=layer,
+        stripplot=stripplot,
+        jitter=jitter,
+        size=size,
+        scale=scale,
+        yticklabels=yticklabels,
+        order=order,
+        swap_axes=swap_axes,
+        show=show,
+        save=save,
+        return_fig=return_fig,
+        row_palette=row_palette,
+        cmap=cmap,
+        ax=ax,
+        vmin=vmin,
+        vmax=vmax,
+        vcenter=vcenter,
+        norm=norm,
+        **kwds,
+    )
+
+    return stacked_vio_partial(adata=adata, groupby=groupby)
+
+
+@_doc_params(
+    show_save_ax=doc_show_save_ax,
+    common_plot_args=doc_common_plot_args,
+    groupby_plots_args=doc_common_groupby_plot_args,
+    vminmax=doc_vboundnorm,
+)
+def matrixplot(
+    adata: AnnData,
+    var_names: _VarNames | Mapping[str, _VarNames],
+    groupby: str | Sequence[str],
+    use_raw: bool | None = None,
+    log: bool = False,
+    num_categories: int = 7,
+    figsize: tuple[float, float] | None = None,
+    dendrogram: bool | str = False,
+    title: str | None = None,
+    cmap: str | None = MatrixPlot.DEFAULT_COLORMAP,
+    colorbar_title: str | None = "Mean value\n in group",
+    gene_symbols: str | None = None,
+    var_group_positions: Sequence[tuple[int, int]] | None = None,
+    var_group_labels: Sequence[str] | None = None,
+    var_group_rotation: float | None = None,
+    layer: str | None = None,
+    standard_scale: Literal["var", "group"] | None = None,
+    values_df: pd.DataFrame | None = None,
+    swap_axes: bool = False,
+    show: bool | None = None,
+    save: str | bool | None = None,
+    ax: _AxesSubplot | None = None,
+    return_fig: bool | None = False,
+    vmin: float | None = None,
+    vmax: float | None = None,
+    vcenter: float | None = None,
+    norm: Normalize | None = None,
+    **kwds,
+) -> MatrixPlot | dict | None:  # pragma: no cover
+    """Creates a heatmap of the mean count per group for each of the `var_names`.
+
+    This function provides a convenient interface to the :class:`~scanpy.pl.MatrixPlot`
+    class. If you need more flexibility, you should use :class:`~scanpy.pl.MatrixPlot` directly.
+ + + Args: + {common_plot_args} + {groupby_plots_args} + {show_save_ax} + {vminmax} + kwds: + Are passed to :func:`matplotlib.pyplot.pcolor`. + + Returns: + If `return_fig` is `True`, returns a :class:`~scanpy.pl.MatrixPlot` object, else if `show` is false, return axes dict + + Example: + .. code-block:: python + + import ehrapy as ep + + adata = ep.dt.mimic_2(encoded=True) + ep.pp.knn_impute(adata) + ep.pp.log_norm(adata, offset=1) + ep.pp.neighbors(adata) + ep.tl.leiden(adata, resolution=0.5, key_added="leiden_0_5") + ep.pl.matrixplot( + adata, + var_names=[ + "abg_count", + "wbc_first", + "hgb_first", + "platelet_first", + "sodium_first", + "potassium_first", + "tco2_first", + "chloride_first", + "bun_first", + "creatinine_first", + "po2_first", + "pco2_first", + "iv_day_1", + ], + groupby="leiden_0_5", + ) + + Preview: + .. image:: /_static/docstring_previews/matrixplot.png + """ + matrix_partial = partial( + sc.pl.matrixplot, + var_names=var_names, + use_raw=use_raw, + log=log, + num_categories=num_categories, + figsize=figsize, + dendrogram=dendrogram, + title=title, + cmap=cmap, + colorbar_title=colorbar_title, + gene_symbols=gene_symbols, + var_group_positions=var_group_positions, + var_group_labels=var_group_labels, + var_group_rotation=var_group_rotation, + layer=layer, + standard_scale=standard_scale, + values_df=values_df, + swap_axes=swap_axes, + show=show, + save=save, + ax=ax, + return_fig=return_fig, + vmin=vmin, + vmax=vmax, + vcenter=vcenter, + norm=norm, + **kwds, + ) + + return matrix_partial(adata=adata, groupby=groupby) + + +@_doc_params(show_save_ax=doc_show_save_ax) +def clustermap( + adata: AnnData, + obs_keys: str | None = None, + use_raw: bool | None = None, + show: bool | None = None, + save: bool | str | None = None, + **kwds, +): # pragma: no cover + """Hierarchically-clustered heatmap. + + Wraps :func:`seaborn.clustermap` for :class:`~anndata.AnnData`. + + Args: + adata: :class:`~anndata.AnnData` object object containing all observations. + obs_keys: Categorical annotation to plot with a different color map. Currently, only a single key is supported. + use_raw: Whether to use `raw` attribute of `adata`. Defaults to `True` if `.raw` is present. + {show_save_ax} + **kwds: + Keyword arguments passed to :func:`~seaborn.clustermap`. + + Returns: + If `show` is `False`, a :class:`~seaborn.ClusterGrid` object (see :func:`~seaborn.clustermap`). + + Example: + .. code-block:: python + + import ehrapy as ep + + adata = ep.dt.mimic_2(encoded=True) + ep.pp.knn_impute(adata) + ep.pp.log_norm(adata, offset=1) + ep.pp.neighbors(adata) + ep.tl.leiden(adata, resolution=0.5, key_added="leiden_0_5") + ep.pl.clustermap(adata) + + Preview: + .. image:: /_static/docstring_previews/clustermap.png + """ + clustermap_partial = partial(sc.pl.clustermap, use_raw=use_raw, show=show, save=save, **kwds) + + return clustermap_partial(adata=adata, obs_keys=obs_keys) + + +def ranking( + adata: AnnData, + attr: Literal["var", "obs", "uns", "varm", "obsm"], + keys: str | Sequence[str], + dictionary=None, + indices=None, + labels=None, + color="black", + n_points=30, + log=False, + include_lowest=False, + show=None, +): # pragma: no cover + """Plot rankings. + + See, for example, how this is used in pl.pca_loadings. + + Args: + adata: :class:`~anndata.AnnData` object object containing all observations. + attr: The attribute of AnnData that contains the score. + keys: The scores to look up an array from the attribute of adata. + dictionary: Optional key dictionary. 
+ indices: Optional dictionary indices. + labels: Optional labels. + color: Optional primary color (default: black). + n_points: Number of points (default: 30). + log: Whether logarithmic scale should be used. + include_lowest: Whether to include the lowest points. + show: Whether to show the plot. + + Returns: + Returns matplotlib gridspec with access to the axes. + + Example: + .. code-block:: python + + import ehrapy as ep + + adata = ep.dt.mimic_2(encoded=True) + ep.pp.knn_impute(adata) + ep.pp.log_norm(adata, offset=1) + ep.pp.neighbors(adata) + ep.pp.pca(adata) + TODO: ep.pl.ranking(adata) + """ + return sc.pl.ranking( + adata=adata, + attr=attr, + keys=keys, + dictionary=dictionary, + indices=indices, + labels=labels, + color=color, + n_points=n_points, + log=log, + include_lowest=include_lowest, + show=show, + ) + + +@_doc_params(show_save_ax=doc_show_save_ax) +def dendrogram( + adata: AnnData, + groupby: str, + *, + dendrogram_key: str | None = None, + orientation: Literal["top", "bottom", "left", "right"] = "top", + remove_labels: bool = False, + show: bool | None = None, + save: str | bool | None = None, + ax: Axes | None = None, +): # pragma: no cover + """Plots a dendrogram of the categories defined in `groupby`. + + See :func:`~ehrapy.tl.dendrogram`. + + Args: + adata: :class:`~anndata.AnnData` object object containing all observations. + groupby: Categorical data column used to create the dendrogram. + dendrogram_key: Key under with the dendrogram information was stored. + By default the dendrogram information is stored under `.uns[f'dendrogram_{{groupby}}']`. + orientation: Origin of the tree. Will grow into the opposite direction. + remove_labels: Don’t draw labels. Used e.g. by :func:`scanpy.pl.matrixplot` to annotate matrix columns/rows. + {show_save_ax} + + Returns: + :class:`matplotlib.axes.Axes` + + Example: + .. code-block:: python + + import ehrapy as ep + + adata = ep.dt.mimic_2(encoded=True) + ep.pp.knn_impute(adata) + ep.pp.log_norm(adata, offset=1) + ep.pp.neighbors(adata) + ep.tl.leiden(adata, resolution=0.5, key_added="leiden_0_5") + ep.pl.dendrogram(adata, groupby="leiden_0_5") + + Preview: + .. image:: /_static/docstring_previews/dendrogram.png + """ + dendrogram_partial = partial( + sc.pl.dendrogram, + dendrogram_key=dendrogram_key, + orientation=orientation, + remove_labels=remove_labels, + show=show, + save=save, + ax=ax, + ) + + return dendrogram_partial(adata=adata, groupby=groupby) + + +# @_wraps_plot_scatter +@_doc_params( + adata_color_etc=doc_adata_color_etc, + scatter_bulk=doc_scatter_embedding, + show_save_ax=doc_show_save_ax, +) +def pca( + adata, + *, + annotate_var_explained: bool = False, + show: bool | None = None, + return_fig: bool | None = None, + save: bool | str | None = None, + **kwargs, +) -> Axes | list[Axes] | None: # pragma: no cover + """Scatter plot in PCA coordinates. + + Use the parameter `annotate_var_explained` to annotate the explained variance. + + Args: + {adata_color_etc} + annotate_var_explained: Whether to only annotate the explained variables. + {scatter_bulk} + {show_save_ax} + + Returns: + If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it. + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.mimic_2(encoded=True) + >>> ep.pp.knn_impute(adata) + >>> ep.pp.log_norm(adata, offset=1) + >>> ep.pp.neighbors(adata) + >>> ep.tl.pca(adata) + >>> ep.pl.pca(adata, color="service_unit") + + Preview: + .. 
image:: /_static/docstring_previews/pca.png + """ + pca_partial = partial( + sc.pl.pca, annotate_var_explained=annotate_var_explained, show=show, return_fig=return_fig, save=save + ) + + return pca_partial(adata=adata, **kwargs) + + +def pca_loadings( + adata: AnnData, + components: str | Sequence[int] | None = None, + include_lowest: bool = True, + show: bool | None = None, + save: str | bool | None = None, +): # pragma: no cover + """Rank features according to contributions to PCs. + + Args: + adata: :class:`~anndata.AnnData` object object containing all observations. + components: For example, ``'1,2,3'`` means ``[1, 2, 3]``, first, second, third principal component. + include_lowest: Whether to show the features with both highest and lowest loadings. + show: Show the plot, do not return axis. + save: If `True` or a `str`, save the figure. A string is appended to the default filename. + Infer the filetype if ending on {`'.pdf'`, `'.png'`, `'.svg'`}. + + Returns: + If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it. + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.mimic_2(encoded=True) + >>> ep.pp.knn_impute(adata) + >>> ep.pp.log_norm(adata, offset=1) + >>> ep.pp.neighbors(adata) + >>> ep.pp.pca(adata) + >>> ep.pl.pca_loadings(adata, components="1,2,3") + + Preview: + .. image:: /_static/docstring_previews/pca_loadings.png + """ + return sc.pl.pca_loadings(adata=adata, components=components, include_lowest=include_lowest, show=show, save=save) + + +def pca_variance_ratio( + adata: AnnData, + n_pcs: int = 30, + log: bool = False, + show: bool | None = None, + save: bool | str | None = None, +): # pragma: no cover + """Plot the variance ratio. + + Args: + adata: :class:`~anndata.AnnData` object object containing all observations. + n_pcs: Number of PCs to show. + log: Plot on logarithmic scale.. + show: Show the plot, do not return axis. + save: If `True` or a `str`, save the figure. + A string is appended to the default filename. + Infer the filetype if ending on {`'.pdf'`, `'.png'`, `'.svg'`}. + + Returns: + If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it. + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.mimic_2(encoded=True) + >>> ep.pp.knn_impute(adata) + >>> ep.pp.log_norm(adata, offset=1) + >>> ep.pp.neighbors(adata) + >>> ep.pp.pca(adata) + >>> ep.pl.pca_variance_ratio(adata, n_pcs=8) + + Preview: + .. image:: /_static/docstring_previews/pca_variance_ratio.png + """ + return sc.pl.pca_variance_ratio(adata=adata, n_pcs=n_pcs, log=log, show=show, save=save) + + +@_doc_params(scatter_bulk=doc_scatter_embedding, show_save_ax=doc_show_save_ax) +def pca_overview(adata: AnnData, **params): # pragma: no cover + """Plot PCA results. + + The parameters are the ones of the scatter plot. Call pca_ranking separately + if you want to change the default settings. + + Args: + adata: :class:`~anndata.AnnData` object object containing all observations. + {scatter_bulk} + {show_save_ax} + **params: Scatterplot parameters + + Returns: + If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it. + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.mimic_2(encoded=True) + >>> ep.pp.knn_impute(adata) + >>> ep.pp.log_norm(adata, offset=1) + >>> ep.pp.neighbors(adata) + >>> ep.pp.pca(adata) + >>> ep.pl.pca_overview(adata, components="1,2,3", color="service_unit") + + Preview: + .. image:: /_static/docstring_previews/pca_overview_1.png + + .. image:: /_static/docstring_previews/pca_overview_2.png + + .. 
image:: /_static/docstring_previews/pca_overview_3.png + """ + return sc.pl.pca_overview(adata=adata, **params) + + +# @_wraps_plot_scatter +@_doc_params( + adata_color_etc=doc_adata_color_etc, + edges_arrows=doc_edges_arrows, + scatter_bulk=doc_scatter_embedding, + show_save_ax=doc_show_save_ax, +) +def tsne(adata, **kwargs) -> Axes | list[Axes] | None: # pragma: no cover + """Scatter plot in tSNE basis. + + Args: + {adata_color_etc} + {edges_arrows} + {scatter_bulk} + {show_save_ax} + + Returns: + If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it. + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.mimic_2(encoded=True) + >>> ep.pp.knn_impute(adata) + >>> ep.pp.log_norm(adata, offset=1) + >>> ep.pp.neighbors(adata) + >>> ep.tl.tsne(adata) + >>> ep.pl.tsne(adata) + + .. image:: /_static/docstring_previews/tsne_1.png + + >>> ep.pl.tsne( + ... adata, color=["day_icu_intime", "service_unit"], wspace=0.5, title=["Day of ICU admission", "Service unit"] + ... ) + + .. image:: /_static/docstring_previews/tsne_2.png + + >>> ep.tl.leiden(adata, resolution=0.5, key_added="leiden_0_5") + >>> ep.pl.tsne(adata, color=["leiden_0_5"], title="Leiden 0.5") + + .. image:: /_static/docstring_previews/tsne_3.png + """ + + return sc.pl.tsne(adata=adata, **kwargs) + + +# @_wraps_plot_scatter +@_doc_params( + adata_color_etc=doc_adata_color_etc, + edges_arrows=doc_edges_arrows, + scatter_bulk=doc_scatter_embedding, + show_save_ax=doc_show_save_ax, +) +def umap(adata: AnnData, **kwargs) -> Axes | list[Axes] | None: # pragma: no cover + """Scatter plot in UMAP basis. + + Args: + {adata_color_etc} + {edges_arrows} + {scatter_bulk} + {show_save_ax} + + Returns: + If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it. + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.mimic_2(encoded=True) + >>> ep.pp.knn_impute(adata) + >>> ep.pp.log_norm(adata, offset=1) + >>> ep.pp.neighbors(adata) + >>> ep.tl.umap(adata) + >>> ep.pl.umap(adata) + + .. image:: /_static/docstring_previews/umap_1.png + + >>> ep.pl.umap( + ... adata, color=["day_icu_intime", "service_unit"], wspace=0.5, title=["Day of ICU admission", "Service unit"] + ... ) + + .. image:: /_static/docstring_previews/umap_2.png + + >>> ep.tl.leiden(adata, resolution=0.5, key_added="leiden_0_5") + >>> ep.pl.umap(adata, color=["leiden_0_5"], title="Leiden 0.5") + + .. image:: /_static/docstring_previews/umap_3.png + """ + + return sc.pl.umap(adata=adata, **kwargs) + + +# @_wraps_plot_scatter +@_doc_params( + adata_color_etc=doc_adata_color_etc, + scatter_bulk=doc_scatter_embedding, + show_save_ax=doc_show_save_ax, +) +def diffmap(adata, **kwargs) -> Axes | list[Axes] | None: # pragma: no cover + """Scatter plot in Diffusion Map basis. + + Args: + {adata_color_etc} + {scatter_bulk} + {show_save_ax} + + Returns: + If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it. + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.mimic_2(encoded=True) + >>> ep.pp.knn_impute(adata) + >>> ep.pp.log_norm(adata, offset=1) + >>> ep.pp.neighbors(adata) + >>> ep.tl.diffmap(adata) + >>> ep.pl.diffmap(adata, color="day_icu_intime") + + Preview: + .. 
image:: /_static/docstring_previews/diffmap.png + """ + + return sc.pl.diffmap(adata=adata, **kwargs) + + +# @_wraps_plot_scatter +@_doc_params( + adata_color_etc=doc_adata_color_etc, + edges_arrows=doc_edges_arrows, + scatter_bulk=doc_scatter_embedding, + show_save_ax=doc_show_save_ax, +) +def draw_graph( + adata: AnnData, *, layout: _IGraphLayout | None = None, **kwargs +) -> Axes | list[Axes] | None: # pragma: no cover + """Scatter plot in graph-drawing basis. + + Args: + {adata_color_etc} + layout: One of the :func:`~scanpy.tl.draw_graph` layouts. By default, the last computed layout is used. + {edges_arrows} + {scatter_bulk} + {show_save_ax} + + Returns: + If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it. + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.mimic_2(encoded=True) + >>> ep.pp.knn_impute(adata) + >>> ep.pp.log_norm(adata, offset=1) + >>> ep.pp.neighbors(adata) + >>> ep.tl.leiden(adata, resolution=0.5, key_added="leiden_0_5") + >>> ep.tl.paga(adata, groups="leiden_0_5") + >>> ep.pl.paga( + ... adata, + ... color=["leiden_0_5", "day_28_flg"], + ... cmap=ep.pl.Colormaps.grey_red.value, + ... title=["Leiden 0.5", "Died in less than 28 days"], + ... ) + >>> ep.tl.draw_graph(adata, init_pos="paga") + >>> ep.pl.draw_graph(adata, color=["leiden_0_5", "icu_exp_flg"], legend_loc="on data") + + Preview: + .. image:: /_static/docstring_previews/draw_graph_1.png + + .. image:: /_static/docstring_previews/draw_graph_2.png + """ + draw_graph_part = partial(sc.pl.draw_graph, layout=layout) + + return draw_graph_part(adata=adata, **kwargs) + + +class Empty(Enum): + token = 0 + + +_empty = Empty.token + + +@_doc_params( + adata_color_etc=doc_adata_color_etc, + edges_arrows=doc_edges_arrows, + scatter_bulk=doc_scatter_embedding, + show_save_ax=doc_show_save_ax, +) +def embedding( + adata: AnnData, + basis: str, + *, + color: str | Sequence[str] | None = None, + feature_symbols: str | None = None, + use_raw: bool | None = None, + sort_order: bool = True, + edges: bool = False, + edges_width: float = 0.1, + edges_color: str | Sequence[float] | Sequence[str] = "grey", + neighbors_key: str | None = None, + arrows: bool = False, + arrows_kwds: Mapping[str, Any] | None = None, + groups: str | None = None, + components: str | Sequence[str] = None, + layer: str | None = None, + projection: Literal["2d", "3d"] = "2d", + scale_factor: float | None = None, + color_map: Colormap | str | None = None, + cmap: Colormap | str | None = None, + palette: str | Sequence[str] | Cycler | None = None, + na_color: ColorLike = "lightgray", + na_in_legend: bool = True, + size: float | Sequence[float] | None = None, + frameon: bool | None = None, + legend_fontsize: int | float | _FontSize | None = None, + legend_fontweight: int | _FontWeight = "bold", + legend_loc: str = "right margin", + legend_fontoutline: int | None = None, + vmax: VBound | Sequence[VBound] | None = None, + vmin: VBound | Sequence[VBound] | None = None, + vcenter: VBound | Sequence[VBound] | None = None, + norm: Normalize | Sequence[Normalize] | None = None, + add_outline: bool | None = False, + outline_width: tuple[float, float] = (0.3, 0.05), + outline_color: tuple[str, str] = ("black", "white"), + ncols: int = 4, + hspace: float = 0.25, + wspace: float | None = None, + title: str | Sequence[str] | None = None, + show: bool | None = None, + save: bool | str | None = None, + ax: Axes | None = None, + return_fig: bool | None = None, + **kwargs, +) -> Figure | Axes | None: # pragma: no cover + """Scatter plot for user 
specified embedding basis (e.g. umap, pca, etc.).
+
+    Args:
+        basis: Name of the `obsm` basis to use.
+        {adata_color_etc}
+        {edges_arrows}
+        {scatter_bulk}
+        {show_save_ax}
+
+    Returns:
+        If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.
+
+    Examples:
+        >>> import ehrapy as ep
+        >>> adata = ep.dt.mimic_2(encoded=True)
+        >>> ep.pp.knn_impute(adata)
+        >>> ep.pp.log_norm(adata, offset=1)
+        >>> ep.pp.neighbors(adata)
+        >>> ep.tl.umap(adata)
+        >>> ep.pl.embedding(adata, "X_umap", color="icu_exp_flg")
+
+    Preview:
+        .. image:: /_static/docstring_previews/embedding.png
+    """
+    embedding_partial = partial(
+        sc.pl.embedding,
+        basis=basis,
+        gene_symbols=feature_symbols,
+        use_raw=use_raw,
+        sort_order=sort_order,
+        edges=edges,
+        edges_width=edges_width,
+        edges_color=edges_color,
+        neighbors_key=neighbors_key,
+        arrows=arrows,
+        arrows_kwds=arrows_kwds,
+        groups=groups,
+        components=components,
+        layer=layer,
+        projection=projection,
+        scale_factor=scale_factor,
+        color_map=color_map,
+        cmap=cmap,
+        palette=palette,
+        na_color=na_color,
+        na_in_legend=na_in_legend,
+        size=size,
+        frameon=frameon,
+        legend_fontsize=legend_fontsize,
+        legend_fontweight=legend_fontweight,
+        legend_loc=legend_loc,
+        legend_fontoutline=legend_fontoutline,
+        vmax=vmax,
+        vmin=vmin,
+        vcenter=vcenter,
+        norm=norm,
+        add_outline=add_outline,
+        outline_width=outline_width,
+        outline_color=outline_color,
+        ncols=ncols,
+        hspace=hspace,
+        wspace=wspace,
+        title=title,
+        show=show,
+        save=save,
+        ax=ax,
+        return_fig=return_fig,
+        **kwargs,
+    )
+
+    return embedding_partial(adata=adata, color=color)
+
+
+@_doc_params(vminmax=doc_vbound_percentile, panels=doc_panels, show_save_ax=doc_show_save_ax)
+def embedding_density(
+    adata: AnnData,
+    basis: str = "umap",  # was positional before 1.4.5
+    key: str | None = None,  # was positional before 1.4.5
+    groupby: str | None = None,
+    group: str | list[str] | None = "all",
+    color_map: Colormap | str = "YlOrRd",
+    bg_dotsize: int | None = 80,
+    fg_dotsize: int | None = 180,
+    vmax: int | None = 1,
+    vmin: int | None = 0,
+    vcenter: int | None = None,
+    norm: Normalize | None = None,
+    ncols: int | None = 4,
+    hspace: float | None = 0.25,
+    wspace: float | None = None,
+    title: str | None = None,
+    show: bool | None = None,
+    save: bool | str | None = None,
+    ax: Axes | None = None,
+    return_fig: bool | None = None,
+    **kwargs,
+) -> Figure | Axes | None:  # pragma: no cover
+    """Plot the density of observations in an embedding (per condition).
+
+    Plots the Gaussian kernel density estimates (per condition) computed by
+    :func:`~ehrapy.tl.embedding_density`. This currently does not support extracted medcat entities.
+
+    Args:
+        adata: :class:`~anndata.AnnData` object containing all observations.
+        basis: The embedding over which the density was calculated.
+            This embedded representation should be found in `adata.obsm['X_[basis]']`.
+        key: Name of the `.obs` covariate that contains the density estimates. Alternatively, pass `groupby`.
+        groupby: Name of the condition used in `tl.embedding_density`. Alternatively, pass `key`.
+        group: The category in the categorical observation annotation to be plotted.
+            By default, all categories are plotted (`group='all'`). To plot multiple categories,
+            pass a list (e.g. `['G1', 'S']`). To plot the overall density, set `group=None`.
+        color_map: Matplotlib color map to use for density plotting.
+        bg_dotsize: Dot size for background data points not in the `group`.
+        fg_dotsize: Dot size for foreground data points in the `group`.
+ vmin: The value representing the lower limit of the color scale. Values smaller than vmin are plotted + with the same color as vmin. vmin can be a number, a string, a function or `None`. If + vmin is a string and has the format `pN`, this is interpreted as a vmin=percentile(N). + For example vmin='p1.5' is interpreted as the 1.5 percentile. If vmin is function, then + vmin is interpreted as the return value of the function over the list of values to plot. + For example to set vmin tp the mean of the values to plot, `def my_vmin(values): return + np.mean(values)` and then set `vmin=my_vmin`. If vmin is None (default) an automatic + minimum value is used as defined by matplotlib `scatter` function. When making multiple + plots, vmin can be a list of values, one for each plot. For example `vmin=[0.1, 'p1', None, my_vmin]` + vmax: The value representing the upper limit of the color scale. The format is the same as for `vmin`. + vcenter: The value representing the center of the color scale. Useful for diverging colormaps. + The format is the same as for `vmin`. + Example: sc.pl.umap(adata, color='TREM2', vcenter='p50', cmap='RdBu_r') + ncols: Number of panels per row. + wspace: Adjust the width of the space between multiple panels. + hspace: Adjust the height of the space between multiple panels. + return_fig: Return the matplotlib figure.\ + {show_save_ax} + + Returns: + If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it. + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.mimic_2(encoded=True) + >>> ep.pp.knn_impute(adata) + >>> ep.pp.log_norm(adata, offset=1) + >>> ep.pp.neighbors(adata) + >>> ep.tl.umap(adata) + >>> ep.tl.leiden(adata, resolution=0.5, key_added="leiden_0_5") + >>> ep.tl.embedding_density(adata, groupby='leiden_0_5', key_added='icu_exp_flg') + >>> ep.pl.embedding_density(adata, key='icu_exp_flg') + + Preview: + .. image:: /_static/docstring_previews/embedding_density.png + """ + return sc.pl.embedding_density( + adata=adata, + basis=basis, + key=key, + groupby=groupby, + group=group, + color_map=color_map, + bg_dotsize=bg_dotsize, + fg_dotsize=fg_dotsize, + vmax=vmax, + vmin=vmin, + vcenter=vcenter, + norm=norm, + ncols=ncols, + hspace=hspace, + wspace=wspace, + title=title, + show=show, + save=save, + ax=ax, + return_fig=return_fig, + **kwargs, + ) + + +def dpt_groups_pseudotime( + adata: AnnData, + color_map: str | Colormap | None = None, + palette: Sequence[str] | Cycler | None = None, + show: bool | None = None, + save: bool | str | None = None, +): # pragma: no cover + """Plot groups and pseudotime. + + Args: + adata: :class:`~anndata.AnnData` object object containing all observations. + color_map: Matplotlib Colormap + palette: Matplotlib color Palette + show: Whether to show the plot. + save: Whether to save the plot or a path to save the plot. + + Examples: + >>> import ehrapy as ep + >>> import numpy as np + >>> adata = ep.dt.mimic_2(encoded=True) + >>> ep.pp.knn_impute(adata) + >>> ep.pp.log_norm(adata, offset=1) + >>> ep.pp.neighbors(adata, method="gauss") + >>> ep.tl.leiden(adata, resolution=0.5, key_added="leiden_0_5") + >>> ep.tl.diffmap(adata, n_comps=10) + >>> adata.uns["iroot"] = np.flatnonzero(adata.obs["leiden_0_5"] == "0")[0] + >>> ep.tl.dpt(adata, n_branchings=3) + >>> ep.pl.dpt_groups_pseudotime(adata) + + Preview: + .. 
image:: /_static/docstring_previews/dpt_groups_pseudotime.png + """ + sc.pl.dpt_groups_pseudotime(adata=adata, color_map=color_map, palette=palette, show=show, save=save) + + +def dpt_timeseries( + adata: AnnData, + color_map: str | Colormap = None, + as_heatmap: bool = True, + show: bool | None = None, + save: bool | None = None, +): # pragma: no cover + """Heatmap of pseudotime series. + + Args: + adata: :class:`~anndata.AnnData` object object containing all observations. + color_map: Matplotlib Colormap + as_heatmap: Whether to render the plot a heatmap + show: Whether to show the plot. + save: Whether to save the plot or a path to save the plot. + + Examples: + >>> import ehrapy as ep + >>> import numpy as np + >>> adata = ep.dt.mimic_2(encoded=True) + >>> ep.pp.knn_impute(adata) + >>> ep.pp.log_norm(adata, offset=1) + >>> ep.pp.neighbors(adata, method="gauss") + >>> ep.tl.leiden(adata, resolution=0.5, key_added="leiden_0_5") + >>> ep.tl.diffmap(adata, n_comps=10) + >>> adata.uns["iroot"] = np.flatnonzero(adata.obs["leiden_0_5"] == "0")[0] + >>> ep.tl.dpt(adata, n_branchings=3) + >>> ep.pl.dpt_timeseries(adata) + + Preview: + .. image:: /_static/docstring_previews/dpt_timeseries.png + """ + sc.pl.dpt_timeseries(adata=adata, color_map=color_map, show=show, save=save, as_heatmap=as_heatmap) + + +def paga( + adata: AnnData, + threshold: float | None = None, + color: str | Mapping[str | int, Mapping[Any, float]] | None = None, + layout: _IGraphLayout | None = None, + layout_kwds: Mapping[str, Any] = MappingProxyType({}), + init_pos: np.ndarray | None = None, + root: int | str | Sequence[int] | None = 0, + labels: str | Sequence[str] | Mapping[str, str] | None = None, + single_component: bool = False, + solid_edges: str = "connectivities", + dashed_edges: str | None = None, + transitions: str | None = None, + fontsize: int | None = None, + fontweight: str = "bold", + fontoutline: int | None = None, + text_kwds: Mapping[str, Any] = MappingProxyType({}), + node_size_scale: float = 1.0, + node_size_power: float = 0.5, + edge_width_scale: float = 1.0, + min_edge_width: float | None = None, + max_edge_width: float | None = None, + arrowsize: int = 30, + title: str | None = None, + left_margin: float = 0.01, + random_state: int | None = 0, + pos: np.ndarray | str | Path | None = None, + normalize_to_color: bool = False, + cmap: str | Colormap = None, + cax: Axes | None = None, + cb_kwds: Mapping[str, Any] = MappingProxyType({}), + frameon: bool | None = None, + add_pos: bool = True, + export_to_gexf: bool = False, + use_raw: bool = True, + plot: bool = True, + show: bool | None = None, + save: bool | str | None = None, + ax: Axes | None = None, +) -> Axes | list[Axes] | None: # pragma: no cover + """Plot the PAGA graph through thresholding low-connectivity edges. + + Compute a coarse-grained layout of the data. Reuse this by passing + `init_pos='paga'` to :func:`~scanpy.tl.umap` or + :func:`~scanpy.tl.draw_graph` and obtain embeddings with more meaningful + global topology [Wolf19]_. + This uses ForceAtlas2 or igraph's layout algorithms for most layouts [Csardi06]_. + + Args: + adata: :class:`~anndata.AnnData` object object containing all observations. + threshold: Do not draw edges for weights below this threshold. Set to 0 if you want + all edges. Discarding low-connectivity edges helps in getting a much clearer picture of the graph. + color: Feature name or `obs` annotation defining the node colors. 
+            Also plots the degree of the abstracted graph when
+            passing {`'degree_dashed'`, `'degree_solid'`}.
+            Can also be used to visualize pie charts at each node in the following form:
+            `{<group name or index>: {<color>: <fraction>, ...}, ...}`. If the fractions
+            do not sum to 1, a new category called `'rest'` colored grey will be created.
+        layout: Plotting layout for computing the node positions, e.g. `'fa'` (ForceAtlas2),
+            `'fr'` (Fruchterman-Reingold), or any valid igraph layout.
+        layout_kwds: Keywords for the layout.
+        init_pos: Two-column array storing the x and y coordinates for initializing the layout.
+        root: If choosing a tree layout, this is the index of the root node or a list
+            of root node indices. If this is a non-empty vector then the supplied
+            node IDs are used as the roots of the trees (or a single tree if the
+            graph is connected). If this is `None` or an empty list, the root
+            vertices are automatically calculated based on topological sorting.
+        labels: The node labels. If `None`, this defaults to the group labels stored in
+            the categorical for which :func:`~scanpy.tl.paga` has been computed.
+        single_component: Restrict to largest connected component.
+        solid_edges: Key for `.uns['paga']` that specifies the matrix that stores the edges to be drawn solid black.
+        dashed_edges: Key for `.uns['paga']` that specifies the matrix that stores the edges
+            to be drawn dashed grey. If `None`, no dashed edges are drawn.
+        transitions: Key for `.uns['paga']` that specifies the matrix that stores the
+            arrows, for instance `'transitions_confidence'`.
+        fontsize: Font size for node labels.
+        fontweight: Weight of the font.
+        fontoutline: Width of the white outline around fonts.
+        text_kwds: Keywords for :meth:`~matplotlib.axes.Axes.text`.
+        node_size_scale: Increase or decrease the size of the nodes.
+        node_size_power: The power with which group sizes influence the radius of the nodes.
+        edge_width_scale: Edge width scale in units of `rcParams['lines.linewidth']`.
+        min_edge_width: Min width of solid edges.
+        max_edge_width: Max width of solid and dashed edges.
+        arrowsize: For directed graphs, choose the size of the arrow head's length and width.
+            See the `mutation_scale` attribute of :class:`~matplotlib.patches.FancyArrowPatch` for more info.
+        title: Provide a title.
+        left_margin: Margin to the left of the plot.
+        random_state: For layouts with random initialization like `'fr'`, change this to use
+            different initial states for the optimization. If `None`, the initial state is not reproducible.
+        pos: Two-column array-like storing the x and y coordinates for drawing.
+            Otherwise, path to a `.gdf` file that has been exported from Gephi or
+            a similar graph visualization software.
+        normalize_to_color: Whether to normalize categorical plots to `color` or the underlying grouping.
+        cmap: The Matplotlib color map.
+        cax: A matplotlib axes object for a potential colorbar.
+        cb_kwds: Keyword arguments for :class:`~matplotlib.colorbar.ColorbarBase`, for instance, `ticks`.
+        frameon: Draw a frame around the PAGA graph.
+        add_pos: Add the positions to `adata.uns['paga']`.
+        export_to_gexf: Export to gexf format to be read by graph visualization programs such as Gephi.
+        use_raw: Whether to use the `raw` attribute of `adata`. Defaults to `True` if `.raw` is present.
+        plot: If `False`, do not create the figure, simply compute the layout.
+        ax: Matplotlib Axis object.
+        show: Whether to show the plot.
+        save: Whether or where to save the plot.
+ + Returns: + A :class:`~matplotlib.axes.Axes` object, if `ax` is `None`, else `None`. + If `return_data`, return the timeseries data in addition to an axes. + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.mimic_2(encoded=True) + >>> ep.pp.knn_impute(adata) + >>> ep.pp.log_norm(adata, offset=1) + >>> ep.pp.neighbors(adata) + >>> ep.tl.leiden(adata, resolution=0.5, key_added="leiden_0_5") + >>> ep.tl.paga(adata, groups="leiden_0_5") + >>> ep.pl.paga( + ... adata, + ... color=["leiden_0_5", "day_28_flg"], + ... cmap=ep.pl.Colormaps.grey_red.value, + ... title=["Leiden 0.5", "Died in less than 28 days"], + ... ) + + Preview: + .. image:: /_static/docstring_previews/paga.png + """ + return sc.pl.paga( + adata=adata, + threshold=threshold, + color=color, + layout=layout, + layout_kwds=layout_kwds, + init_pos=init_pos, + root=root, + labels=labels, + single_component=single_component, + solid_edges=solid_edges, + dashed_edges=dashed_edges, + transitions=transitions, + fontsize=fontsize, + fontweight=fontweight, + fontoutline=fontoutline, + text_kwds=text_kwds, + node_size_scale=node_size_scale, + node_size_power=node_size_power, + edge_width_scale=edge_width_scale, + min_edge_width=min_edge_width, + max_edge_width=max_edge_width, + arrowsize=arrowsize, + title=title, + left_margin=left_margin, + random_state=random_state, + pos=pos, + normalize_to_color=normalize_to_color, + cmap=cmap, + cax=cax, + cb_kwds=cb_kwds, + frameon=frameon, + add_pos=add_pos, + export_to_gexf=export_to_gexf, + use_raw=use_raw, + plot=plot, + show=show, + save=save, + ax=ax, + ) + + +def paga_path( + adata: AnnData, + nodes: Sequence[str | int], + keys: Sequence[str], + use_raw: bool = True, + annotations: Sequence[str] = ("dpt_pseudotime",), + color_map: str | Colormap | None = None, + color_maps_annotations: Mapping[str, str | Colormap] = MappingProxyType({"dpt_pseudotime": "Greys"}), + palette_groups: Sequence[str] | None = None, + n_avg: int = 1, + groups_key: str | None = None, + xlim: tuple[int | None, int | None] = (None, None), + title: str | None = None, + left_margin=None, + ytick_fontsize: int | None = None, + title_fontsize: int | None = None, + show_node_names: bool = True, + show_yticks: bool = True, + show_colorbar: bool = True, + legend_fontsize: int | float | _FontSize | None = None, + legend_fontweight: int | _FontWeight | None = None, + normalize_to_zero_one: bool = False, + as_heatmap: bool = True, + return_data: bool = False, + show: bool | None = None, + save: bool | str | None = None, + ax: Axes | None = None, +) -> Axes | None: # pragma: no cover + """Feature changes along paths in the abstracted graph. + + Args: + adata: :class:`~anndata.AnnData` object object containing all observations. + nodes: A path through nodes of the abstracted graph, that is, names or indices + (within `.categories`) of groups that have been used to run PAGA. + keys: Either variables in `adata.var_names` or annotations in `adata.obs`. They are plotted using `color_map`. + use_raw: Use `adata.raw` for retrieving feature values if it has been set. + annotations: Plot these keys with `color_maps_annotations`. Need to be keys for `adata.obs`. + color_map: Matplotlib colormap. + color_maps_annotations: Color maps for plotting the annotations. Keys of the dictionary must appear in `annotations`. + palette_groups: Usually, use the same `sc.pl.palettes...` as used for coloring the abstracted graph. + n_avg: Number of data points to include in computation of running average. 
+ groups_key: Key of the grouping used to run PAGA. If `None`, defaults to `adata.uns['paga']['groups']`. + xlim: Matplotlib x limit. + title: Plot title. + left_margin: Margin to the left of the plot. + ytick_fontsize: Matplotlib ytick fontsize. + title_fontsize: Font size of the title. + show_node_names: Whether to plot the node names on the nodes bar. + show_yticks: Whether to show the y axis ticks. + show_colorbar: Whether to show the color bar. + legend_fontsize: Font size of the legend. + legend_fontweight: Font weight of the legend. + normalize_to_zero_one: Shift and scale the running average to [0, 1] per feature. + as_heatmap: Whether to display the plot as heatmap. + return_data: Whether to return the timeseries data in addition to the axes if `True`. + ax: Matplotlib Axis object. + show: Whether to show the plot. + save: Whether or where to save the plot. + + Returns: + A :class:`~matplotlib.axes.Axes` object, if `ax` is `None`, else `None`. + If `return_data`, return the timeseries data in addition to an axes. + """ + return sc.pl.paga_path( + adata=adata, + nodes=nodes, + keys=keys, + use_raw=use_raw, + annotations=annotations, + color_map=color_map, + color_maps_annotations=color_maps_annotations, + palette_groups=palette_groups, + n_avg=n_avg, + groups_key=groups_key, + xlim=xlim, + title=title, + left_margin=left_margin, + ytick_fontsize=ytick_fontsize, + title_fontsize=title_fontsize, + show_node_names=show_node_names, + show_yticks=show_yticks, + show_colorbar=show_colorbar, + legend_fontsize=legend_fontsize, + legend_fontweight=legend_fontweight, + normalize_to_zero_one=normalize_to_zero_one, + as_heatmap=as_heatmap, + return_data=return_data, + show=show, + save=save, + ax=ax, + ) + + +def paga_compare( + adata: AnnData, + basis=None, + edges=False, + color=None, + alpha=None, + groups=None, + components=None, + projection: Literal["2d", "3d"] = "2d", + legend_loc="on data", + legend_fontsize: int | float | _FontSize | None = None, + legend_fontweight: int | _FontWeight = "bold", + legend_fontoutline=None, + color_map=None, + palette=None, + frameon=False, + size=None, + title=None, + right_margin=None, + left_margin=0.05, + show=None, + save=None, + title_graph=None, + groups_graph=None, + *, + pos=None, + **paga_graph_params, +) -> Axes | None: # pragma: no cover + """Scatter and PAGA graph side-by-side. + + Consists in a scatter plot and the abstracted graph. See :func:`~ehrapy.pl.paga` for all related parameters. + + Args: + adata: :class:`~anndata.AnnData` object object containing all observations. + basis: String that denotes a plotting tool that computed coordinates. + edges: Whether to display edges. + color: Keys for annotations of observations/patients or features, or a hex color specification, e.g., + `'ann1'`, `'#fe57a1'`, or `['ann1', 'ann2']`. + alpha: Alpha value for the image + groups: Key of the grouping used to run PAGA. If `None`, defaults to `adata.uns['paga']['groups']`. + components: For example, ``'1,2,3'`` means ``[1, 2, 3]``, first, second, third principal component. + projection: One of '2d' or '3d' + legend_loc: Location of the legend. + legend_fontsize: Font size of the legend. + legend_fontweight: Font weight of the legend. + legend_fontoutline: Font outline of the legend. + color_map: Matplotlib color map. + palette: Matplotlib color palette. + frameon: Whether to display the labels frameon. + size: Size of the plot. + title: Title of the plot. + right_margin: Margin to the right of the plot. + left_margin: Margin to the left of the plot. 
+ show: Whether to show the plot. + save: Whether or where to save the plot. + title_graph: The title of the graph. + groups_graph: Graph labels. + pos: Position of the plot. + **paga_graph_params: Keywords for :func:`~ehrapy.pl.paga` and keywords for :func:`~ehrapy.pl.scatter`. + + Returns: + Matplotlib axes. + """ + return sc.pl.paga_compare( + adata=adata, + basis=basis, + edges=edges, + color=color, + alpha=alpha, + groups=groups, + components=components, + projection=projection, + legend_loc=legend_loc, + legend_fontsize=legend_fontsize, + legend_fontweight=legend_fontweight, + legend_fontoutline=legend_fontoutline, + color_map=color_map, + palette=palette, + frameon=frameon, + size=size, + title=title, + right_margin=right_margin, + left_margin=left_margin, + show=show, + save=save, + title_graph=title_graph, + groups_graph=groups_graph, + pos=pos, + **paga_graph_params, + ) + + +@_doc_params(show_save_ax=doc_show_save_ax) +def rank_features_groups( + adata: AnnData, + groups: str | Sequence[str] = None, + n_features: int = 20, + feature_symbols: str | None = None, + key: str | None = "rank_features_groups", + fontsize: int = 8, + ncols: int = 4, + share_y: bool = True, + show: bool | None = None, + save: bool | None = None, + ax: Axes | None = None, + **kwds, +): # pragma: no cover + """Plot ranking of features. + + Args: + adata: Annotated data matrix. + groups: The groups for which to show the feature ranking. + n_features: The number of features to plot. + feature_symbols: Key for field in `.var` that stores feature symbols if you do not want to use `.var_names`. + key: The key of the calculated feature group rankings (default: 'rank_features_groups'). + fontsize: Fontsize for feature names. + ncols: Number of panels shown per row. + share_y: Controls if the y-axis of each panels should be shared. + But passing `sharey=False`, each panel has its own y-axis range. + {show_save_ax} + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.mimic_2(encoded=True) + >>> ep.pp.knn_impute(adata) + >>> ep.pp.log_norm(adata, offset=1) + >>> ep.pp.neighbors(adata) + >>> ep.tl.leiden(adata, resolution=0.15, key_added="leiden_0_5") + >>> ep.tl.rank_features_groups(adata, groupby="leiden_0_5") + >>> ep.pl.rank_features_groups(adata, key="rank_features_groups") + + Preview: + .. image:: /_static/docstring_previews/rank_features_groups.png + """ + return sc.pl.rank_genes_groups( + adata=adata, + groups=groups, + n_genes=n_features, + gene_symbols=feature_symbols, + key=key, + fontsize=fontsize, + ncols=ncols, + sharey=share_y, + show=show, + save=save, + ax=ax, + **kwds, + ) + + +@_doc_params(show_save_ax=doc_show_save_ax) +def rank_features_groups_violin( + adata: AnnData, + groups: Sequence[str] | None = None, + n_features: int = 20, + feature_names: Iterable[str] | None = None, + feature_symbols: str | None = None, + key: str | None = None, + split: bool = True, + scale: str = "width", + strip: bool = True, + jitter: int | float | bool = True, + size: int = 1, + ax: Axes | None = None, + show: bool | None = None, + save: bool | None = None, +): # pragma: no cover + """Plot ranking of features for all tested comparisons as violin plots. + + Args: + adata: Annotated data matrix. + groups: List of group names. + n_features: Number of features to show. Is ignored if `feature_names` is passed. + feature_names: List of features to plot. Is only useful if interested in a custom feature list, + which is not the result of :func:`ehrapy.tl.rank_features_groups`. 
+ feature_symbols: Key for field in `.var` that stores feature symbols if you do not want to + use `.var_names` displayed in the plot. + key: The key of the calculated feature group rankings (default: 'rank_features_groups'). + split: Whether to split the violins or not. + scale: See :func:`~seaborn.violinplot`. + strip: Show a strip plot on top of the violin plot. + jitter: If set to 0, no points are drawn. See :func:`~seaborn.stripplot`. + size: Size of the jitter points. + {show_save_ax} + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.mimic_2(encoded=True) + >>> ep.pp.knn_impute(adata) + >>> ep.pp.log_norm(adata, offset=1) + >>> ep.pp.neighbors(adata) + >>> ep.tl.leiden(adata, resolution=0.15, key_added="leiden_0_5") + >>> ep.tl.rank_features_groups(adata, groupby="leiden_0_5") + >>> ep.pl.rank_features_groups_violin(adata, key="rank_features_groups", n_features=5) + + Preview: + .. image:: /_static/docstring_previews/rank_features_groups_violin_1.png + + .. image:: /_static/docstring_previews/rank_features_groups_violin_2.png + + .. image:: /_static/docstring_previews/rank_features_groups_violin_3.png + + .. image:: /_static/docstring_previews/rank_features_groups_violin_4.png + """ + return sc.pl.rank_genes_groups_violin( + adata=adata, + groups=groups, + n_genes=n_features, + gene_names=feature_names, + gene_symbols=feature_symbols, + use_raw=False, + key=key, + split=split, + scale=scale, + strip=strip, + jitter=jitter, + size=size, + ax=ax, + show=show, + save=save, + ) + + +@_doc_params(show_save_ax=doc_show_save_ax) +def rank_features_groups_stacked_violin( + adata: AnnData, + groups: str | Sequence[str] = None, + n_features: int | None = None, + groupby: str | None = None, + feature_symbols: str | None = None, + *, + var_names: Sequence[str] | Mapping[str, Sequence[str]] | None = None, + min_logfoldchange: float | None = None, + key: str | None = None, + show: bool | None = None, + save: bool | None = None, + return_fig: bool | None = False, + **kwds, +): # pragma: no cover + """Plot ranking of genes using stacked_violin plot. + + Args: + adata: Annotated data matrix. + groups: List of group names. + n_features: Number of features to show. Is ignored if `feature_names` is passed. + groupby: Which key to group the features by. + feature_symbols: Key for field in `.var` that stores feature symbols if you do not want to + use `.var_names` displayed in the plot. + var_names: Feature names. + min_logfoldchange: Minimum log fold change to consider. + key: The key of the calculated feature group rankings (default: 'rank_features_groups'). + show: Whether to show the plot. + save: Where to save the plot. + return_fig: Returns :class:`StackedViolin` object. Useful for fine-tuning the plot. + Takes precedence over `show=False`. + + Returns: + If `return_fig` is `True`, returns a :class:`StackedViolin` object, + else if `show` is false, return axes dict + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.mimic_2(encoded=True) + >>> ep.pp.knn_impute(adata) + >>> ep.pp.log_norm(adata, offset=1) + >>> ep.pp.neighbors(adata) + >>> ep.tl.leiden(adata, resolution=0.15, key_added="leiden_0_5") + >>> ep.tl.rank_features_groups(adata, groupby="leiden_0_5") + >>> ep.pl.rank_features_groups_stacked_violin(adata, key="rank_features_groups", n_features=5) + + Preview: + .. 
image:: /_static/docstring_previews/rank_features_groups_stacked_violin.png + """ + return sc.pl.rank_genes_groups_stacked_violin( + adata=adata, + groups=groups, + n_genes=n_features, + groupby=groupby, + gene_symbols=feature_symbols, + var_names=var_names, + min_logfoldchange=min_logfoldchange, + key=key, + show=show, + save=save, + return_fig=return_fig, + **kwds, + ) + + +def rank_features_groups_heatmap( + adata: AnnData, + groups: str | Sequence[str] = None, + n_features: int | None = None, + groupby: str | None = None, + feature_symbols: str | None = None, + var_names: Sequence[str] | Mapping[str, Sequence[str]] | None = None, + min_logfoldchange: float | None = None, + key: str = None, + show: bool | None = None, + save: bool | None = None, + **kwds, +): # pragma: no cover + """Plot ranking of genes using heatmap plot (see :func:`~ehrapy.pl.heatmap`) + + Args: + adata: Annotated data matrix. + groups: List of group names. + n_features: Number of features to show. Is ignored if `feature_names` is passed. + groupby: Which key to group the features by. + feature_symbols: Key for field in `.var` that stores feature symbols if you do not want to + use `.var_names` displayed in the plot. + var_names: Feature names. + min_logfoldchange: Minimum log fold change to consider. + key: The key of the calculated feature group rankings (default: 'rank_features_groups'). + show: Whether to show the plot. + save: Where to save the plot. + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.mimic_2(encoded=True) + >>> ep.pp.knn_impute(adata) + >>> ep.pp.log_norm(adata, offset=1) + >>> ep.pp.neighbors(adata) + >>> ep.tl.leiden(adata, resolution=0.15, key_added="leiden_0_5") + >>> ep.tl.rank_features_groups(adata, groupby="leiden_0_5") + >>> ep.pl.rank_features_groups_heatmap(adata, key="rank_features_groups") + + Preview: + .. image:: /_static/docstring_previews/rank_features_groups_heatmap.png + """ + return sc.pl.rank_genes_groups_heatmap( + adata=adata, + groups=groups, + n_genes=n_features, + groupby=groupby, + gene_symbols=feature_symbols, + var_names=var_names, + min_logfoldchange=min_logfoldchange, + key=key, + show=show, + save=save, + **kwds, + ) + + +def rank_features_groups_dotplot( + adata: AnnData, + groups: str | Sequence[str] = None, + n_features: int | None = None, + groupby: str | None = None, + values_to_plot: None + | ( + Literal[ + "scores", + "logfoldchanges", + "pvals", + "pvals_adj", + "log10_pvals", + "log10_pvals_adj", + ] + ) = None, + var_names: Sequence[str] | Mapping[str, Sequence[str]] | None = None, + feature_symbols: str | None = None, + min_logfoldchange: float | None = None, + key: str | None = None, + show: bool | None = None, + save: bool | None = None, + return_fig: bool | None = False, + **kwds, +): # pragma: no cover + """Plot ranking of genes using dotplot plot (see :func:`~ehrapy.pl.dotplot`) + + Args: + adata: Annotated data matrix. + groups: List of group names. + n_features: Number of features to show. Is ignored if `feature_names` is passed. + groupby: Which key to group the features by. + feature_symbols: Key for field in `.var` that stores feature symbols if you do not want to + use `.var_names` displayed in the plot. + values_to_plot: Key to plot. One of 'scores', 'logfoldchanges', 'pvals', 'pvals_adj', + 'log10_pvals', 'log10_pvals_adj'. + var_names: Feature names. + min_logfoldchange: Minimum log fold change to consider. + key: The key of the calculated feature group rankings (default: 'rank_features_groups'). + show: Whether to show the plot. 
+        save: Where to save the plot.
+        return_fig: Returns a :class:`DotPlot` object. Useful for fine-tuning the plot.
+            Takes precedence over `show=False`.
+
+    Returns:
+        If `return_fig` is `True`, returns a :class:`DotPlot` object,
+        else if `show` is `False`, returns a dict of axes.
+
+    Example:
+        >>> import ehrapy as ep
+        >>> adata = ep.dt.mimic_2(encoded=True)
+        >>> ep.pp.knn_impute(adata)
+        >>> ep.pp.neighbors(adata)
+        >>> ep.tl.leiden(adata, resolution=0.5, key_added="leiden_0_5")
+        >>> ep.tl.rank_features_groups(adata, groupby="leiden_0_5")
+        >>> ep.pl.rank_features_groups_dotplot(adata, key="rank_features_groups", groupby="leiden_0_5")
+
+    Preview:
+        .. image:: /_static/docstring_previews/rank_features_groups_dotplot.png
+    """
+    return sc.pl.rank_genes_groups_dotplot(
+        adata=adata,
+        groups=groups,
+        n_genes=n_features,
+        groupby=groupby,
+        values_to_plot=values_to_plot,
+        var_names=var_names,
+        gene_symbols=feature_symbols,
+        min_logfoldchange=min_logfoldchange,
+        key=key,
+        show=show,
+        save=save,
+        return_fig=return_fig,
+        colorbar_title="Mean value in group",
+        **kwds,
+    )
+
+
+def rank_features_groups_matrixplot(
+    adata: AnnData,
+    groups: str | Sequence[str] = None,
+    n_features: int | None = None,
+    groupby: str | None = None,
+    values_to_plot: None
+    | (
+        Literal[
+            "scores",
+            "logfoldchanges",
+            "pvals",
+            "pvals_adj",
+            "log10_pvals",
+            "log10_pvals_adj",
+        ]
+    ) = None,
+    var_names: Sequence[str] | Mapping[str, Sequence[str]] | None = None,
+    feature_symbols: str | None = None,
+    min_logfoldchange: float | None = None,
+    key: str | None = "rank_features_groups",
+    show: bool | None = None,
+    save: bool | None = None,
+    return_fig: bool | None = False,
+    **kwds,
+):  # pragma: no cover
+    """Plot ranking of features using a matrixplot (see :func:`~ehrapy.pl.matrixplot`).
+
+    Args:
+        adata: Annotated data matrix.
+        groups: List of group names.
+        n_features: Number of features to show. Is ignored if `var_names` is passed.
+        groupby: Which key to group the features by.
+        feature_symbols: Key for field in `.var` that stores feature symbols if you do not want to
+            use `.var_names` displayed in the plot.
+        values_to_plot: Key to plot. One of 'scores', 'logfoldchanges', 'pvals', 'pvals_adj',
+            'log10_pvals', 'log10_pvals_adj'.
+        var_names: Feature names.
+        min_logfoldchange: Minimum log fold change to consider.
+        key: The key of the calculated feature group rankings (default: 'rank_features_groups').
+        show: Whether to show the plot.
+        save: Where to save the plot.
+        return_fig: Returns a :class:`MatrixPlot` object. Useful for fine-tuning the plot.
+            Takes precedence over `show=False`.
+
+    Returns:
+        If `return_fig` is `True`, returns a :class:`MatrixPlot` object,
+        else if `show` is `False`, returns a dict of axes.
+
+    Example:
+        >>> import ehrapy as ep
+        >>> adata = ep.dt.mimic_2(encoded=True)
+        >>> ep.pp.knn_impute(adata)
+        >>> ep.pp.neighbors(adata)
+        >>> ep.tl.leiden(adata, resolution=0.5, key_added="leiden_0_5")
+        >>> ep.tl.rank_features_groups(adata, groupby="leiden_0_5")
+        >>> ep.pl.rank_features_groups_matrixplot(adata, key="rank_features_groups", groupby="leiden_0_5")
+
+    Preview:
+        ..
image:: /_static/docstring_previews/rank_features_groups_matrixplot.png + + """ + return sc.pl.rank_genes_groups_matrixplot( + adata=adata, + groups=groups, + n_genes=n_features, + groupby=groupby, + values_to_plot=values_to_plot, + var_names=var_names, + gene_symbols=feature_symbols, + min_logfoldchange=min_logfoldchange, + key=key, + show=show, + save=save, + return_fig=return_fig, + **kwds, + ) + + +def rank_features_groups_tracksplot( + adata: AnnData, + groups: str | Sequence[str] = None, + n_features: int | None = None, + groupby: str | None = None, + var_names: Sequence[str] | Mapping[str, Sequence[str]] | None = None, + feature_symbols: str | None = None, + min_logfoldchange: float | None = None, + key: str | None = None, + show: bool | None = None, + save: bool | None = None, + **kwds, +): # pragma: no cover + """Plot ranking of genes using tracksplot plot (see :func:`~ehrapy.pl.tracksplot`) + + Args: + adata: Annotated data matrix. + groups: List of group names. + n_features: Number of features to show. Is ignored if `feature_names` is passed. + groupby: Which key to group the features by. + feature_symbols: Key for field in `.var` that stores feature symbols if you do not want to + use `.var_names` displayed in the plot. + var_names: Feature names. + min_logfoldchange: Minimum log fold change to consider. + key: The key of the calculated feature group rankings (default: 'rank_features_groups'). + show: Whether to show the plot. + save: Where to save the plot. + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.mimic_2(encoded=True) + >>> ep.pp.knn_impute(adata) + >>> ep.pp.log_norm(adata, offset=1) + >>> ep.pp.neighbors(adata) + >>> ep.tl.leiden(adata, resolution=0.15, key_added="leiden_0_5") + >>> ep.tl.rank_features_groups(adata, groupby="leiden_0_5") + >>> ep.pl.rank_features_groups_tracksplot(adata, key="rank_features_groups") + + Preview: + .. 
image:: /_static/docstring_previews/rank_features_groups_tracksplot.png
+    """
+    return sc.pl.rank_genes_groups_tracksplot(
+        adata=adata,
+        groups=groups,
+        n_genes=n_features,
+        groupby=groupby,
+        var_names=var_names,
+        gene_symbols=feature_symbols,
+        min_logfoldchange=min_logfoldchange,
+        key=key,
+        show=show,
+        save=save,
+        **kwds,
+    )
diff --git a/data/ehrapy/plot/_survival_analysis.py b/data/ehrapy/plot/_survival_analysis.py
new file mode 100644
index 0000000000000000000000000000000000000000..717f9202709e1e34ca89074f46bc639a19374541
--- /dev/null
+++ b/data/ehrapy/plot/_survival_analysis.py
@@ -0,0 +1,297 @@
+from __future__ import annotations
+
+import warnings
+from typing import TYPE_CHECKING
+
+import matplotlib.pyplot as plt
+import numpy as np
+from numpy import ndarray
+
+from ehrapy.plot import scatter
+
+if TYPE_CHECKING:
+    from collections.abc import Sequence
+
+    from anndata import AnnData
+    from lifelines import KaplanMeierFitter
+    from matplotlib.axes import Axes
+    from statsmodels.regression.linear_model import RegressionResults
+
+
+def ols(
+    adata: AnnData | None = None,
+    x: str | None = None,
+    y: str | None = None,
+    scatter_plot: bool | None = True,
+    ols_results: list[RegressionResults] | None = None,
+    ols_color: list[str] | None = None,
+    xlabel: str | None = None,
+    ylabel: str | None = None,
+    figsize: tuple[float, float] | None = None,
+    lines: list[tuple[ndarray | float, ndarray | float]] | None = None,
+    lines_color: list[str] | None = None,
+    lines_style: list[str] | None = None,
+    lines_label: list[str] | None = None,
+    xlim: tuple[float, float] | None = None,
+    ylim: tuple[float, float] | None = None,
+    show: bool | None = None,
+    ax: Axes | None = None,
+    title: str | None = None,
+    **kwds,
+) -> Axes | None:
+    """Plots an Ordinary Least Squares (OLS) model result as a scatter plot with regression and custom lines.
+
+    Args:
+        adata: :class:`~anndata.AnnData` object containing all observations.
+        x: x coordinate, for scatter plotting.
+        y: y coordinate, for scatter plotting.
+        scatter_plot: Whether to show a scatter plot.
+        ols_results: List of RegressionResults from ehrapy.tl.ols. Example: [result_1, result_2]
+        ols_color: List of colors for each ols_results. Example: ['red', 'blue'].
+        xlabel: The x-axis label text.
+        ylabel: The y-axis label text.
+        figsize: Width, height in inches.
+        lines: List of tuples of (slope, intercept) or (x, y). Plot lines by slope and intercept or by data points.
+            Example: plot two lines (y = x + 2 and y = 2*x + 1): [(1, 2), (2, 1)]
+        lines_color: List of colors for each line. Example: ['red', 'blue']
+        lines_style: List of line styles for each line. Example: ['-', '--']
+        lines_label: List of line labels for each line. Example: ['Line1', 'Line2']
+        xlim: Set the x-axis view limits. Required when only plotting lines using slope and intercept.
+        ylim: Set the y-axis view limits. Required when only plotting lines using slope and intercept.
+        show: Show the plot, do not return axis.
+        ax: A matplotlib axes object. Only works if plotting a single component.
+        title: Set the title of the plot.
+
+    Examples:
+        >>> import ehrapy as ep
+        >>> adata = ep.dt.mimic_2(encoded=False)
+        >>> co2_lm_result = ep.tl.ols(
+        ...     adata, var_names=["pco2_first", "tco2_first"], formula="tco2_first ~ pco2_first", missing="drop"
+        ... ).fit()
+        >>> ep.pl.ols(
+        ...     adata,
+        ...     x="pco2_first",
+        ...     y="tco2_first",
+        ...     ols_results=[co2_lm_result],
+        ...     ols_color=["red"],
+        ...     xlabel="PCO2",
+        ...     ylabel="TCO2",
+        ... )
+
+        .. image:: /_static/docstring_previews/ols_plot_1.png
+
+        >>> import ehrapy as ep
+        >>> adata = ep.dt.mimic_2(encoded=False)
+        >>> ep.pl.ols(
+        ...     adata,
+        ...     x="pco2_first",
+        ...     y="tco2_first",
+        ...     lines=[(0.25, 10), (0.3, 20)],
+        ...     lines_color=["red", "blue"],
+        ...     lines_style=["-", ":"],
+        ...     lines_label=["Line1", "Line2"],
+        ... )
+
+        .. image:: /_static/docstring_previews/ols_plot_2.png
+
+        >>> import ehrapy as ep
+        >>> ep.pl.ols(
+        ...     lines=[(0.25, 10), (0.3, 20)],
+        ...     lines_color=["red", "blue"],
+        ...     lines_style=["-", ":"],
+        ...     lines_label=["Line1", "Line2"],
+        ...     xlim=(0, 150),
+        ...     ylim=(0, 50),
+        ... )
+
+        .. image:: /_static/docstring_previews/ols_plot_3.png
+    """
+    if ax is None:
+        _, ax = plt.subplots(figsize=figsize)
+    if xlim is not None:
+        plt.xlim(xlim)
+    if ylim is not None:
+        plt.ylim(ylim)
+    if ols_color is None and ols_results is not None:
+        ols_color = [None] * len(ols_results)
+    if lines_color is None and lines is not None:
+        lines_color = [None] * len(lines)
+    if lines_style is None and lines is not None:
+        lines_style = [None] * len(lines)
+    if lines_label is None and lines is not None:
+        lines_label = [None] * len(lines)
+    if adata is not None and x is not None and y is not None:
+        x_processed = np.array(adata[:, x].X).astype(float)
+        x_processed = x_processed[~np.isnan(x_processed)]
+        if scatter_plot is True:
+            ax = scatter(adata, x=x, y=y, show=False, ax=ax, **kwds)
+        if ols_results is not None:
+            for i, ols_result in enumerate(ols_results):
+                ax.plot(x_processed, ols_result.predict(), color=ols_color[i])
+
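+    # Each entry in `lines` is either a (slope, intercept) pair, drawn across the current
+    # x-limits, or a pair of (x, y) coordinate arrays; scalars are detected via `np.ndim(...) == 0`.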
ylabel="TCO2", + ... ) + + .. image:: /_static/docstring_previews/ols_plot_1.png + + >>> import ehrapy as ep + >>> adata = ep.dt.mimic_2(encoded=False) + >>> ep.pl.ols(adata, x='pco2_first', y='tco2_first', lines=[(0.25, 10), (0.3, 20)], + >>> lines_color=['red', 'blue'], lines_style=['-', ':'], lines_label=['Line1', 'Line2']) + + .. image:: /_static/docstring_previews/ols_plot_2.png + + >>> import ehrapy as ep + >>> ep.pl.ols(lines=[(0.25, 10), (0.3, 20)], lines_color=['red', 'blue'], lines_style=['-', ':'], + >>> lines_label=['Line1', 'Line2'], xlim=(0, 150), ylim=(0, 50)) + + .. image:: /_static/docstring_previews/ols_plot_3.png + """ + if ax is None: + _, ax = plt.subplots(figsize=figsize) + if xlim is not None: + plt.xlim(xlim) + if ylim is not None: + plt.ylim(ylim) + if ols_color is None and ols_results is not None: + ols_color = [None] * len(ols_results) + if lines_color is None and lines is not None: + lines_color = [None] * len(lines) + if lines_style is None and lines is not None: + lines_style = [None] * len(lines) + if lines_label is None and lines is not None: + lines_label = [None] * len(lines) + if adata is not None and x is not None and y is not None: + x_processed = np.array(adata[:, x].X).astype(float) + x_processed = x_processed[~np.isnan(x_processed)] + if scatter_plot is True: + ax = scatter(adata, x=x, y=y, show=False, ax=ax, **kwds) + if ols_results is not None: + for i, ols_result in enumerate(ols_results): + ax.plot(x_processed, ols_result.predict(), color=ols_color[i]) + + if lines is not None: + for i, line in enumerate(lines): + a, b = line + if np.ndim(a) == 0 and np.ndim(b) == 0: + line_x = np.array(ax.get_xlim()) + line_y = a * line_x + b + ax.plot(line_x, line_y, linestyle=lines_style[i], color=lines_color[i], label=lines_label[i]) + else: + ax.plot(a, b, lines_style[i], color=lines_color[i], label=lines_label[i]) + plt.xlabel(xlabel) + plt.ylabel(ylabel) + if title: + plt.title(title) + if lines_label is not None and lines_label[0] is not None: + plt.legend() + + if not show: + return ax + else: + return None + + +def kmf( + kmfs: Sequence[KaplanMeierFitter], + ci_alpha: list[float] | None = None, + ci_force_lines: list[Boolean] | None = None, + ci_show: list[Boolean] | None = None, + ci_legend: list[Boolean] | None = None, + at_risk_counts: list[Boolean] | None = None, + color: list[str] | None | None = None, + grid: Boolean | None = False, + xlim: tuple[float, float] | None = None, + ylim: tuple[float, float] | None = None, + xlabel: str | None = None, + ylabel: str | None = None, + figsize: tuple[float, float] | None = None, + show: bool | None = None, + title: str | None = None, +) -> Axes | None: + warnings.warn( + "This function is deprecated and will be removed in the next release. 
+    return kaplan_meier(
+        kmfs=kmfs,
+        ci_alpha=ci_alpha,
+        ci_force_lines=ci_force_lines,
+        ci_show=ci_show,
+        ci_legend=ci_legend,
+        at_risk_counts=at_risk_counts,
+        color=color,
+        grid=grid,
+        xlim=xlim,
+        ylim=ylim,
+        xlabel=xlabel,
+        ylabel=ylabel,
+        figsize=figsize,
+        show=show,
+        title=title,
+    )
+
+
+def kaplan_meier(
+    kmfs: Sequence[KaplanMeierFitter],
+    ci_alpha: list[float] | None = None,
+    ci_force_lines: list[bool] | None = None,
+    ci_show: list[bool] | None = None,
+    ci_legend: list[bool] | None = None,
+    at_risk_counts: list[bool] | None = None,
+    color: list[str] | None = None,
+    grid: bool | None = False,
+    xlim: tuple[float, float] | None = None,
+    ylim: tuple[float, float] | None = None,
+    xlabel: str | None = None,
+    ylabel: str | None = None,
+    figsize: tuple[float, float] | None = None,
+    show: bool | None = None,
+    title: str | None = None,
+) -> Axes | None:
+    """Plots a pretty figure of the fitted KaplanMeierFitter model(s).
+
+    See https://lifelines.readthedocs.io/en/latest/fitters/univariate/KaplanMeierFitter.html
+
+    Args:
+        kmfs: Iterable of fitted KaplanMeierFitter objects.
+        ci_alpha: The transparency level of the confidence interval. If more than one kmf is passed, this should be a list.
+        ci_force_lines: Force the confidence intervals to be line plots (versus default shaded areas).
+            If more than one kmf is passed, this should be a list.
+        ci_show: Show confidence intervals. If more than one kmf is passed, this should be a list.
+        ci_legend: If `ci_force_lines` is `True`, this is a boolean flag to add the lines' labels to the legend.
+            If more than one kmf is passed, this should be a list.
+        at_risk_counts: Show group sizes at time points. If more than one kmf is passed, this should be a list.
+        color: List of colors for each kmf. If more than one kmf is passed, this should be a list.
+        grid: If `True`, plot grid lines.
+        xlim: Set the x-axis view limits.
+        ylim: Set the y-axis view limits.
+        xlabel: The x-axis label text.
+        ylabel: The y-axis label text.
+        figsize: Width, height in inches.
+        show: Show the plot, do not return axis.
+        title: Set the title of the plot.
+
+    Examples:
+        >>> import ehrapy as ep
+        >>> import numpy as np
+        >>> adata = ep.dt.mimic_2(encoded=False)
+
+        In the MIMIC-II database, `censor_flg` encodes death or censoring (binary: 0 = death, 1 = censored),
+        while KaplanMeierFitter expects `event_observed` to be `True` if the death was observed and
+        `False` if the event was lost (right-censored). We therefore flip `censor_flg` before
+        passing it to KaplanMeierFitter.
+
+        >>> adata[:, ["censor_flg"]].X = np.where(adata[:, ["censor_flg"]].X == 0, 1, 0)
+        >>> kmf = ep.tl.kaplan_meier(adata, "mort_day_censored", "censor_flg")
+        >>> ep.pl.kaplan_meier(
+        ...     [kmf], color=["r"], xlim=[0, 700], ylim=[0, 1], xlabel="Days", ylabel="Proportion Survived", show=True
+        ... )
+
+        .. image:: /_static/docstring_previews/kmf_plot_1.png
+
+        >>> groups = adata[:, ["service_unit"]].X
+        >>> adata_ficu = adata[groups == "FICU"]
+        >>> adata_micu = adata[groups == "MICU"]
+        >>> adata_sicu = adata[groups == "SICU"]
+        >>> kmf_1 = ep.tl.kaplan_meier(adata_ficu, "mort_day_censored", "censor_flg", label="FICU")
+        >>> kmf_2 = ep.tl.kaplan_meier(adata_micu, "mort_day_censored", "censor_flg", label="MICU")
+        >>> kmf_3 = ep.tl.kaplan_meier(adata_sicu, "mort_day_censored", "censor_flg", label="SICU")
+        >>> ep.pl.kaplan_meier(
+        ...     [kmf_1, kmf_2, kmf_3],
+        ...     ci_show=[False, False, False],
+        ...     color=["k", "r", "g"],
+        ...     xlim=[0, 750],
+        ...     ylim=[0, 1],
+        ...     xlabel="Days",
+        ...     ylabel="Proportion Survived",
+        ... )
+
+        .. image:: /_static/docstring_previews/kmf_plot_2.png
+    """
+    if ci_alpha is None:
+        ci_alpha = [0.3] * len(kmfs)
+    if ci_force_lines is None:
+        ci_force_lines = [False] * len(kmfs)
+    if ci_show is None:
+        ci_show = [True] * len(kmfs)
+    if ci_legend is None:
+        ci_legend = [False] * len(kmfs)
+    if at_risk_counts is None:
+        at_risk_counts = [False] * len(kmfs)
+    if color is None:
+        color = [None] * len(kmfs)
+    plt.figure(figsize=figsize)
+
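+    # The first fitter creates the Axes; every subsequent fitter draws onto that same Axes,
+    # so all survival curves end up in a single shared plot.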
image:: /_static/docstring_previews/kmf_plot_1.png + + >>> groups = adata[:, ["service_unit"]].X + >>> adata_ficu = adata[groups == "FICU"] + >>> adata_micu = adata[groups == "MICU"] + >>> adata_sicu = adata[groups == "SICU"] + >>> kmf_1 = ep.tl.kaplan_meier(adata_ficu, "mort_day_censored", "censor_flg", label="FICU") + >>> kmf_2 = ep.tl.kaplan_meier(adata_micu, "mort_day_censored", "censor_flg", label="MICU") + >>> kmf_3 = ep.tl.kaplan_meier(adata_sicu, "mort_day_censored", "censor_flg", label="SICU") + >>> ep.pl.kaplan_meier([kmf_1, kmf_2, kmf_3], ci_show=[False,False,False], color=['k','r', 'g'], + >>> xlim=[0, 750], ylim=[0, 1], xlabel="Days", ylabel="Proportion Survived") + + .. image:: /_static/docstring_previews/kmf_plot_2.png + """ + if ci_alpha is None: + ci_alpha = [0.3] * len(kmfs) + if ci_force_lines is None: + ci_force_lines = [False] * len(kmfs) + if ci_show is None: + ci_show = [True] * len(kmfs) + if ci_legend is None: + ci_legend = [False] * len(kmfs) + if at_risk_counts is None: + at_risk_counts = [False] * len(kmfs) + if color is None: + color = [None] * len(kmfs) + plt.figure(figsize=figsize) + + for i, kmf in enumerate(kmfs): + if i == 0: + ax = kmf.plot_survival_function( + ci_alpha=ci_alpha[i], + ci_force_lines=ci_force_lines[i], + ci_show=ci_show[i], + ci_legend=ci_legend[i], + at_risk_counts=at_risk_counts[i], + color=color[i], + ) + else: + ax = kmf.plot_survival_function( + ax=ax, + ci_alpha=ci_alpha[i], + ci_force_lines=ci_force_lines[i], + ci_show=ci_show[i], + ci_legend=ci_legend[i], + at_risk_counts=at_risk_counts[i], + color=color[i], + ) + ax.grid(grid) + plt.xlim(xlim) + plt.ylim(ylim) + plt.xlabel(xlabel) + plt.ylabel(ylabel) + if title: + plt.title(title) + + if not show: + return ax + else: + return None diff --git a/data/ehrapy/plot/causal_inference/__init__.py b/data/ehrapy/plot/causal_inference/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/data/ehrapy/plot/causal_inference/_dowhy.py b/data/ehrapy/plot/causal_inference/_dowhy.py new file mode 100644 index 0000000000000000000000000000000000000000..99fe04fb8cca26551c28af438573856c7a3c018c --- /dev/null +++ b/data/ehrapy/plot/causal_inference/_dowhy.py @@ -0,0 +1,59 @@ +import dowhy +import matplotlib +import matplotlib.pyplot as plt +import numpy as np + + +def causal_effect(estimate: dowhy.causal_estimator.CausalEstimate, precision: int = 3) -> matplotlib.axes: + """Plot the causal effect estimate. + + This function plots the causal effect of treatment on outcome, assuming a + linear relationship between the two. It uses the data, treatment name, + outcome name, and estimate object to determine the data to plot. It then + creates a plot with the treatment on the x-axis and the outcome on the + y-axis. The observed data is plotted as gray dots, and the causal variation + is plotted as a black line. The function then returns the plot. + + Args: + estimate: The causal effect estimate to plot. + precision: The number of decimal places to round the estimate to in the plot title. + + Returns: + matplotlib.axes.Axes: The matplotlib Axes object containing the plot. + + Raises: + TypeError: If the `estimate` parameter is not an instance of `dowhy.causal_estimator.CausalEstimate`. + ValueError: If the estimation method in `estimate` is not supported for this plot type. 
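A hedged usage sketch for `causal_effect` (assuming a DoWhy `CausalModel` called `model` and an identified estimand already exist, and that the function is exposed as `ep.pl.causal_effect`; the names here are illustrative):

>>> estimate = model.estimate_effect(identified_estimand, method_name="backdoor.linear_regression")
>>> ep.pl.causal_effect(estimate)  # scatter of observed data plus the fitted causal slope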
+ """ + if not isinstance(estimate, dowhy.causal_estimator.CausalEstimate): + raise TypeError("Parameter 'estimate' must be a dowhy.causal_estimator.CausalEstimate object") + + if "LinearRegressionEstimator" not in str(estimate.params["estimator_class"]): + raise ValueError(f"Estimation method {estimate.params['estimator_class']} is not supported for this plot type.") + + treatment_name = estimate.estimator._target_estimand.treatment_variable[0] + outcome_name = estimate.estimator._target_estimand.outcome_variable[0] + data = estimate._data + treatment = data[treatment_name].values + outcome = data[outcome_name] + + _, ax = plt.subplots() + x_min = 0 + x_max = max(treatment) + if isinstance(x_max, np.ndarray) and len(x_max) == 1: + x_max = x_max[0] + y_min = estimate.params["intercept"] + y_max = y_min + estimate.value * (x_max - x_min) + if isinstance(y_max, np.ndarray) and len(y_max) == 1: + y_max = y_max[0] + ax.scatter(treatment, outcome, c="gray", marker="o", label="Observed data") + ax.plot([x_min, x_max], [y_min, y_max], c="black", ls="solid", lw=4, label="Causal variation") + ax.set_ylim(0, max(outcome)) + ax.set_xlim(0, x_max) + ax.set_title(r"DoWhy estimate $\rho$ (slope) = " + str(round(estimate.value, precision))) + ax.legend(loc="upper left") + ax.set_xlabel(treatment_name) + ax.set_ylabel(outcome_name) + plt.tight_layout() + + return ax diff --git a/data/ehrapy/plot/feature_ranking/__init__.py b/data/ehrapy/plot/feature_ranking/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/data/ehrapy/plot/feature_ranking/_feature_importances.py b/data/ehrapy/plot/feature_ranking/_feature_importances.py new file mode 100644 index 0000000000000000000000000000000000000000..cd2d5097e4ee41f45c15556561fed92dedc1dd3b --- /dev/null +++ b/data/ehrapy/plot/feature_ranking/_feature_importances.py @@ -0,0 +1,72 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +import matplotlib.pyplot as plt +import pandas as pd +import seaborn as sns + +if TYPE_CHECKING: + from anndata import AnnData + from matplotlib.axes import Axes + + +def rank_features_supervised( + adata: AnnData, + key: str = "feature_importances", + n_features: int = 10, + ax: Axes | None = None, + show: bool = True, + save: str | None = None, + **kwargs, +) -> Axes | None: + """Plot features with greatest absolute importances as a barplot. + + Args: + adata: :class:`~anndata.AnnData` object storing the data. A key in adata.var should contain the feature + importances, calculated beforehand. + key: The key in adata.var to use for feature importances. + n_features: The number of features to plot. + ax: A matplotlib axes object to plot on. If `None`, a new figure will be created. + show: If `True`, show the figure. If `False`, return the axes object. + save: Path to save the figure. If `None`, the figure will not be saved. + **kwargs: Additional arguments passed to `seaborn.barplot`. + + Returns: + If `show == False` a `matplotlib.axes.Axes` object, else `None`. + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.mimic_2(encoded=False) + >>> ep.pp.knn_impute(adata, n_neighbors=5) + >>> input_features = [ + ... feat for feat in adata.var_names if feat not in {"service_unit", "day_icu_intime", "tco2_first"} + ... ] + >>> ep.tl.rank_features_supervised(adata, "tco2_first", "rf", input_features=input_features) + >>> ep.pl.rank_features_supervised(adata) + + .. 
image:: /_static/docstring_previews/feature_importances.png + """ + if key not in adata.var.keys(): + raise ValueError( + f"Key {key} not found in adata.var. Make sure to calculate feature importances first with ep.tl.feature_importances." + ) + + df = pd.DataFrame({"importance": adata.var[key]}, index=adata.var_names) + df["absolute_importance"] = df["importance"].abs() + df = df.sort_values("absolute_importance", ascending=False) + + if ax is None: + fig, ax = plt.subplots() + ax = sns.barplot(x=df["importance"][:n_features], y=df.index[:n_features], orient="h", ax=ax, **kwargs) + plt.ylabel("Feature") + plt.xlabel("Importance") + plt.tight_layout() + + if save: + plt.savefig(save, bbox_inches="tight") + if show: + plt.show() + return None + else: + return ax diff --git a/data/ehrapy/preprocessing/__init__.py b/data/ehrapy/preprocessing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2d9b3e6ca8b964fbc4cbe6502df31a43960adda7 --- /dev/null +++ b/data/ehrapy/preprocessing/__init__.py @@ -0,0 +1,51 @@ +from ehrapy.preprocessing._balanced_sampling import balanced_sample +from ehrapy.preprocessing._bias import detect_bias +from ehrapy.preprocessing._encoding import encode +from ehrapy.preprocessing._highly_variable_features import highly_variable_features +from ehrapy.preprocessing._imputation import ( + explicit_impute, + knn_impute, + mice_forest_impute, + miss_forest_impute, + simple_impute, +) +from ehrapy.preprocessing._normalization import ( + log_norm, + maxabs_norm, + minmax_norm, + offset_negative_values, + power_norm, + quantile_norm, + robust_scale_norm, + scale_norm, +) +from ehrapy.preprocessing._outliers import clip_quantile, winsorize +from ehrapy.preprocessing._quality_control import mcar_test, qc_lab_measurements, qc_metrics +from ehrapy.preprocessing._scanpy_pp_api import * # noqa: F403 +from ehrapy.preprocessing._summarize_measurements import summarize_measurements + +__all__ = [ + "balanced_sample", + "detect_bias", + "encode", + "highly_variable_features", + "explicit_impute", + "knn_impute", + "mice_forest_impute", + "miss_forest_impute", + "simple_impute", + "log_norm", + "maxabs_norm", + "minmax_norm", + "offset_negative_values", + "power_norm", + "quantile_norm", + "robust_scale_norm", + "scale_norm", + "clip_quantile", + "winsorize", + "mcar_test", + "qc_lab_measurements", + "qc_metrics", + "summarize_measurements", +] diff --git a/data/ehrapy/preprocessing/_balanced_sampling.py b/data/ehrapy/preprocessing/_balanced_sampling.py new file mode 100644 index 0000000000000000000000000000000000000000..5c10b9b525f02640751f864e361c5f6d9aaeafda --- /dev/null +++ b/data/ehrapy/preprocessing/_balanced_sampling.py @@ -0,0 +1,82 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING, Literal + +if TYPE_CHECKING: + from anndata import AnnData + +from anndata import AnnData +from imblearn.over_sampling import RandomOverSampler +from imblearn.under_sampling import RandomUnderSampler + + +def balanced_sample( + adata: AnnData, + *, + key: str, + random_state: int = 0, + method: Literal["RandomUnderSampler", "RandomOverSampler"] = "RandomUnderSampler", + sampler_kwargs: dict = None, + copy: bool = False, +) -> AnnData: + """Balancing groups in the dataset. + + Balancing groups in the dataset based on group members in `.obs[key]` using the `imbalanced-learn `_ package. + Currently, supports `RandomUnderSampler` and `RandomOverSampler`. 
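Mirroring the implementation below, a minimal sketch of how the imbalanced-learn samplers drive the subsetting (assuming an encoded AnnData `adata` with a categorical `age` column in `.obs`):

>>> from imblearn.under_sampling import RandomUnderSampler
>>> sampler = RandomUnderSampler(random_state=0)
>>> sampler.fit_resample(adata.X, adata.obs["age"])  # fit the sampler; the resampled return values are not needed here
>>> adata_balanced = adata[sampler.sample_indices_].copy()  # subset via the sampler's kept indices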
+ + Note that `RandomOverSampler `_ + only replicates observations of the minority groups, which distorts several downstream analyses, most prominently neighborhood calculations and any downstream analyses that depend on them. + The `RandomUnderSampler `_ + by default undersamples the majority group without replacement and therefore avoids these issues of replicated observations. + + Args: + adata: The annotated data matrix of shape `n_obs` × `n_vars`. + key: The key in `adata.obs` that contains the group information. + random_state: Random seed. + method: The method to use for balancing. + sampler_kwargs: Keyword arguments for the sampler, see the `imbalanced-learn` documentation for options. + copy: If True, return a copy of the balanced data. + + Returns: + A new `AnnData` object with the balanced groups. + + Examples: + >>> import ehrapy as ep + >>> adata = ep.data.diabetes_130_fairlearn(columns_obs_only=["age"]) + >>> adata.obs.age.value_counts() + age + 'Over 60 years' 68541 + '30-60 years' 30716 + '30 years or younger' 2509 + >>> adata_balanced = ep.pp.balanced_sample(adata, key="age") + >>> adata_balanced.obs.age.value_counts() + age + '30 years or younger' 2509 + '30-60 years' 2509 + 'Over 60 years' 2509 + """ + if not isinstance(adata, AnnData): + raise ValueError(f"Input data is not an AnnData object but of type {type(adata)}") + + if sampler_kwargs is None: + sampler_kwargs = {"random_state": random_state} + else: + sampler_kwargs["random_state"] = random_state + + if method == "RandomUnderSampler": + sampler = RandomUnderSampler(**sampler_kwargs) + elif method == "RandomOverSampler": + sampler = RandomOverSampler(**sampler_kwargs) + else: + raise ValueError(f"Unknown sampling method: {method}") + + if key in adata.obs.keys(): + use_label = adata.obs[key] + else: + raise ValueError(f"key not in adata.obs: {key}") + + sampler.fit_resample(adata.X, use_label) + + if copy: + return adata[sampler.sample_indices_].copy() + else: + adata._inplace_subset_obs(sampler.sample_indices_) diff --git a/data/ehrapy/preprocessing/_bias.py b/data/ehrapy/preprocessing/_bias.py new file mode 100644 index 0000000000000000000000000000000000000000..1a1f8b1c4bf6fed9d85ad841203ab0b2eb6497de --- /dev/null +++ b/data/ehrapy/preprocessing/_bias.py @@ -0,0 +1,312 @@ +from __future__ import annotations + +import itertools +from typing import TYPE_CHECKING, Literal + +import numpy as np +import pandas as pd + +from ehrapy.anndata import anndata_to_df, check_feature_types +from ehrapy.anndata._constants import CATEGORICAL_TAG, DATE_TAG, FEATURE_TYPE_KEY, NUMERIC_TAG + +if TYPE_CHECKING: + from collections.abc import Iterable + + from anndata import AnnData + + +@check_feature_types +def detect_bias( + adata: AnnData, + sensitive_features: Iterable[str] | Literal["all"], + *, + run_feature_importances: bool | None = None, + corr_threshold: float = 0.5, + smd_threshold: float = 0.5, + categorical_factor_threshold: float = 2, + feature_importance_threshold: float = 0.1, + prediction_confidence_threshold: float = 0.5, + corr_method: Literal["pearson", "spearman"] = "spearman", + layer: str | None = None, + copy: bool = False, +) -> dict[str, pd.DataFrame] | tuple[dict[str, pd.DataFrame], AnnData]: + """Detects biases in the data using feature correlations, standardized mean differences, and feature importances. + + Detects biases with respect to sensitive features, which can be either a specified subset of features or all features in adata.var.
+ The method detects biases by computing: + + - pairwise correlations between features + - standardized mean differences for numeric features between groups of sensitive features + - value counts of categorical features between groups of sensitive features + - feature importances for predicting one feature with another + + Results of the computations are stored in var, varp, and uns of the adata object. + Values that exceed the specified thresholds are considered of interest and returned in the results dictionary. + Be aware that the results depend on the encoding of the data. E.g. when using one-hot encoding, each group of a categorical feature will + be treated as a separate feature, which can lead to an increased number of detected biases. Please take this into consideration when + interpreting the results. + + Args: + adata: An annotated data matrix containing EHR data. Encoded features are required for bias detection. + sensitive_features: Sensitive features to consider for bias detection. If set to "all", all features in adata.var will be considered. + run_feature_importances: Whether to run feature importances for detecting bias. If set to None, the function will run feature importances if + sensitive_features is not set to "all", as this can be computationally expensive. + corr_threshold: The threshold for the correlation coefficient between two features to be considered of interest. + smd_threshold: The threshold for the standardized mean difference between two features to be considered of interest. + categorical_factor_threshold: The threshold for the factor between the value counts (as percentages) of a feature compared between two + groups of a sensitive feature. + feature_importance_threshold: The threshold for the feature importance of a sensitive feature for predicting another feature to be considered + of interest. + prediction_confidence_threshold: The threshold for the prediction confidence (R2 or accuracy) of a sensitive feature for predicting another + feature to be considered of interest. + corr_method: The correlation method to use. + layer: The layer in adata.layers to use for computation. If None, adata.X will be used. + copy: If set to False, adata is updated in place. If set to True, the adata is copied and the results are stored in the copied adata, which + is then returned. + + Returns: + A dictionary containing the results of the bias detection. The keys are + + - "feature_correlations": Pairwise correlations between features that exceed the correlation threshold. + - "standardized_mean_differences": Standardized mean differences between groups of sensitive features that exceed the SMD threshold. + - "categorical_value_counts": Value counts of categorical features between groups of sensitive features that exceed the categorical factor + threshold. + - "feature_importances": Feature importances for predicting one feature with another that exceed the feature importance and prediction + confidence thresholds. + + If copy is set to True, the function returns a tuple with the results dictionary and the updated adata. 
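For intuition, the standardized mean difference computed below divides the group mean difference by a pooled standard deviation; a minimal numeric sketch with made-up values:

>>> import numpy as np
>>> group = np.array([140.0, 150.0, 160.0])  # feature values inside the sensitive group
>>> rest = np.array([120.0, 125.0, 130.0])  # feature values of all other observations
>>> smd = (group.mean() - rest.mean()) / np.sqrt((group.std(ddof=1) ** 2 + rest.std(ddof=1) ** 2) / 2)
>>> round(float(smd), 2)
3.16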
+ + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.mimic_2(encoded=True) + >>> ep.ad.infer_feature_types(adata) + >>> adata = ep.pp.encode(adata, autodetect=True, encodings="label") + >>> results_dict = ep.pp.detect_bias(adata, "all") + + >>> # Example with specified sensitive features + >>> import ehrapy as ep + >>> adata = ep.dt.diabetes_130_fairlearn() + >>> ep.ad.infer_feature_types(adata) + >>> adata = ep.pp.encode(adata, autodetect=True, encodings="label") + >>> results_dict = ep.pp.detect_bias(adata, sensitive_features=["race", "gender"]) + """ + from ehrapy.tools import rank_features_supervised + + bias_results = {} + + if run_feature_importances is None: + run_feature_importances = sensitive_features != "all" + + if sensitive_features == "all": + sens_features_list = adata.var_names.values.tolist() + cat_sens_features = adata.var_names.values[adata.var[FEATURE_TYPE_KEY] == CATEGORICAL_TAG] + else: + sens_features_list = [] + for variable in sensitive_features: + if variable not in adata.var_names: + # check if the feature has been encoded + encoded_categorical_features = [ + feature for feature in adata.var_names if feature.startswith(f"ehrapycat_{variable}") + ] + + if len(encoded_categorical_features) == 0: + raise ValueError(f"Feature {variable} not found in adata.var.") + + sens_features_list.extend(encoded_categorical_features) + else: + sens_features_list.append(variable) + + cat_sens_features = [ + variable for variable in sens_features_list if adata.var[FEATURE_TYPE_KEY][variable] == CATEGORICAL_TAG + ] + + if copy: + adata = adata.copy() + + adata_df = anndata_to_df(adata, layer=layer) + + for feature in adata.var_names: + if not np.all(adata_df[feature].dropna().apply(type).isin([int, float, complex])): + raise ValueError( + f"Feature {feature} is not encoded numerically. Please encode the data (ep.pp.encode) before running bias detection."
+ ) + + def _get_group_name(encoded_feature: str, group_val: int) -> str | int: + try: + feature_name = encoded_feature.split("_")[1] + # Get the original group name stored in adata.obs by filtering the data for the encoded group value + return adata.obs[feature_name][list(adata[:, encoded_feature].X.squeeze() == group_val)].unique()[0] + except KeyError: + return group_val + + # -------------------- + # Feature correlations + # -------------------- + correlations = adata_df.corr(method=corr_method) + adata.varp["feature_correlations"] = correlations + + corr_results: dict[str, list] = {"Feature 1": [], "Feature 2": [], f"{corr_method.capitalize()} CC": []} + if sensitive_features == "all": + feature_tuples = list(itertools.combinations(sens_features_list, 2)) + else: + feature_tuples = list(itertools.product(sens_features_list, adata.var_names)) + for sens_feature, comp_feature in feature_tuples: + if sens_feature == comp_feature: + continue + if abs(correlations.loc[sens_feature, comp_feature]) > corr_threshold: + corr_results["Feature 1"].append(sens_feature) + corr_results["Feature 2"].append(comp_feature) + corr_results[f"{corr_method.capitalize()} CC"].append(correlations.loc[sens_feature, comp_feature]) + bias_results["feature_correlations"] = pd.DataFrame(corr_results).sort_values( + by=f"{corr_method.capitalize()} CC", key=abs, ascending=False + ) + + # ----------------------------- + # Standardized mean differences + # ----------------------------- + smd_results: dict[str, list] = { + "Sensitive Feature": [], + "Sensitive Group": [], + "Compared Feature": [], + "Standardized Mean Difference": [], + } + adata.uns["smd"] = {} + continuous_var_names = adata.var_names[adata.var[FEATURE_TYPE_KEY] == NUMERIC_TAG] + for sens_feature in cat_sens_features: + sens_feature_groups = sorted(adata_df[sens_feature].unique()) + if len(sens_feature_groups) == 1: + continue + smd_df = pd.DataFrame(index=continuous_var_names, columns=sens_feature_groups) + + for _group_nr, group in enumerate(sens_feature_groups): + # Compute SMD for all continuous features between the sensitive group and all other observations + group_mean = adata_df[continuous_var_names][adata_df[sens_feature] == group].mean() + group_std = adata_df[continuous_var_names][adata_df[sens_feature] == group].std() + + comparison_mean = adata_df[continuous_var_names][adata_df[sens_feature] != group].mean() + comparison_std = adata_df[continuous_var_names][adata_df[sens_feature] != group].std() + + smd = (group_mean - comparison_mean) / np.sqrt((group_std**2 + comparison_std**2) / 2) + smd_df[group] = smd + + abs_smd = smd.abs() + for comp_feature in continuous_var_names: + if abs_smd[comp_feature] > smd_threshold: + smd_results["Sensitive Feature"].append(sens_feature) + group_name = ( + _get_group_name(sens_feature, group) if sens_feature.startswith("ehrapycat_") else group + ) + smd_results["Sensitive Group"].append(group_name) + smd_results["Compared Feature"].append(comp_feature) + smd_results["Standardized Mean Difference"].append(smd[comp_feature]) + adata.uns["smd"][sens_feature] = smd_df + + bias_results["standardized_mean_differences"] = pd.DataFrame(smd_results).sort_values( + by="Standardized Mean Difference", key=abs, ascending=False + ) + + # ------------------------ + # Categorical value counts + # ------------------------ + cat_value_count_results: dict[str, list] = { + "Sensitive Feature": [], + "Sensitive Group": [], + "Compared Feature": [], + "Group 1": [], + "Group 2": [], + "Group 1 Percentage": [], + "Group 2 
Percentage": [], + } + cat_var_names = adata.var_names[adata.var[FEATURE_TYPE_KEY] == CATEGORICAL_TAG] + for sens_feature in cat_sens_features: + for comp_feature in cat_var_names: + if sens_feature == comp_feature: + continue + value_counts = adata_df.groupby([sens_feature, comp_feature]).size().unstack(fill_value=0) + value_counts = value_counts.div(value_counts.sum(axis=1), axis=0) + + for sens_group in value_counts.index: + for comp_group1, comp_group2 in itertools.combinations(value_counts.columns, 2): + value_count_diff = ( + value_counts.loc[sens_group, comp_group1] / value_counts.loc[sens_group, comp_group2] + ) + if ( + value_count_diff > categorical_factor_threshold + or value_count_diff < 1 / categorical_factor_threshold + ): + cat_value_count_results["Sensitive Feature"].append(sens_feature) + + sens_group_name = ( + _get_group_name(sens_feature, sens_group) + if sens_feature.startswith("ehrapycat_") + else sens_group + ) + cat_value_count_results["Sensitive Group"].append(sens_group_name) + + cat_value_count_results["Compared Feature"].append(comp_feature) + comp_group1_name = ( + _get_group_name(comp_feature, comp_group1) + if comp_feature.startswith("ehrapycat_") + else comp_group1 + ) + cat_value_count_results["Group 1"].append(comp_group1_name) + comp_group2_name = ( + _get_group_name(comp_feature, comp_group2) + if comp_feature.startswith("ehrapycat_") + else comp_group2 + ) + cat_value_count_results["Group 2"].append(comp_group2_name) + + cat_value_count_results["Group 1 Percentage"].append(value_counts.loc[sens_group, comp_group1]) + cat_value_count_results["Group 2 Percentage"].append(value_counts.loc[sens_group, comp_group2]) + bias_results["categorical_value_counts"] = pd.DataFrame(cat_value_count_results) + + # ------------------- + # Feature importances + # ------------------- + if run_feature_importances: + feature_importances_results: dict[str, list] = { + "Sensitive Feature": [], + "Predicted Feature": [], + "Feature Importance": [], + "Prediction Score": [], + } + for prediction_feature in adata.var_names: + try: + prediction_score = rank_features_supervised( + adata, + prediction_feature, + input_features="all", + model="rf", + key_added=f"{prediction_feature}_feature_importances", + percent_output=True, + verbose=False, + return_score=True, + ) + except ValueError as e: + if "Input y contains NaN" in str(e): + raise ValueError( + f"During feature importance computation, input feature y ({prediction_feature}) was found to contain NaNs." 
+ ) from e + else: + raise e + + for sens_feature in sens_features_list: + if prediction_feature == sens_feature: + continue + feature_importance = adata.var[f"{prediction_feature}_feature_importances"][sens_feature] / 100 + if ( + feature_importance > feature_importance_threshold + and prediction_score > prediction_confidence_threshold + ): + feature_importances_results["Sensitive Feature"].append(sens_feature) + feature_importances_results["Predicted Feature"].append(prediction_feature) + feature_importances_results["Feature Importance"].append(feature_importance) + feature_importances_results["Prediction Score"].append(prediction_score) + bias_results["feature_importances"] = pd.DataFrame(feature_importances_results).sort_values( + by="Feature Importance", key=abs, ascending=False + ) + + if copy: + return bias_results, adata + return bias_results diff --git a/data/ehrapy/preprocessing/_encoding.py b/data/ehrapy/preprocessing/_encoding.py new file mode 100644 index 0000000000000000000000000000000000000000..3c3426aae23e8d49988eddbbdab4178802a6886e --- /dev/null +++ b/data/ehrapy/preprocessing/_encoding.py @@ -0,0 +1,665 @@ +from __future__ import annotations + +from collections import OrderedDict +from itertools import chain +from typing import Any + +import numpy as np +import pandas as pd +from anndata import AnnData +from lamin_utils import logger +from rich.progress import BarColumn, Progress +from sklearn.preprocessing import LabelEncoder, OneHotEncoder + +from ehrapy.anndata import anndata_to_df, check_feature_types +from ehrapy.anndata._constants import ( + CATEGORICAL_TAG, + FEATURE_TYPE_KEY, + NUMERIC_TAG, +) +from ehrapy.anndata.anndata_ext import _get_var_indices_for_type + +available_encodings = {"one-hot", "label"} + + +@check_feature_types +def encode( + adata: AnnData, + autodetect: bool | dict = False, + encodings: dict[str, dict[str, list[str]]] | dict[str, list[str]] | str | None = "one-hot", +) -> AnnData: + """Encode categoricals of an :class:`~anndata.AnnData` object. + + Categorical values could be either passed via parameters or are autodetected on the fly. + The categorical values are also stored in obs and uns (for keeping the original, unencoded values). + The current encoding modes for each variable are also stored in adata.var['encoding_mode']. + Variable names in var are updated according to the encoding modes used. A variable name starting with `ehrapycat_` + indicates an encoded column (or part of it). + + Autodetect mode: + By using this mode, every column that contains non-numerical values is encoded. + In addition, every binary column will be encoded too. + These are those columns which contain only 1's and 0's (could be either integers or floats). + + Available encodings are: + 1. one-hot (https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OneHotEncoder.html) + 2. label (https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.LabelEncoder.html) + + Args: + adata: A :class:`~anndata.AnnData` object. + autodetect: Whether to autodetect categorical values that will be encoded. + encodings: Only needed if autodetect set to False. + A dict containing the encoding mode and categorical name for the respective column + or the specified encoding that will be applied to all columns. + + Returns: + An :class:`~anndata.AnnData` object with the encoded values in X. 
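As a sketch of the resulting naming convention (the column `sex` and its categories are made up), one-hot encoding prefixes each new column with `ehrapycat_`:

>>> adata_encoded = ep.pp.encode(adata, autodetect=False, encodings={"one-hot": ["sex"]})
>>> [v for v in adata_encoded.var_names if v.startswith("ehrapycat_")]  # illustrative output
['ehrapycat_sex_f', 'ehrapycat_sex_m']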
+ + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.mimic_2() + >>> adata_encoded = ep.pp.encode(adata, autodetect=True, encodings="one-hot") + + >>> # Example using custom encodings per columns: + >>> import ehrapy as ep + >>> adata = ep.dt.mimic_2() + >>> # encode col1 and col2 using label encoding and encode col3 using one hot encoding + >>> adata_encoded = ep.pp.encode( + ... adata, autodetect=False, encodings={"label": ["col1", "col2"], "one-hot": ["col3"]} + ... ) + """ + if not isinstance(adata, AnnData): + raise ValueError(f"Cannot encode object of type {type(adata)}. Can only encode AnnData objects!") + + if isinstance(encodings, str) and not autodetect: + raise ValueError("Passing a string for parameter encodings is only possible when using autodetect=True!") + elif autodetect and not isinstance(encodings, str | type(None)): + raise ValueError( + f"Setting encode mode with autodetect=True only works by passing a string (encode mode name) or None not {type(encodings)}!" + ) + + if "original" not in adata.layers.keys(): + adata.layers["original"] = adata.X.copy() + + # autodetect categorical values based on feature types stored in adata.var[FEATURE_TYPE_KEY] + if autodetect: + categoricals_names = _get_var_indices_for_type(adata, CATEGORICAL_TAG) + + if "encoding_mode" in adata.var.keys(): + if adata.var["encoding_mode"].isnull().values.any(): + not_encoded_features = adata.var["encoding_mode"].isna().index + categoricals_names = [ + _categorical for _categorical in categoricals_names if _categorical in not_encoded_features + ] + else: + logger.warning( + "The current AnnData object has been already encoded. Returning original AnnData object!" + ) + return adata + + # filter out categorical columns, that are already stored numerically + df_adata = anndata_to_df(adata) + categoricals_names = [ + feat for feat in categoricals_names if not np.all(df_adata[feat].apply(type).isin([int, float])) + ] + + # no columns were detected, that would require an encoding (e.g. non-numerical columns) + if not categoricals_names: + logger.warning("Detected no columns that need to be encoded. Leaving passed AnnData object unchanged.") + return adata + # update obs with the original categorical values + updated_obs = _update_obs(adata, categoricals_names) + + encoded_x = None + encoded_var_names = adata.var_names.to_list() + unencoded_var_names = adata.var_names.to_list() + if encodings not in available_encodings: + raise ValueError( + f"Unknown encoding mode {encodings}. 
Please provide one of the following encoding modes:\n" + f"{available_encodings}" + ) + single_encode_mode_switcher = { + "one-hot": _one_hot_encoding, + "label": _label_encoding, + } + with Progress( + "[progress.description]{task.description}", + BarColumn(), + "[progress.percentage]{task.percentage:>3.0f}%", + refresh_per_second=1500, + ) as progress: + task = progress.add_task(f"[red]Running {encodings} on detected columns ...", total=1) + # encode using the desired mode + encoded_x, encoded_var_names, unencoded_var_names = single_encode_mode_switcher[encodings]( # type: ignore + adata, + encoded_x, + updated_obs, + encoded_var_names, + unencoded_var_names, + categoricals_names, + progress, + task, + ) + progress.update(task, description="Updating layer originals ...") + + # update layer content with the latest categorical encoding and the old other values + updated_layer = _update_layer_after_encoding( + adata.layers["original"], + encoded_x, + encoded_var_names, + adata.var_names.to_list(), + categoricals_names, + ) + progress.update(task, description=f"[bold blue]Finished {encodings} of autodetected columns.") + + # copy non-encoded columns, and add new tag for encoded columns. This is needed to track encodings + new_var = pd.DataFrame(index=encoded_var_names) + new_var[FEATURE_TYPE_KEY] = adata.var[FEATURE_TYPE_KEY].copy() + new_var.loc[new_var.index.str.contains("ehrapycat"), FEATURE_TYPE_KEY] = CATEGORICAL_TAG + + new_var["unencoded_var_names"] = unencoded_var_names + + new_var["encoding_mode"] = [encodings if var in categoricals_names else None for var in unencoded_var_names] + + encoded_ann_data = AnnData( + encoded_x, + obs=updated_obs, + var=new_var, + uns=adata.uns.copy(), + layers={"original": updated_layer}, + ) + + # user passed categorical values with encoding mode for each of them + else: + # re-encode data + if "encoding_mode" in adata.var.keys(): + encodings = _reorder_encodings(adata, encodings) # type: ignore + adata = _undo_encoding(adata) + + # are all specified encodings valid? + for encoding in encodings.keys(): # type: ignore + if encoding not in available_encodings: + raise ValueError( + f"Unknown encoding mode {encoding}. Please provide one of the following encoding modes:\n" + f"{available_encodings}" + ) + + categoricals = list(chain(*encodings.values())) # type: ignore + + # ensure no categorical column gets encoded twice + if len(categoricals) != len(set(categoricals)): + raise ValueError( + "The categorical column names given contain at least one duplicate column. " + "Check the column names to ensure that no column is encoded twice!" + ) + elif any( + _categorical in adata.var_names[adata.var[FEATURE_TYPE_KEY] == NUMERIC_TAG] for _categorical in categoricals + ): + logger.warning( + "At least one of passed column names seems to have numerical dtype. In general it is not recommended " + "to encode numerical columns!" 
+ ) + + updated_obs = _update_obs(adata, categoricals) + + encoding_mode = {} + encoded_x = None + encoded_var_names = adata.var_names.to_list() + unencoded_var_names = adata.var_names.to_list() + with Progress( + "[progress.description]{task.description}", + BarColumn(), + "[progress.percentage]{task.percentage:>3.0f}%", + refresh_per_second=1500, + ) as progress: + for encoding in encodings.keys(): # type: ignore + task = progress.add_task(f"[red]Setting up {encodings}", total=1) + encode_mode_switcher = { + "one-hot": _one_hot_encoding, + "label": _label_encoding, + } + progress.update(task, description=f"Running {encoding} ...") + # perform the actual encoding + encoded_x, encoded_var_names, unencoded_var_names = encode_mode_switcher[encoding]( + adata, + encoded_x, + updated_obs, + encoded_var_names, + unencoded_var_names, + encodings[encoding], # type: ignore + progress, + task, # type: ignore + ) + + for _categorical in encodings[encoding]: # type: ignore + _categorical = [_categorical] if isinstance(_categorical, str) else _categorical # type: ignore + for column_name in _categorical: + # get idx of column in unencoded_var_names + indices = [i for i, var in enumerate(unencoded_var_names) if var == column_name] + encoded_var = [encoded_var_names[idx] for idx in indices] + for var in encoded_var: + encoding_mode[var] = encoding + + # update original layer content with the new categorical encoding and the old other values + updated_layer = _update_layer_after_encoding( + adata.layers["original"], + encoded_x, + encoded_var_names, + adata.var_names.to_list(), + categoricals, + ) + + # copy non-encoded columns, and add new tag for encoded columns. This is needed to track encodings + new_var = pd.DataFrame(index=encoded_var_names) + + new_var[FEATURE_TYPE_KEY] = adata.var[FEATURE_TYPE_KEY].copy() + new_var.loc[new_var.index.str.contains("ehrapycat"), FEATURE_TYPE_KEY] = CATEGORICAL_TAG + + new_var["unencoded_var_names"] = unencoded_var_names + + # update encoding mode in var, keeping already annotated columns + if "encoding_mode" in adata.var.keys(): + encoding_mode.update({key: value for key, value in adata.var["encoding_mode"].items() if value is not None}) + new_var["encoding_mode"] = [None] * len(new_var) + for _categorical in encoding_mode.keys(): + new_var.loc[_categorical, "encoding_mode"] = encoding_mode[_categorical] + + try: + encoded_ann_data = AnnData( + X=encoded_x, + obs=updated_obs, + var=new_var, + uns=adata.uns.copy(), + layers={"original": updated_layer}, + ) + + # if the user did not pass every non-numerical column for encoding, an Anndata object cannot be created + except ValueError: + raise AnnDataCreationError( + "Creation of AnnData object failed. Ensure that you passed all non numerical, " + "categorical values for encoding!" + ) from None + + encoded_ann_data.X = encoded_ann_data.X.astype(np.float32) + + return encoded_ann_data + + +def _one_hot_encoding( + adata: AnnData, + X: np.ndarray | None, + updated_obs: pd.DataFrame, + var_names: list[str], + unencoded_var_names: list[str], + categories: list[str], + progress: Progress, + task, +) -> tuple[np.ndarray, list[str], list[str]]: + """Encode categorical columns using one hot encoding. 
+ + Args: + adata: The current AnnData object + X: Current (encoded) X + updated_obs: A copy of the original obs where the original categorical values are stored that will be encoded + var_names: Var names of current AnnData object + categories: The name of the categorical columns to be encoded + + Returns: + Encoded new X and the corresponding new var names + """ + original_values = _initial_encoding(updated_obs, categories) + progress.update(task, description="[bold blue]Running one-hot encoding on passed columns ...") + + encoder = OneHotEncoder(handle_unknown="ignore", sparse_output=False).fit(original_values) + categorical_prefixes = [ + f"ehrapycat_{category}_{str(suffix).strip()}" + for idx, category in enumerate(categories) + for suffix in encoder.categories_[idx] + ] + unencoded_prefixes = [category for idx, category in enumerate(categories) for suffix in encoder.categories_[idx]] + transformed = encoder.transform(original_values) + # X is None if this is the first encoding "round" -> take the former X + if X is None: + X = adata.X + progress.advance(task, 1) + progress.update(task, description="[blue]Updating X and var ...") + + temp_x, temp_var_names, unencoded_var_names = _update_encoded_data( + X, transformed, var_names, categorical_prefixes, categories, unencoded_prefixes, unencoded_var_names + ) + progress.update(task, description="[blue]Finished one-hot encoding.") + + return temp_x, temp_var_names, unencoded_var_names + + +def _label_encoding( + adata: AnnData, + X: np.ndarray | None, + updated_obs: pd.DataFrame, + var_names: list[str], + unencoded_var_names: list[str], + categoricals: list[str], + progress: Progress, + task, +) -> tuple[np.ndarray, list[str], list[str]]: + """Encode categorical columns using label encoding. + + Args: + adata: The current AnnData object. + X: Current (encoded) X. + updated_obs: A copy of the original obs where the original categorical values are stored that will be encoded. + var_names: Var names of current AnnData object. + categoricals: The name of the categorical columns, that need to be encoded. + + Returns: + Encoded new X and the corresponding new var names. 
+ """ + original_values = _initial_encoding(updated_obs, categoricals) + # label encoding expects input array to be 1D, so iterate over all columns and encode them one by one + for idx in range(original_values.shape[1]): + progress.update(task, description=f"[blue]Running label encoding on column {categoricals[idx]} ...") + label_encoder = LabelEncoder() + row_vec = original_values[:, idx : idx + 1].ravel() # type: ignore + label_encoder.fit(row_vec) + transformed = label_encoder.transform(row_vec) + # need a column vector instead of row vector + original_values[:, idx : idx + 1] = transformed[..., None] + progress.advance(task, 1 / len(categoricals)) + category_prefixes = [f"ehrapycat_{_categorical}" for _categorical in categoricals] + # X is None if this is the first encoding "round" -> take the former X + if X is None: + X = adata.X + + progress.update(task, description="[blue]Updating X and var ...") + temp_x, temp_var_names, unencoded_var_names = _update_encoded_data( + X, original_values, var_names, category_prefixes, categoricals, categoricals, unencoded_var_names + ) + progress.update(task, description="[blue]Finished label encoding.") + + return temp_x, temp_var_names, unencoded_var_names + + +def _update_layer_after_encoding( + old_layer: np.ndarray, + new_x: np.ndarray, + new_var_names: list[str], + old_var_names: list[str], + categories: list[str], +) -> np.ndarray: + """Update the original layer containing the initial non categorical values and the latest encoded categoricals. + + Args: + old_layer: The previous "original" layer + new_x: The new encoded X + new_var_names: The new encoded var names + old_var_names: The previous var names + categories: All previous categorical names + + Returns + A Numpy array containing all numericals together with all encoded categoricals. 
+ """ + try: + # get the index of the first column of the new encoded X, that does not store an encoded categorical + new_cat_stop_index = next(i for i in range(len(new_var_names)) if not new_var_names[i].startswith("ehrapycat")) + # get the index of the first column of the old encoded X, that does not store an encoded categorical + old_cat_stop_index = next(i for i in range(len(old_var_names)) if not old_var_names[i].startswith("ehrapycat")) + # when there are only encoded columns, simply return a copy of the new X, since to originals will be kept in the layer + except StopIteration: + return new_x.copy().astype("float32") + # keep track of all indices with original value columns, that are (and were) not encoded + idx_list = [] + for idx, col_name in enumerate(old_var_names[old_cat_stop_index:]): + # this case is needed when there are one or more numerical (but categorical) columns that was not encoded yet + if col_name not in categories: + idx_list.append(idx + old_cat_stop_index) + # slice old original layer using the selector + old_layer_view = old_layer[:, idx_list] + # get all encoded categoricals of X + encoded_categoricals = new_x[:, :new_cat_stop_index] + # horizontally stack all encoded categoricals and the remaining "old original values" + updated_layer = np.hstack((encoded_categoricals, old_layer_view)) + + try: + logger.info("Updated the original layer after encoding.") + return updated_layer.astype("float32") + except ValueError as e: + raise ValueError("Ensure that all columns which require encoding are being encoded.") from e + + +def _update_encoded_data( + X: np.ndarray, + transformed: np.ndarray, + var_names: list[str], + categorical_prefixes: list[str], + categoricals: list[str], + unencoded_prefixes: list[str], + unencoded_var_names: list[str], +) -> tuple[np.ndarray, list[str], list[str]]: + """Update X and var_names after each encoding. + + Args: + X: Current (former) X + transformed: The encoded (transformed) categorical column + var_names: Var names of current AnnData object + categorical_prefixes: The name(s) of the encoded column(s) + categoricals: The categorical values that were encoded recently + unencoded_prefixes: The unencoded names of the categorical columns that were encoded + + Returns: + Encoded new X, the corresponding new var names, and the unencoded var names + """ + idx = _get_categoricals_old_indices(var_names, categoricals) + # delete the original categorical column + del_cat_column_x = np.delete(X, list(idx), 1) + # create the new, encoded X + temp_x = np.hstack((transformed, del_cat_column_x)) + # delete old categorical name + var_names = [col_name for col_idx, col_name in enumerate(var_names) if col_idx not in idx] + temp_var_names = categorical_prefixes + var_names + + unencoded_var_names = [col_name for col_idx, col_name in enumerate(unencoded_var_names) if col_idx not in idx] + unencoded_var_names = unencoded_prefixes + unencoded_var_names + + return temp_x, temp_var_names, unencoded_var_names + + +def _initial_encoding( + obs: pd.DataFrame, + categoricals: list[str], +) -> np.ndarray: + """Get all original values for all categoricals that need to be encoded (again). 
+ + Args: + obs: A copy of the original obs where the original categorical values are stored that will be encoded + categoricals: All categoricals that need to be encoded + + Returns: + Numpy array of all original categorial values + """ + # create numpy array from all original categorical values, that will be encoded (again) + array = np.array([obs[categoricals[i]].to_numpy() for i in range(len(categoricals))]).transpose() + + return array + + +def _undo_encoding( + adata: AnnData, + verbose: bool = True, +) -> AnnData | None: + """Undo the current encodings applied to all columns in X. This currently resets the AnnData object to its initial state. + + Args: + adata: The AnnData object + verbose: Set to False to suppress warnings. + + Returns: + A (partially) encoding reset AnnData object + """ + # get all encoded features + categoricals = _get_encoded_features(adata) + + # get all columns that should be stored in obs only + columns_obs_only = [column_name for column_name in list(adata.obs.columns) if column_name not in categoricals] + + transformed = _initial_encoding(adata.obs, categoricals) + temp_x, temp_var_names = _delete_all_encodings(adata) + new_x = np.hstack((transformed, temp_x)) if temp_x is not None else transformed + new_var_names = categoricals + temp_var_names if temp_var_names is not None else categoricals + + # only keep columns in obs that were stored in obs only -> delete every encoded column from obs + new_obs = adata.obs[columns_obs_only] + + var = pd.DataFrame(index=new_var_names) + var[FEATURE_TYPE_KEY] = [ + adata.var.loc[adata.var["unencoded_var_names"] == unenc_var_name, FEATURE_TYPE_KEY].unique()[0] + for unenc_var_name in new_var_names + ] + + return AnnData( + new_x, + obs=new_obs, + var=var, + uns=OrderedDict(), + layers={"original": new_x.copy()}, + ) + + +def _delete_all_encodings(adata: AnnData) -> tuple[np.ndarray | None, list | None]: + """Delete all encoded columns and keep track of their indices. + + Args: + adata: The AnnData object to operate on + + Returns: + A temporary X were all encoded columns are deleted and all var_names of unencoded columns. + """ + var_names = list(adata.var_names) + if adata.X is not None and var_names is not None: + idx = 0 + for var in var_names: + if not var.startswith("ehrapycat"): + break + idx += 1 + # case: only encoded columns were found + if idx == len(var_names): + return None, None + # don't need to consider case when no encoded columns are there, since undo_encoding would not run anyways in this case + return adata.X[:, idx:].copy(), var_names[idx:] + return None, None + + +def _reorder_encodings(adata: AnnData, new_encodings: dict[str, list[list[str]] | list[str]]): + """Reorder the encodings and update which column will be encoded using which mode. + + Args: + adata: The AnnData object to be reencoded + new_encodings: The new encodings passed by the user (might affect encoded as well as previously non encoded columns) + + Returns: + An updated encoding scheme + """ + latest_encoded_columns = sum(new_encodings.values(), []) + + # check for duplicates and raise an error if any + if len(set(latest_encoded_columns)) != len(latest_encoded_columns): + logger.error( + "Reencoding AnnData object failed. You have at least one duplicate in your encodings. A column " + "cannot be encoded at the same time using different encoding modes!" 
+ ) + raise DuplicateColumnEncodingError + + encodings = {} + for encode_mode in available_encodings: + encoded_categoricals_with_mode = ( + adata.var.loc[adata.var["encoding_mode"] == encode_mode, "unencoded_var_names"].unique().tolist() + ) + + encodings[encode_mode] = new_encodings[encode_mode] if encode_mode in new_encodings.keys() else [] + # add all columns that were encoded with the current mode before and are not reencoded + encodings[encode_mode] += [ + _categorical + for _categorical in encoded_categoricals_with_mode + if _categorical not in latest_encoded_columns + ] + + if len(encodings[encode_mode]) == 0: + del encodings[encode_mode] + + return encodings + + +def _get_categoricals_old_indices(old_var_names: list[str], encoded_categories: list[str]) -> set[int]: + """Get indices of every (possibly encoded) categorical column belonging to a newly encoded categorical value. + + Args: + old_var_names: Former variables names + encoded_categories: Already encoded categories + + Returns: + Set of all indices of formerly encoded categories belonging to a newly encoded categorical value. + """ + idx_list = set() + category_set = set(encoded_categories) + for idx, old_var_name in enumerate(old_var_names): + # if the old variable was previously unencoded (only the case for numerical categoricals) + if old_var_name in category_set: + idx_list.add(idx) + # if the old variable was already encoded + elif old_var_name.startswith("ehrapycat_"): + if any(old_var_name[10:].startswith(category) for category in category_set): + idx_list.add(idx) + + return idx_list + + +def _update_obs(adata: AnnData, categorical_names: list[str]) -> pd.DataFrame: + """Add the original categorical values to obs. + + Args: + adata: The original AnnData object + categorical_names: Name of each categorical column + + Returns: + Updated obs with the original categorical values added + """ + updated_obs = adata.obs.copy() + for idx, var_name in enumerate(adata.var_names): + if var_name in updated_obs.columns: + continue + elif var_name in categorical_names: + updated_obs[var_name] = adata.X[::, idx : idx + 1].flatten() + # note: this will count binary columns (0 and 1 only) as well + # needed for writing to .h5ad files + if set(pd.unique(updated_obs[var_name])).issubset({False, True, np.nan}): + updated_obs[var_name] = updated_obs[var_name].astype("bool") + # get all non bool object columns and cast them to category dtype + object_columns = list(updated_obs.select_dtypes(include="object").columns) + updated_obs[object_columns] = updated_obs[object_columns].astype("category") + logger.info(f"The original categorical values `{categorical_names}` were added to obs.") + + return updated_obs + + +def _get_encoded_features(adata: AnnData) -> list[str]: + """Get all encoded features in an AnnData object. 
+ + Args: + adata: The AnnData object + + Returns: + List of all unencoded names of features that were encoded + """ + encoded_features = [ + unencoded_feature + for enc_mode, unencoded_feature in adata.var[["encoding_mode", "unencoded_var_names"]].values + if enc_mode is not None and not pd.isna(enc_mode) + ] + return list(set(encoded_features)) + + +class AnnDataCreationError(ValueError): + pass + + +class DuplicateColumnEncodingError(ValueError): + pass diff --git a/data/ehrapy/preprocessing/_highly_variable_features.py b/data/ehrapy/preprocessing/_highly_variable_features.py new file mode 100644 index 0000000000000000000000000000000000000000..6a7b41e91ef9f0a1fe625321873ec423a3db9b29 --- /dev/null +++ b/data/ehrapy/preprocessing/_highly_variable_features.py @@ -0,0 +1,68 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +import scanpy as sc + +if TYPE_CHECKING: + import pandas as pd + from anndata import AnnData + + +def highly_variable_features( + adata: AnnData, + layer: str | None = None, + top_features_percentage: float = 0.2, + span: float | None = 0.3, + n_bins: int = 20, + subset: bool = False, + inplace: bool = True, + check_values: bool = True, +) -> pd.DataFrame | None: + """Annotate highly variable features. + + Expects count data. A normalized variance for each feature is computed. First, the data + are standardized (i.e., z-score normalization per feature) with a regularized + standard deviation. Next, the normalized variance is computed as the variance + of each feature after the transformation. Features are ranked by the normalized variance. + + Args: + adata: The annotated data matrix of shape `n_obs` × `n_vars`. + layer: If provided, use `adata.layers[layer]` for expression values instead of `adata.X`. + top_features_percentage: Percentage of highly-variable features to keep. + span: The fraction of the data used when estimating the variance in the loess model fit. + n_bins: Number of bins for binning. Normalization is done with respect to each bin. + If just a single observation falls into a bin, the normalized dispersion is artificially set to 1. + You'll be informed about this if you set `settings.verbosity = 4`. + subset: Inplace subset to highly-variable features if `True` otherwise merely indicate highly variable features. + inplace: Whether to place calculated metrics in `.var` or return them. + check_values: Check if counts in selected layer are integers. A Warning is returned if set to True. 
+ + Returns: + Depending on `inplace` returns calculated metrics (:class:`~pandas.DataFrame`) or + updates `.var` with the following fields + + **highly_variable** + boolean indicator of highly-variable features + **means** + means per feature + **variances** + variance per feature + **variances_norm** + normalized variance per feature, averaged in the case of multiple batches + **highly_variable_rank** + rank of the feature according to normalized variance, median rank in the case of multiple batches + """ + n_top_features = int(top_features_percentage * len(adata.var)) + + return sc.pp.highly_variable_genes( + adata=adata, + layer=layer, + n_top_genes=n_top_features, + span=span, + n_bins=n_bins, + flavor="seurat_v3", + subset=subset, + inplace=inplace, + check_values=check_values, + ) diff --git a/data/ehrapy/preprocessing/_imputation.py b/data/ehrapy/preprocessing/_imputation.py new file mode 100644 index 0000000000000000000000000000000000000000..03796c2b3c414fd74c2a9b321292152e2c3ba5a9 --- /dev/null +++ b/data/ehrapy/preprocessing/_imputation.py @@ -0,0 +1,560 @@ +from __future__ import annotations + +import warnings +from collections.abc import Iterable +from typing import TYPE_CHECKING, Literal + +import numpy as np +import pandas as pd +from lamin_utils import logger +from sklearn.experimental import enable_iterative_imputer # noinspection PyUnresolvedReference +from sklearn.impute import SimpleImputer + +from ehrapy import settings +from ehrapy._utils_available import _check_module_importable +from ehrapy._utils_rendering import spinner +from ehrapy.anndata import check_feature_types +from ehrapy.anndata.anndata_ext import get_column_indices + +if TYPE_CHECKING: + from anndata import AnnData + + +@spinner("Performing explicit impute") +def explicit_impute( + adata: AnnData, + replacement: (str | int) | (dict[str, str | int]), + *, + impute_empty_strings: bool = True, + warning_threshold: int = 70, + copy: bool = False, +) -> AnnData | None: + """Replaces all missing values in all columns or a subset of columns specified by the user with the passed replacement value. + + There are two scenarios to cover: + 1. Replace all missing values with the specified value. + 2. Replace all missing values in a subset of columns with a specified value per column. + + Args: + adata: :class:`~anndata.AnnData` object containing X to impute values in. + replacement: The value to replace missing values with. If a dictionary is provided, the keys represent column + names and the values represent replacement values for those columns. + impute_empty_strings: If True, empty strings are also replaced. + warning_threshold: Threshold of percentage of missing values to display a warning for. + copy: If True, returns a modified copy of the original AnnData object. If False, modifies the object in place. + + Returns: + If copy is True, a modified copy of the original AnnData object with imputed X. + If copy is False, the original AnnData object is modified in place, and None is returned. 
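A hedged sketch of the per-column dictionary form (the column name is illustrative; the "default" key, handled by `_extract_impute_value` below, covers all columns that are not named explicitly):

>>> import ehrapy as ep
>>> adata = ep.dt.mimic_2(encoded=True)
>>> ep.pp.explicit_impute(adata, replacement={"tco2_first": 0, "default": -1})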
+ + Examples: + Replace all missing values in adata with the value 0: + + >>> import ehrapy as ep + >>> adata = ep.dt.mimic_2(encoded=True) + >>> ep.pp.explicit_impute(adata, replacement=0) + """ + if copy: + adata = adata.copy() + + if isinstance(replacement, int) or isinstance(replacement, str): + _warn_imputation_threshold(adata, var_names=list(adata.var_names), threshold=warning_threshold) + else: + _warn_imputation_threshold(adata, var_names=replacement.keys(), threshold=warning_threshold) # type: ignore + + # 1: Replace all missing values with the specified value + if isinstance(replacement, int | str): + _replace_explicit(adata.X, replacement, impute_empty_strings) + + # 2: Replace all missing values in a subset of columns with a specified value per column or a default value, when the column is not explicitly named + elif isinstance(replacement, dict): + for idx, column_name in enumerate(adata.var_names): + imputation_value = _extract_impute_value(replacement, column_name) + # only replace if an explicit value got passed or could be extracted from replacement + if imputation_value is not None: + _replace_explicit(adata.X[:, idx : idx + 1], imputation_value, impute_empty_strings) + else: + logger.warning(f"No replace value passed and found for var [not bold green]{column_name}.") + else: + raise ValueError( # pragma: no cover + f"Type {type(replacement)} is not a valid datatype for replacement parameter. Either use int, str or a dict!" + ) + + return adata if copy else None + + +def _replace_explicit(arr: np.ndarray, replacement: str | int, impute_empty_strings: bool) -> None: + """Replace one column or whole X with a value where missing values are stored.""" + if not impute_empty_strings: # pragma: no cover + impute_conditions = pd.isnull(arr) + else: + impute_conditions = np.logical_or(pd.isnull(arr), arr == "") + arr[impute_conditions] = replacement + + +def _extract_impute_value(replacement: dict[str, str | int], column_name: str) -> str | int | None: + """Extract the replacement value for a given column in the :class:`~anndata.AnnData` object. + + Returns: + The value to replace missing values with. + """ + # try to get a value for the specific column + imputation_value = replacement.get(column_name) + if imputation_value is not None: + return imputation_value + # search for a default value in case no value was specified for that column + imputation_value = replacement.get("default") + if imputation_value is not None: # pragma: no cover + return imputation_value + else: + return None + + +@spinner("Performing simple impute") +def simple_impute( + adata: AnnData, + var_names: Iterable[str] | None = None, + *, + strategy: Literal["mean", "median", "most_frequent"] = "mean", + copy: bool = False, + warning_threshold: int = 70, +) -> AnnData | None: + """Impute missing values in numerical data using mean/median/most frequent imputation. + + If required and using mean or median strategy, the data needs to be properly encoded as this imputation requires + numerical data only. + + Args: + adata: The annotated data matrix to impute missing values on. + var_names: A list of column names to apply imputation on (if None, impute all columns). + strategy: Imputation strategy to use. One of {'mean', 'median', 'most_frequent'}. + warning_threshold: Display a warning message if percentage of missing values exceeds this threshold. + copy: Whether to return a copy of `adata` or modify it inplace. + + Returns: + If copy is True, a modified copy of the original AnnData object with imputed X.
+        If copy is False, the original AnnData object is modified in place, and None is returned.
+
+    Examples:
+        >>> import ehrapy as ep
+        >>> adata = ep.dt.mimic_2(encoded=True)
+        >>> ep.pp.simple_impute(adata, strategy="median")
+    """
+    if copy:
+        adata = adata.copy()
+
+    _warn_imputation_threshold(adata, var_names, threshold=warning_threshold)
+
+    if strategy in {"median", "mean"}:
+        try:
+            _simple_impute(adata, var_names, strategy)
+        except ValueError:
+            raise ValueError(
+                f"Can only impute numerical data using {strategy} strategy. Try to restrict imputation "
+                "to certain columns using the var_names parameter or use a different strategy."
+            ) from None
+    # most_frequent imputation works with non-numerical data as well
+    elif strategy == "most_frequent":
+        _simple_impute(adata, var_names, strategy)
+    else:
+        raise ValueError(
+            f"Unknown impute strategy {strategy} for simple imputation. Choose one of 'mean', 'median' or 'most_frequent'."
+        )
+
+    return adata if copy else None
+
+
+def _simple_impute(adata: AnnData, var_names: Iterable[str] | None, strategy: str) -> None:
+    imputer = SimpleImputer(strategy=strategy)
+    if isinstance(var_names, Iterable) and all(isinstance(item, str) for item in var_names):
+        column_indices = get_column_indices(adata, var_names)
+        adata.X[:, column_indices] = imputer.fit_transform(adata.X[:, column_indices])
+    else:
+        adata.X = imputer.fit_transform(adata.X)
+
+
+@spinner("Performing KNN impute")
+@check_feature_types
+def knn_impute(
+    adata: AnnData,
+    var_names: Iterable[str] | None = None,
+    *,
+    n_neighbors: int = 5,
+    copy: bool = False,
+    backend: Literal["scikit-learn", "faiss"] = "faiss",
+    warning_threshold: int = 70,
+    backend_kwargs: dict | None = None,
+    **kwargs,
+) -> AnnData | None:
+    """Imputes missing values in the input AnnData object using K-nearest neighbor imputation.
+
+    If required, the data needs to be properly encoded as this imputation requires numerical data only.
+
+    .. warning::
+        Currently, both `n_neighbours` and `n_neighbors` are accepted as parameters for the number of neighbors.
+        However, in future versions, only `n_neighbors` will be supported. Please update your code accordingly.
+
+
+    Args:
+        adata: An annotated data matrix containing EHR data.
+        var_names: A list of variable names indicating which columns to impute.
+            If `None`, all columns are imputed. Default is `None`.
+        n_neighbors: Number of neighbors to use when performing the imputation.
+        copy: Whether to perform the imputation on a copy of the original `AnnData` object.
+            If `True`, the original object remains unmodified.
+        backend: The implementation to use for the KNN imputation.
+            'scikit-learn' is very slow but uses an exact KNN algorithm, whereas 'faiss'
+            is drastically faster but uses an approximation for the KNN graph.
+            In practice, 'faiss' is close enough to the 'scikit-learn' results.
+        warning_threshold: Percentage of missing values above which a warning is issued.
+        backend_kwargs: Passed to the backend.
+            Pass "mean", "median", or "weighted" for 'strategy' to set the imputation strategy for faiss.
+            See :class:`sklearn.impute.KNNImputer` for more information on the 'scikit-learn' backend.
+            See `fknni.faiss.FaissImputer` for more information on the 'faiss' backend.
+        kwargs: Keyword arguments from earlier ehrapy versions, kept for backwards compatibility. Using the current arguments listed above is encouraged.
+
+    Returns:
+        If copy is True, a modified copy of the original AnnData object with imputed X.
+ If copy is False, the original AnnData object is modified in place, and None is returned. + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.mimic_2(encoded=True) + >>> ep.ad.infer_feature_types(adata) + >>> ep.pp.knn_impute(adata) + """ + if copy: + adata = adata.copy() + + _warn_imputation_threshold(adata, var_names, threshold=warning_threshold) + + if backend not in {"scikit-learn", "faiss"}: + raise ValueError(f"Unknown backend '{backend}' for KNN imputation. Choose between 'scikit-learn' and 'faiss'.") + + if backend_kwargs is None: + backend_kwargs = {} + + valid_kwargs = {"n_neighbours"} + unexpected_kwargs = set(kwargs.keys()) - valid_kwargs + + if unexpected_kwargs: + raise ValueError(f"Unexpected keyword arguments: {unexpected_kwargs}.") + + if "n_neighbours" in kwargs.keys(): + n_neighbors = kwargs["n_neighbours"] + warnings.warn( + "ehrapy will use 'n_neighbors' instead of 'n_neighbours'. Please update your code.", + DeprecationWarning, + stacklevel=1, + ) + + if _check_module_importable("sklearnex"): # pragma: no cover + from sklearnex import patch_sklearn, unpatch_sklearn + + patch_sklearn() + + try: + if np.issubdtype(adata.X.dtype, np.number): + _knn_impute(adata, var_names, n_neighbors, backend=backend, **backend_kwargs) + else: + # Raise exception since non-numerical data can not be imputed using KNN Imputation + raise ValueError( + "Can only impute numerical data. Try to restrict imputation to certain columns using " + "var_names parameter or perform an encoding of your data." + ) + + except ValueError as e: + if "Data matrix has wrong shape" in str(e): + logger.error("Check that your matrix does not contain any NaN only columns!") + raise + + if _check_module_importable("sklearnex"): # pragma: no cover + unpatch_sklearn() + + return adata if copy else None + + +def _knn_impute( + adata: AnnData, + var_names: Iterable[str] | None, + n_neighbors: int, + backend: Literal["scikit-learn", "faiss"], + **kwargs, +) -> None: + if backend == "scikit-learn": + from sklearn.impute import KNNImputer + + imputer = KNNImputer(n_neighbors=n_neighbors, **kwargs) + else: + from fknni import FaissImputer + + imputer = FaissImputer(n_neighbors=n_neighbors, **kwargs) + + if isinstance(var_names, Iterable) and all(isinstance(item, str) for item in var_names): + column_indices = get_column_indices(adata, var_names) + adata.X[::, column_indices] = imputer.fit_transform(adata.X[::, column_indices]) + # this is required since X dtype has to be numerical in order to correctly round floats + adata.X = adata.X.astype("float64") + else: + adata.X = imputer.fit_transform(adata.X) + + +@spinner("Performing miss-forest impute") +def miss_forest_impute( + adata: AnnData, + var_names: Iterable[str] | None = None, + *, + num_initial_strategy: Literal["mean", "median", "most_frequent", "constant"] = "mean", + max_iter: int = 3, + n_estimators: int = 100, + random_state: int = 0, + warning_threshold: int = 70, + copy: bool = False, +) -> AnnData | None: + """Impute data using the MissForest strategy. + + This function uses the MissForest strategy to impute missing values in the data matrix of an AnnData object. + The strategy works by fitting a random forest model on each feature containing missing values, + and using the trained model to predict the missing values. + + See https://academic.oup.com/bioinformatics/article/28/1/112/219101. + + If required, the data needs to be properly encoded as this imputation requires numerical data only. 
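+
+    .. note::
+        Random forest based imputation is computationally expensive; lowering `n_estimators` or `max_iter` trades imputation quality for speed.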
+
+    Args:
+        adata: The AnnData object to use MissForest Imputation on.
+        var_names: Iterable of columns to impute.
+        num_initial_strategy: The initial strategy to replace all missing numerical values with.
+        max_iter: The maximum number of iterations if the stop criterion has not been met yet.
+        n_estimators: The number of trees to fit for every missing variable. Has a big effect on the run time.
+            Decrease for faster computations.
+        random_state: The random seed for the initialization.
+        warning_threshold: Threshold of percentage of missing values to display a warning for.
+        copy: Whether to return a copy or act in place.
+
+    Returns:
+        If copy is True, a modified copy of the original AnnData object with imputed X.
+        If copy is False, the original AnnData object is modified in place, and None is returned.
+
+    Examples:
+        >>> import ehrapy as ep
+        >>> adata = ep.dt.mimic_2(encoded=True)
+        >>> ep.pp.miss_forest_impute(adata)
+    """
+    if copy:
+        adata = adata.copy()
+
+    if var_names is None:
+        _warn_imputation_threshold(adata, list(adata.var_names), threshold=warning_threshold)
+    elif isinstance(var_names, Iterable) and all(isinstance(item, str) for item in var_names):
+        _warn_imputation_threshold(adata, var_names, threshold=warning_threshold)
+
+    if _check_module_importable("sklearnex"):  # pragma: no cover
+        from sklearnex import patch_sklearn, unpatch_sklearn
+
+        patch_sklearn()
+
+    from sklearn.ensemble import ExtraTreesRegressor
+    from sklearn.impute import IterativeImputer
+
+    try:
+        imp_num = IterativeImputer(
+            estimator=ExtraTreesRegressor(n_estimators=n_estimators, n_jobs=settings.n_jobs),
+            initial_strategy=num_initial_strategy,
+            max_iter=max_iter,
+            random_state=random_state,
+        )
+
+        if isinstance(var_names, Iterable) and all(isinstance(item, str) for item in var_names):  # type: ignore
+            num_indices = get_column_indices(adata, var_names)
+        else:
+            num_indices = get_column_indices(adata, adata.var_names)
+
+        if set(num_indices).issubset(_get_non_numerical_column_indices(adata.X)):
+            raise ValueError(
+                "Can only impute numerical data. Try to restrict imputation to certain columns using "
+                "var_names parameter."
+            )
+
+        # this step is the most expensive one and can drastically slow down the imputation process
+        if num_indices:
+            adata.X[:, num_indices] = imp_num.fit_transform(adata.X[:, num_indices])
+        else:
+            raise ValueError("Cannot find any feature to perform imputation")
+
+    except ValueError as e:
+        if "Data matrix has wrong shape" in str(e):
+            logger.error("Check that your matrix does not contain any NaN only columns!")
+        raise
+
+    if _check_module_importable("sklearnex"):  # pragma: no cover
+        unpatch_sklearn()
+
+    return adata if copy else None
+
+
+@spinner("Performing mice-forest impute")
+@check_feature_types
+def mice_forest_impute(
+    adata: AnnData,
+    var_names: Iterable[str] | None = None,
+    *,
+    warning_threshold: int = 70,
+    save_all_iterations_data: bool = True,
+    random_state: int | None = None,
+    inplace: bool = False,
+    iterations: int = 5,
+    variable_parameters: dict | None = None,
+    verbose: bool = False,
+    copy: bool = False,
+) -> AnnData | None:
+    """Impute data using miceforest.
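+
+    MICE iteratively models each feature that has missing values as a function of the remaining features and draws imputations from those models.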
+ + See https://github.com/AnotherSamWilson/miceforest + Fast, memory efficient Multiple Imputation by Chained Equations (MICE) with lightgbm. + + If required, the data needs to be properly encoded as this imputation requires numerical data only. + + Args: + adata: The AnnData object containing the data to impute. + var_names: A list of variable names to impute. If None, impute all variables. + warning_threshold: Threshold of percentage of missing values to display a warning for. + save_all_iterations_data: Whether to save all imputed values from all iterations or just the latest. + Saving all iterations allows for additional plotting, but may take more memory. + random_state: The random state ensures script reproducibility. + inplace: If True, modify the input AnnData object in-place and return None. + If False, return a copy of the modified AnnData object. Default is False. + iterations: The number of iterations to run. + variable_parameters: Model parameters can be specified by variable here. + Keys should be variable names or indices, and values should be a dict of parameter which should apply to that variable only. + verbose: Whether to print information about the imputation process. + copy: Whether to return a copy of the AnnData object or modify it in-place. + + Returns: + If copy is True, a modified copy of the original AnnData object with imputed X. + If copy is False, the original AnnData object is modified in place, and None is returned. + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.mimic_2(encoded=True) + >>> ep.ad.infer_feature_types(adata) + >>> ep.pp.mice_forest_impute(adata) + """ + if copy: + adata = adata.copy() + + _warn_imputation_threshold(adata, var_names, threshold=warning_threshold) + + try: + if np.issubdtype(adata.X.dtype, np.number): + _miceforest_impute( + adata, + var_names, + save_all_iterations_data, + random_state, + inplace, + iterations, + variable_parameters, + verbose, + ) + else: + raise ValueError( + "Can only impute numerical data. Try to restrict imputation to certain columns using " + "var_names parameter." 
+            )
+
+    except ValueError as e:
+        if "Data matrix has wrong shape" in str(e):
+            logger.error("Check that your matrix does not contain any NaN only columns!")
+        raise
+
+    return adata if copy else None
+
+
+def _miceforest_impute(
+    adata, var_names, save_all_iterations_data, random_state, inplace, iterations, variable_parameters, verbose
+) -> None:
+    import miceforest as mf
+
+    data_df = pd.DataFrame(adata.X, columns=adata.var_names, index=adata.obs_names)
+    data_df = data_df.apply(pd.to_numeric, errors="coerce")
+
+    if isinstance(var_names, Iterable) and all(isinstance(item, str) for item in var_names):
+        column_indices = get_column_indices(adata, var_names)
+        selected_columns = data_df.iloc[:, column_indices]
+        selected_columns = selected_columns.reset_index(drop=True)
+
+        kernel = mf.ImputationKernel(
+            selected_columns,
+            num_datasets=1,
+            save_all_iterations_data=save_all_iterations_data,
+            random_state=random_state,
+        )
+
+        kernel.mice(iterations=iterations, variable_parameters=variable_parameters or {}, verbose=verbose)
+        data_df.iloc[:, column_indices] = kernel.complete_data(dataset=0, inplace=inplace)
+
+    else:
+        data_df = data_df.reset_index(drop=True)
+
+        kernel = mf.ImputationKernel(
+            data_df, num_datasets=1, save_all_iterations_data=save_all_iterations_data, random_state=random_state
+        )
+
+        kernel.mice(iterations=iterations, variable_parameters=variable_parameters or {}, verbose=verbose)
+        data_df = kernel.complete_data(dataset=0, inplace=inplace)
+
+    adata.X = data_df.values
+
+
+def _warn_imputation_threshold(adata: AnnData, var_names: Iterable[str] | None, threshold: int = 75) -> dict[str, int]:
+    """Warns the user if more than `threshold` percent of values are missing for any of the given features.
+
+    Args:
+        adata: The AnnData object to check.
+        var_names: The names of the features that are about to be imputed.
+        threshold: Percentage of missing values from 0 to 100 above which a warning is shown.
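+
+    Returns:
+        A dict mapping each feature whose percentage of missing values exceeds the threshold to that percentage.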
+    """
+    if "missing_values_pct" not in adata.var:
+        from ehrapy.preprocessing import qc_metrics
+
+        qc_metrics(adata)
+    used_var_names = set(adata.var_names) if var_names is None else set(var_names)
+
+    thresholded_var_names = set(adata.var[adata.var["missing_values_pct"] > threshold].index) & set(used_var_names)
+
+    var_name_to_pct: dict[str, int] = {}
+    for var in thresholded_var_names:
+        var_name_to_pct[var] = adata.var["missing_values_pct"].loc[var]
+        logger.warning(f"Feature '{var}' has {var_name_to_pct[var]:.2f}% missing values, which exceeds the threshold of {threshold}%!")
+
+    return var_name_to_pct
+
+
+def _get_non_numerical_column_indices(arr: np.ndarray) -> set:
+    """Return indices of columns that contain at least one non-numerical value that is not `np.nan`."""

+    def _is_float_or_nan(val) -> bool:  # pragma: no cover
+        """Check whether a given item is a float or np.nan."""
+        try:
+            _ = float(val)
+            return not isinstance(val, bool)
+        except (ValueError, TypeError):
+            return False
+
+    def _is_float_or_nan_row(row) -> list[bool]:  # pragma: no cover
+        return [_is_float_or_nan(val) for val in row]
+
+    mask = np.apply_along_axis(_is_float_or_nan_row, 0, arr)
+    _, column_indices = np.where(~mask)
+
+    return set(column_indices)
diff --git a/data/ehrapy/preprocessing/_normalization.py b/data/ehrapy/preprocessing/_normalization.py
new file mode 100644
index 0000000000000000000000000000000000000000..67c41d0266b73af2b82cde61fdcd7a3da586291e
--- /dev/null
+++ b/data/ehrapy/preprocessing/_normalization.py
@@ -0,0 +1,535 @@
+from __future__ import annotations
+
+from functools import singledispatch
+from typing import TYPE_CHECKING
+
+import numpy as np
+import sklearn.preprocessing as sklearn_pp
+
+from ehrapy._compat import _raise_array_type_not_implemented
+
+try:
+    import dask.array as da
+    import dask_ml.preprocessing as daskml_pp
+
+    DASK_AVAILABLE = True
+except ImportError:
+    daskml_pp = None
+    DASK_AVAILABLE = False
+
+
+from ehrapy.anndata.anndata_ext import (
+    assert_numeric_vars,
+    get_column_indices,
+    get_numeric_vars,
+    set_numeric_vars,
+)
+
+if TYPE_CHECKING:
+    from collections.abc import Callable, Sequence
+
+    import pandas as pd
+    from anndata import AnnData
+
+
+def _scale_func_group(
+    adata: AnnData,
+    scale_func: Callable[[np.ndarray | pd.DataFrame], np.ndarray],
+    vars: str | Sequence[str] | None,
+    group_key: str | None,
+    copy: bool,
+    norm_name: str,
+) -> AnnData | None:
+    """Apply a scaling function to selected columns of adata, either globally or per group."""
+
+    if group_key is not None and group_key not in adata.obs_keys():
+        raise KeyError(f"Group key '{group_key}' not found in adata.obs.")
+
+    if isinstance(vars, str):
+        vars = [vars]
+    if vars is None:
+        vars = get_numeric_vars(adata)
+    else:
+        assert_numeric_vars(adata, vars)
+
+    adata = _prep_adata_norm(adata, copy)
+
+    var_idx = get_column_indices(adata, vars)
+    var_values = np.take(adata.X, var_idx, axis=1)
+
+    if group_key is None:
+        var_values = scale_func(var_values)
+
+    else:
+        for group in adata.obs[group_key].unique():
+            group_idx = adata.obs[group_key] == group
+            var_values[group_idx] = scale_func(var_values[group_idx])
+
+    set_numeric_vars(adata, var_values, vars)
+
+    _record_norm(adata, vars, norm_name)
+
+    if copy:
+        return adata
+    else:
+        return None
+
+
+@singledispatch
+def _scale_norm_function(arr):
+    _raise_array_type_not_implemented(_scale_norm_function, type(arr))
+
+
+@_scale_norm_function.register
+def _(arr: np.ndarray, **kwargs):
+    return sklearn_pp.StandardScaler(**kwargs).fit_transform
+
+
+if 
DASK_AVAILABLE: + + @_scale_norm_function.register + def _(arr: da.Array, **kwargs): + return daskml_pp.StandardScaler(**kwargs).fit_transform + + +def scale_norm( + adata: AnnData, + vars: str | Sequence[str] | None = None, + group_key: str | None = None, + copy: bool = False, + **kwargs, +) -> AnnData | None: + """Apply scaling normalization. + + Functionality is provided by :class:`~sklearn.preprocessing.StandardScaler`, see https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html for details. + If `adata.X` is a Dask Array, functionality is provided by :func:`~dask_ml.preprocessing.StandardScaler`, see https://ml.dask.org/modules/generated/dask_ml.preprocessing.StandardScaler.html for details. + + Args: + adata: :class:`~anndata.AnnData` object containing X to normalize values in. Must already be encoded using :func:`~ehrapy.preprocessing.encode`. + vars: List of the names of the numeric variables to normalize. + If None all numeric variables will be normalized. + group_key: Key in adata.obs that contains group information. If provided, scaling is applied per group. + copy: Whether to return a copy or act in place. + **kwargs: Additional arguments passed to the StandardScaler. + + Returns: + `None` if `copy=False` and modifies the passed adata, else returns an updated AnnData object. Also stores a record of applied normalizations as a dictionary in adata.uns["normalization"]. + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.mimic_2(encoded=True) + >>> adata_norm = ep.pp.scale_norm(adata, copy=True) + """ + + scale_func = _scale_norm_function(adata.X, **kwargs) + + return _scale_func_group( + adata=adata, + scale_func=scale_func, + vars=vars, + group_key=group_key, + copy=copy, + norm_name="scale", + ) + + +@singledispatch +def _minmax_norm_function(arr): + _raise_array_type_not_implemented(_minmax_norm_function, type(arr)) + + +@_minmax_norm_function.register +def _(arr: np.ndarray, **kwargs): + return sklearn_pp.MinMaxScaler(**kwargs).fit_transform + + +if DASK_AVAILABLE: + + @_minmax_norm_function.register + def _(arr: da.Array, **kwargs): + return daskml_pp.MinMaxScaler(**kwargs).fit_transform + + +def minmax_norm( + adata: AnnData, + vars: str | Sequence[str] | None = None, + group_key: str | None = None, + copy: bool = False, + **kwargs, +) -> AnnData | None: + """Apply min-max normalization. + + Functionality is provided by :class:`~sklearn.preprocessing.MinMaxScaler`, see https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html for details. + If `adata.X` is a Dask Array, functionality is provided by :func:`~dask_ml.preprocessing.MinMaxScaler`, see https://ml.dask.org/modules/generated/dask_ml.preprocessing.MinMaxScaler.html for details. + + Args: + adata: :class:`~anndata.AnnData` object containing X to normalize values in. + Must already be encoded using :func:`~ehrapy.preprocessing.encode`. + vars: List of the names of the numeric variables to normalize. + If None all numeric variables will be normalized. + group_key: Key in adata.obs that contains group information. If provided, scaling is applied per group. + copy: Whether to return a copy or act in place. + **kwargs: Additional arguments passed to the MinMaxScaler. + + Returns: + `None` if `copy=False` and modifies the passed adata, else returns an updated AnnData object. Also stores a record of applied normalizations as a dictionary in adata.uns["normalization"]. 
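+
+    .. note::
+        Additional keyword arguments such as `feature_range=(0, 5)` are forwarded to :class:`~sklearn.preprocessing.MinMaxScaler` to scale to an interval other than the default (0, 1).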
+
+    Examples:
+        >>> import ehrapy as ep
+        >>> adata = ep.dt.mimic_2(encoded=True)
+        >>> adata_norm = ep.pp.minmax_norm(adata, copy=True)
+    """
+
+    scale_func = _minmax_norm_function(adata.X, **kwargs)
+
+    return _scale_func_group(
+        adata=adata,
+        scale_func=scale_func,
+        vars=vars,
+        group_key=group_key,
+        copy=copy,
+        norm_name="minmax",
+    )
+
+
+@singledispatch
+def _maxabs_norm_function(arr):
+    _raise_array_type_not_implemented(_maxabs_norm_function, type(arr))
+
+
+@_maxabs_norm_function.register
+def _(arr: np.ndarray):
+    return sklearn_pp.MaxAbsScaler().fit_transform
+
+
+def maxabs_norm(
+    adata: AnnData,
+    vars: str | Sequence[str] | None = None,
+    group_key: str | None = None,
+    copy: bool = False,
+) -> AnnData | None:
+    """Apply max-abs normalization.
+
+    Functionality is provided by :class:`~sklearn.preprocessing.MaxAbsScaler`, see https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MaxAbsScaler.html for details.
+
+    Args:
+        adata: :class:`~anndata.AnnData` object containing X to normalize values in.
+            Must already be encoded using :func:`~ehrapy.preprocessing.encode`.
+        vars: List of the names of the numeric variables to normalize.
+            If None all numeric variables will be normalized.
+        group_key: Key in adata.obs that contains group information. If provided, scaling is applied per group.
+        copy: Whether to return a copy or act in place.
+
+    Returns:
+        `None` if `copy=False` and modifies the passed adata, else returns an updated AnnData object. Also stores a record of applied normalizations as a dictionary in adata.uns["normalization"].
+
+    Examples:
+        >>> import ehrapy as ep
+        >>> adata = ep.dt.mimic_2(encoded=True)
+        >>> adata_norm = ep.pp.maxabs_norm(adata, copy=True)
+    """
+
+    scale_func = _maxabs_norm_function(adata.X)
+
+    return _scale_func_group(
+        adata=adata,
+        scale_func=scale_func,
+        vars=vars,
+        group_key=group_key,
+        copy=copy,
+        norm_name="maxabs",
+    )
+
+
+@singledispatch
+def _robust_scale_norm_function(arr, **kwargs):
+    _raise_array_type_not_implemented(_robust_scale_norm_function, type(arr))
+
+
+@_robust_scale_norm_function.register
+def _(arr: np.ndarray, **kwargs):
+    return sklearn_pp.RobustScaler(**kwargs).fit_transform
+
+
+if DASK_AVAILABLE:
+
+    @_robust_scale_norm_function.register
+    def _(arr: da.Array, **kwargs):
+        return daskml_pp.RobustScaler(**kwargs).fit_transform
+
+
+def robust_scale_norm(
+    adata: AnnData,
+    vars: str | Sequence[str] | None = None,
+    group_key: str | None = None,
+    copy: bool = False,
+    **kwargs,
+) -> AnnData | None:
+    """Apply robust scaling normalization.
+
+    Functionality is provided by :class:`~sklearn.preprocessing.RobustScaler`,
+    see https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.RobustScaler.html for details.
+    If `adata.X` is a Dask Array, functionality is provided by :class:`~dask_ml.preprocessing.RobustScaler`, see https://ml.dask.org/modules/generated/dask_ml.preprocessing.RobustScaler.html for details.
+
+    Args:
+        adata: :class:`~anndata.AnnData` object containing X to normalize values in.
+            Must already be encoded using :func:`~ehrapy.preprocessing.encode`.
+        vars: List of the names of the numeric variables to normalize.
+            If None all numeric variables will be normalized.
+        group_key: Key in adata.obs that contains group information. If provided, scaling is applied per group.
+        copy: Whether to return a copy or act in place.
+        **kwargs: Additional arguments passed to the RobustScaler.
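+            For example, `quantile_range=(10.0, 90.0)` scales using the 10th and 90th percentiles instead of the default interquartile range.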
+ + Returns: + `None` if `copy=False` and modifies the passed adata, else returns an updated AnnData object. Also stores a record of applied normalizations as a dictionary in adata.uns["normalization"]. + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.mimic_2(encoded=True) + >>> adata_norm = ep.pp.robust_scale_norm(adata, copy=True) + """ + + scale_func = _robust_scale_norm_function(adata.X, **kwargs) + + return _scale_func_group( + adata=adata, + scale_func=scale_func, + vars=vars, + group_key=group_key, + copy=copy, + norm_name="robust_scale", + ) + + +@singledispatch +def _quantile_norm_function(arr): + _raise_array_type_not_implemented(_quantile_norm_function, type(arr)) + + +@_quantile_norm_function.register +def _(arr: np.ndarray, **kwargs): + return sklearn_pp.QuantileTransformer(**kwargs).fit_transform + + +if DASK_AVAILABLE: + + @_quantile_norm_function.register + def _(arr: da.Array, **kwargs): + return daskml_pp.QuantileTransformer(**kwargs).fit_transform + + +def quantile_norm( + adata: AnnData, + vars: str | Sequence[str] | None = None, + group_key: str | None = None, + copy: bool = False, + **kwargs, +) -> AnnData | None: + """Apply quantile normalization. + + Functionality is provided by :func:`~sklearn.preprocessing.QuantileTransformer`, + see https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.QuantileTransformer.html for details. + If `adata.X` is a Dask Array, functionality is provided by :func:`~dask_ml.preprocessing.QuantileTransformer`, see https://ml.dask.org/modules/generated/dask_ml.preprocessing.QuantileTransformer.html for details. + + Args: + adata: :class:`~anndata.AnnData` object containing X to normalize values in. Must already be encoded using :func:`~ehrapy.preprocessing.encode`. + vars: List of the names of the numeric variables to normalize. + If None all numeric variables will be normalized. + group_key: Key in adata.obs that contains group information. If provided, scaling is applied per group. + copy: Whether to return a copy or act in place. + **kwargs: Additional arguments passed to the QuantileTransformer. + + Returns: + `None` if `copy=False` and modifies the passed adata, else returns an updated AnnData object. Also stores a record of applied normalizations as a dictionary in adata.uns["normalization"]. + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.mimic_2(encoded=True) + >>> adata_norm = ep.pp.quantile_norm(adata, copy=True) + """ + + scale_func = _quantile_norm_function(adata.X, **kwargs) + + return _scale_func_group( + adata=adata, + scale_func=scale_func, + vars=vars, + group_key=group_key, + copy=copy, + norm_name="quantile", + ) + + +@singledispatch +def _power_norm_function(arr, **kwargs): + _raise_array_type_not_implemented(_power_norm_function, type(arr)) + + +@_power_norm_function.register +def _(arr: np.ndarray, **kwargs): + return sklearn_pp.PowerTransformer(**kwargs).fit_transform + + +def power_norm( + adata: AnnData, + vars: str | Sequence[str] | None = None, + group_key: str | None = None, + copy: bool = False, + **kwargs, +) -> AnnData | None: + """Apply power transformation normalization. + + Functionality is provided by :func:`~sklearn.preprocessing.PowerTransformer`, + see https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.PowerTransformer.html for details. + + Args: + adata: :class:`~anndata.AnnData` object containing X to normalize values in. + Must already be encoded using :func:`~ehrapy.preprocessing.encode`. 
+        vars: List of the names of the numeric variables to normalize.
+            If None all numeric variables will be normalized.
+        group_key: Key in adata.obs that contains group information. If provided, scaling is applied per group.
+        copy: Whether to return a copy or act in place.
+        **kwargs: Additional arguments passed to the PowerTransformer.
+
+    Returns:
+        `None` if `copy=False` and modifies the passed adata, else returns an updated AnnData object. Also stores a record of applied normalizations as a dictionary in adata.uns["normalization"].
+
+    Examples:
+        >>> import ehrapy as ep
+        >>> adata = ep.dt.mimic_2(encoded=True)
+        >>> adata_norm = ep.pp.power_norm(adata, copy=True)
+    """
+
+    scale_func = _power_norm_function(adata.X, **kwargs)
+
+    return _scale_func_group(
+        adata=adata,
+        scale_func=scale_func,
+        vars=vars,
+        group_key=group_key,
+        copy=copy,
+        norm_name="power",
+    )
+
+
+def log_norm(
+    adata: AnnData,
+    vars: str | Sequence[str] | None = None,
+    base: int | float | None = None,
+    offset: int | float = 1,
+    copy: bool = False,
+) -> AnnData | None:
+    """Apply log normalization.
+
+    Computes :math:`x = \\log(x + offset)`, where :math:`\\log` denotes the natural logarithm
+    unless a different base is given; the default :math:`offset` is :math:`1`.
+
+    Args:
+        adata: :class:`~anndata.AnnData` object containing X to normalize values in. Must already be encoded using :func:`~ehrapy.preprocessing.encode`.
+        vars: List of the names of the numeric variables to normalize.
+            If None all numeric variables will be normalized.
+        base: Numeric base for logarithm. If None the natural logarithm is used.
+        offset: Offset added to values before computing the logarithm.
+        copy: Whether to return a copy or act in place.
+
+    Returns:
+        `None` if `copy=False` and modifies the passed adata, else returns an updated AnnData object. Also stores a record of applied normalizations as a dictionary in adata.uns["normalization"].
+
+    Examples:
+        >>> import ehrapy as ep
+        >>> adata = ep.dt.mimic_2(encoded=True)
+        >>> adata_norm = ep.pp.log_norm(adata, copy=True)
+    """
+    if isinstance(vars, str):
+        vars = [vars]
+    if vars is None:
+        vars = get_numeric_vars(adata)
+    else:
+        assert_numeric_vars(adata, vars)
+
+    adata = _prep_adata_norm(adata, copy)
+
+    adata_to_check_for_negatives = adata[:, vars] if vars else adata
+    offset_tmp_applied = adata_to_check_for_negatives.X + offset
+    if np.any(offset_tmp_applied < 0):
+        raise ValueError(
+            "Matrix X contains negative values. "
+            "Undefined behavior for log normalization. "
+            "Please specify a higher offset to this function "
+            "or offset negative values with ep.pp.offset_negative_values()."
+        )
+
+    var_idx = get_column_indices(adata, vars)
+    var_values = np.take(adata.X, var_idx, axis=1)
+
+    if offset == 1:
+        np.log1p(var_values, out=var_values)
+    else:
+        var_values = var_values + offset
+        np.log(var_values, out=var_values)
+
+    if base is not None:
+        np.divide(var_values, np.log(base), out=var_values)
+
+    set_numeric_vars(adata, var_values, vars)
+
+    _record_norm(adata, vars, "log")
+
+    return adata if copy else None
+
+
+def _prep_adata_norm(adata: AnnData, copy: bool = False) -> AnnData | None:  # pragma: no cover
+    if copy:
+        adata = adata.copy()
+
+    if "raw_norm" not in adata.layers.keys():
+        adata.layers["raw_norm"] = adata.X.copy()
+
+    return adata
+
+
+def _record_norm(adata: AnnData, vars: Sequence[str], method: str) -> None:
+    if "normalization" in adata.uns_keys():
+        norm_record = adata.uns["normalization"]
+    else:
+        norm_record = {}
+
+    for var in vars:
+        if var in norm_record.keys():
+            norm_record[var].append(method)
+        else:
+            norm_record[var] = [method]
+
+    adata.uns["normalization"] = norm_record
+
+
+def offset_negative_values(adata: AnnData, layer: str | None = None, copy: bool = False) -> AnnData | None:
+    """Offsets negative values into positive ones with the lowest negative value becoming 0.
+
+    This is primarily used to enable the usage of functions such as log_norm that
+    do not allow negative values for mathematical or technical reasons.
+
+    Args:
+        adata: :class:`~anndata.AnnData` object containing X to normalize values in.
+        layer: The layer to offset.
+        copy: Whether to return a modified copy of the AnnData object.
+
+    Returns:
+        A modified copy of the AnnData object if copy is True, otherwise None (adata is modified in place).
+    """
+    if copy:
+        adata = adata.copy()
+
+    if layer:
+        minimum = np.min(adata.layers[layer])
+        if minimum < 0:
+            adata.layers[layer] = adata.layers[layer] + np.abs(minimum)
+    else:
+        minimum = np.min(adata.X)
+        if minimum < 0:
+            adata.X = adata.X + np.abs(minimum)
+
+    return adata if copy else None
diff --git a/data/ehrapy/preprocessing/_outliers.py b/data/ehrapy/preprocessing/_outliers.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b5a2f49e45ed84022c16a59efc8b077250fb7b9
--- /dev/null
+++ b/data/ehrapy/preprocessing/_outliers.py
@@ -0,0 +1,124 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+import numpy as np
+import pandas as pd
+import scipy.stats.mstats
+
+if TYPE_CHECKING:
+    from collections.abc import Collection
+
+    from anndata import AnnData
+
+
+def winsorize(
+    adata: AnnData,
+    vars: Collection[str] | None = None,
+    obs_cols: Collection[str] | None = None,
+    *,
+    limits: tuple[float, float] = (0.01, 0.99),
+    copy: bool = False,
+    **kwargs,
+) -> AnnData | None:
+    """Returns a winsorized version of the input data.
+
+    The implementation is based on https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.winsorize.html
+
+    Args:
+        adata: AnnData object to winsorize.
+        vars: The features to winsorize.
+        obs_cols: Columns in obs with features to winsorize.
+        limits: Tuple of the percentages to cut on each side of the array as floats between 0. and 1.
+        copy: Whether to return a copy.
+        **kwargs: Keyword arguments are passed to scipy.stats.mstats.winsorize.
+
+    Returns:
+        Winsorized AnnData object if copy is True.
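+        Otherwise, `adata` is modified in place and `None` is returned.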
+
+    Examples:
+        >>> import ehrapy as ep
+        >>> adata = ep.dt.mimic_2(encoded=True)
+        >>> ep.pp.winsorize(adata, vars=["bmi"])
+    """
+    if copy:  # pragma: no cover
+        adata = adata.copy()
+
+    obs_cols_set, vars_set = _validate_outlier_input(adata, obs_cols, vars)
+
+    if vars_set:
+        for var in vars_set:
+            data_array = np.array(adata[:, var].X, dtype=float)
+            winsorized_data = scipy.stats.mstats.winsorize(data_array, limits=limits, nan_policy="omit", **kwargs)
+            adata[:, var].X = winsorized_data
+
+    if obs_cols_set:
+        for col in obs_cols_set:
+            obs_array = adata.obs[col].to_numpy(dtype=float)
+            winsorized_obs = scipy.stats.mstats.winsorize(obs_array, limits=limits, nan_policy="omit", **kwargs)
+            adata.obs[col] = pd.Series(winsorized_obs).values
+
+    return adata if copy else None
+
+
+def clip_quantile(
+    adata: AnnData,
+    limits: tuple[float, float],
+    vars: Collection[str] | None = None,
+    obs_cols: Collection[str] | None = None,
+    *,
+    copy: bool = False,
+) -> AnnData | None:
+    """Clips (limits) features.
+
+    Given an interval, values outside the interval are clipped to the interval edges.
+
+    Args:
+        adata: The AnnData object to clip.
+        limits: Values outside the interval are clipped to the interval edges.
+        vars: Columns in var with features to clip.
+        obs_cols: Columns in obs with features to clip.
+        copy: Whether to return a copy of the AnnData object or not.
+
+    Returns:
+        A copy of the original AnnData object with clipped features if copy is True, otherwise None.
+
+    Examples:
+        >>> import ehrapy as ep
+        >>> adata = ep.dt.mimic_2(encoded=True)
+        >>> ep.pp.clip_quantile(adata, limits=(0, 100), vars=["bmi"])
+    """
+    if copy:  # pragma: no cover
+        adata = adata.copy()
+
+    obs_cols, vars = _validate_outlier_input(adata, obs_cols, vars)  # type: ignore
+
+    if vars:
+        for var in vars:
+            adata[:, var].X = np.clip(adata[:, var].X, limits[0], limits[1])
+
+    if obs_cols:
+        for col in obs_cols:
+            obs_array = adata.obs[col].to_numpy()
+            clipped_array = np.clip(obs_array, limits[0], limits[1])
+            adata.obs[col] = pd.Series(clipped_array).values
+
+    return adata if copy else None
+
+
+def _validate_outlier_input(adata, obs_cols: Collection[str], vars: Collection[str]) -> tuple[set[str], set[str]]:
+    """Validates the obs/var columns for outlier preprocessing."""
+    vars = set(vars) if vars else set()
+    obs_cols = set(obs_cols) if obs_cols else set()
+
+    diff = vars - set(adata.var_names)
+    if len(diff) != 0:
+        raise ValueError(f"Columns {','.join(diff)} are not in var_names.")
+    diff = obs_cols - set(adata.obs.columns.values)
+    if len(diff) != 0:
+        raise ValueError(f"Columns {','.join(diff)} are not in obs.")
+
+    return obs_cols, vars
diff --git a/data/ehrapy/preprocessing/_quality_control.py b/data/ehrapy/preprocessing/_quality_control.py
new file mode 100644
index 0000000000000000000000000000000000000000..7a018e42a4eb2a68d4dc40261a23715ae1cb743d
--- /dev/null
+++ b/data/ehrapy/preprocessing/_quality_control.py
@@ -0,0 +1,395 @@
+from __future__ import annotations
+
+import copy
+from pathlib import Path
+from typing import TYPE_CHECKING, Literal
+
+import numpy as np
+import pandas as pd
+from lamin_utils import logger
+from thefuzz import process
+
+from ehrapy.anndata import anndata_to_df
+from ehrapy.preprocessing._encoding import _get_encoded_features
+
+if TYPE_CHECKING:
+    from collections.abc import Collection
+
+    from anndata import AnnData
+
+
+def qc_metrics(
+    adata: AnnData, qc_vars: Collection[str] = (), layer: str | None = None
+) -> tuple[pd.DataFrame, pd.DataFrame] | None:
+    """Calculates various quality 
control metrics.
+
+    Uses the original values to calculate the metrics and not the encoded ones.
+    Look at the return type for a more in-depth description of the calculated metrics.
+
+    Args:
+        adata: Annotated data matrix.
+        qc_vars: Optional list of vars to calculate additional metrics for.
+        layer: Layer to use to calculate the metrics.
+
+    Returns:
+        Two Pandas DataFrames of all calculated QC metrics for `obs` and `var` respectively.
+
+        Observation level metrics include:
+
+        - `missing_values_abs`: Absolute amount of missing values.
+        - `missing_values_pct`: Relative amount of missing values in percent.
+
+        Feature level metrics include:
+
+        - `missing_values_abs`: Absolute amount of missing values.
+        - `missing_values_pct`: Relative amount of missing values in percent.
+        - `mean`: Mean value of the features.
+        - `median`: Median value of the features.
+        - `standard_deviation`: Standard deviation of the features.
+        - `min`: Minimum value of the features.
+        - `max`: Maximum value of the features.
+        - `iqr_outliers`: Whether the feature contains values outside `Q1 - 1.5 * IQR` / `Q3 + 1.5 * IQR`.
+
+    Examples:
+        >>> import ehrapy as ep
+        >>> adata = ep.dt.mimic_2(encoded=True)
+        >>> obs_qc, var_qc = ep.pp.qc_metrics(adata)
+        >>> obs_qc["missing_values_pct"].plot(kind="hist", bins=20)
+    """
+    obs_metrics = _obs_qc_metrics(adata, layer, qc_vars)
+    var_metrics = _var_qc_metrics(adata, layer)
+
+    adata.obs[obs_metrics.columns] = obs_metrics
+    adata.var[var_metrics.columns] = var_metrics
+
+    return obs_metrics, var_metrics
+
+
+def _missing_values(
+    arr: np.ndarray, mode: Literal["abs", "pct"] = "abs", df_type: Literal["obs", "var"] = "obs"
+) -> float:
+    """Calculates the absolute or relative amount of missing values.
+
+    Args:
+        arr: Numpy array containing a data row which is a subset of X (mtx).
+        mode: Whether to calculate the absolute amount or the percentage of missing values.
+        df_type: Whether to calculate the proportions for obs or var. One of 'obs' or 'var'.
+
+    Returns:
+        Absolute or relative amount of missing values.
+    """
+    num_missing = pd.isnull(arr).sum()
+    if mode == "abs":
+        return num_missing
+    elif mode == "pct":
+        total_elements = arr.shape[0] if df_type == "obs" else len(arr)
+        return (num_missing / total_elements) * 100
+
+
+def _obs_qc_metrics(
+    adata: AnnData, layer: str | None = None, qc_vars: Collection[str] = (), log1p: bool = True
+) -> pd.DataFrame:
+    """Calculates quality control metrics for observations.
+
+    See :func:`~ehrapy.preprocessing._quality_control.qc_metrics` for a list of calculated metrics.
+
+    Args:
+        adata: Annotated data matrix.
+        layer: Layer containing the actual data matrix.
+        qc_vars: A list of previously calculated QC metrics to calculate summary statistics for.
+        log1p: Whether to apply log1p normalization for the QC metrics. Only used with parameter 'qc_vars'.
+
+    Returns:
+        A Pandas DataFrame with the calculated metrics.
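+
+        For encoded categorical features, the original values stored in `adata.obs` are used,
+        so missing values are counted on the unencoded data.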
+ """ + obs_metrics = pd.DataFrame(index=adata.obs_names) + var_metrics = pd.DataFrame(index=adata.var_names) + mtx = adata.X if layer is None else adata.layers[layer] + + if "encoding_mode" in adata.var: + for original_values_categorical in _get_encoded_features(adata): + mtx = mtx.astype(object) + index = np.where(var_metrics.index.str.contains(original_values_categorical))[0] + + if original_values_categorical not in adata.obs.keys(): + raise KeyError(f"Original values for {original_values_categorical} not found in adata.obs.") + mtx[:, index[0]] = np.squeeze( + np.where( + adata.obs[original_values_categorical].astype(object) == "nan", + np.nan, + adata.obs[original_values_categorical].astype(object), + ) + ) + + obs_metrics["missing_values_abs"] = np.apply_along_axis(_missing_values, 1, mtx, mode="abs") + obs_metrics["missing_values_pct"] = np.apply_along_axis(_missing_values, 1, mtx, mode="pct", df_type="obs") + + # Specific QC metrics + for qc_var in qc_vars: + obs_metrics[f"total_features_{qc_var}"] = np.ravel(mtx[:, adata.var[qc_var].values].sum(axis=1)) + if log1p: + obs_metrics[f"log1p_total_features_{qc_var}"] = np.log1p(obs_metrics[f"total_features_{qc_var}"]) + obs_metrics["total_features"] = np.ravel(mtx.sum(axis=1)) + obs_metrics[f"pct_features_{qc_var}"] = ( + obs_metrics[f"total_features_{qc_var}"] / obs_metrics["total_features"] * 100 + ) + + return obs_metrics + + +def _var_qc_metrics(adata: AnnData, layer: str | None = None) -> pd.DataFrame: + var_metrics = pd.DataFrame(index=adata.var_names) + mtx = adata.X if layer is None else adata.layers[layer] + categorical_indices = np.ndarray([0], dtype=int) + + if "encoding_mode" in adata.var.keys(): + for original_values_categorical in _get_encoded_features(adata): + mtx = copy.deepcopy(mtx.astype(object)) + index = np.where(var_metrics.index.str.startswith("ehrapycat_" + original_values_categorical))[0] + + if original_values_categorical not in adata.obs.keys(): + raise KeyError(f"Original values for {original_values_categorical} not found in adata.obs.") + mtx[:, index] = np.tile( + np.where( + adata.obs[original_values_categorical].astype(object) == "nan", + np.nan, + adata.obs[original_values_categorical].astype(object), + ).reshape(-1, 1), + mtx[:, index].shape[1], + ) + categorical_indices = np.concatenate([categorical_indices, index]) + non_categorical_indices = np.ones(mtx.shape[1], dtype=bool) + non_categorical_indices[categorical_indices] = False + var_metrics["missing_values_abs"] = np.apply_along_axis(_missing_values, 0, mtx, mode="abs") + var_metrics["missing_values_pct"] = np.apply_along_axis(_missing_values, 0, mtx, mode="pct", df_type="var") + + var_metrics["mean"] = np.nan + var_metrics["median"] = np.nan + var_metrics["standard_deviation"] = np.nan + var_metrics["min"] = np.nan + var_metrics["max"] = np.nan + + try: + var_metrics.loc[non_categorical_indices, "mean"] = np.nanmean( + np.array(mtx[:, non_categorical_indices], dtype=np.float64), axis=0 + ) + var_metrics.loc[non_categorical_indices, "median"] = np.nanmedian( + np.array(mtx[:, non_categorical_indices], dtype=np.float64), axis=0 + ) + var_metrics.loc[non_categorical_indices, "standard_deviation"] = np.nanstd( + np.array(mtx[:, non_categorical_indices], dtype=np.float64), axis=0 + ) + var_metrics.loc[non_categorical_indices, "min"] = np.nanmin( + np.array(mtx[:, non_categorical_indices], dtype=np.float64), axis=0 + ) + var_metrics.loc[non_categorical_indices, "max"] = np.nanmax( + np.array(mtx[:, non_categorical_indices], dtype=np.float64), axis=0 + 
        )
+
+        # Calculate IQR and define IQR outliers
+        q1 = np.nanpercentile(mtx[:, non_categorical_indices], 25, axis=0)
+        q3 = np.nanpercentile(mtx[:, non_categorical_indices], 75, axis=0)
+        iqr = q3 - q1
+        lower_bound = q1 - 1.5 * iqr
+        upper_bound = q3 + 1.5 * iqr
+        var_metrics.loc[non_categorical_indices, "iqr_outliers"] = (
+            (mtx[:, non_categorical_indices] < lower_bound) | (mtx[:, non_categorical_indices] > upper_bound)
+        ).any(axis=0)
+        # Fill NaNs (categorical features) with False before casting; astype(bool) would turn NaN into True
+        # and leave a dtype object Series which h5py cannot save
+        var_metrics["iqr_outliers"] = var_metrics["iqr_outliers"].fillna(False).astype(bool)
+        var_metrics = var_metrics.infer_objects()
+    except (TypeError, ValueError):
+        # We assume that the data just hasn't been encoded yet
+        pass
+
+    return var_metrics
+
+
+def qc_lab_measurements(
+    adata: AnnData,
+    reference_table: pd.DataFrame | None = None,
+    measurements: list[str] | None = None,
+    unit: Literal["traditional", "SI"] | None = None,
+    layer: str | None = None,
+    threshold: int = 20,
+    age_col: str | None = None,
+    age_range: str | None = None,
+    sex_col: str | None = None,
+    sex: str | None = None,
+    ethnicity_col: str | None = None,
+    ethnicity: str | None = None,
+    copy: bool = False,
+    verbose: bool = False,
+) -> AnnData | None:
+    """Examines lab measurements for reference ranges and outliers.
+
+    Source:
+        The used reference values were obtained from https://accessmedicine.mhmedical.com/content.aspx?bookid=1069&sectionid=60775149 .
+        This table is compiled from data in the following sources:
+
+        * Tietz NW, ed. Clinical Guide to Laboratory Tests. 3rd ed. Philadelphia: WB Saunders Co; 1995;
+        * Laposata M. SI Unit Conversion Guide. Boston: NEJM Books; 1992;
+        * American Medical Association Manual of Style: A Guide for Authors and Editors. 9th ed. Chicago: AMA; 1998:486–503. Copyright 1998, American Medical Association;
+        * Jacobs DS, DeMott WR, Oxley DK, eds. Jacobs & DeMott Laboratory Test Handbook With Key Word Index. 5th ed. Hudson, OH: Lexi-Comp Inc; 2001;
+        * Henry JB, ed. Clinical Diagnosis and Management by Laboratory Methods. 20th ed. Philadelphia: WB Saunders Co; 2001;
+        * Kratz A, et al. Laboratory reference values. N Engl J Med. 2006;351:1548–1563;
+        * Burtis CA, ed. Tietz Textbook of Clinical Chemistry and Molecular Diagnostics. 5th ed. St. Louis: Elsevier; 2012.
+
+        This version of the table of reference ranges was reviewed and updated by Jessica Franco-Colon, PhD, and Kay Brooks.
+
+    Limitations:
+        * Reference ranges differ between continents, countries and even laboratories (https://informatics.bmj.com/content/28/1/e100419).
+          The default values used here are only one of many options.
+        * Ensure that the values used as input are provided with the correct units. We recommend the usage of SI values.
+        * The reference values pertain to adults. Many of the reference ranges need to be adapted for children.
+        * By default, if no sex is provided and no unisex values are available, we use the **male** reference ranges.
+        * The used reference ranges may be biased for ethnicity. Please examine the primary sources if required.
+        * We recommend a glance at https://www.nature.com/articles/s41591-021-01468-6 for the effect of such covariates.
+
+    Additional values:
+        * Interleukin-6 based on https://pubmed.ncbi.nlm.nih.gov/33155686/
+
+    If you want to specify your own table as a Pandas DataFrame please examine the existing default table.
+    Ethnicity and age columns can be added.
+
+    https://github.com/theislab/ehrapy/blob/main/ehrapy/preprocessing/laboratory_reference_tables/laposata.tsv
+
+    Args:
+        adata: Annotated data matrix.
+        reference_table: A custom DataFrame with reference values. Defaults to the laposata table if not specified.
+        measurements: A list of measurements to check.
+        unit: The unit of the measurements.
+        layer: Layer containing the matrix to calculate the metrics for.
+        threshold: Minimum required matching confidence score of the fuzzy search.
+            0 accepts every match, 100 requires an exact match.
+        age_col: Column containing age values.
+        age_range: The inclusive age range to filter for, such as '5-99'.
+        sex_col: Column containing sex values. Column must contain 'U', 'M' or 'F'.
+        sex: Sex to filter the reference values for. Use 'U' for unisex, which uses male values when male and female conflict.
+        ethnicity_col: Column containing ethnicity values.
+        ethnicity: Ethnicity to filter for.
+        copy: Whether to return a copy.
+        verbose: Whether to have verbose stdout. Notifies user of matched columns and value ranges.
+
+    Returns:
+        A modified AnnData object (copy if specified).
+
+    Examples:
+        >>> import ehrapy as ep
+        >>> adata = ep.dt.mimic_2(encoded=True)
+        >>> ep.pp.qc_lab_measurements(adata, measurements=["potassium_first"], verbose=True)
+    """
+    if copy:
+        adata = adata.copy()
+
+    preprocessing_dir = Path(__file__).parent.resolve()
+    if reference_table is None:
+        reference_table = pd.read_csv(
+            f"{preprocessing_dir}/laboratory_reference_tables/laposata.tsv", sep="\t", index_col="Measurement"
+        )
+
+    for measurement in measurements:
+        # extractOne returns None when no match reaches the score cutoff, so check before unpacking
+        best_match = process.extractOne(
+            query=measurement, choices=reference_table.index, score_cutoff=threshold
+        )
+        if best_match is None:
+            logger.warning(f"Unable to find a match for {measurement}")
+            continue
+        best_column_match, score = best_match
+        if verbose:
+            logger.info(f"Detected '{best_column_match}' for '{measurement}' with score {score}.")
+
+        reference_column = "SI Reference Interval" if unit == "SI" else "Traditional Reference Interval"
+
+        # Fetch all non None columns from the reference statistics
+        not_none_columns = [col for col in [sex_col, age_col, ethnicity_col] if col is not None]
+        not_none_columns.append(reference_column)
+        reference_values = reference_table.loc[[best_column_match], not_none_columns]
+
+        # check if additional columns were provided
+        additional_columns = sex_col is not None or age_col is not None or ethnicity_col is not None
+
+        # Check if multiple reference values occur and no additional information is available:
+        if reference_values.shape[0] > 1 and not additional_columns:
+            raise ValueError(
+                f"Several options for {best_column_match} reference value are available. Please specify sex, age or "
+                "ethnicity columns and their values."
+            )
+
+        try:
+            if age_col:
+                min_age, max_age = age_range.split("-")
+                # Use the element-wise `&` operator; chaining boolean Series with `and` raises a ValueError
+                reference_values = reference_values[
+                    (reference_values[age_col].str.split("-").str[0].astype(int) >= int(min_age))
+                    & (reference_values[age_col].str.split("-").str[1].astype(int) <= int(max_age))
+                ]
+            if sex_col:
+                sexes = "U|M" if sex is None else sex
+                reference_values = reference_values[reference_values[sex_col].str.contains(sexes)]
+            if ethnicity_col:
+                reference_values = reference_values[reference_values[ethnicity_col].isin([ethnicity])]
+
+            if layer is not None:
+                actual_measurements = adata[:, measurement].layers[layer]
+            else:
+                actual_measurements = adata[:, measurement].X
+        except TypeError:
+            logger.warning(f"Unable to find specified reference values for {measurement}.")
+            # skip this measurement; the reference filtering or matrix lookup failed
+            continue
+
+        check = reference_values[reference_column].values
+        check_str: str = np.array2string(check)
+        check_str = check_str.replace("[", "").replace("]", "").replace("'", "")
+        if "<" in check_str:
+            upperbound = float(check_str.replace("<", ""))
+            if verbose:
+                logger.info(f"Using upperbound {upperbound}")
+
+            upperbound_check_results = actual_measurements < upperbound
+            upperbound_check_results_array: np.ndarray = upperbound_check_results.copy()
+            adata.obs[f"{measurement} normal"] = upperbound_check_results_array
+        elif ">" in check_str:
+            lower_bound = float(check_str.replace(">", ""))
+            if verbose:
+                logger.info(f"Using lowerbound {lower_bound}")
+
+            lower_bound_check_results = actual_measurements > lower_bound
+            lower_bound_check_results_array = lower_bound_check_results.copy()
+            adata.obs[f"{measurement} normal"] = lower_bound_check_results_array
+        else:  # "-" range case
+            min_value = float(check_str.split("-")[0])
+            max_value = float(check_str.split("-")[1])
+            if verbose:
+                logger.info(f"Using minimum of {min_value} and maximum of {max_value}")
+
+            range_check_results = (actual_measurements >= min_value) & (actual_measurements <= max_value)
+            range_check_results_array: np.ndarray = range_check_results.copy()
+            adata.obs[f"{measurement} normal"] = range_check_results_array
+
+    if copy:
+        return adata
+
+
+def mcar_test(
+    adata: AnnData, method: Literal["little", "ttest"] = "little", *, layer: str | None = None
+) -> float | pd.DataFrame:
+    """Statistical hypothesis test for Missing Completely At Random (MCAR).
+
+    The null hypothesis of Little's test is that the data is Missing Completely At Random (MCAR).
+
+    We advise using Little's MCAR test with care.
+    Rejecting the null hypothesis may not always mean that the data is not MCAR, nor is accepting the null hypothesis a guarantee that the data is MCAR.
+    See Schouten, R. M., & Vink, G. (2021). The Dance of the Mechanisms: How Observed Information Influences the Validity of Missingness Assumptions.
+    Sociological Methods & Research, 50(3), 1243-1258. https://doi.org/10.1177/0049124118799376
+    for a thorough discussion of missingness mechanisms.
+
+    Args:
+        adata: Annotated data matrix.
+        method: Whether to perform a chi-square test on the entire dataset ("little") or separate t-tests for every combination of variables ("ttest").
+        layer: Layer to apply the test to. Uses the X matrix if set to `None`.
+
+    Returns:
+        A single p-value if Little's test was applied, or a Pandas DataFrame of p-values of the t-tests for each pair of features.
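+
+    Examples:
+        >>> import ehrapy as ep
+        >>> adata = ep.dt.mimic_2(encoded=True)
+        >>> p_value = ep.pp.mcar_test(adata)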
+    """
+    from pyampute.exploration.mcar_statistical_tests import MCARTest
+
+    df = anndata_to_df(adata, layer=layer)
+    mt = MCARTest(method=method)
+
+    return mt(df)
diff --git a/data/ehrapy/preprocessing/_scanpy_pp_api.py b/data/ehrapy/preprocessing/_scanpy_pp_api.py
new file mode 100644
index 0000000000000000000000000000000000000000..e0e5022178168ceecd43771182fcfe66f5c84b94
--- /dev/null
+++ b/data/ehrapy/preprocessing/_scanpy_pp_api.py
@@ -0,0 +1,281 @@
+from __future__ import annotations
+
+from collections.abc import Callable
+from types import MappingProxyType
+from typing import TYPE_CHECKING, Any, Literal, TypeAlias
+
+import numpy as np
+import scanpy as sc
+
+if TYPE_CHECKING:
+    from collections.abc import Collection, Mapping, Sequence
+
+    from anndata import AnnData
+    from scanpy.neighbors import KnnTransformerLike
+    from scipy.sparse import spmatrix
+
+    from ehrapy.preprocessing._types import KnownTransformer
+
+AnyRandom: TypeAlias = int | np.random.RandomState | None
+
+
+def pca(
+    data: AnnData | np.ndarray | spmatrix,
+    n_comps: int | None = None,
+    zero_center: bool | None = True,
+    svd_solver: str = "arpack",
+    random_state: AnyRandom = 0,
+    return_info: bool = False,
+    dtype: str = "float32",
+    copy: bool = False,
+    chunked: bool = False,
+    chunk_size: int | None = None,
+) -> AnnData | np.ndarray | spmatrix | None:  # pragma: no cover
+    """Computes a principal component analysis.
+
+    Computes PCA coordinates, loadings and variance decomposition. Uses the implementation of *scikit-learn*.
+
+    Args:
+        data: The (annotated) data matrix of shape `n_obs` × `n_vars`. Rows correspond to observations and columns to features.
+        n_comps: Number of principal components to compute.
+            Defaults to 50, or to one less than the minimum dimension size of the selected representation, whichever is smaller.
+        zero_center: If `True`, compute standard PCA from covariance matrix.
+            If `False`, omit zero-centering variables (uses :class:`~sklearn.decomposition.TruncatedSVD`), which allows handling sparse input efficiently.
+            Passing `None` decides automatically based on sparseness of the data.
+        svd_solver: SVD solver to use:
+
+            * `'arpack'` (the default) for the ARPACK wrapper in SciPy (:func:`~scipy.sparse.linalg.svds`)
+
+            * `'randomized'` for the randomized algorithm due to Halko (2009).
+
+            * `'auto'` chooses automatically depending on the size of the problem.
+
+            * `'lobpcg'` An alternative SciPy solver.
+
+            Efficient computation of the principal components of a sparse matrix currently only works with the `'arpack'` or `'lobpcg'` solvers.
+        random_state: Change to use different initial states for the optimization.
+        return_info: Only relevant when not passing an :class:`~anndata.AnnData`: see “**Returns**”.
+        dtype: Numpy data type string to which to convert the result.
+        copy: If an :class:`~anndata.AnnData` is passed, determines whether a copy is returned. Is ignored otherwise.
+        chunked: If `True`, perform an incremental PCA on segments of `chunk_size`.
+            The incremental PCA automatically zero centers and ignores settings of
+            `random_state` and `svd_solver`. If `False`, perform a full PCA.
+        chunk_size: Number of observations to include in each chunk. Required if `chunked=True` was passed.
+
+    Returns:
+        :X_pca: :class:`~scipy.sparse.spmatrix`, :class:`~numpy.ndarray`
+
+            If `data` is array-like and `return_info=False` was passed, this function only returns `X_pca`...
+
+        adata : :class:`~anndata.AnnData`
+
+            …otherwise if `copy=True` it returns the fields below, or else adds them to `adata`:
+
+            `.obsm['X_pca']`
+            PCA representation of data.
+ + `.varm['PCs']` + The principal components containing the loadings. + + `.uns['pca']['variance_ratio']` + Ratio of explained variance. + + `.uns['pca']['variance']` + Explained variance, equivalent to the eigenvalues of the covariance matrix. + """ + return sc.pp.pca( + data=data, + n_comps=n_comps, + zero_center=zero_center, + svd_solver=svd_solver, + random_state=random_state, + return_info=return_info, + use_highly_variable=False, + dtype=dtype, + copy=copy, + chunked=chunked, + chunk_size=chunk_size, + ) + + +def regress_out( + adata: AnnData, + keys: str | Sequence[str], + n_jobs: int | None = None, + copy: bool = False, +) -> AnnData | None: # pragma: no cover + """Regress out (mostly) unwanted sources of variation. + + Uses simple linear regression. This is inspired by Seurat's `regressOut` function in R [Satija15]_. + Note that this function tends to overcorrect in certain circumstances. + + Args: + adata: :class:`~anndata.AnnData` object containing all observations. + keys: Keys of observation annotations on which to regress. + n_jobs: Number of jobs for parallel computation. `None` means using :attr:`scanpy._settings.ScanpyConfig.n_jobs`. + copy: Determines whether a copy of `adata` is returned. + + Returns: + Depending on `copy` returns or updates an :class:`~anndata.AnnData` object with the corrected data matrix. + """ + return sc.pp.regress_out(adata=adata, keys=keys, n_jobs=n_jobs, copy=copy) + + +def subsample( + data: AnnData | np.ndarray | spmatrix, + fraction: float | None = None, + n_obs: int | None = None, + random_state: AnyRandom = 0, + copy: bool = False, +) -> AnnData | None: # pragma: no cover + """Subsample to a fraction of the number of observations. + + Args: + data: The (annotated) data matrix of shape `n_obs` × `n_vars`. Rows correspond to observations (patients) and columns to features. + fraction: Subsample to this `fraction` of the number of observations. + n_obs: Subsample to this number of observations. + random_state: Random seed to change subsampling. + copy: If an :class:`~anndata.AnnData` is passed, determines whether a copy is returned. + + Returns: + Returns `X[obs_indices], obs_indices` if data is array-like, otherwise subsamples the passed + :class:`~anndata.AnnData` (`copy == False`) or returns a subsampled copy of it (`copy == True`). + """ + return sc.pp.subsample(data=data, fraction=fraction, n_obs=n_obs, random_state=random_state, copy=copy) + + +def combat( + adata: AnnData, + key: str = "batch", + covariates: Collection[str] | None = None, + inplace: bool = True, +) -> AnnData | np.ndarray | None: # pragma: no cover + """ComBat function for batch effect correction [Johnson07]_ [Leek12]_ [Pedersen12]_. + + Corrects for batch effects by fitting linear models, gains statistical power via an empirical Bayes (EB) framework where information is borrowed across features. + This uses the implementation `combat.py`_ [Pedersen12]_. + + .. _combat.py: https://github.com/brentp/combat.py + + Args: + adata: :class:`~anndata.AnnData` object containing all observations. + key: Key to a categorical annotation from :attr:`~anndata.AnnData.obs` that will be used for batch effect removal. + covariates: Additional covariates besides the batch variable such as adjustment variables or biological condition. + This parameter refers to the design matrix `X` in Equation 2.1 in [Johnson07]_ and to the `mod` argument in + the original combat function in the sva R package.
+ Note that not including covariates may introduce bias or lead to the removal of signal in unbalanced designs. + inplace: Whether to replace adata.X or to return the corrected data. + + Returns: + Depending on the value of `inplace`, either returns the corrected matrix or modifies `adata.X`. + """ + return sc.pp.combat(adata=adata, key=key, covariates=covariates, inplace=inplace) + + +_Method = Literal["umap", "gauss"] +_MetricFn = Callable[[np.ndarray, np.ndarray], float] +_MetricSparseCapable = Literal["cityblock", "cosine", "euclidean", "l1", "l2", "manhattan"] +_MetricScipySpatial = Literal[ + "braycurtis", + "canberra", + "chebyshev", + "correlation", + "dice", + "hamming", + "jaccard", + "kulsinski", + "mahalanobis", + "minkowski", + "rogerstanimoto", + "russellrao", + "seuclidean", + "sokalmichener", + "sokalsneath", + "sqeuclidean", + "yule", +] +_Metric = _MetricSparseCapable | _MetricScipySpatial + + +def neighbors( + adata: AnnData, + n_neighbors: int = 15, + n_pcs: int | None = None, + use_rep: str | None = None, + knn: bool = True, + random_state: AnyRandom = 0, + method: _Method = "umap", + transformer: KnnTransformerLike | KnownTransformer | None = None, + metric: _Metric | _MetricFn = "euclidean", + metric_kwds: Mapping[str, Any] = MappingProxyType({}), + key_added: str | None = None, + copy: bool = False, +) -> AnnData | None: # pragma: no cover + """Compute a neighborhood graph of observations [McInnes18]_. + + The neighbor search efficiency of this function heavily relies on UMAP [McInnes18]_, + which also provides a method for estimating connectivities of data points - + the connectivity of the manifold (`method=='umap'`). If `method=='gauss'`, + connectivities are computed according to [Coifman05]_, in the adaptation of [Haghverdi16]_. + + Args: + adata: :class:`~anndata.AnnData` object containing all observations. + n_neighbors: The size of local neighborhood (in terms of number of neighboring data points) used for manifold approximation. + Larger values result in more global views of the manifold, while smaller values result in more local data being preserved. + In general values should be in the range 2 to 100. If `knn` is `True`, number of nearest neighbors to be searched. + If `knn` is `False`, a Gaussian kernel width is set to the distance of the `n_neighbors` neighbor. + n_pcs: Use this many PCs. If `n_pcs==0` use `.X` if `use_rep is None`. + use_rep: Use the indicated representation. `'X'` or any key for `.obsm` is valid. + If `None`, the representation is chosen automatically: + For `.n_vars` < 50, `.X` is used, otherwise 'X_pca' is used. + If 'X_pca' is not present, it’s computed with default parameters. + knn: If `True`, use a hard threshold to restrict the number of neighbors to `n_neighbors`, that is, consider a knn graph. + Otherwise, use a Gaussian Kernel to assign low weights to neighbors more distant than the `n_neighbors` nearest neighbor. + random_state: A numpy random seed. + method: Use 'umap' [McInnes18]_ or 'gauss' (Gauss kernel following [Coifman05]_ with adaptive width [Haghverdi16]_) for computing connectivities. + Use 'rapids' for the RAPIDS implementation of UMAP (experimental, GPU only). + metric: A known metric’s name or a callable that returns a distance. + transformer: Approximate kNN search implementation. Follows the API of + :class:`~sklearn.neighbors.KNeighborsTransformer`. + See scanpy's knn-transformers tutorial for more details. This tutorial is also valid for ehrapy's `neighbors` function.
+ Next to the advanced options from the knn-transformers tutorial, this argument accepts the following basic options: + + `None` (the default) + Behavior depends on data size. + For small data, uses :class:`~sklearn.neighbors.KNeighborsTransformer` with algorithm="brute" for exact kNN, otherwise uses + :class:`~pynndescent.pynndescent_.PyNNDescentTransformer` for approximate kNN. + `'pynndescent'` + Uses :class:`~pynndescent.pynndescent_.PyNNDescentTransformer` for approximate kNN. + `'sklearn'` + Uses :class:`~sklearn.neighbors.KNeighborsTransformer` with algorithm="brute" for exact kNN. + metric_kwds: Options for the metric. + key_added: If not specified, the neighbors data is stored in .uns['neighbors'], + distances and connectivities are stored in .obsp['distances'] and .obsp['connectivities'] respectively. + If specified, the neighbors data is added to .uns[key_added], distances are stored in .obsp[key_added+'_distances'] + and connectivities in .obsp[key_added+'_connectivities']. + copy: Determines whether a copy of `adata` is returned. + + Returns: + Depending on `copy`, updates or returns `adata` with the following: + See `key_added` parameter description for the storage path of connectivities and distances. + + **connectivities** : sparse matrix of dtype `float32`. + Weighted adjacency matrix of the neighborhood graph of data points. Weights should be interpreted as connectivities. + + **distances** : sparse matrix of dtype `float32`. + Instead of decaying weights, this stores distances for each pair of neighbors. + """ + return sc.pp.neighbors( + adata=adata, + n_neighbors=n_neighbors, + n_pcs=n_pcs, + use_rep=use_rep, + knn=knn, + random_state=random_state, + method=method, + transformer=transformer, + metric=metric, + metric_kwds=metric_kwds, + key_added=key_added, + copy=copy, + ) diff --git a/data/ehrapy/preprocessing/_summarize_measurements.py b/data/ehrapy/preprocessing/_summarize_measurements.py new file mode 100644 index 0000000000000000000000000000000000000000..441d31d9c5c75071dbe9659486974a00ca457f5e --- /dev/null +++ b/data/ehrapy/preprocessing/_summarize_measurements.py @@ -0,0 +1,45 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from ehrapy.anndata import anndata_to_df, df_to_anndata + +if TYPE_CHECKING: + from collections.abc import Iterable + + from anndata import AnnData + + +def summarize_measurements( + adata: AnnData, + layer: str | None = None, + var_names: Iterable[str] | None = None, + statistics: Iterable[str] | None = None, +) -> AnnData: + """Summarizes numerical measurements into minimum, maximum and average values. + + Args: + adata: AnnData object containing measurements. + layer: Layer to calculate the expanded measurements for. + var_names: The measurements to summarize. Defaults to None (all numerical measurements). + statistics: Which summary statistics to calculate. + Possible values are 'min', 'max', and 'mean'. + If None, it calculates minimum, maximum and mean. + + Returns: + A new AnnData object with expanded X containing the specified statistics as additional columns replacing the original values.
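+ + Examples: + >>> import ehrapy as ep + >>> # A minimal usage sketch: mimic_2 is the demo dataset used in ehrapy's other docstring + >>> # examples; the ep.pp export path for this function is assumed here. + >>> adata = ep.dt.mimic_2(encoded=False) + >>> summarized = ep.pp.summarize_measurements(adata, statistics=["min", "max", "mean"])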
+ """ + if var_names is None: + var_names = adata.var_names + + if statistics is None: + statistics = ["min", "max", "mean"] + + aggregation_functions = {measurement: statistics for measurement in var_names} + + grouped = anndata_to_df(adata, layer=layer).groupby(adata.obs.index).agg(aggregation_functions) + grouped.columns = [f"{col}_{stat}" for col, stat in grouped.columns] + + expanded_adata = df_to_anndata(grouped) + + return expanded_adata diff --git a/data/ehrapy/preprocessing/_types.py b/data/ehrapy/preprocessing/_types.py new file mode 100644 index 0000000000000000000000000000000000000000..6810761afc58f7d74a04a8b8ad115b8e3e0c7716 --- /dev/null +++ b/data/ehrapy/preprocessing/_types.py @@ -0,0 +1,5 @@ +from __future__ import annotations + +from typing import Literal + +KnownTransformer = Literal["pynndescent", "sklearn"] diff --git a/data/ehrapy/preprocessing/laboratory_reference_tables/laposata.tsv b/data/ehrapy/preprocessing/laboratory_reference_tables/laposata.tsv new file mode 100644 index 0000000000000000000000000000000000000000..615f76039a0c8b71803972fc685e9f66efdf299f --- /dev/null +++ b/data/ehrapy/preprocessing/laboratory_reference_tables/laposata.tsv @@ -0,0 +1,432 @@ +Measurement Specimen Traditional Reference Interval Traditional Units Conversion Factor SI Reference Interval SI Units Sex +Acetaminophen (therapeutic) Serum, plasma 10-30 μg/mL 6.62 70-200 μmol/L U +Acetoacetic acid Serum, plasma <1 mg/dL 0.098 <0.1 mmol/L U +Acetone Serum, plasma <2.0 mg/dL 0.172 <0.34 mmol/L U +Acetylcholinesterase Red blood cells 5-10 U/mL 1.0 5-10 U/L U +Activated partial thromboplastin time (APTT) Whole blood 25-40 s 1.0 25-40 s U +Adenosine deaminasea Serum 11.5-25.0 U/L 0.017 0.20-0.43 μKat/L U +Alanine (adult) Plasma 1.87-5.88 mg/dL 112.2 210-661 μmol/day U +Alanine aminotransferase (ALT, SGPT) Serum 10-40 U/L 1.0 10-40 U/L U +Albumin Serum 3.5-5.0 g/dL 10.0 35-50 g/L U +Alcohol dehydrogenasea Serum <2.8 U/L 0.017 <0.05 μKat/L U +Aldolasea Serum 1.0-7.5 U/L 0.017 0.02-0.13 μKat/L U +Aldosterone (upright) Plasma 7-30 ng/dL 0.0277 0.19-0.83 nmol/L U +Aldosterone Urine, 24 h 3-20 μg/24 h 2.77 8-55 nmol/day U +Alkaline phosphatase Serum 50-120 U/L 1.0 50-120 U/L U +α1-Acid glycoprotein Serum 50-120 mg/dL 0.01 0.5-1.2 g/L U +α2-Macroglobulin Serum 130-300 mg/dL 0.01 1.3-3.0 g/L U +Alprazolam (therapeutic) Serum, plasma 10-50 ng/mL 3.24 32-162 nmol/L U +Aluminum Serum, plasma <6 ng/mL 37.06 0.0-222.4 nmol/L U +Amikacin (therapeutic, peak) Serum, plasma 20-30 μg/mL 1.71 34-52 μmol/L U +Alanine Plasma 1.87-5.89 mg/dL 112.2 210-661 μmol/L U +α-Aminobutyric acid Plasma 0.08-0.36 mg/dL 97.0 8-35 μmol/L U +Arginine Plasma 0.37-2.40 mg/dL 57.4 21-138 μmol/L U +Aspartic acid Plasma <0.3 mg/dL 75.1 <25 μmol/L U +Cystine Plasma 0.40-1.40 mg/dL 83.3 33-117 μmol/L U +Glutamic acid Plasma 0.2-2.8 mg/dL 67.97 15-190 μmol/L U +Glutamine Plasma 6.1-10.2 mg/dL 68.42 420-700 μmol/L U +Glycine Plasma 0.9-4.2 mg/dL 133.3 120-560 μmol/L U +Histidine Plasma 0.5-1.7 mg/dL 64.5 32-110 μmol/L U +Hydroxyproline Plasma <0.55 mg/dL 76.3 <42 μmol/L U +Isoleucine Plasma 0.5-1.3 mg/dL 76.24 40-100 μmol/L U +Leucine Plasma 1.0-2.3 mg/dL 76.3 75-175 μmol/L U +Lysine Plasma 1.2-3.5 mg/dL 68.5 80-240 μmol/L U +Methionine Plasma 0.1-0.6 mg/dL 67.1 6-40 μmol/L U +Ornithine Plasma 0.4-1.4 mg/dL 75.8 30-106 μmol/L U +Phenylalanine Plasma 0.6-1.5 mg/dL 60.5 35-90 μmol/L U +Proline Plasma 1.2-3.9 mg/dL 86.9 104-340 μmol/L U +Serine Plasma 0.7-2.0 mg/dL 95.2 65-193 μmol/L U +Taurine Plasma 0.3-2.1 mg/dL 80.0 24-168 μmol/L U 
+Threonine Plasma 0.9-2.5 mg/dL 84.0 75-210 μmol/L U +Tryptophan Plasma 0.5-1.5 mg/dL 48.97 25-73 μmol/L U +Tyrosine Plasma 0.4-1.6 mg/dL 55.19 20-90 μmol/L U +Valine Plasma 1.7-3.7 mg/dL 85.5 145-315 μmol/L U +Amiodarone (therapeutic) Serum, plasma 0.5-2.5 μg/mL 1.55 0.8-3.9 μmol/L U +δ-Aminolevulinic acid Urine 1.0-7.0 mg/24 h 7.626 8-53 μmol/day U +Amitriptyline (therapeutic) Serum, plasma 80-250 ng/mL 3.61 289-903 nmol/L U +Ammonia (as NH3) Plasma 15-50 μg/dL 0.714 11-35 μmol/L U +Amobarbital (therapeutic) Serum 1-5 μg/mL 4.42 4-22 μmol/L U +Amoxapine (therapeutic) Plasma 200-600 ng/mL 1.0 200-600 μg/L U +Amylase Serum 27-130 U/L 0.017 0.46-2.21 μKat/L U +Androstenedione Serum 75-205 ng/dL 0.0349 2.6-7.2 nmol/L M +Androstenedione Serum 85-275 ng/dL 0.0349 3.0-9.6 nmol/L F +Angiotensin I Plasma <25 pg/mL 1.0 <25 ng/L U +Angiotensin II Plasma 10-60 pg/mL 1.0 10-60 ng/L U +Angiotensin-converting enzyme (ACE) Serum 8-52 U/L 0.017 0.14-0.88 μKat/L U +Anion gap (Na+)−(Cl− + HCO3−) Serum, plasma 8-16 mEq/L 1.0 8-16 mmol/L U +Antidiuretic hormone (ADH, vasopressin) Plasma 1-5 pg/mL 0.926 0.9-4.6 pmol/L U +α2-Antiplasmin (fraction) Plasma 80-130 % 0.01 0.8-1.3 Fraction of 1.0 U +Antithrombin III Plasma 21-30 mg/dL 10.0 210-300 mg/L U +α1-Antitrypsin Serum 80-200 mg/dL 0.01 0.8-2.0 g/L U +Apolipoprotein A Serum 80-151 mg/dL 0.01 0.8-1.5 g/L M +Apolipoprotein A Serum 80-170 mg/dL 0.01 0.8-1.5 g/L F +Apolipoprotein B Serum, plasma 50-123 mg/dL 0.01 0.5-1.2 g/L M +Apolipoprotein B Serum, plasma 25-120 mg/dL 0.01 0.5-1.2 g/L F +Arsenic (As) Whole blood <23 μg/L 0.0133 <0.31 μmol/L U +Arsenic (As), chronic poisoning Whole blood 100-500 μg/L 0.0133 1.33-6.65 μmol/L U +Arsenic (As), acute poisoning Whole blood 600-9300 μg/L 0.0133 7.9-123.7 μmol/L U +Asparagine Plasma 0.40-0.91 mg/dL 75.7 30-69 μmol/L U +Aspartate aminotransferase (AST, SGOT) Serum 20-48 U/L 0.017 0.34-0.82 μKat/L U +Atrial natriuretic hormone Plasma 20-77 pg/mL 1.0 20-77 ng/L U +Beryllium, toxic Urine >20 μg/L 0.111 >2.22 μmol/L U +Bicarbonate Plasma 21-28 mEq/L 1.0 21-28 mmol/L U +Bile acids (total) Serum 0.3-2.3 μg/mL 2.448 0.73-5.63 μmol/L U +Bilirubin Total Serum 0.3-1.2 mg/dL 17.1 2-18 μmol/L U +Bilirubin Direct (conjugated) Serum <0.2 mg/dL 17.1 <3.4 μmol/L U +Biotin Whole blood, serum 200-500 pg/mL 0.0041 0.82-2.05 nmol/L U +Bismuth Whole blood 1-12 μg/L 4.785 4.8-57.4 nmol/L U +Blood gases PCO2 Arterial blood 35-45 mm Hg 1.0 35-45 mm Hg U +Blood gases pH Arterial blood 7.35-7.45 — 1.0 7.35-7.45 — U +Blood gases PO2 Arterial blood 80-100 mm Hg 1.0 80-100 mm Hg U +BNP Plasma <100 pg/mL 1.0 <100 pg/mL U +Bupropion (Wellbutrin, Zyban) Serum, plasma 25-100 ng/mL 3.62 91-362 nmol/L U +C1 esterase inhibitor Serum 12-30 mg/dL 0.01 0.12-0.30 g/L U +C3 complement Serum 1200-1500 μg/mL 0.001 1.2-1.5 g/L U +C4 complement Serum 350-600 μg/mL 0.001 0.35-0.60 g/L U +CA125 Serum <35 U/mL 1.0 <35 kU/L U +CA19-9 Serum <37 U/mL 1.0 <37 kU/L U +CA15-3 Serum <30 U/mL 1.0 <30 kU/L U +CA27.29 Serum <37.7 U/mL 1.0 <37.7 kU/L U +Cadmium (nonsmoker) Whole blood 0.3-1.2 μg/L 8.897 2.7-10.7 nmol/L U +Caffeine (therapeutic, infants) Serum, plasma 8-20 μg/mL 5.15 41-103 μmol/L U +Calcitonin Serum, plasma <19 pg/mL 1.0 <19 ng/L U +Calcium, ionized Serum 4.60-5.08 mg/dL 0.25 1.15-1.27 mmol/L U +Calcium, total Serum 8.2-10.2 mg/dL 0.25 2.05-2.55 mmol/L U +Calcium, normal diet Urine <250 mg/24 h 0.025 <6.2 mmol/day U +Carbamazepine (therapeutic) Serum, plasma 8-12 μg/mL 4.23 34-51 μmol/L U +Carbon dioxide Serum, plasma, venous blood 22-28 mEq/L 1.0 22-28
mmol/L U +Carboxyhemoglobin (carbon monoxide) Nonsmoker (fraction) Whole blood <2.0 % 0.01 <0.02 Fraction of 1.0 U +Carboxyhemoglobin (carbon monoxide) Toxic (fraction) Whole blood >20 % 0.01 >0.2 Fraction of 1.0 U +β-Carotene Serum 10-85 μg/dL 0.0186 0.2-1.6 μmol/L U +CEA, nonsmoker Serum <3 ng/mL 1.0 <3 μg/L U +CEA, smoker Serum <5 ng/mL 1.0 <5 μg/L U +Ceruloplasmin Serum 20-40 mg/dL 10.0 200-400 mg/L U +Chloramphenicol (therapeutic) Serum 10-25 μg/mL 3.1 31-77 μmol/L U +Chlordiazepoxide (therapeutic) Serum, plasma 0.7-1.0 μg/mL 3.34 2.3-3.3 μmol/L U +Chloride Serum, plasma 96-106 mEq/L 1.0 96-106 mmol/L U +Chloride CSF 118-132 mEq/L 1.0 118-132 mmol/L U +Chlorpromazine (therapeutic, adult) Plasma 50-300 ng/mL 3.14 157-942 nmol/L U +Cholesterol, high-density lipoproteins (HDL) Plasma 40-60 mg/dL 0.02586 1.03-1.55 mmol/L U +Optimal Plasma <100 mg/dL 0.02586 <2.59 mmol/L U +Near optimal Plasma 100-129 mg/dL 0.02586 2.59-3.34 mmol/L U +Borderline high Plasma 130-159 mg/dL 0.02586 3.37-4.12 mmol/L U +High Plasma 160-189 mg/dL 0.02586 4.15-4.90 mmol/L U +Very high Plasma >190 mg/dL 0.02586 >4.90 mmol/L U +Cholesterol (total) Desirable Serum <200 mg/dL 0.02586 <5.17 mmol/L U +Cholesterol Borderline high Serum 200-239 mg/dL 0.02586 5.17-6.18 mmol/L U +Cholesterol High Serum >240 mg/dL 0.02586 >6.21 mmol/L U +Chromium Whole blood 0.7-28.0 μg/L 19.2 13.4-538.6 nmol/L U +Citrate Serum 1.2-3.0 mg/dL 52.05 60-160 μmol/L U +Citrulline Plasma 0.4-2.4 mg/dL 57.1 20-135 μmol/L U +Clonazepam (therapeutic) Serum 15-60 ng/mL 3.17 48-190 nmol/L U +Coagulation factor I (fibrinogen) Plasma 150-400 mg/dL 0.01 1.5-4.0 g/L U +Coagulation factor II (prothrombin) (fraction) Plasma 60-140 % 0.01 0.60-1.40 Fraction of 1.0 U +Coagulation factor VIII (fraction) Plasma 50-200 % 0.01 0.50-2.00 Fraction of 1.0 U +Cobalt Serum <1.0 μg/L 16.97 <17 nmol/L U +Codeine (therapeutic) Serum 10-100 ng/mL 3.34 33-334 nmol/L U +Hematocrit (fraction) Whole blood 41-50 % 0.01 0.41-0.50 Fraction of 1.0 M +Hematocrit (fraction) Whole blood 35-45 % 0.01 0.35-0.45 Fraction of 1.0 F +Hemoglobin (mass concentration) Whole blood 13.5-17.5 g/dL 10.0 135-175 g/L M +Hemoglobin (mass concentration) Whole blood 12.0-15.5 g/dL 10.0 120-155 g/L F +Hemoglobin (substance concentration, Hb [Fe]) Whole blood 13.6-17.2 g/dL 0.6206 8.44-10.65 mmol/L M +Hemoglobin (substance concentration, Hb [Fe]) Whole blood 12.0-15.0 g/dL 0.6206 7.45-9.30 mmol/L F +Mean corpuscular hemoglobin (MCH), mass concentration Whole blood 27-33 pg/cell 1.0 27-33 pg/cell U +MCH, substance concentration, Hb [Fe] Whole blood 27-33 pg/cell 0.06206 1.70-2.05 fmol U +Mean corpuscular hemoglobin concentration (MCHC), mass concentration Whole blood 33-37 g Hb/dL 10.0 330-370 g Hb/L U +MCHC, substance concentration, Hb [Fe] Whole blood 33-37 g Hb/dL 0.6206 20-23 mmol/L U +Mean cell volume (MCV) Whole blood 80-100 μm^3 1.0 80-100 fL U +Platelet count Whole blood 150-450 10^3 μL^−1 1.0 150-450 10^9 L^−1 U +Thrombocytes Whole blood 150-450 10^3 μL^−1 1.0 150-450 10^9 L^−1 U +Red blood cell count Whole blood 3.9-5.5 10^6 μL^−1 1.0 3.9-5.5 10^12 L^−1 F +Red blood cell count Whole blood 4.6-6.0 10^6 μL^−1 1.0 4.6-6.0 10^12 L^−1 M +Reticulocyte count Whole blood 25-75 10^3 μL^−1 1.0 25-75 10^9 L^−1 U +Reticulocyte count (fraction) Whole blood 0.5-1.5 % of RBCs 0.01 0.005-0.015 Fraction of RBCs U +White blood cell count Whole blood 4.5-11.0 10^3 μL^−1 1.0 4.5-11.0 10^9 L^−1 U +Neutrophils (absolute) Whole blood 1800-7800 μL^−1 1.0 1.8-7.8 10^9 L^−1 U +Bands (absolute) Whole blood 0-700 μL^−1 1.0 0.00-0.70
10^9 L^−1 U +Lymphocytes (absolute) Whole blood 1000-4800 μL^−1 1.0 1.0-4.8 10^9 L^−1 U +Monocytes (absolute) Whole blood 0-800 μL^−1 1.0 0.00-0.80 10^9 L^−1 U +Eosinophils (absolute) Whole blood 0-450 μL^−1 1.0 0.00-0.45 10^9 L^−1 U +Basophils (absolute) Whole blood 0-200 μL^−1 1.0 0.00-0.20 10^9 L^−1 U +Neutrophils (differential, fraction) Whole blood 56 % 0.01 0.56 Fraction of 1.0 U +Bands (differential, fraction) Whole blood 3 % 0.01 0.03 Fraction of 1.0 U +Lymphocytes (differential, fraction) Whole blood 34 % 0.01 0.34 Fraction of 1.0 U +Monocytes (differential, fraction) Whole blood 4 % 0.01 0.04 Fraction of 1.0 U +Eosinophils (differential, fraction) Whole blood 2.7 % 0.01 0.027 Fraction of 1.0 U +Basophils (differential, fraction) Whole blood 0.3 % 0.01 0.003 Fraction of 1.0 U +Copper Serum 70-140 μg/dL 0.1574 11.0-22.0 μmol/L U +Coproporphyrin Urine <200 μg/24 h 1.527 <300 nmol/day U +Corticotropin (08:00) Plasma <120 pg/mL 0.22 <26 pmol/L U +Cortisol, total 08:00 Plasma 5-25 μg/dL 27.6 138-690 nmol/L U +Cortisol, total 16:00 Plasma 3-16 μg/dL 27.6 83-442 nmol/L U +Cortisol, total 20:00 Plasma 2.5-12.5 μg/dL 27.6 69-345 nmol/L U +Cortisol, free Urine 30-100 μg/24 h 2.76 80-280 nmol/day U +Cotinine (smoker) Plasma 16-145 ng/mL 5.68 91-823 nmol/L U +C-peptide Serum 0.5-3.5 ng/mL 0.333 0.17-1.17 nmol/L U +Creatine Serum 0.2-0.7 mg/dL 76.3 15.3-53.3 μmol/L M +Creatine Serum 0.3-0.9 mg/dL 76.3 22.9-68.6 μmol/L F +Creatine kinase (CK) Serum 50-200 U/L 0.017 0.85-3.40 μKat/L U +CK-MB (fraction) Serum <6 % 0.01 <0.06 Fraction of 1.0 U +Creatinine Serum, plasma 0.6-1.2 mg/dL 88.4 53-106 μmol/L U +Creatinine Urine 1-2 g/24 h 8.84 8.8-17.7 mmol/day U +Creatinine clearance, glomerular filtration rate Serum, urine 75-125 mL/min/1.73 m^2 0.00963 0.72-1.2 mL/s/m^2 U +C-telopeptide Serum, plasma 60-700 pg/mL 1.0 60-700 pg/mL M +Premenopausal Serum, plasma 40-465 pg/mL 1.0 40-465 pg/mL F +Cyanide (toxic) Whole blood >1.0 μg/mL 38.4 >38.4 μmol/L U +Cyclic adenosine monophosphate (cAMP) Plasma 4.6-8.6 ng/mL 3.04 14-26 nmol/L U +Cyclosporine (toxic) Whole blood >400 ng/mL 0.832 >333 nmol/L U +D-dimer Plasma Negative (<500) ng/mL 1.0 Negative (<500) ng/mL U +Dehydroepiandrosterone (DHEA) (unconjugated) Plasma, serum 180-1250 ng/dL 0.0347 6.2-43.3 nmol/L M +Dehydroepiandrosterone sulfate (DHEA-S) Plasma, serum 10-619 μg/dL 0.027 0.3-16.7 μmol/L M +Desipramine (therapeutic) Plasma, serum 50-200 ng/mL 3.75 170-700 nmol/L U +Diazepam (therapeutic) Plasma, serum 100-1000 ng/mL 0.00351 0.35-3.51 μmol/L U +Digoxin (therapeutic) Plasma 0.5-2.0 ng/mL 1.281 0.6-2.6 nmol/L U +Disopyramide (therapeutic) Plasma, serum 2.8-7.0 mg/L 2.95 8-21 μmol/L U +Doxepin (therapeutic) Plasma, serum 150-250 ng/mL 3.58 540-890 nmol/L U +Potassium Plasma 3.5-5.0 mEq/L 1.0 3.5-5.0 mmol/L U +Sodium Plasma 136-142 mEq/L 1.0 136-142 mmol/L U +Epinephrine (supine) Plasma <50 pg/mL 5.46 <273 pmol/L U +Epinephrine Urine <20 μg/24 h 5.46 <109 nmol/day U +Erythrocyte sedimentation rate (ESR) Whole blood 0-20 mm/h 1.0 0-20 mm/h U +Erythropoietin Serum 5-36 mU/mL 1.0 5-36 IU/L U +Estradiol (E2, unconjugated) Follicular phase Serum 20-350 pg/mL 3.69 73-1285 pmol/L F +Estradiol Midcycle peak Serum 150-750 pg/mL 3.69 551-2753 pmol/L F +Estradiol Luteal phase Serum 30-450 pg/mL 3.69 110-1652 pmol/L F +Estradiol Postmenopausal Serum <59 pg/mL 3.69 <218 pmol/L F +Estradiol (unconjugated) Serum <20 pg/mL 3.67 <184 pmol/L M +Estriol (E3, unconjugated) Serum <2 ng/mL 3.47 <6.9 nmol/L U +Estrogens (total) Follicular phase Serum 60-200 pg/mL 1.0 60-200 ng/L
F +Estrogens (total) Luteal phase Serum 160-400 pg/mL 1.0 160-400 ng/L F +Estrogens (total) Postmenopausal Serum <130 pg/mL 1.0 <130 ng/L F +Estrogens (total) Serum 20-80 pg/mL 1.0 20-80 ng/L M +Estrone (E1) Follicular phase Plasma, serum 100-250 pg/mL 3.69 370-925 pmol/L F +Estrone (E1) Luteal phase Plasma, serum 15-200 pg/mL 3.69 55-740 pmol/L F +Estrone (E1) Postmenopausal Plasma, serum 15-55 pg/mL 3.69 55-204 pmol/L F +Estrone (E1) Plasma, serum 15-65 pg/mL 3.69 55-240 pmol/L M +Ethanol (ethyl alcohol), toxic Serum, whole blood >100 mg/dL 0.2171 >21.7 mmol/L U +Ethosuximide Plasma, serum 40-100 μg/mL 7.08 283-708 μmol/L U +Ethylene glycol (toxic) Plasma, serum >30 mg/dL 0.1611 >5 mmol/L U +Everolimus Whole blood 3-15 ng/mL 1.04 3-16 nmol/L U +Fatty acids (nonesterified) Plasma 8-25 mg/dL 0.0354 0.28-0.89 mmol/L U +Fecal fat (as stearic acid) Stool 2.0-6.0 g/day 1.0 2-6 g/day U +Felbamate Serum, plasma 30-60 μg/mL 4.2 126-252 μmol/L U +Ferritin Plasma 15-200 ng/mL 1.0 15-200 μg/L U +α-Fetoprotein Serum <10 ng/mL 1.0 <10 μg/L U +Fibrin breakdown products (fibrin split products) Serum <10 μg/mL 1.0 <10 mg/L U +Folate (folic acid) Red blood cells 166-640 ng/mL 2.266 376-1450 nmol/L U +Folate (folic acid) Serum 5-25 ng/mL 2.266 11-57 nmol/L U +Follicle-stimulating hormone (FSH, follitropin) Follicular phase Serum 1.37-9.9 mIU/mL 1.0 1.3-9.9 IU/L F +Follicle-stimulating hormone (FSH, follitropin) Ovulatory phase Serum 6.17-17.2 mIU/mL 1.0 6.1-17.2 IU/L F +Follicle-stimulating hormone (FSH, follitropin) Luteal phase Serum 1.09-9.2 mIU/mL 1.0 1.0-9.2 IU/L F +Follicle-stimulating hormone (FSH, follitropin) Postmenopausal Serum 19.3-100.6 mIU/mL 1.0 19.3-100.6 IU/L F +FSH (follitropin) Serum 1.42-15.4 mIU/mL 1.0 1.4-15.4 IU/L M +FSH (follitropin) Urine 2-15 IU/24 h 1.0 2-15 IU/day F +FSH (follitropin) Urine 3-12 IU/24 h 1.0 3-12 IU/day M +Fructosamine Serum 1.5-2.7 mmol/L 1.0 1.5-2.7 mmol/L U +Gabapentin Serum, plasma 2-20 μg/mL 5.84 12-117 μmol/L U +Gastrin (fasting) Serum <100 pg/mL 1.0 <100 ng/L U +Gentamicin (therapeutic, peak) Serum 6-10 μg/mL 2.1 12-21 μmol/L U +Glucagon Plasma 20-100 pg/mL 1.0 20-100 ng/L U +Glucose Serum, plasma 70-110 mg/dL 0.05551 3.9-6.1 mmol/L U +Glucose CSF 50-80 mg/dL 0.05551 2.8-4.4 mmol/L U +Glucose-6-phosphate dehydrogenase Red blood cells 10-14 U/g of Hb 0.0645 0.65-0.90 U/mol of Hb U +γ-Glutamyltransferase (GGT; γ-glutamyl transpeptidase) Serum <30 U/L 0.017 <0.51 μKat/L F +γ-Glutamyltransferase (GGT; γ-glutamyl transpeptidase) Serum <50 U/L 0.017 <0.85 μKat/L M +Glycerol (free) Serum <1.5 mg/dL 0.1086 <0.16 mmol/L U +Glycated hemoglobin (hemoglobin A1, A1c, fraction) Whole blood 4-5.6 % of total Hb 0.01 0.04-0.056 Fraction of total Hb U +Glycated hemoglobin (hemoglobin A1, A1c, fraction) Diagnosis Whole blood >6.5 % of total Hb 0.01 >0.065 Fraction of total Hb U +Gold (therapeutic) Serum 100-200 μg/dL 0.05077 5.1-10.2 μmol/L U +Growth hormone, adult (GH, somatotropin) Plasma, serum <10 ng/mL 1.0 <10 μg/L U +Haloperidol (therapeutic) Serum, plasma 5-20 ng/mL 2.6 13-52 nmol/L U +Haptoglobin Serum 40-180 mg/dL 0.01 0.4-1.8 g/L U +Hemoglobin A2 (fraction) Whole blood 2.0-3.5 % total Hb 0.01 0.020-0.035 Fraction of 1.0 U +Hemoglobin F (fraction) (fetal hemoglobin in adult) Whole blood <2 % 0.01 <0.02 Fraction of 1.0 U +Homocysteine (total) Plasma, serum 4-12 μmol/L 1.0 4-12 μmol/L U +Homovanillic acid Urine <8 mg/24 h 5.489 <45 μmol/day U +Human chorionic gonadotropin (hCG) Serum <3 mIU/mL 1.0 <3 IU/L F +β-Hydroxybutyric acid Serum 0.21-2.81 mg/dL 96.05 20-270 μmol/L U
+5-Hydroxyindoleacetic acid (5-HIAA) Urine <25 mg/24 h 5.23 <131 μmol/day U +17α-Hydroxyprogesterone Follicular phase Serum 15-70 ng/dL 0.03 0.4-2.1 nmol/L F +17α-Hydroxyprogesterone Luteal phase Serum 35-290 ng/dL 0.03 1.0-8.7 nmol/L F +17α-Hydroxyprogesterone Postmenopausal Serum <70 ng/dL 0.03 <2.1 nmol/L F +17α-Hydroxyprogesterone Serum 27-199 ng/dL 0.03 0.8-6.0 nmol/L M +Ibuprofen (therapeutic) Serum, plasma 10-50 μg/mL 4.85 49-243 μmol/L U +Imipramine (therapeutic) Serum, plasma 150-250 ng/mL 3.57 536-893 nmol/L U +Immunoglobulin A (IgA) Serum 50-350 mg/dL 0.01 0.5-3.5 g/L U +Immunoglobulin D (IgD) Serum 0.5-3.0 mg/dL 10.0 5-30 mg/L U +Immunoglobulin E (IgE) Serum 10-179 IU/mL 2.4 24-430 μg/L U +Immunoglobulin G (IgG) Serum 600-1560 mg/dL 0.01 6.0-15.6 g/L U +Immunoglobulin M (IgM) Serum 54-222 mg/dL 0.01 0.5-2.2 g/L U +Insulin Plasma 5-20 μU/mL 6.945 34.7-138.9 pmol/L U +Inhibin A Serum 1.0-3.6 pg/mL 1.0 1.0-3.6 ng/L M +Inhibin A early follicular Serum 5.5-28.2 pg/mL 1.0 5.5-28.2 ng/L F +Inhibin A late follicular Serum 19.5-102.3 pg/mL 1.0 19.5-102.3 ng/L F +Inhibin A midcycle Serum 49.9-155.5 pg/mL 1.0 49.9-155.5 ng/L F +Inhibin A midluteal Serum 13.2-159.6 pg/mL 1.0 13.2-159.6 ng/L F +Inhibin A postmenopausal Serum 1.0-3.9 pg/mL 1.0 1.0-3.9 ng/L F +Insulin-like growth factor Serum 130-450 ng/mL 1.0 130-450 μg/L U +Interleukin-6 Serum <43.5 pg/mL 0.22 <9.57 pmol/L U +Iron (total) Serum 60-150 μg/dL 0.179 10.7-26.9 μmol/L U +Iron-binding capacity Serum 250-400 μg/dL 0.179 44.8-71.6 μmol/L U +Isoniazid (therapeutic) Plasma or serum 1-7 μg/mL 7.29 7-51 μmol/L U +Isopropanol (toxic) Plasma, serum >400 mg/L 0.0166 >6.64 mmol/L U +Lactate (lactic acid) Arterial blood 3-11.3 mg/dL 0.111 0.3-1.3 mmol/L U +Lactate (lactic acid) Venous blood 4.5-19.8 mg/dL 0.111 0.5-2.2 mmol/L U +Lactate dehydrogenase (LDH) Serum 50-200 U/L 1.0 50-200 U/L U +Lamotrigine Serum, plasma 2.5-15 μg/mL 3.91 10-59 μmol/L U +Lead Whole blood <25 μg/dL 0.0483 <1.21 μmol/L U +Levetiracetam Serum, plasma 12-46 μg/mL 5.88 71-270 μmol/L U +Lidocaine (therapeutic) Serum, plasma 1.5-6.0 μg/mL 4.27 6.4-25.6 μmol/L U +Lipase Serum 0-160 U/L 0.017 0-2.72 μKat/L U +Lipoprotein(a) (Lp(a)) Serum, plasma 10-30 mg/dL 0.01 0.1-0.3 g/L U +Lithium (therapeutic) Serum, plasma 0.6-1.2 mEq/L 1.0 0.6-1.2 mmol/L U +Lorazepam (therapeutic) Serum, plasma 50-240 ng/mL 3.11 156-746 nmol/L U +LDL cholesterol Serum, plasma 60-130 mg/dL 0.02586 1.55-3.37 mmol/L U +Luteinizing hormone (LH) Follicular phase Serum 2.0-15.0 mIU/mL 1.0 2.0-15.0 IU/L F +Luteinizing hormone (LH) Ovulatory peak Serum 22.0-105.0 mIU/mL 1.0 22.0-105.0 IU/L F +Luteinizing hormone (LH) Luteal phase Serum 0.6-19.0 mIU/mL 1.0 0.6-19.0 IU/L F +Luteinizing hormone (LH) Postmenopausal Serum 16.0-64.0 mIU/mL 1.0 16.0-64.0 IU/L F +Luteinizing hormone (LH) Serum 2.0-12.0 mIU/mL 1.0 2.0-12.0 IU/L M +Lysozyme (muramidase) Serum 4-13 mg/L 1.0 4-13 mg/L U +Magnesium Serum 1.5-2.5 mg/dL 0.4114 0.62-1.03 mmol/L U +Magnesium Serum 1.3-2.1 mEq/L 0.5 0.65-1.05 mmol/L U +Manganese Whole blood 10-12 μg/L 18.2 182-218 nmol/L U +Maprotiline (therapeutic) Plasma, serum 200-600 ng/mL 1.0 200-600 μg/L U +Meperidine (therapeutic) Serum, plasma 0.4-0.7 μg/mL 4.04 1.6-2.8 μmol/L U +Mercury Whole blood 0.6-59.0 μg/L 4.99 3.0-294.4 nmol/L U +Metanephrines (total) Urine <1.0 mg/24 h 5.07 <5 μmol/day U +Methadone (therapeutic) Serum, plasma 100-400 ng/mL 0.00323 0.32-1.29 μmol/L U +Methanol Whole blood, serum <1.5 mg/L 0.0312 <0.05 mmol/L U +Methemoglobin Whole blood <0.24 g/dL 155.0 <37.2
μmol/L U +Methemoglobin (fraction) Whole blood <1.0 % of total Hb 0.01 <0.01 Fraction of total Hb U +Methsuximide (therapeutic) Serum, plasma 10-40 μg/mL 5.29 53-212 μmol/L U +Methyldopa (therapeutic) Serum, plasma 1-5 μg/mL 4.73 5-24 μmol/L U +Metoprolol (therapeutic) Serum, plasma 75-200 ng/mL 3.74 281-748 nmol/L U +Methotrexate Toxic 24 h after dose Serum, plasma ≥10 μmol/L 1.0 ≥10 μmol/L U +Methotrexate Toxic 48 h after dose Serum, plasma ≥1 μmol/L 1.0 ≥1 μmol/L U +Methotrexate Toxic 72 h after dose Serum, plasma ≥0.1 μmol/L 1.0 ≥0.1 μmol/L U +β2-Microglobulin Serum <2 μg/mL 85.0 <170 nmol/L U +Morphine (therapeutic) Serum, plasma 10-80 ng/mL 3.5 35-280 nmol/L U +Mycophenolic acid Serum, plasma 1.3-3.5 μg/mL 3.12 4-11 μmol/L U +Naproxen (therapeutic trough) Plasma, serum >50 μg/mL 4.34 >217 μmol/L U +Niacin (nicotinic acid) Urine 2.4-6.4 mg/24 h 7.3 17.5-46.7 μmol/day U +Nickel Whole blood 1.0-28.0 μg/L 17.0 17-476 nmol/L U +Nicotine (smoker) Plasma 0.01-0.05 mg/L 6.16 0.062-0.308 μmol/L U +Norepinephrine Plasma 110-410 pg/mL 5.91 650-2423 pmol/L U +Norepinephrine Urine 15-80 μg/24 h 5.91 89-473 nmol/day U +Nortriptyline (therapeutic) Serum, plasma 50-150 ng/mL 3.8 190-570 nmol/L U +N-telopeptide (BCE, bone collagen equivalents) Serum 5.4-24.2 nmol BCE/L 1.0 5.4-24.2 nmol BCE/L M +N-telopeptide Premenopausal Serum 6.2-19.0 nmol BCE/L 1.0 6.2-19.0 nmol BCE/L F +Osmolality Serum 275-295 mOsm/kg H2O 1.0 275-295 mmol/kg H2O U +Osmolality Urine 250-900 mOsm/kg H2O 1.0 250-900 mmol/kg H2O U +Osteocalcin Serum 3.0-13.0 ng/mL 1.0 3.0-13.0 μg/L U +Oxalate Serum 1.0-2.4 mg/L 11.4 11-27 μmol/L U +Oxazepam (therapeutic) Serum, plasma 0.2-1.4 μg/mL 3.49 0.7-4.9 μmol/L U +Oxycodone (therapeutic) Plasma, serum 10-100 ng/mL 3.17 32-317 nmol/L U +Parathyroid hormone Intact Serum 10-50 pg/mL 1.0 10-50 ng/L U +Parathyroid hormone N-terminal specific Serum 8-24 pg/mL 1.0 8-24 ng/L U +Parathyroid hormone C-terminal (mid-molecule) Serum 0-340 pg/mL 1.0 0-340 ng/L U +Pentobarbital (therapeutic) Serum, plasma 1-5 μg/mL 4.42 4.0-22 μmol/L U +Pepsinogen I Serum 28-100 ng/mL 1.0 28-100 μg/L U +Phenobarbital (therapeutic) Serum, plasma 15-40 μg/mL 4.31 65-172 μmol/L U +Phenytoin (therapeutic) Serum, plasma 10-20 μg/mL 3.96 40-79 μmol/L U +Phosphatase, tartrate-resistant acid Serum 1.5-4.5 U/L 0.017 0.03-0.08 μKat/L U +Phosphorus (inorganic) Serum 2.3-4.7 mg/dL 0.3229 0.74-1.52 mmol/L U +Phosphorus (inorganic) Urine 0.4-1.3 g/24 h 32.29 12.9-42.0 mmol/day U +Plasminogen Plasma 8.4-14.0 mg/dL 10.0 84-140 mg/L U +Plasminogen (fraction) Plasma 80-120 % 0.01 0.80-1.20 Fraction of 1.0 U +Plasminogen activator inhibitor Plasma <15 IU/mL 1.0 <15 kIU/L U +Porphobilinogen deaminase Red blood cells >7.0 nmol/s/L 1.0 >7.0 nmol/(s L) U +Prealbumin—transthyretin Serum, plasma 18-45 mg/dL 0.01 0.18-0.45 g/L U +Pregnanediol Follicular phase Urine <2.6 mg/24 h 3.12 <8 μmol/day F +Pregnanediol Luteal phase Urine 2.3-10.6 mg/24 h 3.12 8-33 μmol/day F +Pregnanediol Urine 0-1.9 mg/24 h 3.12 0-5.9 μmol/day M +Pregnanetriol Urine <2.5 mg/24 h 2.97 <7.5 μmol/day U +Primidone (therapeutic) Serum, plasma 5-12 μg/mL 4.58 23-55 μmol/L U +Procainamide (therapeutic) Serum, plasma 4-10 μg/mL 4.23 17-42 μmol/L U +Progesterone Follicular phase Serum 0.1-0.7 ng/mL 3.18 0.5-2.2 nmol/L F +Progesterone Luteal phase Serum 2.0-25.0 ng/mL 3.18 6.4-79.5 nmol/L F +Progesterone Serum 0.13-0.97 ng/mL 3.18 0.4-3.1 nmol/L M +Prolactin (nonlactating subject) Serum 1-25 ng/mL 1.0 1-25 μg/L U +Propoxyphene (therapeutic) Serum 0.1-0.4 μg/mL 2.946 0.3-1.2
μmol/L U +Propranolol (therapeutic) Serum, plasma 50-100 ng/mL 3.86 190-386 nmol/L U +Protein (total) Serum 6.0-8.0 g/dL 10.0 60-80 g/L U +Protein C (fraction) Plasma 70-140 % 0.01 0.70-1.40 Fraction of 1.0 U +Protein electrophoresis (SPEP, fraction of total protein) Albumin Serum 52-65 % 0.01 0.52-0.65 Fraction of 1.0 U +Protein electrophoresis (SPEP, fraction of total protein) α1-Globulin Serum 2.5-5.0 % 0.01 0.025-0.05 Fraction of 1.0 U +Protein electrophoresis (SPEP, fraction of total protein) α2-Globulin Serum 7.0-13.0 % 0.01 0.07-0.13 Fraction of 1.0 U +Protein electrophoresis (SPEP, fraction of total protein) β-Globulin Serum 8.0-14.0 % 0.01 0.08-0.14 Fraction of 1.0 U +Protein electrophoresis (SPEP, fraction of total protein) γ-Globulin Serum 12.0-22.0 % 0.01 0.12-0.22 Fraction of 1.0 U +Protein electrophoresis (SPEP, concentration) Albumin Serum 3.2-5.6 g/dL 10.0 32-56 g/L U +Protein electrophoresis (SPEP, concentration) α1-Globulin Serum 0.1-0.4 g/dL 10.0 1-10 g/L U +Protein electrophoresis (SPEP, concentration) α2-Globulin Serum 0.4-1.2 g/dL 10.0 4-12 g/L U +Protein electrophoresis (SPEP, concentration) β-Globulin Serum 0.5-1.1 g/dL 10.0 5-11 g/L U +Protein electrophoresis (SPEP, concentration) γ-Globulin Serum 0.5-1.6 g/dL 10.0 5-16 g/L U +Protein S (activity) Plasma 70-140 % 0.01 0.70-1.40 Fraction of 1.0 U +Prothrombin time (PT) Plasma 10-13 s 1.0 10-13 s U +Protoporphyrin Red blood cells 15-50 μg/dL 0.0177 0.27-0.89 μmol/L U +PSA Serum 0-4.0 ng/mL 1.0 0-4.0 μg/L U +Pyridinium cross-links (deoxypyridinoline) Urine 10.3-20 nmol/mmol creatinine 1.0 10.3-20 nmol/mmol creatinine M +Premenopausal Urine 15.3-33.6 nmol/mmol creatinine 1.0 15.3-33.6 nmol/mmol creatinine F +Pyruvate (as pyruvic acid) Whole blood 0.3-0.9 mg/dL 113.6 34-102 μmol/L U +Quinidine (therapeutic) Serum, plasma 2.0-5.0 μg/mL 3.08 6.2-15.4 μmol/L U +Renin (normal-sodium diet) Plasma 1.1-4.1 ng/mL/h 1.0 1.1-4.1 ng/(mL h) U +Rheumatoid factor Serum <30 IU/mL 1.0 <30 kIU/L U +Salicylates (therapeutic) Serum, plasma 15-30 mg/dL 0.0724 1.08-2.17 mmol/L U +Selenium Whole blood 58-234 μg/L 0.0127 0.74-2.97 μmol/L U +Serotonin (5-hydroxytryptamine) Whole blood 50-200 ng/mL 0.00568 0.28-1.14 μmol/L U +Sertraline (Zoloft) Serum or plasma 10-50 ng/mL 3.27 33-164 nmol/L U +Sex hormone-binding globulin Serum 0.5-1.5 μg/dL 34.7 17.4-52.1 nmol/L U +Sirolimus Whole blood 4-20 ng/mL 1.1 4-22 nmol/L U +Strychnine (toxic) Whole blood >0.5 mg/L 2.99 >1.5 μmol/L U +Substance P Plasma <240 pg/mL 1.0 <240 ng/L U +Sulfhemoglobin (fraction) Whole blood <1.0 % of total Hb 0.01 <0.010 Fraction of total Hb U +Tacrolimus Whole blood 3-20 ng/mL 1.24 4-25 nmol/L U +Testosterone Plasma, serum 300-1200 ng/dL 0.0347 10.4-41.6 nmol/L M +Testosterone Plasma, serum <85 ng/dL 0.0347 <2.95 nmol/L F +Theophylline (therapeutic) Plasma, serum 10-20 μg/mL 5.55 56-111 μmol/L U +Thiocyanate (nonsmoker) Plasma, serum 1-4 mg/L 17.2 17-69 μmol/L U +Thiopental (therapeutic) Plasma, serum 1-5 μg/mL 4.13 4-21 μmol/L U +Thioridazine (therapeutic) Plasma, serum 1.0-1.5 μg/mL 2.7 2.7-4.1 μmol/L U +Thrombin time Plasma 16-24 s 1.0 16-24 s U +Thyroglobulin Serum 3-42 ng/mL 1.0 3-42 μg/L U +Thyrotropin (thyroid-stimulating hormone, TSH) Serum 0.5-5.0 μIU/mL 1.0 0.5-5.0 mU/L U +Thyroxine, free (FT4) Serum 0.9-2.3 ng/dL 12.87 12-30 pmol/L U +Thyroxine, total (T4) Serum 5.5-12.5 μg/dL 12.87 71-160 nmol/L U +Thyroxine-binding globulin (TBG) as T4 binding capacity Serum 10-26 μg/dL 12.9 129-335 nmol/L U +Tissue plasminogen activator Plasma <0.04 IU/mL 1000.0 <40 IU/L U
+Tobramycin (therapeutic, peak) Plasma, serum 5-10 μg/mL 2.14 10-21 μmol/L U +Tocainide (therapeutic) Plasma, serum 4-10 μg/mL 5.2 21-52 μmol/L U +Topiramate Serum, plasma 5-20 μg/mL 2.95 15-59 μmol/L U +Transferrin (siderophilin) Serum 200-380 mg/dL 0.01 2.0-3.8 g/L U +Triglycerides Plasma, serum 10-190 mg/dL 0.01129 0.11-2.15 mmol/L U +Triiodothyronine, free (FT3) Serum 260-480 pg/dL 0.0154 4.0-7.4 pmol/L U +Triiodothyronine, resin uptake (fraction) Serum 25-35 % 0.01 0.25-0.35 Fraction of 1.0 U +Triiodothyronine, total (T3) Serum 70-200 ng/dL 0.0154 1.08-3.14 nmol/L U +Troponin I (cardiac) Serum 0-0.4 ng/mL 1.0 0-0.4 μg/L U +Troponin T (cardiac) Serum 0-0.1 ng/mL 1.0 0-0.1 μg/L U +Urea nitrogen (BUN) Serum 8-23 mg/dL 0.357 2.9-8.2 mmol/L U +Uric acid Serum 4.0-8.5 mg/dL 0.0595 0.24-0.51 mmol/L U +Urobilinogen Urine 0.05-2.5 mg/24 h 1.693 0.1-4.2 μmol/day U +Valproic acid (therapeutic) Plasma, serum 50-150 μg/mL 6.93 346-1040 μmol/L U +Vancomycin (therapeutic, peak) Plasma, serum 10-20 μg/mL 0.69 6.9-13.8 μmol/L U +Vanillylmandelic acid (VMA) Urine 2.1-7.6 mg/24 h 5.046 11-38 μmol/day U +Vasoactive intestinal polypeptide Plasma <50 pg/mL 1.0 <50 ng/L U +Verapamil (therapeutic) Plasma, serum 100-500 ng/mL 2.2 220-1100 nmol/L U +Vitamin A (retinol) Serum 30-80 μg/dL 0.0349 1.05-2.80 μmol/L U +Vitamin B1 (thiamine) Whole blood 2.5-7.5 μg/dL 29.6 74-222 nmol/L U +Vitamin B2 (riboflavin) Plasma, serum 4-24 μg/dL 26.6 106-638 nmol/L U +Vitamin B5 (pantothenic acid) Whole blood 0.2-1.8 μg/mL 4.56 0.9-8.2 μmol/L U +Vitamin B6 (pyridoxine) Plasma 5-30 ng/mL 4.046 20-121 nmol/L U +Vitamin B12 (cyanocobalamin) Serum 160-950 pg/mL 0.7378 118-701 pmol/L U +Vitamin C (ascorbic acid) Plasma, serum 0.4-1.5 mg/dL 56.78 23-85 μmol/L U +Vitamin D, 1,25-dihydroxyvitamin D Plasma, serum 16-65 pg/mL 2.6 42-169 pmol/L U +Vitamin D, 25-hydroxyvitamin D Plasma, serum 14-60 ng/mL 2.496 35-150 nmol/L U +Vitamin E (α-tocopherol) Plasma, serum 0.5-1.8 mg/dL 23.22 12-42 μmol/L U +Vitamin K Plasma, serum 0.13-1.19 ng/mL 2.22 0.29-2.64 nmol/L U +von Willebrand factor Plasma 70-140 % 0.01 0.70-1.40 Fraction of 1.0 U +Warfarin (therapeutic) Plasma, serum 1.0-10 μg/mL 3.24 3.2-32.4 μmol/L U +White blood cell count Whole blood 4.5-11.0 10^3 μL^−1 1.0 4.5-11.0 10^9 L^−1 U +Xylose absorption test (25-g dose) Whole blood >25 mg/dL 0.06661 >1.7 mmol/L U +Zidovudine (therapeutic) Plasma, serum 0.15-0.27 μg/mL 3.74 0.56-1.01 μmol/L U +Zinc Serum 50-150 μg/dL 0.153 7.7-23.0 μmol/L U diff --git a/data/ehrapy/py.typed b/data/ehrapy/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/data/ehrapy/tools/__init__.py b/data/ehrapy/tools/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c034882f7f050609a5298b41040ca148747bc94d --- /dev/null +++ b/data/ehrapy/tools/__init__.py @@ -0,0 +1,48 @@ +from ehrapy.tools._sa import ( + anova_glm, + cox_ph, + glm, + kaplan_meier, + kmf, + log_logistic_aft, + nelson_aalen, + ols, + test_kmf_logrank, + test_nested_f_statistic, + weibull, + weibull_aft, +) +from ehrapy.tools._scanpy_tl_api import * # noqa: F403 +from ehrapy.tools.causal._dowhy import causal_inference +from ehrapy.tools.cohort_tracking._cohort_tracker import CohortTracker +from ehrapy.tools.feature_ranking._feature_importances import rank_features_supervised +from ehrapy.tools.feature_ranking._rank_features_groups import filter_rank_features_groups, rank_features_groups + +try: # pragma: no cover + from
ehrapy.tools.nlp._medcat import ( + add_medcat_annotation_to_obs, + annotate_text, + get_medcat_annotation_overview, + ) +except ImportError: + pass + +__all__ = [ + "anova_glm", + "cox_ph", + "glm", + "kmf", + "kaplan_meier", + "log_logistic_aft", + "nelson_aalen", + "ols", + "test_kmf_logrank", + "test_nested_f_statistic", + "weibull", + "weibull_aft", + "causal_inference", + "CohortTracker", + "rank_features_supervised", + "filter_rank_features_groups", + "rank_features_groups", +] diff --git a/data/ehrapy/tools/_method_options.py b/data/ehrapy/tools/_method_options.py new file mode 100644 index 0000000000000000000000000000000000000000..5bcad67ca2e7d84a38a9d7c12bd35d14ded97b1d --- /dev/null +++ b/data/ehrapy/tools/_method_options.py @@ -0,0 +1,14 @@ +from typing import Literal + +_InitPos = Literal["paga", "spectral", "random"] + +_LAYOUTS = ("fr", "drl", "kk", "grid_fr", "lgl", "rt", "rt_circular", "fa") +_Layout = Literal[_LAYOUTS] # type: ignore + +_rank_features_groups_method = Literal["logreg", "t-test", "wilcoxon", "t-test_overestim_var"] | None +_correction_method = Literal["benjamini-hochberg", "bonferroni"] +_rank_features_groups_cat_method = Literal[ + "chi-square", "g-test", "freeman-tukey", "mod-log-likelihood", "neyman", "cressie-read" +] + +_marker_feature_overlap_methods = Literal["overlap_count", "overlap_coef", "jaccard"] diff --git a/data/ehrapy/tools/_sa.py b/data/ehrapy/tools/_sa.py new file mode 100644 index 0000000000000000000000000000000000000000..e436e73d103210e110b1ab7009271ca67c762219 --- /dev/null +++ b/data/ehrapy/tools/_sa.py @@ -0,0 +1,850 @@ +from __future__ import annotations + +import warnings +from typing import TYPE_CHECKING, Literal + +import numpy as np # noqa: TC002 +import pandas as pd +import statsmodels.api as sm +import statsmodels.formula.api as smf +from lifelines import ( + CoxPHFitter, + KaplanMeierFitter, + LogLogisticAFTFitter, + NelsonAalenFitter, + WeibullAFTFitter, + WeibullFitter, +) +from lifelines.statistics import StatisticalResult, logrank_test +from scipy import stats + +from ehrapy.anndata import anndata_to_df + +if TYPE_CHECKING: + from collections.abc import Iterable + + from anndata import AnnData + from statsmodels.genmod.generalized_linear_model import GLMResultsWrapper + + +def ols( + adata: AnnData, + var_names: list[str] | None = None, + formula: str | None = None, + missing: Literal["none", "drop", "raise"] | None = "none", +) -> sm.OLS: + """Create an Ordinary Least Squares (OLS) Model from a formula and AnnData. + + See https://www.statsmodels.org/stable/generated/statsmodels.formula.api.ols.html#statsmodels.formula.api.ols + + Args: + adata: The AnnData object for the OLS model. + var_names: A list of var names indicating which columns are for the OLS model. + formula: The formula specifying the model. + missing: Available options are 'none', 'drop', and 'raise'. + If 'none', no nan checking is done. If 'drop', any observations with nans are dropped. + If 'raise', an error is raised. + + Returns: + The OLS model instance.
+ + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.mimic_2(encoded=False) + >>> formula = "tco2_first ~ pco2_first" + >>> var_names = ["tco2_first", "pco2_first"] + >>> ols = ep.tl.ols(adata, var_names, formula, missing="drop") + """ + if isinstance(var_names, list): + data = pd.DataFrame(adata[:, var_names].X, columns=var_names).astype(float) + else: + data = pd.DataFrame(adata.X, columns=adata.var_names) + ols = smf.ols(formula, data=data, missing=missing) + + return ols + + +def glm( + adata: AnnData, + var_names: Iterable[str] | None = None, + formula: str | None = None, + family: Literal["Gaussian", "Binomial", "Gamma", "InverseGaussian"] = "Gaussian", + missing: Literal["none", "drop", "raise"] = "none", + as_continuous: Iterable[str] | None = None, +) -> sm.GLM: + """Create a Generalized Linear Model (GLM) from a formula, a distribution, and AnnData. + + See https://www.statsmodels.org/stable/generated/statsmodels.formula.api.glm.html#statsmodels.formula.api.glm + + Args: + adata: The AnnData object for the GLM model. + var_names: A list of var names indicating which columns are for the GLM model. + formula: The formula specifying the model. + family: The distribution family. Available options are 'Gaussian', 'Binomial', 'Gamma', and 'InverseGaussian'. + missing: Available options are 'none', 'drop', and 'raise'. If 'none', no nan checking is done. + If 'drop', any observations with nans are dropped. If 'raise', an error is raised. + as_continuous: A list of var names indicating which columns are continuous rather than categorical. + The corresponding columns will be set as type float. + + Returns: + The GLM model instance. + + Examples: + >>> import ehrapy as ep + >>> adata = ep.dt.mimic_2(encoded=False) + >>> formula = "day_28_flg ~ age" + >>> var_names = ["day_28_flg", "age"] + >>> family = "Binomial" + >>> glm = ep.tl.glm(adata, var_names, formula, family, missing="drop", as_continuous=["age"]) + """ + family_dict = { + "Gaussian": sm.families.Gaussian(), + "Binomial": sm.families.Binomial(), + "Gamma": sm.families.Gamma(), + "InverseGaussian": sm.families.InverseGaussian(), + } + if family in ["Gaussian", "Binomial", "Gamma", "InverseGaussian"]: + family = family_dict[family] + if isinstance(var_names, list): + data = pd.DataFrame(adata[:, var_names].X, columns=var_names) + else: + data = pd.DataFrame(adata.X, columns=adata.var_names) + if as_continuous is not None: + data[as_continuous] = data[as_continuous].astype(float) + glm = smf.glm(formula, data=data, family=family, missing=missing) + + return glm + + +def kmf( + durations: Iterable, + event_observed: Iterable | None = None, + timeline: Iterable | None = None, + entry: Iterable | None = None, + label: str | None = None, + alpha: float | None = None, + ci_labels: tuple[str, str] | None = None, + weights: Iterable | None = None, + censoring: Literal["right", "left"] | None = None, +) -> KaplanMeierFitter: + """DEPRECATION WARNING: This function is deprecated and will be removed in the next release. Use `kaplan_meier` instead. + + Fit the Kaplan-Meier estimate for the survival function. + + The Kaplan–Meier estimator, also known as the product limit estimator, is a non-parametric statistic used to estimate the survival function from lifetime data. + In medical research, it is often used to measure the fraction of patients living for a certain amount of time after treatment.
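+ + The estimate is the product-limit formula S(t) = Π_{tᵢ ≤ t} (1 − dᵢ/nᵢ), where dᵢ is the number of events observed at time tᵢ and nᵢ is the number of subjects at risk just before tᵢ.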
+ + See https://en.wikipedia.org/wiki/Kaplan%E2%80%93Meier_estimator and + https://lifelines.readthedocs.io/en/latest/fitters/univariate/KaplanMeierFitter.html#module-lifelines.fitters.kaplan_meier_fitter + + Args: + durations: Length n -- duration (relative to the subject's birth) the subject was alive for. + event_observed: True if the death was observed, False if the event was lost (right-censored). Defaults to all True if event_observed is equal to `None`. + timeline: Return the best estimate at the values in timeline (positively increasing). + entry: Relative time when a subject entered the study. This is useful for left-truncated (not left-censored) observations. + If None, all members of the population entered the study when they were "born". + label: A string to name the column of the estimate. + alpha: The alpha value in the confidence intervals. Overrides the initializing alpha for this call to fit only. + ci_labels: Add custom column names to the generated confidence intervals as a length-2 list: [<lower-bound name>, <upper-bound name>] (default: