Cédric KACZMAREK committed on
Commit
70b87af
1 Parent(s): 6eb1c3c

first commit

This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
Files changed (50)
  1. .gitignore +168 -0
  2. README.md +4 -4
  3. app.py +126 -0
  4. chroma_db/chroma.sqlite3 +0 -0
  5. img/logo_rizoa_auchan.jpg +0 -0
  6. llama_index/embeddings/mistralai/__init__.py +3 -0
  7. llama_index/embeddings/mistralai/base.py +111 -0
  8. llama_index/llms/mistralai/__init__.py +3 -0
  9. llama_index/llms/mistralai/base.py +297 -0
  10. llama_index/llms/mistralai/utils.py +17 -0
  11. llama_index/readers/web/__init__.py +54 -0
  12. llama_index/readers/web/async_web/README.md +35 -0
  13. llama_index/readers/web/async_web/__init__.py +0 -0
  14. llama_index/readers/web/async_web/base.py +115 -0
  15. llama_index/readers/web/beautiful_soup_web/README.md +92 -0
  16. llama_index/readers/web/beautiful_soup_web/__init__.py +0 -0
  17. llama_index/readers/web/beautiful_soup_web/base.py +206 -0
  18. llama_index/readers/web/beautiful_soup_web/requirements.txt +3 -0
  19. llama_index/readers/web/knowledge_base/README.md +96 -0
  20. llama_index/readers/web/knowledge_base/__init__.py +0 -0
  21. llama_index/readers/web/knowledge_base/base.py +165 -0
  22. llama_index/readers/web/knowledge_base/requirements.txt +1 -0
  23. llama_index/readers/web/main_content_extractor/README.md +67 -0
  24. llama_index/readers/web/main_content_extractor/__init__.py +0 -0
  25. llama_index/readers/web/main_content_extractor/base.py +47 -0
  26. llama_index/readers/web/main_content_extractor/requirements.txt +1 -0
  27. llama_index/readers/web/news/README.md +24 -0
  28. llama_index/readers/web/news/__init__.py +0 -0
  29. llama_index/readers/web/news/base.py +90 -0
  30. llama_index/readers/web/news/requirements.txt +1 -0
  31. llama_index/readers/web/readability_web/README.md +82 -0
  32. llama_index/readers/web/readability_web/Readability.js +2613 -0
  33. llama_index/readers/web/readability_web/__init__.py +0 -0
  34. llama_index/readers/web/readability_web/base.py +147 -0
  35. llama_index/readers/web/readability_web/requirements.txt +1 -0
  36. llama_index/readers/web/rss/README.md +22 -0
  37. llama_index/readers/web/rss/__init__.py +0 -0
  38. llama_index/readers/web/rss/base.py +57 -0
  39. llama_index/readers/web/rss_news/README.md +36 -0
  40. llama_index/readers/web/rss_news/__init__.py +0 -0
  41. llama_index/readers/web/rss_news/base.py +106 -0
  42. llama_index/readers/web/rss_news/sample_rss_feeds.opml +13 -0
  43. llama_index/readers/web/simple_web/README.md +65 -0
  44. llama_index/readers/web/simple_web/__init__.py +0 -0
  45. llama_index/readers/web/simple_web/base.py +74 -0
  46. llama_index/readers/web/simple_web/requirements.txt +1 -0
  47. llama_index/readers/web/sitemap/README.md +54 -0
  48. llama_index/readers/web/sitemap/__init__.py +0 -0
  49. llama_index/readers/web/sitemap/base.py +52 -0
  50. llama_index/readers/web/sitemap/requirements.txt +0 -0
.gitignore ADDED
@@ -0,0 +1,168 @@
+ # Everything in local
+ local/*
+ out/*
+
+ !.gitkeep
+
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/#use-with-ide
+ .pdm.toml
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ .idea/
+
+ # VsCode
+ .vscode/
README.md CHANGED
@@ -1,8 +1,8 @@
  ---
- title: Hack
- emoji: 📈
- colorFrom: green
- colorTo: blue
+ title: Rizoa Auchan Gaia
+ emoji: 🔥
+ colorFrom: yellow
+ colorTo: gray
  sdk: gradio
  sdk_version: 4.19.2
  app_file: app.py
app.py ADDED
@@ -0,0 +1,126 @@
+ import os
+ import json
+ import gradio as gr
+ from llama_index.core import (
+     VectorStoreIndex,
+     download_loader,
+     StorageContext
+ )
+ from dotenv import load_dotenv, find_dotenv
+
+ import chromadb
+
+ from llama_index.llms.mistralai import MistralAI
+ from llama_index.embeddings.mistralai import MistralAIEmbedding
+ from llama_index.vector_stores.chroma import ChromaVectorStore
+ from llama_index.core.indices.service_context import ServiceContext
+
+ TITLE = "RIZOA-AUCHAN Chatbot Demo"
+ DESCRIPTION = "Example of an assistant built with Gradio, coupled with function calling and Mistral AI via its API"
+ PLACEHOLDER = (
+     "Vous pouvez me poser une question sur ce contexte, appuyez sur Entrée pour valider"
+ )
+ PLACEHOLDER_URL = "Extract text from this url"
+ llm_model = "mistral-medium"
+
+ load_dotenv()
+ env_api_key = os.environ.get("MISTRAL_API_KEY")
+ query_engine = None
+
+ # Define LLMs
+ llm = MistralAI(api_key=env_api_key, model=llm_model)
+ embed_model = MistralAIEmbedding(model_name="mistral-embed", api_key=env_api_key)
+
+ # create client and a new collection
+ db = chromadb.PersistentClient(path="./chroma_db")
+ chroma_collection = db.get_or_create_collection("quickstart")
+
+ # set up ChromaVectorStore and load in data
+ vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
+ storage_context = StorageContext.from_defaults(vector_store=vector_store)
+ service_context = ServiceContext.from_defaults(
+     chunk_size=1024, llm=llm, embed_model=embed_model
+ )
+
+ PDFReader = download_loader("PDFReader")
+ loader = PDFReader()
+
+ index = VectorStoreIndex(
+     [], service_context=service_context, storage_context=storage_context
+ )
+ query_engine = index.as_query_engine(similarity_top_k=5)
+
+ with gr.Blocks() as demo:
+     with gr.Row():
+         with gr.Column(scale=1):
+             gr.Image(value="./img/logo_rizoa_auchan.jpg",
+                      height=250,
+                      width=250,
+                      container=False,
+                      show_download_button=False
+                      )
+         with gr.Column(scale=4):
+             gr.Markdown(
+                 """
+                 # Bienvenue au Chatbot FAIR-PLAI
+
+                 Ce chatbot est un assistant numérique, médiateur des vendeurs-acheteurs
+                 """
+             )
+
+     # gr.Markdown(""" ### 1 / Extract data from PDF """)
+
+     # with gr.Row():
+     #     with gr.Column():
+     #         input_file = gr.File(
+     #             label="Load a pdf",
+     #             file_types=[".pdf"],
+     #             file_count="single",
+     #             type="filepath",
+     #             interactive=True,
+     #         )
+     #         file_msg = gr.Textbox(
+     #             label="Loaded documents:", container=False, visible=False
+     #         )
+
+     #         input_file.upload(
+     #             fn=load_document,
+     #             inputs=[
+     #                 input_file,
+     #             ],
+     #             outputs=[file_msg],
+     #             concurrency_limit=20,
+     #         )
+
+     #         file_btn = gr.Button(value="Encode file ✅", interactive=True)
+     #         btn_msg = gr.Textbox(container=False, visible=False)
+
+     #         with gr.Row():
+     #             db_list = gr.Markdown(value=get_documents_in_db)
+     #             delete_btn = gr.Button(value="Empty db 🗑️", interactive=True, scale=0)
+
+     #         file_btn.click(
+     #             load_file,
+     #             inputs=[input_file],
+     #             outputs=[file_msg, btn_msg, db_list],
+     #             show_progress="full",
+     #         )
+     #         delete_btn.click(empty_db, outputs=[db_list], show_progress="minimal")
+
+     gr.Markdown(""" ### Ask a question """)
+
+     chatbot = gr.Chatbot()
+     msg = gr.Textbox(placeholder=PLACEHOLDER)
+     clear = gr.ClearButton([msg, chatbot])
+
+     def respond(message, chat_history):
+         response = query_engine.query(message)
+         chat_history.append((message, str(response)))
+         return chat_history
+
+     msg.submit(respond, [msg, chatbot], [chatbot])
+
+ demo.title = TITLE
+
+ if __name__ == "__main__":
+     demo.launch()
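The commented-out upload UI above wires `input_file.upload` to a `load_document` callback that is not defined anywhere in this commit. A minimal sketch of what such a helper could look like, reusing the `loader` and `index` objects created in app.py (the function name and return message are assumptions, not part of the commit):

```python
# Hypothetical helper, not part of this commit: index a local PDF into the
# Chroma-backed VectorStoreIndex so that query_engine has context to answer from.
from pathlib import Path

def load_document(file_path: str) -> str:
    pdf_documents = loader.load_data(file=Path(file_path))  # PDFReader defined in app.py
    for document in pdf_documents:
        index.insert(document)  # embeds with mistral-embed and persists to ./chroma_db
    return f"Loaded {len(pdf_documents)} page(s) from {file_path}"
```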
chroma_db/chroma.sqlite3 ADDED
Binary file (147 kB).
 
img/logo_rizoa_auchan.jpg ADDED
llama_index/embeddings/mistralai/__init__.py ADDED
@@ -0,0 +1,3 @@
+ from llama_index.embeddings.mistralai.base import MistralAIEmbedding
+
+ __all__ = ["MistralAIEmbedding"]
llama_index/embeddings/mistralai/base.py ADDED
@@ -0,0 +1,111 @@
+ """MistralAI embeddings file."""
+
+ from typing import Any, List, Optional
+
+ from llama_index.core.base.embeddings.base import (
+     DEFAULT_EMBED_BATCH_SIZE,
+     BaseEmbedding,
+ )
+ from llama_index.core.bridge.pydantic import PrivateAttr
+ from llama_index.core.callbacks.base import CallbackManager
+ from llama_index.core.base.llms.generic_utils import get_from_param_or_env
+
+ from mistralai.async_client import MistralAsyncClient
+ from mistralai.client import MistralClient
+
+
+ class MistralAIEmbedding(BaseEmbedding):
+     """Class for MistralAI embeddings.
+
+     Args:
+         model_name (str): Model for embedding.
+             Defaults to "mistral-embed".
+
+         api_key (Optional[str]): API key to access the model. Defaults to None.
+     """
+
+     # Instance variables initialized via Pydantic's mechanism
+     _mistralai_client: Any = PrivateAttr()
+     _mistralai_async_client: Any = PrivateAttr()
+
+     def __init__(
+         self,
+         model_name: str = "mistral-embed",
+         api_key: Optional[str] = None,
+         embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
+         callback_manager: Optional[CallbackManager] = None,
+         **kwargs: Any,
+     ):
+         api_key = get_from_param_or_env("api_key", api_key, "MISTRAL_API_KEY", "")
+
+         if not api_key:
+             raise ValueError(
+                 "You must provide an API key to use mistralai. "
+                 "You can either pass it in as an argument or set the `MISTRAL_API_KEY` environment variable."
+             )
+         self._mistralai_client = MistralClient(api_key=api_key)
+         self._mistralai_async_client = MistralAsyncClient(api_key=api_key)
+         super().__init__(
+             model_name=model_name,
+             embed_batch_size=embed_batch_size,
+             callback_manager=callback_manager,
+             **kwargs,
+         )
+
+     @classmethod
+     def class_name(cls) -> str:
+         return "MistralAIEmbedding"
+
+     def _get_query_embedding(self, query: str) -> List[float]:
+         """Get query embedding."""
+         return (
+             self._mistralai_client.embeddings(model=self.model_name, input=[query])
+             .data[0]
+             .embedding
+         )
+
+     async def _aget_query_embedding(self, query: str) -> List[float]:
+         """The asynchronous version of _get_query_embedding."""
+         return (
+             (
+                 await self._mistralai_async_client.embeddings(
+                     model=self.model_name, input=[query]
+                 )
+             )
+             .data[0]
+             .embedding
+         )
+
+     def _get_text_embedding(self, text: str) -> List[float]:
+         """Get text embedding."""
+         return (
+             self._mistralai_client.embeddings(model=self.model_name, input=[text])
+             .data[0]
+             .embedding
+         )
+
+     async def _aget_text_embedding(self, text: str) -> List[float]:
+         """Asynchronously get text embedding."""
+         return (
+             (
+                 await self._mistralai_async_client.embeddings(
+                     model=self.model_name, input=[text]
+                 )
+             )
+             .data[0]
+             .embedding
+         )
+
+     def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
+         """Get text embeddings."""
+         embedding_response = self._mistralai_client.embeddings(
+             model=self.model_name, input=texts
+         ).data
+         return [embed.embedding for embed in embedding_response]
+
+     async def _aget_text_embeddings(self, texts: List[str]) -> List[List[float]]:
+         """Asynchronously get text embeddings."""
+         embedding_response = await self._mistralai_async_client.embeddings(
+             model=self.model_name, input=texts
+         )
+         return [embed.embedding for embed in embedding_response.data]
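A minimal usage sketch for this vendored embedding class, assuming `MISTRAL_API_KEY` is set in the environment (the sample query string is illustrative only):

```python
from llama_index.embeddings.mistralai import MistralAIEmbedding

embed_model = MistralAIEmbedding(model_name="mistral-embed")
vector = embed_model.get_query_embedding("Quels sont les horaires du magasin ?")
print(len(vector))  # dimensionality of a mistral-embed vector
```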
llama_index/llms/mistralai/__init__.py ADDED
@@ -0,0 +1,3 @@
+ from llama_index.llms.mistralai.base import MistralAI
+
+ __all__ = ["MistralAI"]
llama_index/llms/mistralai/base.py ADDED
@@ -0,0 +1,297 @@
+ from typing import Any, Callable, Dict, Optional, Sequence
+
+ # from mistralai.models.chat_completion import ChatMessage
+ from llama_index.core.base.llms.types import (
+     ChatMessage,
+     ChatResponse,
+     ChatResponseAsyncGen,
+     ChatResponseGen,
+     CompletionResponse,
+     CompletionResponseAsyncGen,
+     CompletionResponseGen,
+     LLMMetadata,
+     MessageRole,
+ )
+ from llama_index.core.bridge.pydantic import Field, PrivateAttr
+ from llama_index.core.callbacks import CallbackManager
+ from llama_index.core.constants import DEFAULT_TEMPERATURE
+ from llama_index.core.llms.callbacks import (
+     llm_chat_callback,
+     llm_completion_callback,
+ )
+ from llama_index.core.base.llms.generic_utils import (
+     achat_to_completion_decorator,
+     astream_chat_to_completion_decorator,
+     chat_to_completion_decorator,
+     get_from_param_or_env,
+     stream_chat_to_completion_decorator,
+ )
+ from llama_index.core.llms.llm import LLM
+ from llama_index.core.types import BaseOutputParser, PydanticProgramMode
+ from llama_index.llms.mistralai.utils import (
+     mistralai_modelname_to_contextsize,
+ )
+
+ from mistralai.async_client import MistralAsyncClient
+ from mistralai.client import MistralClient
+
+ DEFAULT_MISTRALAI_MODEL = "mistral-tiny"
+ DEFAULT_MISTRALAI_ENDPOINT = "https://api.mistral.ai"
+ DEFAULT_MISTRALAI_MAX_TOKENS = 512
+
+
+ class MistralAI(LLM):
+     model: str = Field(
+         default=DEFAULT_MISTRALAI_MODEL, description="The mistralai model to use."
+     )
+     temperature: float = Field(
+         default=DEFAULT_TEMPERATURE,
+         description="The temperature to use for sampling.",
+         gte=0.0,
+         lte=1.0,
+     )
+     max_tokens: int = Field(
+         default=DEFAULT_MISTRALAI_MAX_TOKENS,
+         description="The maximum number of tokens to generate.",
+         gt=0,
+     )
+
+     timeout: float = Field(
+         default=120, description="The timeout to use in seconds.", gte=0
+     )
+     max_retries: int = Field(
+         default=5, description="The maximum number of API retries.", gte=0
+     )
+     safe_mode: bool = Field(
+         default=False,
+         description="The parameter to enforce guardrails in chat generations.",
+     )
+     random_seed: Optional[int] = Field(
+         default=None, description="The random seed to use for sampling."
+     )
+     additional_kwargs: Dict[str, Any] = Field(
+         default_factory=dict, description="Additional kwargs for the MistralAI API."
+     )
+
+     _client: Any = PrivateAttr()
+     _aclient: Any = PrivateAttr()
+
+     def __init__(
+         self,
+         model: str = DEFAULT_MISTRALAI_MODEL,
+         temperature: float = DEFAULT_TEMPERATURE,
+         max_tokens: int = DEFAULT_MISTRALAI_MAX_TOKENS,
+         timeout: int = 120,
+         max_retries: int = 5,
+         safe_mode: bool = False,
+         random_seed: Optional[int] = None,
+         api_key: Optional[str] = None,
+         additional_kwargs: Optional[Dict[str, Any]] = None,
+         callback_manager: Optional[CallbackManager] = None,
+         system_prompt: Optional[str] = None,
+         messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
+         completion_to_prompt: Optional[Callable[[str], str]] = None,
+         pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
+         output_parser: Optional[BaseOutputParser] = None,
+     ) -> None:
+         additional_kwargs = additional_kwargs or {}
+         callback_manager = callback_manager or CallbackManager([])
+
+         api_key = get_from_param_or_env("api_key", api_key, "MISTRAL_API_KEY", "")
+
+         if not api_key:
+             raise ValueError(
+                 "You must provide an API key to use mistralai. "
+                 "You can either pass it in as an argument or set the `MISTRAL_API_KEY` environment variable."
+             )
+
+         self._client = MistralClient(
+             api_key=api_key,
+             endpoint=DEFAULT_MISTRALAI_ENDPOINT,
+             timeout=timeout,
+             max_retries=max_retries,
+         )
+         self._aclient = MistralAsyncClient(
+             api_key=api_key,
+             endpoint=DEFAULT_MISTRALAI_ENDPOINT,
+             timeout=timeout,
+             max_retries=max_retries,
+         )
+
+         super().__init__(
+             temperature=temperature,
+             max_tokens=max_tokens,
+             additional_kwargs=additional_kwargs,
+             timeout=timeout,
+             max_retries=max_retries,
+             safe_mode=safe_mode,
+             random_seed=random_seed,
+             model=model,
+             callback_manager=callback_manager,
+             system_prompt=system_prompt,
+             messages_to_prompt=messages_to_prompt,
+             completion_to_prompt=completion_to_prompt,
+             pydantic_program_mode=pydantic_program_mode,
+             output_parser=output_parser,
+         )
+
+     @classmethod
+     def class_name(cls) -> str:
+         return "MistralAI_LLM"
+
+     @property
+     def metadata(self) -> LLMMetadata:
+         return LLMMetadata(
+             context_window=mistralai_modelname_to_contextsize(self.model),
+             num_output=self.max_tokens,
+             is_chat_model=True,
+             model_name=self.model,
+             safe_mode=self.safe_mode,
+             random_seed=self.random_seed,
+         )
+
+     @property
+     def _model_kwargs(self) -> Dict[str, Any]:
+         base_kwargs = {
+             "model": self.model,
+             "temperature": self.temperature,
+             "max_tokens": self.max_tokens,
+             "random_seed": self.random_seed,
+             "safe_mode": self.safe_mode,
+         }
+         return {
+             **base_kwargs,
+             **self.additional_kwargs,
+         }
+
+     def _get_all_kwargs(self, **kwargs: Any) -> Dict[str, Any]:
+         return {
+             **self._model_kwargs,
+             **kwargs,
+         }
+
+     @llm_chat_callback()
+     def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
+         # convert messages to mistral ChatMessage
+         from mistralai.client import ChatMessage as mistral_chatmessage
+
+         messages = [
+             mistral_chatmessage(role=x.role, content=x.content) for x in messages
+         ]
+         all_kwargs = self._get_all_kwargs(**kwargs)
+         response = self._client.chat(messages=messages, **all_kwargs)
+         return ChatResponse(
+             message=ChatMessage(
+                 role=MessageRole.ASSISTANT, content=response.choices[0].message.content
+             ),
+             raw=dict(response),
+         )
+
+     @llm_completion_callback()
+     def complete(
+         self, prompt: str, formatted: bool = False, **kwargs: Any
+     ) -> CompletionResponse:
+         complete_fn = chat_to_completion_decorator(self.chat)
+         return complete_fn(prompt, **kwargs)
+
+     @llm_chat_callback()
+     def stream_chat(
+         self, messages: Sequence[ChatMessage], **kwargs: Any
+     ) -> ChatResponseGen:
+         # convert messages to mistral ChatMessage
+         from mistralai.client import ChatMessage as mistral_chatmessage
+
+         messages = [
+             mistral_chatmessage(role=message.role, content=message.content)
+             for message in messages
+         ]
+         all_kwargs = self._get_all_kwargs(**kwargs)
+
+         response = self._client.chat_stream(messages=messages, **all_kwargs)
+
+         def gen() -> ChatResponseGen:
+             content = ""
+             role = MessageRole.ASSISTANT
+             for chunk in response:
+                 content_delta = chunk.choices[0].delta.content
+                 if content_delta is None:
+                     continue
+                 content += content_delta
+                 yield ChatResponse(
+                     message=ChatMessage(role=role, content=content),
+                     delta=content_delta,
+                     raw=chunk,
+                 )
+
+         return gen()
+
+     @llm_completion_callback()
+     def stream_complete(
+         self, prompt: str, formatted: bool = False, **kwargs: Any
+     ) -> CompletionResponseGen:
+         stream_complete_fn = stream_chat_to_completion_decorator(self.stream_chat)
+         return stream_complete_fn(prompt, **kwargs)
+
+     @llm_chat_callback()
+     async def achat(
+         self, messages: Sequence[ChatMessage], **kwargs: Any
+     ) -> ChatResponse:
+         # convert messages to mistral ChatMessage
+         from mistralai.client import ChatMessage as mistral_chatmessage
+
+         messages = [
+             mistral_chatmessage(role=message.role, content=message.content)
+             for message in messages
+         ]
+         all_kwargs = self._get_all_kwargs(**kwargs)
+         response = await self._aclient.chat(messages=messages, **all_kwargs)
+         return ChatResponse(
+             message=ChatMessage(
+                 role=MessageRole.ASSISTANT, content=response.choices[0].message.content
+             ),
+             raw=dict(response),
+         )
+
+     @llm_completion_callback()
+     async def acomplete(
+         self, prompt: str, formatted: bool = False, **kwargs: Any
+     ) -> CompletionResponse:
+         acomplete_fn = achat_to_completion_decorator(self.achat)
+         return await acomplete_fn(prompt, **kwargs)
+
+     @llm_chat_callback()
+     async def astream_chat(
+         self, messages: Sequence[ChatMessage], **kwargs: Any
+     ) -> ChatResponseAsyncGen:
+         # convert messages to mistral ChatMessage
+         from mistralai.client import ChatMessage as mistral_chatmessage
+
+         messages = [
+             mistral_chatmessage(role=x.role, content=x.content) for x in messages
+         ]
+         all_kwargs = self._get_all_kwargs(**kwargs)
+
+         response = await self._aclient.chat_stream(messages=messages, **all_kwargs)
+
+         async def gen() -> ChatResponseAsyncGen:
+             content = ""
+             role = MessageRole.ASSISTANT
+             async for chunk in response:
+                 content_delta = chunk.choices[0].delta.content
+                 if content_delta is None:
+                     continue
+                 content += content_delta
+                 yield ChatResponse(
+                     message=ChatMessage(role=role, content=content),
+                     delta=content_delta,
+                     raw=chunk,
+                 )
+
+         return gen()
+
+     @llm_completion_callback()
+     async def astream_complete(
+         self, prompt: str, formatted: bool = False, **kwargs: Any
+     ) -> CompletionResponseAsyncGen:
+         astream_complete_fn = astream_chat_to_completion_decorator(self.astream_chat)
+         return await astream_complete_fn(prompt, **kwargs)
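A minimal usage sketch for this vendored LLM wrapper, assuming `MISTRAL_API_KEY` is set in the environment (the prompt content is illustrative only):

```python
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.llms.mistralai import MistralAI

llm = MistralAI(model="mistral-medium", max_tokens=256)
response = llm.chat([ChatMessage(role=MessageRole.USER, content="Bonjour !")])
print(response.message.content)
```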
llama_index/llms/mistralai/utils.py ADDED
@@ -0,0 +1,17 @@
+ from typing import Dict
+
+ MISTRALAI_MODELS: Dict[str, int] = {
+     "mistral-tiny": 32000,
+     "mistral-small": 32000,
+     "mistral-medium": 32000,
+ }
+
+
+ def mistralai_modelname_to_contextsize(modelname: str) -> int:
+     if modelname not in MISTRALAI_MODELS:
+         raise ValueError(
+             f"Unknown model: {modelname}. Please provide a valid MistralAI model name. "
+             "Known models are: " + ", ".join(MISTRALAI_MODELS.keys())
+         )
+
+     return MISTRALAI_MODELS[modelname]
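The helper above is a plain table lookup; a quick sanity check, runnable without an API key:

```python
from llama_index.llms.mistralai.utils import mistralai_modelname_to_contextsize

assert mistralai_modelname_to_contextsize("mistral-medium") == 32000
```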
llama_index/readers/web/__init__.py ADDED
@@ -0,0 +1,54 @@
+ """Init file."""
+ from llama_index.readers.web.async_web.base import (
+     AsyncWebPageReader,
+ )
+ from llama_index.readers.web.beautiful_soup_web.base import (
+     BeautifulSoupWebReader,
+ )
+ from llama_index.readers.web.knowledge_base.base import (
+     KnowledgeBaseWebReader,
+ )
+ from llama_index.readers.web.main_content_extractor.base import (
+     MainContentExtractorReader,
+ )
+ from llama_index.readers.web.news.base import NewsArticleReader
+ from llama_index.readers.web.readability_web.base import (
+     ReadabilityWebPageReader,
+ )
+ from llama_index.readers.web.rss.base import (
+     RssReader,
+ )
+ from llama_index.readers.web.rss_news.base import (
+     RssNewsReader,
+ )
+ from llama_index.readers.web.simple_web.base import (
+     SimpleWebPageReader,
+ )
+ from llama_index.readers.web.sitemap.base import (
+     SitemapReader,
+ )
+ from llama_index.readers.web.trafilatura_web.base import (
+     TrafilaturaWebReader,
+ )
+ from llama_index.readers.web.unstructured_web.base import (
+     UnstructuredURLLoader,
+ )
+ from llama_index.readers.web.whole_site.base import (
+     WholeSiteReader,
+ )
+
+ __all__ = [
+     "AsyncWebPageReader",
+     "BeautifulSoupWebReader",
+     "KnowledgeBaseWebReader",
+     "MainContentExtractorReader",
+     "NewsArticleReader",
+     "ReadabilityWebPageReader",
+     "RssReader",
+     "RssNewsReader",
+     "SimpleWebPageReader",
+     "SitemapReader",
+     "TrafilaturaWebReader",
+     "UnstructuredURLLoader",
+     "WholeSiteReader",
+ ]
llama_index/readers/web/async_web/README.md ADDED
@@ -0,0 +1,35 @@
+ # Async Website Loader
+
+ This loader is an asynchronous web scraper that fetches the text from static websites by converting the HTML to text.
+
+ ## Usage
+
+ To use this loader, you need to pass in an array of URLs.
+
+ ```python
+ from llama_index.readers.web.async_web.base import AsyncWebPageReader
+
+ # for jupyter notebooks uncomment the following two lines of code:
+ # import nest_asyncio
+ # nest_asyncio.apply()
+
+ loader = AsyncWebPageReader()
+ documents = loader.load_data(urls=["https://google.com"])
+ ```
+
+ ### asyncio issues in Jupyter Notebooks
+
+ If you get a `RuntimeError: asyncio.run() cannot be called from a running event loop`, you might be interested in [this solution](https://saturncloud.io/blog/asynciorun-cannot-be-called-from-a-running-event-loop-a-guide-for-data-scientists-using-jupyter-notebook/#option-3-use-nest_asyncio).
+
+ ### Old Usage
+
+ Use this syntax for earlier versions of llama_index, where llama_hub loaders were loaded via a separate download process:
+
+ ```python
+ from llama_index import download_loader
+
+ AsyncWebPageReader = download_loader("AsyncWebPageReader")
+
+ loader = AsyncWebPageReader()
+ documents = loader.load_data(urls=["https://google.com"])
+ ```
llama_index/readers/web/async_web/__init__.py ADDED
File without changes
llama_index/readers/web/async_web/base.py ADDED
@@ -0,0 +1,115 @@
+ import asyncio
+ import logging
+ from typing import List
+
+ from llama_index.core.readers.base import BaseReader
+ from llama_index.core.schema import Document
+
+ logger = logging.getLogger(__name__)
+
+
+ class AsyncWebPageReader(BaseReader):
+     """Asynchronous web page reader.
+
+     Reads pages from the web asynchronously.
+
+     Args:
+         html_to_text (bool): Whether to convert HTML to text.
+             Requires the `html2text` package.
+         limit (int): Maximum number of concurrent requests.
+         dedupe (bool): Whether to deduplicate urls if there is an exact match within the given list.
+         fail_on_error (bool): If a requested url does not return status code 200, raise a ValueError.
+     """
+
+     def __init__(
+         self,
+         html_to_text: bool = False,
+         limit: int = 10,
+         dedupe: bool = True,
+         fail_on_error: bool = False,
+     ) -> None:
+         """Initialize with parameters."""
+         try:
+             import html2text  # noqa: F401
+         except ImportError:
+             raise ImportError(
+                 "`html2text` package not found, please run `pip install html2text`"
+             )
+         try:
+             import aiohttp  # noqa: F401
+         except ImportError:
+             raise ImportError(
+                 "`aiohttp` package not found, please run `pip install aiohttp`"
+             )
+         self._limit = limit
+         self._html_to_text = html_to_text
+         self._dedupe = dedupe
+         self._fail_on_error = fail_on_error
+
+     def load_data(self, urls: List[str]) -> List[Document]:
+         """Load data from the input urls.
+
+         Args:
+             urls (List[str]): List of URLs to scrape.
+
+         Returns:
+             List[Document]: List of documents.
+
+         """
+         if not isinstance(urls, list):
+             raise ValueError("urls must be a list of strings.")
+
+         if self._dedupe:
+             urls = list(dict.fromkeys(urls))
+
+         import aiohttp
+
+         def chunked_http_client(limit: int):
+             semaphore = asyncio.Semaphore(limit)
+
+             async def http_get(url: str, session: aiohttp.ClientSession):
+                 async with semaphore:
+                     async with session.get(url) as response:
+                         return response, await response.text()
+
+             return http_get
+
+         async def fetch_urls(urls: List[str]):
+             http_client = chunked_http_client(self._limit)
+             async with aiohttp.ClientSession() as session:
+                 tasks = [http_client(url, session) for url in urls]
+                 return await asyncio.gather(*tasks, return_exceptions=True)
+
+         documents = []
+         responses = asyncio.run(fetch_urls(urls))
+
+         for i, response_tuple in enumerate(responses):
+             if not isinstance(response_tuple, tuple):
+                 raise ValueError(f"One of the inputs is not a valid url: {urls[i]}")
+
+             response, raw_page = response_tuple
+
+             if response.status != 200:
+                 logger.warning(f"error fetching page from {urls[i]}")
+                 logger.info(response)
+
+                 if self._fail_on_error:
+                     raise ValueError(
+                         f"error fetching page from {urls[i]}. server returned status:"
+                         f" {response.status} and response {raw_page}"
+                     )
+
+                 continue
+
+             if self._html_to_text:
+                 import html2text
+
+                 response_text = html2text.html2text(raw_page)
+             else:
+                 response_text = raw_page
+
+             documents.append(
+                 Document(text=response_text, extra_info={"Source": str(response.url)})
+             )
+
+         return documents
llama_index/readers/web/beautiful_soup_web/README.md ADDED
@@ -0,0 +1,92 @@
+ # Beautiful Soup Website Loader
+
+ This loader is a web scraper that fetches the text from websites using the `Beautiful Soup` (aka `bs4`) Python package. Furthermore, the flexibility of Beautiful Soup allows for custom templates that enable the loader to extract the desired text from specific website designs, such as Substack. Check out the code to see how to add your own.
+
+ ## Usage
+
+ To use this loader, you need to pass in an array of URLs.
+
+ ```python
+ from llama_index import download_loader
+
+ BeautifulSoupWebReader = download_loader("BeautifulSoupWebReader")
+
+ loader = BeautifulSoupWebReader()
+ documents = loader.load_data(urls=["https://google.com"])
+ ```
+
+ You can also add your own specific website parsers in `base.py` that automatically get used for certain URLs. Alternatively, you may tell the loader to use a certain parser by passing in the `custom_hostname` argument. For reference, this is what the Beautiful Soup parser looks like for Substack sites:
+
+ ```python
+ def _substack_reader(soup: Any) -> Tuple[str, Dict[str, Any]]:
+     """Extract text from Substack blog post."""
+     extra_info = {
+         "Title of this Substack post": soup.select_one("h1.post-title").getText(),
+         "Subtitle": soup.select_one("h3.subtitle").getText(),
+         "Author": soup.select_one("span.byline-names").getText(),
+     }
+     text = soup.select_one("div.available-content").getText()
+     return text, extra_info
+ ```
+
+ ## Examples
+
+ This loader is designed to be used as a way to load data into [LlamaIndex](https://github.com/run-llama/llama_index/tree/main/llama_index) and/or subsequently used as a Tool in a [LangChain](https://github.com/hwchase17/langchain) Agent.
+
+ ### LlamaIndex
+
+ ```python
+ from llama_index import VectorStoreIndex, download_loader
+
+ BeautifulSoupWebReader = download_loader("BeautifulSoupWebReader")
+
+ loader = BeautifulSoupWebReader()
+ documents = loader.load_data(urls=["https://google.com"])
+ index = VectorStoreIndex.from_documents(documents)
+ index.query("What language is on this website?")
+ ```
+
+ ### LangChain
+
+ Note: Make sure you change the description of the `Tool` to match your use-case.
+
+ ```python
+ from llama_index import VectorStoreIndex, download_loader
+ from langchain.agents import initialize_agent, Tool
+ from langchain.llms import OpenAI
+ from langchain.chains.conversation.memory import ConversationBufferMemory
+
+ BeautifulSoupWebReader = download_loader("BeautifulSoupWebReader")
+
+ loader = BeautifulSoupWebReader()
+ documents = loader.load_data(urls=["https://google.com"])
+ index = VectorStoreIndex.from_documents(documents)
+
+ tools = [
+     Tool(
+         name="Website Index",
+         func=lambda q: index.query(q),
+         description="Useful when you want to answer questions about the text on websites.",
+     ),
+ ]
+ llm = OpenAI(temperature=0)
+ memory = ConversationBufferMemory(memory_key="chat_history")
+ agent_chain = initialize_agent(
+     tools, llm, agent="zero-shot-react-description", memory=memory
+ )
+
+ output = agent_chain.run(input="What language is on this website?")
+ ```
+
+ ## Custom hostname example
+
+ To use a custom hostname like readme.com, substack.com or any other commonly-used website template, you can pass in the `custom_hostname` argument to guarantee that a custom parser is used (if it exists). Check out the code to see which ones are currently implemented.
+
+ ```python
+ documents = loader.load_data(
+     urls=["https://langchain.readthedocs.io/en/latest/"],
+     custom_hostname="readthedocs.io",
+ )
+ ```
llama_index/readers/web/beautiful_soup_web/__init__.py ADDED
File without changes
llama_index/readers/web/beautiful_soup_web/base.py ADDED
@@ -0,0 +1,206 @@
+ """Beautiful Soup Web scraper."""
+
+ import logging
+ from typing import Any, Callable, Dict, List, Optional, Tuple
+ from urllib.parse import urljoin
+
+ from llama_index.core.bridge.pydantic import PrivateAttr
+ from llama_index.core.readers.base import BasePydanticReader
+ from llama_index.core.schema import Document
+
+ logger = logging.getLogger(__name__)
+
+
+ def _substack_reader(soup: Any, **kwargs) -> Tuple[str, Dict[str, Any]]:
+     """Extract text from Substack blog post."""
+     extra_info = {
+         "Title of this Substack post": soup.select_one("h1.post-title").getText(),
+         "Subtitle": soup.select_one("h3.subtitle").getText(),
+         "Author": soup.select_one("span.byline-names").getText(),
+     }
+     text = soup.select_one("div.available-content").getText()
+     return text, extra_info
+
+
+ def _readthedocs_reader(soup: Any, url: str, **kwargs) -> Tuple[str, Dict[str, Any]]:
+     """Extract text from a ReadTheDocs documentation site."""
+     import requests
+     from bs4 import BeautifulSoup
+
+     links = soup.find_all("a", {"class": "reference internal"})
+     rtd_links = []
+
+     for link in links:
+         rtd_links.append(link["href"])
+     for i in range(len(rtd_links)):
+         if not rtd_links[i].startswith("http"):
+             rtd_links[i] = urljoin(url, rtd_links[i])
+
+     texts = []
+     for doc_link in rtd_links:
+         page_link = requests.get(doc_link)
+         soup = BeautifulSoup(page_link.text, "html.parser")
+         try:
+             text = soup.find(attrs={"role": "main"}).get_text()
+
+         except IndexError:
+             text = None
+         if text:
+             texts.append("\n".join([t for t in text.split("\n") if t]))
+     return "\n".join(texts), {}
+
+
+ def _readmedocs_reader(
+     soup: Any, url: str, include_url_in_text: bool = True
+ ) -> Tuple[str, Dict[str, Any]]:
+     """Extract text from a ReadMe documentation site."""
+     import requests
+     from bs4 import BeautifulSoup
+
+     links = soup.find_all("a")
+     docs_links = [link["href"] for link in links if "/docs/" in link["href"]]
+     docs_links = list(set(docs_links))
+     for i in range(len(docs_links)):
+         if not docs_links[i].startswith("http"):
+             docs_links[i] = urljoin(url, docs_links[i])
+
+     texts = []
+     for doc_link in docs_links:
+         page_link = requests.get(doc_link)
+         soup = BeautifulSoup(page_link.text, "html.parser")
+         try:
+             text = ""
+             for element in soup.find_all("article", {"id": "content"}):
+                 for child in element.descendants:
+                     if child.name == "a" and child.has_attr("href"):
+                         if include_url_in_text:
+                             url = child.get("href")
+                             if url is not None and "edit" in url:
+                                 text += child.text
+                             else:
+                                 text += (
+                                     f"{child.text} (Reference url: {doc_link}{url}) "
+                                 )
+                     elif child.string and child.string.strip():
+                         text += child.string.strip() + " "
+
+         except IndexError:
+             text = None
+             logger.error(f"Could not extract text from {doc_link}")
+             continue
+         texts.append("\n".join([t for t in text.split("\n") if t]))
+     return "\n".join(texts), {}
+
+
+ def _gitbook_reader(
+     soup: Any, url: str, include_url_in_text: bool = True
+ ) -> Tuple[str, Dict[str, Any]]:
+     """Extract text from a GitBook documentation site."""
+     import requests
+     from bs4 import BeautifulSoup
+
+     links = soup.find_all("a")
+     docs_links = [link["href"] for link in links if "/docs/" in link["href"]]
+     docs_links = list(set(docs_links))
+     for i in range(len(docs_links)):
+         if not docs_links[i].startswith("http"):
+             docs_links[i] = urljoin(url, docs_links[i])
+
+     texts = []
+     for doc_link in docs_links:
+         page_link = requests.get(doc_link)
+         soup = BeautifulSoup(page_link.text, "html.parser")
+         try:
+             text = soup.find("main")
+             clean_text = ", ".join([tag.get_text() for tag in text])
+         except IndexError:
+             text = None
+             logger.error(f"Could not extract text from {doc_link}")
+             continue
+         texts.append(clean_text)
+     return "\n".join(texts), {}
+
+
+ DEFAULT_WEBSITE_EXTRACTOR: Dict[
+     str, Callable[[Any, str], Tuple[str, Dict[str, Any]]]
+ ] = {
+     "substack.com": _substack_reader,
+     "readthedocs.io": _readthedocs_reader,
+     "readme.com": _readmedocs_reader,
+     "gitbook.io": _gitbook_reader,
+ }
+
+
+ class BeautifulSoupWebReader(BasePydanticReader):
+     """BeautifulSoup web page reader.
+
+     Reads pages from the web.
+     Requires the `bs4` and `requests` packages.
+
+     Args:
+         website_extractor (Optional[Dict[str, Callable]]): A mapping of website
+             hostname (e.g. google.com) to a function that specifies how to
+             extract text from the BeautifulSoup obj. See DEFAULT_WEBSITE_EXTRACTOR.
+     """
+
+     is_remote: bool = True
+     _website_extractor: Dict[str, Callable] = PrivateAttr()
+
+     def __init__(self, website_extractor: Optional[Dict[str, Callable]] = None) -> None:
+         self._website_extractor = website_extractor or DEFAULT_WEBSITE_EXTRACTOR
+         super().__init__()
+
+     @classmethod
+     def class_name(cls) -> str:
+         """Get the name identifier of the class."""
+         return "BeautifulSoupWebReader"
+
+     def load_data(
+         self,
+         urls: List[str],
+         custom_hostname: Optional[str] = None,
+         include_url_in_text: Optional[bool] = True,
+     ) -> List[Document]:
+         """Load data from the urls.
+
+         Args:
+             urls (List[str]): List of URLs to scrape.
+             custom_hostname (Optional[str]): Force a certain hostname in the case
+                 a website is displayed under custom URLs (e.g. Substack blogs)
+             include_url_in_text (Optional[bool]): Include the reference url in the text of the document
+
+         Returns:
+             List[Document]: List of documents.
+
+         """
+         from urllib.parse import urlparse
+
+         import requests
+         from bs4 import BeautifulSoup
+
+         documents = []
+         for url in urls:
+             try:
+                 page = requests.get(url)
+             except Exception:
+                 raise ValueError(f"One of the inputs is not a valid url: {url}")
+
+             hostname = custom_hostname or urlparse(url).hostname or ""
+
+             soup = BeautifulSoup(page.content, "html.parser")
+
+             data = ""
+             extra_info = {"URL": url}
+             if hostname in self._website_extractor:
+                 data, metadata = self._website_extractor[hostname](
+                     soup=soup, url=url, include_url_in_text=include_url_in_text
+                 )
+                 extra_info.update(metadata)
+
+             else:
+                 data = soup.getText()
+
+             documents.append(Document(text=data, id_=url, extra_info=extra_info))
+
+         return documents
llama_index/readers/web/beautiful_soup_web/requirements.txt ADDED
@@ -0,0 +1,3 @@
+ beautifulsoup4
+ requests
+ urllib3
llama_index/readers/web/knowledge_base/README.md ADDED
@@ -0,0 +1,96 @@
+ # Knowledge Base Website Loader
+
+ This loader is a web crawler and scraper that fetches text content from websites hosting public knowledge bases. Examples are the [Intercom help center](https://www.intercom.com/help/en/) or the [Robinhood help center](https://robinhood.com/us/en/support/). Typically these sites have a directory structure with several sections and many articles in each section. This loader crawls and finds all links that match the article path provided, and scrapes the content of each article. This can be used to create bots that answer customer questions based on public documentation.
+
+ It uses [Playwright](https://playwright.dev/python/) to drive a browser. This reduces the chance of getting blocked by Cloudflare or other CDNs, but makes it a bit more challenging to run on cloud services.
+
+ ## Usage
+
+ First run
+
+ ```
+ playwright install
+ ```
+
+ This installs the browsers that Playwright requires.
+
+ To use this loader, you need to pass in the root URL and the string to search for in the URL to tell if the crawler has reached an article. You also need to pass in several CSS selectors so the crawler knows which links to follow and which elements to extract content from:
+
+ ```python
+ from llama_index import download_loader
+
+ KnowledgeBaseWebReader = download_loader("KnowledgeBaseWebReader")
+
+ loader = KnowledgeBaseWebReader(
+     root_url="https://www.intercom.com/help",
+     link_selectors=[".article-list a", ".article-list a"],
+     article_path="/articles",
+     body_selector=".article-body",
+     title_selector=".article-title",
+     subtitle_selector=".article-subtitle",
+ )
+ documents = loader.load_data()
+ ```
+
+ ## Examples
+
+ This loader is designed to be used as a way to load data into [LlamaIndex](https://github.com/run-llama/llama_index/tree/main/llama_index) and/or subsequently used as a Tool in a [LangChain](https://github.com/hwchase17/langchain) Agent.
+
+ ### LlamaIndex
+
+ ```python
+ from llama_index import VectorStoreIndex, download_loader
+
+ KnowledgeBaseWebReader = download_loader("KnowledgeBaseWebReader")
+
+ loader = KnowledgeBaseWebReader(
+     root_url="https://support.intercom.com",
+     link_selectors=[".article-list a", ".article-list a"],
+     article_path="/articles",
+     body_selector=".article-body",
+     title_selector=".article-title",
+     subtitle_selector=".article-subtitle",
+ )
+ documents = loader.load_data()
+ index = VectorStoreIndex.from_documents(documents)
+ index.query("What languages does Intercom support?")
+ ```
+
+ ### LangChain
+
+ Note: Make sure you change the description of the `Tool` to match your use-case.
+
+ ```python
+ from llama_index import VectorStoreIndex, download_loader
+ from langchain.agents import initialize_agent, Tool
+ from langchain.llms import OpenAI
+ from langchain.chains.conversation.memory import ConversationBufferMemory
+
+ KnowledgeBaseWebReader = download_loader("KnowledgeBaseWebReader")
+
+ loader = KnowledgeBaseWebReader(
+     root_url="https://support.intercom.com",
+     link_selectors=[".article-list a", ".article-list a"],
+     article_path="/articles",
+     body_selector=".article-body",
+     title_selector=".article-title",
+     subtitle_selector=".article-subtitle",
+ )
+ documents = loader.load_data()
+ index = VectorStoreIndex.from_documents(documents)
+
+ tools = [
+     Tool(
+         name="Website Index",
+         func=lambda q: index.query(q),
+         description="Useful when you want to answer questions about a product that has a public knowledge base.",
+     ),
+ ]
+ llm = OpenAI(temperature=0)
+ memory = ConversationBufferMemory(memory_key="chat_history")
+ agent_chain = initialize_agent(
+     tools, llm, agent="zero-shot-react-description", memory=memory
+ )
+
+ output = agent_chain.run(input="What languages does Intercom support?")
+ ```
llama_index/readers/web/knowledge_base/__init__.py ADDED
File without changes
llama_index/readers/web/knowledge_base/base.py ADDED
@@ -0,0 +1,165 @@
+ from typing import Any, Dict, List, Optional
+
+ from llama_index.core.readers.base import BaseReader
+ from llama_index.core.schema import Document
+
+
+ class KnowledgeBaseWebReader(BaseReader):
+     """Knowledge base reader.
+
+     Crawls and reads articles from a knowledge base/help center with Playwright.
+     Tested on Zendesk and Intercom CMS, may work on others.
+     Can be run in headless mode but it may be blocked by Cloudflare. Run it headed to be safe.
+     Times out occasionally; just increase the default timeout if it does.
+     Requires the `playwright` package.
+
+     Args:
+         root_url (str): the base url of the knowledge base, with no trailing slash
+             e.g. 'https://support.intercom.com'
+         link_selectors (List[str]): list of css selectors to find links to articles while crawling
+             e.g. ['.article-list a', '.article-list a']
+         article_path (str): the url path of articles on this domain so the crawler knows when to stop
+             e.g. '/articles'
+         title_selector (Optional[str]): css selector to find the title of the article
+             e.g. '.article-title'
+         subtitle_selector (Optional[str]): css selector to find the subtitle/description of the article
+             e.g. '.article-subtitle'
+         body_selector (Optional[str]): css selector to find the body of the article
+             e.g. '.article-body'
+     """
+
+     def __init__(
+         self,
+         root_url: str,
+         link_selectors: List[str],
+         article_path: str,
+         title_selector: Optional[str] = None,
+         subtitle_selector: Optional[str] = None,
+         body_selector: Optional[str] = None,
+     ) -> None:
+         """Initialize with parameters."""
+         self.root_url = root_url
+         self.link_selectors = link_selectors
+         self.article_path = article_path
+         self.title_selector = title_selector
+         self.subtitle_selector = subtitle_selector
+         self.body_selector = body_selector
+
+     def load_data(self) -> List[Document]:
+         """Load data from the knowledge base."""
+         from playwright.sync_api import sync_playwright
+
+         with sync_playwright() as p:
+             browser = p.chromium.launch(headless=False)
+
+             # Crawl
+             article_urls = self.get_article_urls(
+                 browser,
+                 self.root_url,
+                 self.root_url,
+             )
+
+             # Scrape
+             documents = []
+             for url in article_urls:
+                 article = self.scrape_article(
+                     browser,
+                     url,
+                 )
+                 extra_info = {
+                     "title": article["title"],
+                     "subtitle": article["subtitle"],
+                     "url": article["url"],
+                 }
+                 documents.append(Document(text=article["body"], extra_info=extra_info))
+
+             browser.close()
+
+             return documents
+
+     def scrape_article(
+         self,
+         browser: Any,
+         url: str,
+     ) -> Dict[str, str]:
+         """Scrape a single article url.
+
+         Args:
+             browser (Any): a Playwright Chromium browser.
+             url (str): URL of the article to scrape.
+
+         Returns:
+             Dict[str, str]: a mapping of article attributes to their values.
+
+         """
+         page = browser.new_page(ignore_https_errors=True)
+         page.set_default_timeout(60000)
+         page.goto(url, wait_until="domcontentloaded")
+
+         title = (
+             (
+                 page.query_selector(self.title_selector).evaluate(
+                     "node => node.innerText"
+                 )
+             )
+             if self.title_selector
+             else ""
+         )
+         subtitle = (
+             (
+                 page.query_selector(self.subtitle_selector).evaluate(
+                     "node => node.innerText"
+                 )
+             )
+             if self.subtitle_selector
+             else ""
+         )
+         body = (
+             (page.query_selector(self.body_selector).evaluate("node => node.innerText"))
+             if self.body_selector
+             else ""
+         )
+
+         page.close()
+         print("scraped:", url)
+         return {"title": title, "subtitle": subtitle, "body": body, "url": url}
+
+     def get_article_urls(
+         self, browser: Any, root_url: str, current_url: str
+     ) -> List[str]:
+         """Recursively crawl through the knowledge base to find a list of articles.
+
+         Args:
+             browser (Any): a Playwright Chromium browser.
+             root_url (str): root URL of the knowledge base.
+             current_url (str): current URL that is being crawled.
+
+         Returns:
+             List[str]: a list of URLs of found articles.
+
+         """
+         page = browser.new_page(ignore_https_errors=True)
+         page.set_default_timeout(60000)
+         page.goto(current_url, wait_until="domcontentloaded")
+
+         # If this is a leaf node aka article page, return itself
+         if self.article_path in current_url:
+             print("Found an article: ", current_url)
+             page.close()
+             return [current_url]
+
+         # Otherwise crawl this page and find all the articles linked from it
+         article_urls = []
+         links = []
+
+         for link_selector in self.link_selectors:
+             ahrefs = page.query_selector_all(link_selector)
+             links.extend(ahrefs)
+
+         for link in links:
+             url = root_url + page.evaluate("(node) => node.getAttribute('href')", link)
+             article_urls.extend(self.get_article_urls(browser, root_url, url))
+
+         page.close()
+
+         return article_urls
llama_index/readers/web/knowledge_base/requirements.txt ADDED
@@ -0,0 +1 @@
+ playwright~=1.30
llama_index/readers/web/main_content_extractor/README.md ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # MainContentExtractor Website Loader
2
+
3
+ This loader is a web scraper that fetches the text from static websites using the `MainContentExtractor` Python package.
4
+
5
+ For information on how to extract main content, README in the following github repository
6
+
7
+ [HawkClaws/main_content_extractor](https://github.com/HawkClaws/main_content_extractor)
8
+
9
+ ## Usage
10
+
11
+ To use this loader, you need to pass in an array of URLs.
12
+
13
+ ```python
14
+ from llama_index import download_loader
15
+
16
+ MainContentExtractorReader = download_loader("MainContentExtractorReader")
17
+
18
+ loader = MainContentExtractorReader()
19
+ documents = loader.load_data(urls=["https://google.com"])
20
+ ```
21
+
22
+ ## Examples
23
+
24
+ ### LlamaIndex
25
+
26
+ ```python
27
+ from llama_index import VectorStoreIndex, download_loader
28
+
29
+ MainContentExtractorReader = download_loader("MainContentExtractorReader")
30
+
31
+ loader = MainContentExtractorReader()
32
+ documents = loader.load_data(urls=["https://google.com"])
33
+ index = VectorStoreIndex.from_documents(documents)
34
+ index.query("What language is on this website?")
35
+ ```
36
+
37
+ ### LangChain
38
+
39
+ Note: Make sure you change the description of the `Tool` to match your use-case.
40
+
41
+ ```python
42
+ from llama_index import VectorStoreIndex, download_loader
43
+ from langchain.agents import initialize_agent, Tool
44
+ from langchain.llms import OpenAI
45
+ from langchain.chains.conversation.memory import ConversationBufferMemory
46
+
47
+ MainContentExtractorReader = download_loader("MainContentExtractorReader")
48
+
49
+ loader = MainContentExtractorReader()
50
+ documents = loader.load_data(urls=["https://google.com"])
51
+ index = VectorStoreIndex.from_documents(documents)
52
+
53
+ tools = [
54
+ Tool(
55
+ name="Website Index",
56
+ func=lambda q: index.query(q),
57
+ description="Useful when you want to answer questions about the text on websites.",
58
+ ),
59
+ ]
60
+ llm = OpenAI(temperature=0)
61
+ memory = ConversationBufferMemory(memory_key="chat_history")
62
+ agent_chain = initialize_agent(
63
+ tools, llm, agent="zero-shot-react-description", memory=memory
64
+ )
65
+
66
+ output = agent_chain.run(input="What language is on this website?")
67
+ ```
llama_index/readers/web/main_content_extractor/__init__.py ADDED
File without changes
llama_index/readers/web/main_content_extractor/base.py ADDED
@@ -0,0 +1,47 @@
1
+ from typing import List
2
+
3
+ import requests
4
+ from llama_index.core.readers.base import BaseReader
5
+ from llama_index.core.schema import Document
6
+
7
+
8
+ class MainContentExtractorReader(BaseReader):
9
+ """MainContentExtractor web page reader.
10
+
11
+ Reads pages from the web.
12
+
13
+ Args:
14
+ text_format (str, optional): The format of the text. Defaults to "markdown".
15
+ Requires `MainContentExtractor` package.
16
+
17
+ """
18
+
19
+ def __init__(self, text_format: str = "markdown") -> None:
20
+ """Initialize with parameters."""
21
+ self.text_format = text_format
22
+
23
+ def load_data(self, urls: List[str]) -> List[Document]:
24
+ """Load data from the input directory.
25
+
26
+ Args:
27
+ urls (List[str]): List of URLs to scrape.
28
+
29
+ Returns:
30
+ List[Document]: List of documents.
31
+
32
+ """
33
+ if not isinstance(urls, list):
34
+ raise ValueError("urls must be a list of strings.")
35
+
36
+ from main_content_extractor import MainContentExtractor
37
+
38
+ documents = []
39
+ for url in urls:
40
+ response = requests.get(url).text
41
+ response = MainContentExtractor.extract(
42
+ response, output_format=self.text_format, include_links=False
43
+ )
44
+
45
+ documents.append(Document(text=response))
46
+
47
+ return documents
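For completeness, the reader above can also be used directly from this package without `download_loader`; a minimal sketch, assuming the import path follows this repository's directory layout:

```python
# Minimal sketch: use MainContentExtractorReader directly
# (import path assumed from this repository's structure).
from llama_index.readers.web.main_content_extractor.base import (
    MainContentExtractorReader,
)

reader = MainContentExtractorReader(text_format="html")  # "markdown" is the default
docs = reader.load_data(urls=["https://example.com"])
print(docs[0].text[:200])
```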
llama_index/readers/web/main_content_extractor/requirements.txt ADDED
@@ -0,0 +1 @@
1
+ MainContentExtractor
llama_index/readers/web/news/README.md ADDED
@@ -0,0 +1,24 @@
1
+ # News Article Loader
2
+
3
+ This loader uses the `newspaper3k` library to parse web page URLs that contain news
4
+ articles.
5
+
6
+ ## Usage
7
+
8
+ ```
9
+ pip install newspaper3k
10
+ ```
11
+
12
+ Pass in an array of individual page URLs:
13
+
14
+ ```python
15
+ from llama_index.readers.web.news import NewsArticleReader
16
+
17
+ reader = NewsArticleReader(use_nlp=False)
18
+ documents = reader.load_data(
19
+ [
20
+ "https://www.cnbc.com/2023/08/03/amazon-amzn-q2-earnings-report-2023.html",
21
+ "https://www.theverge.com/2023/8/3/23818388/brave-search-image-video-results-privacy-index",
22
+ ]
23
+ )
24
+ ```
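Note: when `use_nlp=True` (the default in `NewsArticleReader`), `newspaper3k` runs its NLP step to produce the summary and keywords, which relies on NLTK; in most environments the `punkt` tokenizer has to be downloaded once, for example:

```python
# One-time setup usually needed for newspaper3k's nlp() step (summary/keywords).
import nltk

nltk.download("punkt")
```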
llama_index/readers/web/news/__init__.py ADDED
File without changes
llama_index/readers/web/news/base.py ADDED
@@ -0,0 +1,90 @@
1
+ """News article reader using Newspaper."""
2
+ import logging
3
+ from importlib.util import find_spec
4
+ from typing import Any, Generator, List
5
+
6
+ from llama_index.core.readers.base import BaseReader
7
+ from llama_index.core.schema import Document
8
+
9
+ logger = logging.getLogger(__name__)
10
+
11
+
12
+ class NewsArticleReader(BaseReader):
13
+ """Simple news article reader.
14
+
15
+ Reads news articles from the web and parses them using the `newspaper` library.
16
+
17
+ Args:
18
+ text_mode (bool): Whether to load a text version or HTML version of the content (default=True).
19
+ use_nlp (bool): Whether to use NLP to extract additional summary and keywords (default=True).
20
+ newspaper_kwargs: Additional keyword arguments to pass to newspaper.Article. See
21
+ https://newspaper.readthedocs.io/en/latest/user_guide/quickstart.html#article
22
+ """
23
+
24
+ def __init__(
25
+ self, text_mode: bool = True, use_nlp: bool = True, **newspaper_kwargs: Any
26
+ ) -> None:
27
+ """Initialize with parameters."""
28
+ if find_spec("newspaper") is None:
29
+ raise ImportError(
30
+ "`newspaper` package not found, please run `pip install newspaper3k`"
31
+ )
32
+ self.load_text = text_mode
33
+ self.use_nlp = use_nlp
34
+ self.newspaper_kwargs = newspaper_kwargs
35
+
36
+ def load_data(self, urls: List[str]) -> List[Document]:
37
+ """Load data from the list of news article urls.
38
+
39
+ Args:
40
+ urls (List[str]): List of URLs to load news articles.
41
+
42
+ Returns:
43
+ List[Document]: List of documents.
44
+
45
+ """
46
+ if not isinstance(urls, list) and not isinstance(urls, Generator):
47
+ raise ValueError("urls must be a list or generator.")
48
+ documents = []
49
+ for url in urls:
50
+ from newspaper import Article
51
+
52
+ try:
53
+ article = Article(url, **self.newspaper_kwargs)
54
+ article.download()
55
+ article.parse()
56
+
57
+ if self.use_nlp:
58
+ article.nlp()
59
+
60
+ except Exception as e:
61
+ logger.error(f"Error fetching or processing {url}, exception: {e}")
62
+ continue
63
+
64
+ metadata = {
65
+ "title": getattr(article, "title", ""),
66
+ "link": getattr(article, "url", getattr(article, "canonical_link", "")),
67
+ "authors": getattr(article, "authors", []),
68
+ "language": getattr(article, "meta_lang", ""),
69
+ "description": getattr(article, "meta_description", ""),
70
+ "publish_date": getattr(article, "publish_date", ""),
71
+ }
72
+
73
+ if self.load_text:
74
+ content = article.text
75
+ else:
76
+ content = article.html
77
+
78
+ if self.use_nlp:
79
+ metadata["keywords"] = getattr(article, "keywords", [])
80
+ metadata["summary"] = getattr(article, "summary", "")
81
+
82
+ documents.append(Document(text=content, metadata=metadata))
83
+
84
+ return documents
85
+
86
+
87
+ if __name__ == "__main__":
88
+ reader = NewsArticleReader()
89
+ article = reader.load_data(["https://www.bbc.com/news/world-us-canada-56797998"])
90
+ print(article)
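A short sketch of inspecting the metadata this reader attaches to each document (the keys come from the mapping built in `load_data` above; the import path matches the one shown in this loader's README):

```python
# Sketch: read the metadata fields populated by NewsArticleReader.load_data.
from llama_index.readers.web.news import NewsArticleReader

docs = NewsArticleReader(use_nlp=True).load_data(
    ["https://www.bbc.com/news/world-us-canada-56797998"]
)
for doc in docs:
    print(doc.metadata["title"], doc.metadata.get("keywords", []))
```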
llama_index/readers/web/news/requirements.txt ADDED
@@ -0,0 +1 @@
1
+ newspaper3k
llama_index/readers/web/readability_web/README.md ADDED
@@ -0,0 +1,82 @@
1
+ # Readability Webpage Loader
2
+
3
+ Extracts the relevant information from a fully rendered web page.
4
+ It is assumed that the web pages used as data sources contain textual content.
5
+
6
+ It is particularly effective for websites that use client-side rendering.
7
+
8
+ 1. Load the page and wait for it to render (Playwright).
9
+ 2. Inject Readability.js to extract the main content.
10
+
11
+ ## Usage
12
+
13
+ To use this loader, you need to pass in a single URL.
14
+
15
+ ```python
16
+ from llama_index import download_loader
17
+
18
+ ReadabilityWebPageReader = download_loader("ReadabilityWebPageReader")
19
+
20
+ # or set proxy server for playwright: loader = ReadabilityWebPageReader(proxy="http://your-proxy-server:port")
21
+ # For some specific web pages, you may need to set "wait_until" to "networkidle". loader = ReadabilityWebPageReader(wait_until="networkidle")
22
+ loader = ReadabilityWebPageReader()
23
+
24
+ documents = loader.load_data(
25
+ url="https://support.squarespace.com/hc/en-us/articles/206795137-Pages-and-content-basics"
26
+ )
27
+ ```
28
+
29
+ ## Examples
30
+
31
+ This loader is designed to be used as a way to load data into [LlamaIndex](https://github.com/run-llama/llama_index/tree/main/llama_index) and/or subsequently used as a Tool in a [LangChain](https://github.com/hwchase17/langchain) Agent.
32
+
33
+ ### LlamaIndex
34
+
35
+ ```python
36
+ from llama_index import VectorStoreIndex, download_loader
37
+
38
+ ReadabilityWebPageReader = download_loader("ReadabilityWebPageReader")
39
+
40
+ loader = ReadabilityWebPageReader()
41
+ documents = loader.load_data(
42
+ url="https://support.squarespace.com/hc/en-us/articles/206795137-Pages-and-content-basics"
43
+ )
44
+
45
+ index = VectorStoreIndex.from_documents(documents)
46
+ print(index.query("What are pages?"))
47
+ ```
48
+
49
+ ### LangChain
50
+
51
+ Note: Make sure you change the description of the `Tool` to match your use-case.
52
+
53
+ ```python
54
+ from llama_index import VectorStoreIndex, download_loader
55
+ from langchain.agents import initialize_agent, Tool
56
+ from langchain.llms import OpenAI
57
+ from langchain.chains.conversation.memory import ConversationBufferMemory
58
+
59
+ ReadabilityWebPageReader = download_loader("ReadabilityWebPageReader")
60
+
61
+ loader = ReadabilityWebPageReader()
62
+ documents = loader.load_data(
63
+ url="https://support.squarespace.com/hc/en-us/articles/206795137-Pages-and-content-basics"
64
+ )
65
+
66
+ index = VectorStoreIndex.from_documents(documents)
67
+
68
+ tools = [
69
+ Tool(
70
+ name="Website Index",
71
+ func=lambda q: index.query(q),
72
+ description="Useful when you want to answer questions about the text on websites.",
73
+ ),
74
+ ]
75
+ llm = OpenAI(temperature=0)
76
+ memory = ConversationBufferMemory(memory_key="chat_history")
77
+ agent_chain = initialize_agent(
78
+ tools, llm, agent="zero-shot-react-description", memory=memory
79
+ )
80
+
81
+ output = agent_chain.run(input="What are pages?")
82
+ ```
llama_index/readers/web/readability_web/Readability.js ADDED
@@ -0,0 +1,2613 @@
1
+ /*
2
+ * Copyright (c) 2010 Arc90 Inc
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ /*
18
+ * This code is heavily based on Arc90's readability.js (1.7.1) script
19
+ * available at: http://code.google.com/p/arc90labs-readability
20
+ */
21
+
22
+ /**
23
+ * Public constructor.
24
+ * @param {HTMLDocument} doc The document to parse.
25
+ * @param {Object} options The options object.
26
+ */
27
+ function Readability(doc, options) {
28
+ // In some older versions, people passed a URI as the first argument. Cope:
29
+ if (options && options.documentElement) {
30
+ doc = options;
31
+ options = arguments[2];
32
+ } else if (!doc || !doc.documentElement) {
33
+ throw new Error(
34
+ "First argument to Readability constructor should be a document object.",
35
+ );
36
+ }
37
+ options = options || {};
38
+
39
+ this._doc = doc;
40
+ this._docJSDOMParser = this._doc.firstChild.__JSDOMParser__;
41
+ this._articleTitle = null;
42
+ this._articleByline = null;
43
+ this._articleDir = null;
44
+ this._articleSiteName = null;
45
+ this._attempts = [];
46
+
47
+ // Configurable options
48
+ this._debug = !!options.debug;
49
+ this._maxElemsToParse =
50
+ options.maxElemsToParse || this.DEFAULT_MAX_ELEMS_TO_PARSE;
51
+ this._nbTopCandidates =
52
+ options.nbTopCandidates || this.DEFAULT_N_TOP_CANDIDATES;
53
+ this._charThreshold = options.charThreshold || this.DEFAULT_CHAR_THRESHOLD;
54
+ this._classesToPreserve = this.CLASSES_TO_PRESERVE.concat(
55
+ options.classesToPreserve || [],
56
+ );
57
+ this._keepClasses = !!options.keepClasses;
58
+ this._serializer =
59
+ options.serializer ||
60
+ function (el) {
61
+ return el.innerHTML;
62
+ };
63
+ this._disableJSONLD = !!options.disableJSONLD;
64
+ this._allowedVideoRegex = options.allowedVideoRegex || this.REGEXPS.videos;
65
+
66
+ // Start with all flags set
67
+ this._flags =
68
+ this.FLAG_STRIP_UNLIKELYS |
69
+ this.FLAG_WEIGHT_CLASSES |
70
+ this.FLAG_CLEAN_CONDITIONALLY;
71
+
72
+ // Control whether log messages are sent to the console
73
+ if (this._debug) {
74
+ let logNode = function (node) {
75
+ if (node.nodeType == node.TEXT_NODE) {
76
+ return `${node.nodeName} ("${node.textContent}")`;
77
+ }
78
+ let attrPairs = Array.from(node.attributes || [], function (attr) {
79
+ return `${attr.name}="${attr.value}"`;
80
+ }).join(" ");
81
+ return `<${node.localName} ${attrPairs}>`;
82
+ };
83
+ this.log = function () {
84
+ if (typeof console !== "undefined") {
85
+ let args = Array.from(arguments, (arg) => {
86
+ if (arg && arg.nodeType == this.ELEMENT_NODE) {
87
+ return logNode(arg);
88
+ }
89
+ return arg;
90
+ });
91
+ args.unshift("Reader: (Readability)");
92
+ console.log.apply(console, args);
93
+ } else if (typeof dump !== "undefined") {
94
+ /* global dump */
95
+ var msg = Array.prototype.map
96
+ .call(arguments, function (x) {
97
+ return x && x.nodeName ? logNode(x) : x;
98
+ })
99
+ .join(" ");
100
+ dump("Reader: (Readability) " + msg + "\n");
101
+ }
102
+ };
103
+ } else {
104
+ this.log = function () {};
105
+ }
106
+ }
107
+
108
+ Readability.prototype = {
109
+ FLAG_STRIP_UNLIKELYS: 0x1,
110
+ FLAG_WEIGHT_CLASSES: 0x2,
111
+ FLAG_CLEAN_CONDITIONALLY: 0x4,
112
+
113
+ // https://developer.mozilla.org/en-US/docs/Web/API/Node/nodeType
114
+ ELEMENT_NODE: 1,
115
+ TEXT_NODE: 3,
116
+
117
+ // Max number of nodes supported by this parser. Default: 0 (no limit)
118
+ DEFAULT_MAX_ELEMS_TO_PARSE: 0,
119
+
120
+ // The number of top candidates to consider when analysing how
121
+ // tight the competition is among candidates.
122
+ DEFAULT_N_TOP_CANDIDATES: 5,
123
+
124
+ // Element tags to score by default.
125
+ DEFAULT_TAGS_TO_SCORE: "section,h2,h3,h4,h5,h6,p,td,pre"
126
+ .toUpperCase()
127
+ .split(","),
128
+
129
+ // The default number of chars an article must have in order to return a result
130
+ DEFAULT_CHAR_THRESHOLD: 500,
131
+
132
+ // All of the regular expressions in use within readability.
133
+ // Defined up here so we don't instantiate them repeatedly in loops.
134
+ REGEXPS: {
135
+ // NOTE: These two regular expressions are duplicated in
136
+ // Readability-readerable.js. Please keep both copies in sync.
137
+ unlikelyCandidates:
138
+ /-ad-|ai2html|banner|breadcrumbs|combx|comment|community|cover-wrap|disqus|extra|footer|gdpr|header|legends|menu|related|remark|replies|rss|shoutbox|sidebar|skyscraper|social|sponsor|supplemental|ad-break|agegate|pagination|pager|popup|yom-remote/i,
139
+ okMaybeItsACandidate: /and|article|body|column|content|main|shadow/i,
140
+
141
+ positive:
142
+ /article|body|content|entry|hentry|h-entry|main|page|pagination|post|text|blog|story/i,
143
+ negative:
144
+ /-ad-|hidden|^hid$| hid$| hid |^hid |banner|combx|comment|com-|contact|foot|footer|footnote|gdpr|masthead|media|meta|outbrain|promo|related|scroll|share|shoutbox|sidebar|skyscraper|sponsor|shopping|tags|tool|widget/i,
145
+ extraneous:
146
+ /print|archive|comment|discuss|e[\-]?mail|share|reply|all|login|sign|single|utility/i,
147
+ byline: /byline|author|dateline|writtenby|p-author/i,
148
+ replaceFonts: /<(\/?)font[^>]*>/gi,
149
+ normalize: /\s{2,}/g,
150
+ videos:
151
+ /\/\/(www\.)?((dailymotion|youtube|youtube-nocookie|player\.vimeo|v\.qq)\.com|(archive|upload\.wikimedia)\.org|player\.twitch\.tv)/i,
152
+ shareElements: /(\b|_)(share|sharedaddy)(\b|_)/i,
153
+ nextLink: /(next|weiter|continue|>([^\|]|$)|»([^\|]|$))/i,
154
+ prevLink: /(prev|earl|old|new|<|«)/i,
155
+ tokenize: /\W+/g,
156
+ whitespace: /^\s*$/,
157
+ hasContent: /\S$/,
158
+ hashUrl: /^#.+/,
159
+ srcsetUrl: /(\S+)(\s+[\d.]+[xw])?(\s*(?:,|$))/g,
160
+ b64DataUrl: /^data:\s*([^\s;,]+)\s*;\s*base64\s*,/i,
161
+ // See: https://schema.org/Article
162
+ jsonLdArticleTypes:
163
+ /^Article|AdvertiserContentArticle|NewsArticle|AnalysisNewsArticle|AskPublicNewsArticle|BackgroundNewsArticle|OpinionNewsArticle|ReportageNewsArticle|ReviewNewsArticle|Report|SatiricalArticle|ScholarlyArticle|MedicalScholarlyArticle|SocialMediaPosting|BlogPosting|LiveBlogPosting|DiscussionForumPosting|TechArticle|APIReference$/,
164
+ },
165
+
166
+ UNLIKELY_ROLES: [
167
+ "menu",
168
+ "menubar",
169
+ "complementary",
170
+ "navigation",
171
+ "alert",
172
+ "alertdialog",
173
+ "dialog",
174
+ ],
175
+
176
+ DIV_TO_P_ELEMS: new Set([
177
+ "BLOCKQUOTE",
178
+ "DL",
179
+ "DIV",
180
+ "IMG",
181
+ "OL",
182
+ "P",
183
+ "PRE",
184
+ "TABLE",
185
+ "UL",
186
+ ]),
187
+
188
+ ALTER_TO_DIV_EXCEPTIONS: ["DIV", "ARTICLE", "SECTION", "P"],
189
+
190
+ PRESENTATIONAL_ATTRIBUTES: [
191
+ "align",
192
+ "background",
193
+ "bgcolor",
194
+ "border",
195
+ "cellpadding",
196
+ "cellspacing",
197
+ "frame",
198
+ "hspace",
199
+ "rules",
200
+ "style",
201
+ "valign",
202
+ "vspace",
203
+ ],
204
+
205
+ DEPRECATED_SIZE_ATTRIBUTE_ELEMS: ["TABLE", "TH", "TD", "HR", "PRE"],
206
+
207
+ // The commented out elements qualify as phrasing content but tend to be
208
+ // removed by readability when put into paragraphs, so we ignore them here.
209
+ PHRASING_ELEMS: [
210
+ // "CANVAS", "IFRAME", "SVG", "VIDEO",
211
+ "ABBR",
212
+ "AUDIO",
213
+ "B",
214
+ "BDO",
215
+ "BR",
216
+ "BUTTON",
217
+ "CITE",
218
+ "CODE",
219
+ "DATA",
220
+ "DATALIST",
221
+ "DFN",
222
+ "EM",
223
+ "EMBED",
224
+ "I",
225
+ "IMG",
226
+ "INPUT",
227
+ "KBD",
228
+ "LABEL",
229
+ "MARK",
230
+ "MATH",
231
+ "METER",
232
+ "NOSCRIPT",
233
+ "OBJECT",
234
+ "OUTPUT",
235
+ "PROGRESS",
236
+ "Q",
237
+ "RUBY",
238
+ "SAMP",
239
+ "SCRIPT",
240
+ "SELECT",
241
+ "SMALL",
242
+ "SPAN",
243
+ "STRONG",
244
+ "SUB",
245
+ "SUP",
246
+ "TEXTAREA",
247
+ "TIME",
248
+ "VAR",
249
+ "WBR",
250
+ ],
251
+
252
+ // These are the classes that readability sets itself.
253
+ CLASSES_TO_PRESERVE: ["page"],
254
+
255
+ // These are the list of HTML entities that need to be escaped.
256
+ HTML_ESCAPE_MAP: {
257
+ lt: "<",
258
+ gt: ">",
259
+ amp: "&",
260
+ quot: '"',
261
+ apos: "'",
262
+ },
263
+
264
+ /**
265
+ * Run any post-process modifications to article content as necessary.
266
+ *
267
+ * @param Element
268
+ * @return void
269
+ **/
270
+ _postProcessContent: function (articleContent) {
271
+ // Readability cannot open relative uris so we convert them to absolute uris.
272
+ this._fixRelativeUris(articleContent);
273
+
274
+ this._simplifyNestedElements(articleContent);
275
+
276
+ if (!this._keepClasses) {
277
+ // Remove classes.
278
+ this._cleanClasses(articleContent);
279
+ }
280
+ },
281
+
282
+ /**
283
+ * Iterates over a NodeList, calls `filterFn` for each node and removes node
284
+ * if function returned `true`.
285
+ *
286
+ * If function is not passed, removes all the nodes in node list.
287
+ *
288
+ * @param NodeList nodeList The nodes to operate on
289
+ * @param Function filterFn the function to use as a filter
290
+ * @return void
291
+ */
292
+ _removeNodes: function (nodeList, filterFn) {
293
+ // Avoid ever operating on live node lists.
294
+ if (this._docJSDOMParser && nodeList._isLiveNodeList) {
295
+ throw new Error("Do not pass live node lists to _removeNodes");
296
+ }
297
+ for (var i = nodeList.length - 1; i >= 0; i--) {
298
+ var node = nodeList[i];
299
+ var parentNode = node.parentNode;
300
+ if (parentNode) {
301
+ if (!filterFn || filterFn.call(this, node, i, nodeList)) {
302
+ parentNode.removeChild(node);
303
+ }
304
+ }
305
+ }
306
+ },
307
+
308
+ /**
309
+ * Iterates over a NodeList, and calls _setNodeTag for each node.
310
+ *
311
+ * @param NodeList nodeList The nodes to operate on
312
+ * @param String newTagName the new tag name to use
313
+ * @return void
314
+ */
315
+ _replaceNodeTags: function (nodeList, newTagName) {
316
+ // Avoid ever operating on live node lists.
317
+ if (this._docJSDOMParser && nodeList._isLiveNodeList) {
318
+ throw new Error("Do not pass live node lists to _replaceNodeTags");
319
+ }
320
+ for (const node of nodeList) {
321
+ this._setNodeTag(node, newTagName);
322
+ }
323
+ },
324
+
325
+ /**
326
+ * Iterate over a NodeList, which doesn't natively fully implement the Array
327
+ * interface.
328
+ *
329
+ * For convenience, the current object context is applied to the provided
330
+ * iterate function.
331
+ *
332
+ * @param NodeList nodeList The NodeList.
333
+ * @param Function fn The iterate function.
334
+ * @return void
335
+ */
336
+ _forEachNode: function (nodeList, fn) {
337
+ Array.prototype.forEach.call(nodeList, fn, this);
338
+ },
339
+
340
+ /**
341
+ * Iterate over a NodeList, and return the first node that passes
342
+ * the supplied test function
343
+ *
344
+ * For convenience, the current object context is applied to the provided
345
+ * test function.
346
+ *
347
+ * @param NodeList nodeList The NodeList.
348
+ * @param Function fn The test function.
349
+ * @return void
350
+ */
351
+ _findNode: function (nodeList, fn) {
352
+ return Array.prototype.find.call(nodeList, fn, this);
353
+ },
354
+
355
+ /**
356
+ * Iterate over a NodeList, return true if any of the provided iterate
357
+ * function calls returns true, false otherwise.
358
+ *
359
+ * For convenience, the current object context is applied to the
360
+ * provided iterate function.
361
+ *
362
+ * @param NodeList nodeList The NodeList.
363
+ * @param Function fn The iterate function.
364
+ * @return Boolean
365
+ */
366
+ _someNode: function (nodeList, fn) {
367
+ return Array.prototype.some.call(nodeList, fn, this);
368
+ },
369
+
370
+ /**
371
+ * Iterate over a NodeList, return true if all of the provided iterate
372
+ * function calls return true, false otherwise.
373
+ *
374
+ * For convenience, the current object context is applied to the
375
+ * provided iterate function.
376
+ *
377
+ * @param NodeList nodeList The NodeList.
378
+ * @param Function fn The iterate function.
379
+ * @return Boolean
380
+ */
381
+ _everyNode: function (nodeList, fn) {
382
+ return Array.prototype.every.call(nodeList, fn, this);
383
+ },
384
+
385
+ /**
386
+ * Concat all nodelists passed as arguments.
387
+ *
388
+ * @return ...NodeList
389
+ * @return Array
390
+ */
391
+ _concatNodeLists: function () {
392
+ var slice = Array.prototype.slice;
393
+ var args = slice.call(arguments);
394
+ var nodeLists = args.map(function (list) {
395
+ return slice.call(list);
396
+ });
397
+ return Array.prototype.concat.apply([], nodeLists);
398
+ },
399
+
400
+ _getAllNodesWithTag: function (node, tagNames) {
401
+ if (node.querySelectorAll) {
402
+ return node.querySelectorAll(tagNames.join(","));
403
+ }
404
+ return [].concat.apply(
405
+ [],
406
+ tagNames.map(function (tag) {
407
+ var collection = node.getElementsByTagName(tag);
408
+ return Array.isArray(collection) ? collection : Array.from(collection);
409
+ }),
410
+ );
411
+ },
412
+
413
+ /**
414
+ * Removes the class="" attribute from every element in the given
415
+ * subtree, except those that match CLASSES_TO_PRESERVE and
416
+ * the classesToPreserve array from the options object.
417
+ *
418
+ * @param Element
419
+ * @return void
420
+ */
421
+ _cleanClasses: function (node) {
422
+ var classesToPreserve = this._classesToPreserve;
423
+ var className = (node.getAttribute("class") || "")
424
+ .split(/\s+/)
425
+ .filter(function (cls) {
426
+ return classesToPreserve.indexOf(cls) != -1;
427
+ })
428
+ .join(" ");
429
+
430
+ if (className) {
431
+ node.setAttribute("class", className);
432
+ } else {
433
+ node.removeAttribute("class");
434
+ }
435
+
436
+ for (node = node.firstElementChild; node; node = node.nextElementSibling) {
437
+ this._cleanClasses(node);
438
+ }
439
+ },
440
+
441
+ /**
442
+ * Converts each <a> and <img> uri in the given element to an absolute URI,
443
+ * ignoring #ref URIs.
444
+ *
445
+ * @param Element
446
+ * @return void
447
+ */
448
+ _fixRelativeUris: function (articleContent) {
449
+ var baseURI = this._doc.baseURI;
450
+ var documentURI = this._doc.documentURI;
451
+ function toAbsoluteURI(uri) {
452
+ // Leave hash links alone if the base URI matches the document URI:
453
+ if (baseURI == documentURI && uri.charAt(0) == "#") {
454
+ return uri;
455
+ }
456
+
457
+ // Otherwise, resolve against base URI:
458
+ try {
459
+ return new URL(uri, baseURI).href;
460
+ } catch (ex) {
461
+ // Something went wrong, just return the original:
462
+ }
463
+ return uri;
464
+ }
465
+
466
+ var links = this._getAllNodesWithTag(articleContent, ["a"]);
467
+ this._forEachNode(links, function (link) {
468
+ var href = link.getAttribute("href");
469
+ if (href) {
470
+ // Remove links with javascript: URIs, since
471
+ // they won't work after scripts have been removed from the page.
472
+ if (href.indexOf("javascript:") === 0) {
473
+ // if the link only contains simple text content, it can be converted to a text node
474
+ if (
475
+ link.childNodes.length === 1 &&
476
+ link.childNodes[0].nodeType === this.TEXT_NODE
477
+ ) {
478
+ var text = this._doc.createTextNode(link.textContent);
479
+ link.parentNode.replaceChild(text, link);
480
+ } else {
481
+ // if the link has multiple children, they should all be preserved
482
+ var container = this._doc.createElement("span");
483
+ while (link.firstChild) {
484
+ container.appendChild(link.firstChild);
485
+ }
486
+ link.parentNode.replaceChild(container, link);
487
+ }
488
+ } else {
489
+ link.setAttribute("href", toAbsoluteURI(href));
490
+ }
491
+ }
492
+ });
493
+
494
+ var mediaTypes = this._getAllNodesWithTag(articleContent, [
495
+ "img",
496
+ "picture",
497
+ "figure",
498
+ "video",
499
+ "audio",
500
+ "source",
501
+ ]);
502
+
503
+ this._forEachNode(mediaTypes, function (media) {
504
+ var src = media.getAttribute("src");
505
+ var poster = media.getAttribute("poster");
506
+ var srcset = media.getAttribute("srcset");
507
+
508
+ if (src) {
509
+ media.setAttribute("src", toAbsoluteURI(src));
510
+ }
511
+
512
+ if (poster) {
513
+ media.setAttribute("poster", toAbsoluteURI(poster));
514
+ }
515
+
516
+ if (srcset) {
517
+ var newSrcset = srcset.replace(
518
+ this.REGEXPS.srcsetUrl,
519
+ function (_, p1, p2, p3) {
520
+ return toAbsoluteURI(p1) + (p2 || "") + p3;
521
+ },
522
+ );
523
+
524
+ media.setAttribute("srcset", newSrcset);
525
+ }
526
+ });
527
+ },
528
+
529
+ _simplifyNestedElements: function (articleContent) {
530
+ var node = articleContent;
531
+
532
+ while (node) {
533
+ if (
534
+ node.parentNode &&
535
+ ["DIV", "SECTION"].includes(node.tagName) &&
536
+ !(node.id && node.id.startsWith("readability"))
537
+ ) {
538
+ if (this._isElementWithoutContent(node)) {
539
+ node = this._removeAndGetNext(node);
540
+ continue;
541
+ } else if (
542
+ this._hasSingleTagInsideElement(node, "DIV") ||
543
+ this._hasSingleTagInsideElement(node, "SECTION")
544
+ ) {
545
+ var child = node.children[0];
546
+ for (var i = 0; i < node.attributes.length; i++) {
547
+ child.setAttribute(
548
+ node.attributes[i].name,
549
+ node.attributes[i].value,
550
+ );
551
+ }
552
+ node.parentNode.replaceChild(child, node);
553
+ node = child;
554
+ continue;
555
+ }
556
+ }
557
+
558
+ node = this._getNextNode(node);
559
+ }
560
+ },
561
+
562
+ /**
563
+ * Get the article title as an H1.
564
+ *
565
+ * @return string
566
+ **/
567
+ _getArticleTitle: function () {
568
+ var doc = this._doc;
569
+ var curTitle = "";
570
+ var origTitle = "";
571
+
572
+ try {
573
+ curTitle = origTitle = doc.title.trim();
574
+
575
+ // If they had an element with id "title" in their HTML
576
+ if (typeof curTitle !== "string")
577
+ curTitle = origTitle = this._getInnerText(
578
+ doc.getElementsByTagName("title")[0],
579
+ );
580
+ } catch (e) {
581
+ /* ignore exceptions setting the title. */
582
+ }
583
+
584
+ var titleHadHierarchicalSeparators = false;
585
+ function wordCount(str) {
586
+ return str.split(/\s+/).length;
587
+ }
588
+
589
+ // If there's a separator in the title, first remove the final part
590
+ if (/ [\|\-\\\/>»] /.test(curTitle)) {
591
+ titleHadHierarchicalSeparators = / [\\\/>»] /.test(curTitle);
592
+ curTitle = origTitle.replace(/(.*)[\|\-\\\/>»] .*/gi, "$1");
593
+
594
+ // If the resulting title is too short (3 words or fewer), remove
595
+ // the first part instead:
596
+ if (wordCount(curTitle) < 3)
597
+ curTitle = origTitle.replace(/[^\|\-\\\/>»]*[\|\-\\\/>»](.*)/gi, "$1");
598
+ } else if (curTitle.indexOf(": ") !== -1) {
599
+ // Check if we have an heading containing this exact string, so we
600
+ // could assume it's the full title.
601
+ var headings = this._concatNodeLists(
602
+ doc.getElementsByTagName("h1"),
603
+ doc.getElementsByTagName("h2"),
604
+ );
605
+ var trimmedTitle = curTitle.trim();
606
+ var match = this._someNode(headings, function (heading) {
607
+ return heading.textContent.trim() === trimmedTitle;
608
+ });
609
+
610
+ // If we don't, let's extract the title out of the original title string.
611
+ if (!match) {
612
+ curTitle = origTitle.substring(origTitle.lastIndexOf(":") + 1);
613
+
614
+ // If the title is now too short, try the first colon instead:
615
+ if (wordCount(curTitle) < 3) {
616
+ curTitle = origTitle.substring(origTitle.indexOf(":") + 1);
617
+ // But if we have too many words before the colon there's something weird
618
+ // with the titles and the H tags so let's just use the original title instead
619
+ } else if (wordCount(origTitle.substr(0, origTitle.indexOf(":"))) > 5) {
620
+ curTitle = origTitle;
621
+ }
622
+ }
623
+ } else if (curTitle.length > 150 || curTitle.length < 15) {
624
+ var hOnes = doc.getElementsByTagName("h1");
625
+
626
+ if (hOnes.length === 1) curTitle = this._getInnerText(hOnes[0]);
627
+ }
628
+
629
+ curTitle = curTitle.trim().replace(this.REGEXPS.normalize, " ");
630
+ // If we now have 4 words or fewer as our title, and either no
631
+ // 'hierarchical' separators (\, /, > or ») were found in the original
632
+ // title or we decreased the number of words by more than 1 word, use
633
+ // the original title.
634
+ var curTitleWordCount = wordCount(curTitle);
635
+ if (
636
+ curTitleWordCount <= 4 &&
637
+ (!titleHadHierarchicalSeparators ||
638
+ curTitleWordCount !=
639
+ wordCount(origTitle.replace(/[\|\-\\\/>»]+/g, "")) - 1)
640
+ ) {
641
+ curTitle = origTitle;
642
+ }
643
+
644
+ return curTitle;
645
+ },
646
+
647
+ /**
648
+ * Prepare the HTML document for readability to scrape it.
649
+ * This includes things like stripping javascript, CSS, and handling terrible markup.
650
+ *
651
+ * @return void
652
+ **/
653
+ _prepDocument: function () {
654
+ var doc = this._doc;
655
+
656
+ // Remove all style tags in head
657
+ this._removeNodes(this._getAllNodesWithTag(doc, ["style"]));
658
+
659
+ if (doc.body) {
660
+ this._replaceBrs(doc.body);
661
+ }
662
+
663
+ this._replaceNodeTags(this._getAllNodesWithTag(doc, ["font"]), "SPAN");
664
+ },
665
+
666
+ /**
667
+ * Finds the next node, starting from the given node, and ignoring
668
+ * whitespace in between. If the given node is an element, the same node is
669
+ * returned.
670
+ */
671
+ _nextNode: function (node) {
672
+ var next = node;
673
+ while (
674
+ next &&
675
+ next.nodeType != this.ELEMENT_NODE &&
676
+ this.REGEXPS.whitespace.test(next.textContent)
677
+ ) {
678
+ next = next.nextSibling;
679
+ }
680
+ return next;
681
+ },
682
+
683
+ /**
684
+ * Replaces 2 or more successive <br> elements with a single <p>.
685
+ * Whitespace between <br> elements are ignored. For example:
686
+ * <div>foo<br>bar<br> <br><br>abc</div>
687
+ * will become:
688
+ * <div>foo<br>bar<p>abc</p></div>
689
+ */
690
+ _replaceBrs: function (elem) {
691
+ this._forEachNode(this._getAllNodesWithTag(elem, ["br"]), function (br) {
692
+ var next = br.nextSibling;
693
+
694
+ // Whether 2 or more <br> elements have been found and replaced with a
695
+ // <p> block.
696
+ var replaced = false;
697
+
698
+ // If we find a <br> chain, remove the <br>s until we hit another node
699
+ // or non-whitespace. This leaves behind the first <br> in the chain
700
+ // (which will be replaced with a <p> later).
701
+ while ((next = this._nextNode(next)) && next.tagName == "BR") {
702
+ replaced = true;
703
+ var brSibling = next.nextSibling;
704
+ next.parentNode.removeChild(next);
705
+ next = brSibling;
706
+ }
707
+
708
+ // If we removed a <br> chain, replace the remaining <br> with a <p>. Add
709
+ // all sibling nodes as children of the <p> until we hit another <br>
710
+ // chain.
711
+ if (replaced) {
712
+ var p = this._doc.createElement("p");
713
+ br.parentNode.replaceChild(p, br);
714
+
715
+ next = p.nextSibling;
716
+ while (next) {
717
+ // If we've hit another <br><br>, we're done adding children to this <p>.
718
+ if (next.tagName == "BR") {
719
+ var nextElem = this._nextNode(next.nextSibling);
720
+ if (nextElem && nextElem.tagName == "BR") break;
721
+ }
722
+
723
+ if (!this._isPhrasingContent(next)) break;
724
+
725
+ // Otherwise, make this node a child of the new <p>.
726
+ var sibling = next.nextSibling;
727
+ p.appendChild(next);
728
+ next = sibling;
729
+ }
730
+
731
+ while (p.lastChild && this._isWhitespace(p.lastChild)) {
732
+ p.removeChild(p.lastChild);
733
+ }
734
+
735
+ if (p.parentNode.tagName === "P") this._setNodeTag(p.parentNode, "DIV");
736
+ }
737
+ });
738
+ },
739
+
740
+ _setNodeTag: function (node, tag) {
741
+ this.log("_setNodeTag", node, tag);
742
+ if (this._docJSDOMParser) {
743
+ node.localName = tag.toLowerCase();
744
+ node.tagName = tag.toUpperCase();
745
+ return node;
746
+ }
747
+
748
+ var replacement = node.ownerDocument.createElement(tag);
749
+ while (node.firstChild) {
750
+ replacement.appendChild(node.firstChild);
751
+ }
752
+ node.parentNode.replaceChild(replacement, node);
753
+ if (node.readability) replacement.readability = node.readability;
754
+
755
+ for (var i = 0; i < node.attributes.length; i++) {
756
+ try {
757
+ replacement.setAttribute(
758
+ node.attributes[i].name,
759
+ node.attributes[i].value,
760
+ );
761
+ } catch (ex) {
762
+ /* it's possible for setAttribute() to throw if the attribute name
763
+ * isn't a valid XML Name. Such attributes can however be parsed from
764
+ * source in HTML docs, see https://github.com/whatwg/html/issues/4275,
765
+ * so we can hit them here and then throw. We don't care about such
766
+ * attributes so we ignore them.
767
+ */
768
+ }
769
+ }
770
+ return replacement;
771
+ },
772
+
773
+ /**
774
+ * Prepare the article node for display. Clean out any inline styles,
775
+ * iframes, forms, strip extraneous <p> tags, etc.
776
+ *
777
+ * @param Element
778
+ * @return void
779
+ **/
780
+ _prepArticle: function (articleContent) {
781
+ this._cleanStyles(articleContent);
782
+
783
+ // Check for data tables before we continue, to avoid removing items in
784
+ // those tables, which will often be isolated even though they're
785
+ // visually linked to other content-ful elements (text, images, etc.).
786
+ this._markDataTables(articleContent);
787
+
788
+ this._fixLazyImages(articleContent);
789
+
790
+ // Clean out junk from the article content
791
+ this._cleanConditionally(articleContent, "form");
792
+ this._cleanConditionally(articleContent, "fieldset");
793
+ this._clean(articleContent, "object");
794
+ this._clean(articleContent, "embed");
795
+ this._clean(articleContent, "footer");
796
+ this._clean(articleContent, "link");
797
+ this._clean(articleContent, "aside");
798
+
799
+ // Clean out elements with little content that have "share" in their id/class combinations from final top candidates,
800
+ // which means we don't remove the top candidates even they have "share".
801
+
802
+ var shareElementThreshold = this.DEFAULT_CHAR_THRESHOLD;
803
+
804
+ this._forEachNode(articleContent.children, function (topCandidate) {
805
+ this._cleanMatchedNodes(topCandidate, function (node, matchString) {
806
+ return (
807
+ this.REGEXPS.shareElements.test(matchString) &&
808
+ node.textContent.length < shareElementThreshold
809
+ );
810
+ });
811
+ });
812
+
813
+ this._clean(articleContent, "iframe");
814
+ this._clean(articleContent, "input");
815
+ this._clean(articleContent, "textarea");
816
+ this._clean(articleContent, "select");
817
+ this._clean(articleContent, "button");
818
+ this._cleanHeaders(articleContent);
819
+
820
+ // Do these last as the previous stuff may have removed junk
821
+ // that will affect these
822
+ this._cleanConditionally(articleContent, "table");
823
+ this._cleanConditionally(articleContent, "ul");
824
+ this._cleanConditionally(articleContent, "div");
825
+
826
+ // replace H1 with H2 as H1 should be only title that is displayed separately
827
+ this._replaceNodeTags(
828
+ this._getAllNodesWithTag(articleContent, ["h1"]),
829
+ "h2",
830
+ );
831
+
832
+ // Remove extra paragraphs
833
+ this._removeNodes(
834
+ this._getAllNodesWithTag(articleContent, ["p"]),
835
+ function (paragraph) {
836
+ var imgCount = paragraph.getElementsByTagName("img").length;
837
+ var embedCount = paragraph.getElementsByTagName("embed").length;
838
+ var objectCount = paragraph.getElementsByTagName("object").length;
839
+ // At this point, nasty iframes have been removed, only remain embedded video ones.
840
+ var iframeCount = paragraph.getElementsByTagName("iframe").length;
841
+ var totalCount = imgCount + embedCount + objectCount + iframeCount;
842
+
843
+ return totalCount === 0 && !this._getInnerText(paragraph, false);
844
+ },
845
+ );
846
+
847
+ this._forEachNode(
848
+ this._getAllNodesWithTag(articleContent, ["br"]),
849
+ function (br) {
850
+ var next = this._nextNode(br.nextSibling);
851
+ if (next && next.tagName == "P") br.parentNode.removeChild(br);
852
+ },
853
+ );
854
+
855
+ // Remove single-cell tables
856
+ this._forEachNode(
857
+ this._getAllNodesWithTag(articleContent, ["table"]),
858
+ function (table) {
859
+ var tbody = this._hasSingleTagInsideElement(table, "TBODY")
860
+ ? table.firstElementChild
861
+ : table;
862
+ if (this._hasSingleTagInsideElement(tbody, "TR")) {
863
+ var row = tbody.firstElementChild;
864
+ if (this._hasSingleTagInsideElement(row, "TD")) {
865
+ var cell = row.firstElementChild;
866
+ cell = this._setNodeTag(
867
+ cell,
868
+ this._everyNode(cell.childNodes, this._isPhrasingContent)
869
+ ? "P"
870
+ : "DIV",
871
+ );
872
+ table.parentNode.replaceChild(cell, table);
873
+ }
874
+ }
875
+ },
876
+ );
877
+ },
878
+
879
+ /**
880
+ * Initialize a node with the readability object. Also checks the
881
+ * className/id for special names to add to its score.
882
+ *
883
+ * @param Element
884
+ * @return void
885
+ **/
886
+ _initializeNode: function (node) {
887
+ node.readability = { contentScore: 0 };
888
+
889
+ switch (node.tagName) {
890
+ case "DIV":
891
+ node.readability.contentScore += 5;
892
+ break;
893
+
894
+ case "PRE":
895
+ case "TD":
896
+ case "BLOCKQUOTE":
897
+ node.readability.contentScore += 3;
898
+ break;
899
+
900
+ case "ADDRESS":
901
+ case "OL":
902
+ case "UL":
903
+ case "DL":
904
+ case "DD":
905
+ case "DT":
906
+ case "LI":
907
+ case "FORM":
908
+ node.readability.contentScore -= 3;
909
+ break;
910
+
911
+ case "H1":
912
+ case "H2":
913
+ case "H3":
914
+ case "H4":
915
+ case "H5":
916
+ case "H6":
917
+ case "TH":
918
+ node.readability.contentScore -= 5;
919
+ break;
920
+ }
921
+
922
+ node.readability.contentScore += this._getClassWeight(node);
923
+ },
924
+
925
+ _removeAndGetNext: function (node) {
926
+ var nextNode = this._getNextNode(node, true);
927
+ node.parentNode.removeChild(node);
928
+ return nextNode;
929
+ },
930
+
931
+ /**
932
+ * Traverse the DOM from node to node, starting at the node passed in.
933
+ * Pass true for the second parameter to indicate this node itself
934
+ * (and its kids) are going away, and we want the next node over.
935
+ *
936
+ * Calling this in a loop will traverse the DOM depth-first.
937
+ */
938
+ _getNextNode: function (node, ignoreSelfAndKids) {
939
+ // First check for kids if those aren't being ignored
940
+ if (!ignoreSelfAndKids && node.firstElementChild) {
941
+ return node.firstElementChild;
942
+ }
943
+ // Then for siblings...
944
+ if (node.nextElementSibling) {
945
+ return node.nextElementSibling;
946
+ }
947
+ // And finally, move up the parent chain *and* find a sibling
948
+ // (because this is depth-first traversal, we will have already
949
+ // seen the parent nodes themselves).
950
+ do {
951
+ node = node.parentNode;
952
+ } while (node && !node.nextElementSibling);
953
+ return node && node.nextElementSibling;
954
+ },
955
+
956
+ // compares second text to first one
957
+ // 1 = same text, 0 = completely different text
958
+ // works the way that it splits both texts into words and then finds words that are unique in second text
959
+ // the result is given by the lower length of unique parts
960
+ _textSimilarity: function (textA, textB) {
961
+ var tokensA = textA
962
+ .toLowerCase()
963
+ .split(this.REGEXPS.tokenize)
964
+ .filter(Boolean);
965
+ var tokensB = textB
966
+ .toLowerCase()
967
+ .split(this.REGEXPS.tokenize)
968
+ .filter(Boolean);
969
+ if (!tokensA.length || !tokensB.length) {
970
+ return 0;
971
+ }
972
+ var uniqTokensB = tokensB.filter((token) => !tokensA.includes(token));
973
+ var distanceB = uniqTokensB.join(" ").length / tokensB.join(" ").length;
974
+ return 1 - distanceB;
975
+ },
976
+
977
+ _checkByline: function (node, matchString) {
978
+ if (this._articleByline) {
979
+ return false;
980
+ }
981
+
982
+ if (node.getAttribute !== undefined) {
983
+ var rel = node.getAttribute("rel");
984
+ var itemprop = node.getAttribute("itemprop");
985
+ }
986
+
987
+ if (
988
+ (rel === "author" ||
989
+ (itemprop && itemprop.indexOf("author") !== -1) ||
990
+ this.REGEXPS.byline.test(matchString)) &&
991
+ this._isValidByline(node.textContent)
992
+ ) {
993
+ this._articleByline = node.textContent.trim();
994
+ return true;
995
+ }
996
+
997
+ return false;
998
+ },
999
+
1000
+ _getNodeAncestors: function (node, maxDepth) {
1001
+ maxDepth = maxDepth || 0;
1002
+ var i = 0,
1003
+ ancestors = [];
1004
+ while (node.parentNode) {
1005
+ ancestors.push(node.parentNode);
1006
+ if (maxDepth && ++i === maxDepth) break;
1007
+ node = node.parentNode;
1008
+ }
1009
+ return ancestors;
1010
+ },
1011
+
1012
+ /***
1013
+ * grabArticle - Using a variety of metrics (content score, classname, element types), find the content that is
1014
+ * most likely to be the stuff a user wants to read. Then return it wrapped up in a div.
1015
+ *
1016
+ * @param page a document to run upon. Needs to be a full document, complete with body.
1017
+ * @return Element
1018
+ **/
1019
+ _grabArticle: function (page) {
1020
+ this.log("**** grabArticle ****");
1021
+ var doc = this._doc;
1022
+ var isPaging = page !== null;
1023
+ page = page ? page : this._doc.body;
1024
+
1025
+ // We can't grab an article if we don't have a page!
1026
+ if (!page) {
1027
+ this.log("No body found in document. Abort.");
1028
+ return null;
1029
+ }
1030
+
1031
+ var pageCacheHtml = page.innerHTML;
1032
+
1033
+ while (true) {
1034
+ this.log("Starting grabArticle loop");
1035
+ var stripUnlikelyCandidates = this._flagIsActive(
1036
+ this.FLAG_STRIP_UNLIKELYS,
1037
+ );
1038
+
1039
+ // First, node prepping. Trash nodes that look cruddy (like ones with the
1040
+ // class name "comment", etc), and turn divs into P tags where they have been
1041
+ // used inappropriately (as in, where they contain no other block level elements.)
1042
+ var elementsToScore = [];
1043
+ var node = this._doc.documentElement;
1044
+
1045
+ let shouldRemoveTitleHeader = true;
1046
+
1047
+ while (node) {
1048
+ if (node.tagName === "HTML") {
1049
+ this._articleLang = node.getAttribute("lang");
1050
+ }
1051
+
1052
+ var matchString = node.className + " " + node.id;
1053
+
1054
+ if (!this._isProbablyVisible(node)) {
1055
+ this.log("Removing hidden node - " + matchString);
1056
+ node = this._removeAndGetNext(node);
1057
+ continue;
1058
+ }
1059
+
1060
+ // User is not able to see elements applied with both "aria-modal = true" and "role = dialog"
1061
+ if (
1062
+ node.getAttribute("aria-modal") == "true" &&
1063
+ node.getAttribute("role") == "dialog"
1064
+ ) {
1065
+ node = this._removeAndGetNext(node);
1066
+ continue;
1067
+ }
1068
+
1069
+ // Check to see if this node is a byline, and remove it if it is.
1070
+ if (this._checkByline(node, matchString)) {
1071
+ node = this._removeAndGetNext(node);
1072
+ continue;
1073
+ }
1074
+
1075
+ if (shouldRemoveTitleHeader && this._headerDuplicatesTitle(node)) {
1076
+ this.log(
1077
+ "Removing header: ",
1078
+ node.textContent.trim(),
1079
+ this._articleTitle.trim(),
1080
+ );
1081
+ shouldRemoveTitleHeader = false;
1082
+ node = this._removeAndGetNext(node);
1083
+ continue;
1084
+ }
1085
+
1086
+ // Remove unlikely candidates
1087
+ if (stripUnlikelyCandidates) {
1088
+ if (
1089
+ this.REGEXPS.unlikelyCandidates.test(matchString) &&
1090
+ !this.REGEXPS.okMaybeItsACandidate.test(matchString) &&
1091
+ !this._hasAncestorTag(node, "table") &&
1092
+ !this._hasAncestorTag(node, "code") &&
1093
+ node.tagName !== "BODY" &&
1094
+ node.tagName !== "A"
1095
+ ) {
1096
+ this.log("Removing unlikely candidate - " + matchString);
1097
+ node = this._removeAndGetNext(node);
1098
+ continue;
1099
+ }
1100
+
1101
+ if (this.UNLIKELY_ROLES.includes(node.getAttribute("role"))) {
1102
+ this.log(
1103
+ "Removing content with role " +
1104
+ node.getAttribute("role") +
1105
+ " - " +
1106
+ matchString,
1107
+ );
1108
+ node = this._removeAndGetNext(node);
1109
+ continue;
1110
+ }
1111
+ }
1112
+
1113
+ // Remove DIV, SECTION, and HEADER nodes without any content(e.g. text, image, video, or iframe).
1114
+ if (
1115
+ (node.tagName === "DIV" ||
1116
+ node.tagName === "SECTION" ||
1117
+ node.tagName === "HEADER" ||
1118
+ node.tagName === "H1" ||
1119
+ node.tagName === "H2" ||
1120
+ node.tagName === "H3" ||
1121
+ node.tagName === "H4" ||
1122
+ node.tagName === "H5" ||
1123
+ node.tagName === "H6") &&
1124
+ this._isElementWithoutContent(node)
1125
+ ) {
1126
+ node = this._removeAndGetNext(node);
1127
+ continue;
1128
+ }
1129
+
1130
+ if (this.DEFAULT_TAGS_TO_SCORE.indexOf(node.tagName) !== -1) {
1131
+ elementsToScore.push(node);
1132
+ }
1133
+
1134
+ // Turn all divs that don't have children block level elements into p's
1135
+ if (node.tagName === "DIV") {
1136
+ // Put phrasing content into paragraphs.
1137
+ var p = null;
1138
+ var childNode = node.firstChild;
1139
+ while (childNode) {
1140
+ var nextSibling = childNode.nextSibling;
1141
+ if (this._isPhrasingContent(childNode)) {
1142
+ if (p !== null) {
1143
+ p.appendChild(childNode);
1144
+ } else if (!this._isWhitespace(childNode)) {
1145
+ p = doc.createElement("p");
1146
+ node.replaceChild(p, childNode);
1147
+ p.appendChild(childNode);
1148
+ }
1149
+ } else if (p !== null) {
1150
+ while (p.lastChild && this._isWhitespace(p.lastChild)) {
1151
+ p.removeChild(p.lastChild);
1152
+ }
1153
+ p = null;
1154
+ }
1155
+ childNode = nextSibling;
1156
+ }
1157
+
1158
+ // Sites like http://mobile.slate.com encloses each paragraph with a DIV
1159
+ // element. DIVs with only a P element inside and no text content can be
1160
+ // safely converted into plain P elements to avoid confusing the scoring
1161
+ // algorithm with DIVs with are, in practice, paragraphs.
1162
+ if (
1163
+ this._hasSingleTagInsideElement(node, "P") &&
1164
+ this._getLinkDensity(node) < 0.25
1165
+ ) {
1166
+ var newNode = node.children[0];
1167
+ node.parentNode.replaceChild(newNode, node);
1168
+ node = newNode;
1169
+ elementsToScore.push(node);
1170
+ } else if (!this._hasChildBlockElement(node)) {
1171
+ node = this._setNodeTag(node, "P");
1172
+ elementsToScore.push(node);
1173
+ }
1174
+ }
1175
+ node = this._getNextNode(node);
1176
+ }
1177
+
1178
+ /**
1179
+ * Loop through all paragraphs, and assign a score to them based on how content-y they look.
1180
+ * Then add their score to their parent node.
1181
+ *
1182
+ * A score is determined by things like number of commas, class names, etc. Maybe eventually link density.
1183
+ **/
1184
+ var candidates = [];
1185
+ this._forEachNode(elementsToScore, function (elementToScore) {
1186
+ if (
1187
+ !elementToScore.parentNode ||
1188
+ typeof elementToScore.parentNode.tagName === "undefined"
1189
+ )
1190
+ return;
1191
+
1192
+ // If this paragraph is less than 25 characters, don't even count it.
1193
+ var innerText = this._getInnerText(elementToScore);
1194
+ if (innerText.length < 25) return;
1195
+
1196
+ // Exclude nodes with no ancestor.
1197
+ var ancestors = this._getNodeAncestors(elementToScore, 5);
1198
+ if (ancestors.length === 0) return;
1199
+
1200
+ var contentScore = 0;
1201
+
1202
+ // Add a point for the paragraph itself as a base.
1203
+ contentScore += 1;
1204
+
1205
+ // Add points for any commas within this paragraph.
1206
+ contentScore += innerText.split(",").length;
1207
+
1208
+ // For every 100 characters in this paragraph, add another point. Up to 3 points.
1209
+ contentScore += Math.min(Math.floor(innerText.length / 100), 3);
1210
+
1211
+ // Initialize and score ancestors.
1212
+ this._forEachNode(ancestors, function (ancestor, level) {
1213
+ if (
1214
+ !ancestor.tagName ||
1215
+ !ancestor.parentNode ||
1216
+ typeof ancestor.parentNode.tagName === "undefined"
1217
+ )
1218
+ return;
1219
+
1220
+ if (typeof ancestor.readability === "undefined") {
1221
+ this._initializeNode(ancestor);
1222
+ candidates.push(ancestor);
1223
+ }
1224
+
1225
+ // Node score divider:
1226
+ // - parent: 1 (no division)
1227
+ // - grandparent: 2
1228
+ // - great grandparent+: ancestor level * 3
1229
+ if (level === 0) var scoreDivider = 1;
1230
+ else if (level === 1) scoreDivider = 2;
1231
+ else scoreDivider = level * 3;
1232
+ ancestor.readability.contentScore += contentScore / scoreDivider;
1233
+ });
1234
+ });
1235
+
1236
+ // After we've calculated scores, loop through all of the possible
1237
+ // candidate nodes we found and find the one with the highest score.
1238
+ var topCandidates = [];
1239
+ for (var c = 0, cl = candidates.length; c < cl; c += 1) {
1240
+ var candidate = candidates[c];
1241
+
1242
+ // Scale the final candidates score based on link density. Good content
1243
+ // should have a relatively small link density (5% or less) and be mostly
1244
+ // unaffected by this operation.
1245
+ var candidateScore =
1246
+ candidate.readability.contentScore *
1247
+ (1 - this._getLinkDensity(candidate));
1248
+ candidate.readability.contentScore = candidateScore;
1249
+
1250
+ this.log("Candidate:", candidate, "with score " + candidateScore);
1251
+
1252
+ for (var t = 0; t < this._nbTopCandidates; t++) {
1253
+ var aTopCandidate = topCandidates[t];
1254
+
1255
+ if (
1256
+ !aTopCandidate ||
1257
+ candidateScore > aTopCandidate.readability.contentScore
1258
+ ) {
1259
+ topCandidates.splice(t, 0, candidate);
1260
+ if (topCandidates.length > this._nbTopCandidates)
1261
+ topCandidates.pop();
1262
+ break;
1263
+ }
1264
+ }
1265
+ }
1266
+
1267
+ var topCandidate = topCandidates[0] || null;
1268
+ var neededToCreateTopCandidate = false;
1269
+ var parentOfTopCandidate;
1270
+
1271
+ // If we still have no top candidate, just use the body as a last resort.
1272
+ // We also have to copy the body node so it is something we can modify.
1273
+ if (topCandidate === null || topCandidate.tagName === "BODY") {
1274
+ // Move all of the page's children into topCandidate
1275
+ topCandidate = doc.createElement("DIV");
1276
+ neededToCreateTopCandidate = true;
1277
+ // Move everything (not just elements, also text nodes etc.) into the container
1278
+ // so we even include text directly in the body:
1279
+ while (page.firstChild) {
1280
+ this.log("Moving child out:", page.firstChild);
1281
+ topCandidate.appendChild(page.firstChild);
1282
+ }
1283
+
1284
+ page.appendChild(topCandidate);
1285
+
1286
+ this._initializeNode(topCandidate);
1287
+ } else if (topCandidate) {
1288
+ // Find a better top candidate node if it contains (at least three) nodes which belong to `topCandidates` array
1289
+ // and whose scores are quite close to the current `topCandidate` node's.
1290
+ var alternativeCandidateAncestors = [];
1291
+ for (var i = 1; i < topCandidates.length; i++) {
1292
+ if (
1293
+ topCandidates[i].readability.contentScore /
1294
+ topCandidate.readability.contentScore >=
1295
+ 0.75
1296
+ ) {
1297
+ alternativeCandidateAncestors.push(
1298
+ this._getNodeAncestors(topCandidates[i]),
1299
+ );
1300
+ }
1301
+ }
1302
+ var MINIMUM_TOPCANDIDATES = 3;
1303
+ if (alternativeCandidateAncestors.length >= MINIMUM_TOPCANDIDATES) {
1304
+ parentOfTopCandidate = topCandidate.parentNode;
1305
+ while (parentOfTopCandidate.tagName !== "BODY") {
1306
+ var listsContainingThisAncestor = 0;
1307
+ for (
1308
+ var ancestorIndex = 0;
1309
+ ancestorIndex < alternativeCandidateAncestors.length &&
1310
+ listsContainingThisAncestor < MINIMUM_TOPCANDIDATES;
1311
+ ancestorIndex++
1312
+ ) {
1313
+ listsContainingThisAncestor += Number(
1314
+ alternativeCandidateAncestors[ancestorIndex].includes(
1315
+ parentOfTopCandidate,
1316
+ ),
1317
+ );
1318
+ }
1319
+ if (listsContainingThisAncestor >= MINIMUM_TOPCANDIDATES) {
1320
+ topCandidate = parentOfTopCandidate;
1321
+ break;
1322
+ }
1323
+ parentOfTopCandidate = parentOfTopCandidate.parentNode;
1324
+ }
1325
+ }
1326
+ if (!topCandidate.readability) {
1327
+ this._initializeNode(topCandidate);
1328
+ }
1329
+
1330
+ // Because of our bonus system, parents of candidates might have scores
1331
+ // themselves. They get half of the node's score. There won't be nodes with higher
1332
+ // scores than our topCandidate, but if we see the score going *up* in the first
1333
+ // few steps up the tree, that's a decent sign that there might be more content
1334
+ // lurking in other places that we want to unify in. The sibling stuff
1335
+ // below does some of that - but only if we've looked high enough up the DOM
1336
+ // tree.
1337
+ parentOfTopCandidate = topCandidate.parentNode;
1338
+ var lastScore = topCandidate.readability.contentScore;
1339
+ // The scores shouldn't get too low.
1340
+ var scoreThreshold = lastScore / 3;
1341
+ while (parentOfTopCandidate.tagName !== "BODY") {
1342
+ if (!parentOfTopCandidate.readability) {
1343
+ parentOfTopCandidate = parentOfTopCandidate.parentNode;
1344
+ continue;
1345
+ }
1346
+ var parentScore = parentOfTopCandidate.readability.contentScore;
1347
+ if (parentScore < scoreThreshold) break;
1348
+ if (parentScore > lastScore) {
1349
+ // Alright! We found a better parent to use.
1350
+ topCandidate = parentOfTopCandidate;
1351
+ break;
1352
+ }
1353
+ lastScore = parentOfTopCandidate.readability.contentScore;
1354
+ parentOfTopCandidate = parentOfTopCandidate.parentNode;
1355
+ }
1356
+
1357
+ // If the top candidate is the only child, use parent instead. This will help sibling
1358
+ // joining logic when adjacent content is actually located in parent's sibling node.
1359
+ parentOfTopCandidate = topCandidate.parentNode;
1360
+ while (
1361
+ parentOfTopCandidate.tagName != "BODY" &&
1362
+ parentOfTopCandidate.children.length == 1
1363
+ ) {
1364
+ topCandidate = parentOfTopCandidate;
1365
+ parentOfTopCandidate = topCandidate.parentNode;
1366
+ }
1367
+ if (!topCandidate.readability) {
1368
+ this._initializeNode(topCandidate);
1369
+ }
1370
+ }
1371
+
1372
+ // Now that we have the top candidate, look through its siblings for content
1373
+ // that might also be related. Things like preambles, content split by ads
1374
+ // that we removed, etc.
1375
+ var articleContent = doc.createElement("DIV");
1376
+ if (isPaging) articleContent.id = "readability-content";
1377
+
1378
+ var siblingScoreThreshold = Math.max(
1379
+ 10,
1380
+ topCandidate.readability.contentScore * 0.2,
1381
+ );
1382
+ // Keep potential top candidate's parent node to try to get text direction of it later.
1383
+ parentOfTopCandidate = topCandidate.parentNode;
1384
+ var siblings = parentOfTopCandidate.children;
1385
+
1386
+ for (var s = 0, sl = siblings.length; s < sl; s++) {
1387
+ var sibling = siblings[s];
1388
+ var append = false;
1389
+
1390
+ this.log(
1391
+ "Looking at sibling node:",
1392
+ sibling,
1393
+ sibling.readability
1394
+ ? "with score " + sibling.readability.contentScore
1395
+ : "",
1396
+ );
1397
+ this.log(
1398
+ "Sibling has score",
1399
+ sibling.readability ? sibling.readability.contentScore : "Unknown",
1400
+ );
1401
+
1402
+ if (sibling === topCandidate) {
1403
+ append = true;
1404
+ } else {
1405
+ var contentBonus = 0;
1406
+
1407
+ // Give a bonus if sibling nodes and the top candidate have the same classname
1408
+ if (
1409
+ sibling.className === topCandidate.className &&
1410
+ topCandidate.className !== ""
1411
+ )
1412
+ contentBonus += topCandidate.readability.contentScore * 0.2;
1413
+
1414
+ if (
1415
+ sibling.readability &&
1416
+ sibling.readability.contentScore + contentBonus >=
1417
+ siblingScoreThreshold
1418
+ ) {
1419
+ append = true;
1420
+ } else if (sibling.nodeName === "P") {
1421
+ var linkDensity = this._getLinkDensity(sibling);
1422
+ var nodeContent = this._getInnerText(sibling);
1423
+ var nodeLength = nodeContent.length;
1424
+
1425
+ if (nodeLength > 80 && linkDensity < 0.25) {
1426
+ append = true;
1427
+ } else if (
1428
+ nodeLength < 80 &&
1429
+ nodeLength > 0 &&
1430
+ linkDensity === 0 &&
1431
+ nodeContent.search(/\.( |$)/) !== -1
1432
+ ) {
1433
+ append = true;
1434
+ }
1435
+ }
1436
+ }
1437
+
1438
+ if (append) {
1439
+ this.log("Appending node:", sibling);
1440
+
1441
+ if (this.ALTER_TO_DIV_EXCEPTIONS.indexOf(sibling.nodeName) === -1) {
1442
+ // We have a node that isn't a common block level element, like a form or td tag.
1443
+ // Turn it into a div so it doesn't get filtered out later by accident.
1444
+ this.log("Altering sibling:", sibling, "to div.");
1445
+
1446
+ sibling = this._setNodeTag(sibling, "DIV");
1447
+ }
1448
+
1449
+ articleContent.appendChild(sibling);
1450
+ // Fetch children again to make it compatible
1451
+ // with DOM parsers without live collection support.
1452
+ siblings = parentOfTopCandidate.children;
1453
+ // siblings is a reference to the children array, and
1454
+ // sibling is removed from the array when we call appendChild().
1455
+ // As a result, we must revisit this index since the nodes
1456
+ // have been shifted.
1457
+ s -= 1;
1458
+ sl -= 1;
1459
+ }
1460
+ }
1461
+
1462
+ if (this._debug)
1463
+ this.log("Article content pre-prep: " + articleContent.innerHTML);
1464
+ // So we have all of the content that we need. Now we clean it up for presentation.
1465
+ this._prepArticle(articleContent);
1466
+ if (this._debug)
1467
+ this.log("Article content post-prep: " + articleContent.innerHTML);
1468
+
1469
+ if (neededToCreateTopCandidate) {
1470
+ // We already created a fake div thing, and there wouldn't have been any siblings left
1471
+ // for the previous loop, so there's no point trying to create a new div, and then
1472
+ // move all the children over. Just assign IDs and class names here. No need to append
1473
+ // because that already happened anyway.
1474
+ topCandidate.id = "readability-page-1";
1475
+ topCandidate.className = "page";
1476
+ } else {
1477
+ var div = doc.createElement("DIV");
1478
+ div.id = "readability-page-1";
1479
+ div.className = "page";
1480
+ while (articleContent.firstChild) {
1481
+ div.appendChild(articleContent.firstChild);
1482
+ }
1483
+ articleContent.appendChild(div);
1484
+ }
1485
+
1486
+ if (this._debug)
1487
+ this.log("Article content after paging: " + articleContent.innerHTML);
1488
+
1489
+ var parseSuccessful = true;
1490
+
1491
+ // Now that we've gone through the full algorithm, check to see if
1492
+ // we got any meaningful content. If we didn't, we may need to re-run
1493
+ // grabArticle with different flags set. This gives us a higher likelihood of
1494
+ // finding the content, and the sieve approach gives us a higher likelihood of
1495
+ // finding the -right- content.
1496
+ var textLength = this._getInnerText(articleContent, true).length;
1497
+ if (textLength < this._charThreshold) {
1498
+ parseSuccessful = false;
1499
+ page.innerHTML = pageCacheHtml;
1500
+
1501
+ if (this._flagIsActive(this.FLAG_STRIP_UNLIKELYS)) {
1502
+ this._removeFlag(this.FLAG_STRIP_UNLIKELYS);
1503
+ this._attempts.push({
1504
+ articleContent: articleContent,
1505
+ textLength: textLength,
1506
+ });
1507
+ } else if (this._flagIsActive(this.FLAG_WEIGHT_CLASSES)) {
1508
+ this._removeFlag(this.FLAG_WEIGHT_CLASSES);
1509
+ this._attempts.push({
1510
+ articleContent: articleContent,
1511
+ textLength: textLength,
1512
+ });
1513
+ } else if (this._flagIsActive(this.FLAG_CLEAN_CONDITIONALLY)) {
1514
+ this._removeFlag(this.FLAG_CLEAN_CONDITIONALLY);
1515
+ this._attempts.push({
1516
+ articleContent: articleContent,
1517
+ textLength: textLength,
1518
+ });
1519
+ } else {
1520
+ this._attempts.push({
1521
+ articleContent: articleContent,
1522
+ textLength: textLength,
1523
+ });
1524
+ // No luck after removing flags, just return the longest text we found during the different loops
1525
+ this._attempts.sort(function (a, b) {
1526
+ return b.textLength - a.textLength;
1527
+ });
1528
+
1529
+ // But first check if we actually have something
1530
+ if (!this._attempts[0].textLength) {
1531
+ return null;
1532
+ }
1533
+
1534
+ articleContent = this._attempts[0].articleContent;
1535
+ parseSuccessful = true;
1536
+ }
1537
+ }
1538
+
1539
+ if (parseSuccessful) {
1540
+ // Find out text direction from ancestors of final top candidate.
1541
+ var ancestors = [parentOfTopCandidate, topCandidate].concat(
1542
+ this._getNodeAncestors(parentOfTopCandidate),
1543
+ );
1544
+ this._someNode(ancestors, function (ancestor) {
1545
+ if (!ancestor.tagName) return false;
1546
+ var articleDir = ancestor.getAttribute("dir");
1547
+ if (articleDir) {
1548
+ this._articleDir = articleDir;
1549
+ return true;
1550
+ }
1551
+ return false;
1552
+ });
1553
+ return articleContent;
1554
+ }
1555
+ }
1556
+ },
1557
+
1558
+ /**
1559
+ * Check whether the input string could be a byline.
1560
+ * This verifies that the input is a string, and that the length
1561
+ * is less than 100 chars.
1562
+ *
1563
+ * @param possibleByline {string} - a string to check whether its a byline.
1564
+ * @return Boolean - whether the input string is a byline.
1565
+ */
1566
+ _isValidByline: function (byline) {
1567
+ if (typeof byline == "string" || byline instanceof String) {
1568
+ byline = byline.trim();
1569
+ return byline.length > 0 && byline.length < 100;
1570
+ }
1571
+ return false;
1572
+ },
1573
+
1574
+ /**
1575
+ * Converts some of the common HTML entities in string to their corresponding characters.
1576
+ *
1577
+ * @param str {string} - a string to unescape.
1578
+ * @return string without HTML entity.
1579
+ */
1580
+ _unescapeHtmlEntities: function (str) {
1581
+ if (!str) {
1582
+ return str;
1583
+ }
1584
+
1585
+ var htmlEscapeMap = this.HTML_ESCAPE_MAP;
1586
+ return str
1587
+ .replace(/&(quot|amp|apos|lt|gt);/g, function (_, tag) {
1588
+ return htmlEscapeMap[tag];
1589
+ })
1590
+ .replace(
1591
+ /&#(?:x([0-9a-z]{1,4})|([0-9]{1,4}));/gi,
1592
+ function (_, hex, numStr) {
1593
+ var num = parseInt(hex || numStr, hex ? 16 : 10);
1594
+ return String.fromCharCode(num);
1595
+ },
1596
+ );
1597
+ },
1598
+
1599
+ /**
1600
+ * Try to extract metadata from JSON-LD object.
1601
+ * For now, only Schema.org objects of type Article or its subtypes are supported.
1602
+ * @return Object with any metadata that could be extracted (possibly none)
1603
+ */
1604
+ _getJSONLD: function (doc) {
1605
+ var scripts = this._getAllNodesWithTag(doc, ["script"]);
1606
+
1607
+ var metadata;
1608
+
1609
+ this._forEachNode(scripts, function (jsonLdElement) {
1610
+ if (
1611
+ !metadata &&
1612
+ jsonLdElement.getAttribute("type") === "application/ld+json"
1613
+ ) {
1614
+ try {
1615
+ // Strip CDATA markers if present
1616
+ var content = jsonLdElement.textContent.replace(
1617
+ /^\s*<!\[CDATA\[|\]\]>\s*$/g,
1618
+ "",
1619
+ );
1620
+ var parsed = JSON.parse(content);
1621
+ if (
1622
+ !parsed["@context"] ||
1623
+ !parsed["@context"].match(/^https?\:\/\/schema\.org$/)
1624
+ ) {
1625
+ return;
1626
+ }
1627
+
1628
+ if (!parsed["@type"] && Array.isArray(parsed["@graph"])) {
1629
+ parsed = parsed["@graph"].find(function (it) {
1630
+ return (it["@type"] || "").match(this.REGEXPS.jsonLdArticleTypes);
1631
+ });
1632
+ }
1633
+
1634
+ if (
1635
+ !parsed ||
1636
+ !parsed["@type"] ||
1637
+ !parsed["@type"].match(this.REGEXPS.jsonLdArticleTypes)
1638
+ ) {
1639
+ return;
1640
+ }
1641
+
1642
+ metadata = {};
1643
+
1644
+ if (
1645
+ typeof parsed.name === "string" &&
1646
+ typeof parsed.headline === "string" &&
1647
+ parsed.name !== parsed.headline
1648
+ ) {
1649
+ // we have both name and headline element in the JSON-LD. They should both be the same but some websites like aktualne.cz
1650
+ // put their own name into "name" and the article title to "headline" which confuses Readability. So we try to check if either
1651
+ // "name" or "headline" closely matches the html title, and if so, use that one. If not, then we use "name" by default.
1652
+
1653
+ var title = this._getArticleTitle();
1654
+ var nameMatches = this._textSimilarity(parsed.name, title) > 0.75;
1655
+ var headlineMatches =
1656
+ this._textSimilarity(parsed.headline, title) > 0.75;
1657
+
1658
+ if (headlineMatches && !nameMatches) {
1659
+ metadata.title = parsed.headline;
1660
+ } else {
1661
+ metadata.title = parsed.name;
1662
+ }
1663
+ } else if (typeof parsed.name === "string") {
1664
+ metadata.title = parsed.name.trim();
1665
+ } else if (typeof parsed.headline === "string") {
1666
+ metadata.title = parsed.headline.trim();
1667
+ }
1668
+ if (parsed.author) {
1669
+ if (typeof parsed.author.name === "string") {
1670
+ metadata.byline = parsed.author.name.trim();
1671
+ } else if (
1672
+ Array.isArray(parsed.author) &&
1673
+ parsed.author[0] &&
1674
+ typeof parsed.author[0].name === "string"
1675
+ ) {
1676
+ metadata.byline = parsed.author
1677
+ .filter(function (author) {
1678
+ return author && typeof author.name === "string";
1679
+ })
1680
+ .map(function (author) {
1681
+ return author.name.trim();
1682
+ })
1683
+ .join(", ");
1684
+ }
1685
+ }
1686
+ if (typeof parsed.description === "string") {
1687
+ metadata.excerpt = parsed.description.trim();
1688
+ }
1689
+ if (parsed.publisher && typeof parsed.publisher.name === "string") {
1690
+ metadata.siteName = parsed.publisher.name.trim();
1691
+ }
1692
+ return;
1693
+ } catch (err) {
1694
+ this.log(err.message);
1695
+ }
1696
+ }
1697
+ });
1698
+ return metadata ? metadata : {};
1699
+ },
1700
+
1701
+ /**
1702
+ * Attempts to get excerpt and byline metadata for the article.
1703
+ *
1704
+ * @param {Object} jsonld — object containing any metadata that
1705
+ * could be extracted from JSON-LD object.
1706
+ *
1707
+ * @return Object with optional "excerpt" and "byline" properties
1708
+ */
1709
+ _getArticleMetadata: function (jsonld) {
1710
+ var metadata = {};
1711
+ var values = {};
1712
+ var metaElements = this._doc.getElementsByTagName("meta");
1713
+
1714
+ // property is a space-separated list of values
1715
+ var propertyPattern =
1716
+ /\s*(dc|dcterm|og|twitter)\s*:\s*(author|creator|description|title|site_name)\s*/gi;
1717
+
1718
+ // name is a single value
1719
+ var namePattern =
1720
+ /^\s*(?:(dc|dcterm|og|twitter|weibo:(article|webpage))\s*[\.:]\s*)?(author|creator|description|title|site_name)\s*$/i;
1721
+
1722
+ // Find description tags.
1723
+ this._forEachNode(metaElements, function (element) {
1724
+ var elementName = element.getAttribute("name");
1725
+ var elementProperty = element.getAttribute("property");
1726
+ var content = element.getAttribute("content");
1727
+ if (!content) {
1728
+ return;
1729
+ }
1730
+ var matches = null;
1731
+ var name = null;
1732
+
1733
+ if (elementProperty) {
1734
+ matches = elementProperty.match(propertyPattern);
1735
+ if (matches) {
1736
+ // Convert to lowercase, and remove any whitespace
1737
+ // so we can match below.
1738
+ name = matches[0].toLowerCase().replace(/\s/g, "");
1739
+ // multiple authors
1740
+ values[name] = content.trim();
1741
+ }
1742
+ }
1743
+ if (!matches && elementName && namePattern.test(elementName)) {
1744
+ name = elementName;
1745
+ if (content) {
1746
+ // Convert to lowercase, remove any whitespace, and convert dots
1747
+ // to colons so we can match below.
1748
+ name = name.toLowerCase().replace(/\s/g, "").replace(/\./g, ":");
1749
+ values[name] = content.trim();
1750
+ }
1751
+ }
1752
+ });
1753
+
1754
+ // get title
1755
+ metadata.title =
1756
+ jsonld.title ||
1757
+ values["dc:title"] ||
1758
+ values["dcterm:title"] ||
1759
+ values["og:title"] ||
1760
+ values["weibo:article:title"] ||
1761
+ values["weibo:webpage:title"] ||
1762
+ values["title"] ||
1763
+ values["twitter:title"];
1764
+
1765
+ if (!metadata.title) {
1766
+ metadata.title = this._getArticleTitle();
1767
+ }
1768
+
1769
+ // get author
1770
+ metadata.byline =
1771
+ jsonld.byline ||
1772
+ values["dc:creator"] ||
1773
+ values["dcterm:creator"] ||
1774
+ values["author"];
1775
+
1776
+ // get description
1777
+ metadata.excerpt =
1778
+ jsonld.excerpt ||
1779
+ values["dc:description"] ||
1780
+ values["dcterm:description"] ||
1781
+ values["og:description"] ||
1782
+ values["weibo:article:description"] ||
1783
+ values["weibo:webpage:description"] ||
1784
+ values["description"] ||
1785
+ values["twitter:description"];
1786
+
1787
+ // get site name
1788
+ metadata.siteName = jsonld.siteName || values["og:site_name"];
1789
+
1790
+ // in many sites the meta value is escaped with HTML entities,
1791
+ // so here we need to unescape it
1792
+ metadata.title = this._unescapeHtmlEntities(metadata.title);
1793
+ metadata.byline = this._unescapeHtmlEntities(metadata.byline);
1794
+ metadata.excerpt = this._unescapeHtmlEntities(metadata.excerpt);
1795
+ metadata.siteName = this._unescapeHtmlEntities(metadata.siteName);
1796
+
1797
+ return metadata;
1798
+ },
1799
+
1800
+ /**
1801
+ * Check if node is image, or if node contains exactly only one image
1802
+ * whether as a direct child or as its descendants.
1803
+ *
1804
+ * @param Element
1805
+ **/
1806
+ _isSingleImage: function (node) {
1807
+ if (node.tagName === "IMG") {
1808
+ return true;
1809
+ }
1810
+
1811
+ if (node.children.length !== 1 || node.textContent.trim() !== "") {
1812
+ return false;
1813
+ }
1814
+
1815
+ return this._isSingleImage(node.children[0]);
1816
+ },
1817
+
1818
+ /**
1819
+ * Find all <noscript> that are located after <img> nodes, and which contain only one
1820
+ * <img> element. Replace the first image with the image from inside the <noscript> tag,
1821
+ * and remove the <noscript> tag. This improves the quality of the images we use on
1822
+ * some sites (e.g. Medium).
1823
+ *
1824
+ * @param Element
1825
+ **/
1826
+ _unwrapNoscriptImages: function (doc) {
1827
+ // Find img elements without a source or any attribute that might contain an image, and remove them.
1828
+ // This is done to prevent a placeholder img from being replaced by the img from noscript in the next step.
1829
+ var imgs = Array.from(doc.getElementsByTagName("img"));
1830
+ this._forEachNode(imgs, function (img) {
1831
+ for (var i = 0; i < img.attributes.length; i++) {
1832
+ var attr = img.attributes[i];
1833
+ switch (attr.name) {
1834
+ case "src":
1835
+ case "srcset":
1836
+ case "data-src":
1837
+ case "data-srcset":
1838
+ return;
1839
+ }
1840
+
1841
+ if (/\.(jpg|jpeg|png|webp)/i.test(attr.value)) {
1842
+ return;
1843
+ }
1844
+ }
1845
+
1846
+ img.parentNode.removeChild(img);
1847
+ });
1848
+
1849
+ // Next find noscript and try to extract its image
1850
+ var noscripts = Array.from(doc.getElementsByTagName("noscript"));
1851
+ this._forEachNode(noscripts, function (noscript) {
1852
+ // Parse content of noscript and make sure it only contains image
1853
+ var tmp = doc.createElement("div");
1854
+ tmp.innerHTML = noscript.innerHTML;
1855
+ if (!this._isSingleImage(tmp)) {
1856
+ return;
1857
+ }
1858
+
1859
+ // If noscript has previous sibling and it only contains image,
1860
+ // replace it with noscript content. However we also keep old
1861
+ // attributes that might contain an image.
1862
+ var prevElement = noscript.previousElementSibling;
1863
+ if (prevElement && this._isSingleImage(prevElement)) {
1864
+ var prevImg = prevElement;
1865
+ if (prevImg.tagName !== "IMG") {
1866
+ prevImg = prevElement.getElementsByTagName("img")[0];
1867
+ }
1868
+
1869
+ var newImg = tmp.getElementsByTagName("img")[0];
1870
+ for (var i = 0; i < prevImg.attributes.length; i++) {
1871
+ var attr = prevImg.attributes[i];
1872
+ if (attr.value === "") {
1873
+ continue;
1874
+ }
1875
+
1876
+ if (
1877
+ attr.name === "src" ||
1878
+ attr.name === "srcset" ||
1879
+ /\.(jpg|jpeg|png|webp)/i.test(attr.value)
1880
+ ) {
1881
+ if (newImg.getAttribute(attr.name) === attr.value) {
1882
+ continue;
1883
+ }
1884
+
1885
+ var attrName = attr.name;
1886
+ if (newImg.hasAttribute(attrName)) {
1887
+ attrName = "data-old-" + attrName;
1888
+ }
1889
+
1890
+ newImg.setAttribute(attrName, attr.value);
1891
+ }
1892
+ }
1893
+
1894
+ noscript.parentNode.replaceChild(tmp.firstElementChild, prevElement);
1895
+ }
1896
+ });
1897
+ },
1898
+
1899
+ /**
1900
+ * Removes script tags from the document.
1901
+ *
1902
+ * @param Element
1903
+ **/
1904
+ _removeScripts: function (doc) {
1905
+ this._removeNodes(this._getAllNodesWithTag(doc, ["script", "noscript"]));
1906
+ },
1907
+
1908
+ /**
1909
+ * Check if this node has only whitespace and a single element with given tag
1910
+ * Returns false if the DIV node contains non-empty text nodes
1911
+ * or if it contains no element with given tag or more than 1 element.
1912
+ *
1913
+ * @param Element
1914
+ * @param string tag of child element
1915
+ **/
1916
+ _hasSingleTagInsideElement: function (element, tag) {
1917
+ // There should be exactly 1 element child with given tag
1918
+ if (element.children.length != 1 || element.children[0].tagName !== tag) {
1919
+ return false;
1920
+ }
1921
+
1922
+ // And there should be no text nodes with real content
1923
+ return !this._someNode(element.childNodes, function (node) {
1924
+ return (
1925
+ node.nodeType === this.TEXT_NODE &&
1926
+ this.REGEXPS.hasContent.test(node.textContent)
1927
+ );
1928
+ });
1929
+ },
1930
+
1931
+ _isElementWithoutContent: function (node) {
1932
+ return (
1933
+ node.nodeType === this.ELEMENT_NODE &&
1934
+ node.textContent.trim().length == 0 &&
1935
+ (node.children.length == 0 ||
1936
+ node.children.length ==
1937
+ node.getElementsByTagName("br").length +
1938
+ node.getElementsByTagName("hr").length)
1939
+ );
1940
+ },
1941
+
1942
+ /**
1943
+ * Determine whether element has any children block level elements.
1944
+ *
1945
+ * @param Element
1946
+ */
1947
+ _hasChildBlockElement: function (element) {
1948
+ return this._someNode(element.childNodes, function (node) {
1949
+ return (
1950
+ this.DIV_TO_P_ELEMS.has(node.tagName) ||
1951
+ this._hasChildBlockElement(node)
1952
+ );
1953
+ });
1954
+ },
1955
+
1956
+ /***
1957
+ * Determine if a node qualifies as phrasing content.
1958
+ * https://developer.mozilla.org/en-US/docs/Web/Guide/HTML/Content_categories#Phrasing_content
1959
+ **/
1960
+ _isPhrasingContent: function (node) {
1961
+ return (
1962
+ node.nodeType === this.TEXT_NODE ||
1963
+ this.PHRASING_ELEMS.indexOf(node.tagName) !== -1 ||
1964
+ ((node.tagName === "A" ||
1965
+ node.tagName === "DEL" ||
1966
+ node.tagName === "INS") &&
1967
+ this._everyNode(node.childNodes, this._isPhrasingContent))
1968
+ );
1969
+ },
1970
+
1971
+ _isWhitespace: function (node) {
1972
+ return (
1973
+ (node.nodeType === this.TEXT_NODE &&
1974
+ node.textContent.trim().length === 0) ||
1975
+ (node.nodeType === this.ELEMENT_NODE && node.tagName === "BR")
1976
+ );
1977
+ },
1978
+
1979
+ /**
1980
+ * Get the inner text of a node - cross browser compatibly.
1981
+ * This also strips out any excess whitespace to be found.
1982
+ *
1983
+ * @param Element
1984
+ * @param Boolean normalizeSpaces (default: true)
1985
+ * @return string
1986
+ **/
1987
+ _getInnerText: function (e, normalizeSpaces) {
1988
+ normalizeSpaces =
1989
+ typeof normalizeSpaces === "undefined" ? true : normalizeSpaces;
1990
+ var textContent = e.textContent.trim();
1991
+
1992
+ if (normalizeSpaces) {
1993
+ return textContent.replace(this.REGEXPS.normalize, " ");
1994
+ }
1995
+ return textContent;
1996
+ },
1997
+
1998
+ /**
1999
+ * Get the number of times a string s appears in the node e.
2000
+ *
2001
+ * @param Element
2002
+ * @param string - what to split on. Default is ","
2003
+ * @return number (integer)
2004
+ **/
2005
+ _getCharCount: function (e, s) {
2006
+ s = s || ",";
2007
+ return this._getInnerText(e).split(s).length - 1;
2008
+ },
2009
+
2010
+ /**
2011
+ * Remove the style attribute on every e and under.
2012
+ * TODO: Test if getElementsByTagName(*) is faster.
2013
+ *
2014
+ * @param Element
2015
+ * @return void
2016
+ **/
2017
+ _cleanStyles: function (e) {
2018
+ if (!e || e.tagName.toLowerCase() === "svg") return;
2019
+
2020
+ // Remove `style` and deprecated presentational attributes
2021
+ for (var i = 0; i < this.PRESENTATIONAL_ATTRIBUTES.length; i++) {
2022
+ e.removeAttribute(this.PRESENTATIONAL_ATTRIBUTES[i]);
2023
+ }
2024
+
2025
+ if (this.DEPRECATED_SIZE_ATTRIBUTE_ELEMS.indexOf(e.tagName) !== -1) {
2026
+ e.removeAttribute("width");
2027
+ e.removeAttribute("height");
2028
+ }
2029
+
2030
+ var cur = e.firstElementChild;
2031
+ while (cur !== null) {
2032
+ this._cleanStyles(cur);
2033
+ cur = cur.nextElementSibling;
2034
+ }
2035
+ },
2036
+
2037
+ /**
2038
+ * Get the density of links as a percentage of the content
2039
+ * This is the amount of text that is inside a link divided by the total text in the node.
2040
+ *
2041
+ * @param Element
2042
+ * @return number (float)
2043
+ **/
2044
+ _getLinkDensity: function (element) {
2045
+ var textLength = this._getInnerText(element).length;
2046
+ if (textLength === 0) return 0;
2047
+
2048
+ var linkLength = 0;
2049
+
2050
+ // XXX implement _reduceNodeList?
2051
+ this._forEachNode(element.getElementsByTagName("a"), function (linkNode) {
2052
+ var href = linkNode.getAttribute("href");
2053
+ var coefficient = href && this.REGEXPS.hashUrl.test(href) ? 0.3 : 1;
2054
+ linkLength += this._getInnerText(linkNode).length * coefficient;
2055
+ });
2056
+
2057
+ return linkLength / textLength;
2058
+ },
2059
+
2060
+ /**
2061
+ * Get an elements class/id weight. Uses regular expressions to tell if this
2062
+ * element looks good or bad.
2063
+ *
2064
+ * @param Element
2065
+ * @return number (Integer)
2066
+ **/
2067
+ _getClassWeight: function (e) {
2068
+ if (!this._flagIsActive(this.FLAG_WEIGHT_CLASSES)) return 0;
2069
+
2070
+ var weight = 0;
2071
+
2072
+ // Look for a special classname
2073
+ if (typeof e.className === "string" && e.className !== "") {
2074
+ if (this.REGEXPS.negative.test(e.className)) weight -= 25;
2075
+
2076
+ if (this.REGEXPS.positive.test(e.className)) weight += 25;
2077
+ }
2078
+
2079
+ // Look for a special ID
2080
+ if (typeof e.id === "string" && e.id !== "") {
2081
+ if (this.REGEXPS.negative.test(e.id)) weight -= 25;
2082
+
2083
+ if (this.REGEXPS.positive.test(e.id)) weight += 25;
2084
+ }
2085
+
2086
+ return weight;
2087
+ },
2088
+
2089
+ /**
2090
+ * Clean a node of all elements of type "tag".
2091
+ * (Unless it's a youtube/vimeo video. People love movies.)
2092
+ *
2093
+ * @param Element
2094
+ * @param string tag to clean
2095
+ * @return void
2096
+ **/
2097
+ _clean: function (e, tag) {
2098
+ var isEmbed = ["object", "embed", "iframe"].indexOf(tag) !== -1;
2099
+
2100
+ this._removeNodes(this._getAllNodesWithTag(e, [tag]), function (element) {
2101
+ // Allow youtube and vimeo videos through as people usually want to see those.
2102
+ if (isEmbed) {
2103
+ // First, check the elements attributes to see if any of them contain youtube or vimeo
2104
+ for (var i = 0; i < element.attributes.length; i++) {
2105
+ if (this._allowedVideoRegex.test(element.attributes[i].value)) {
2106
+ return false;
2107
+ }
2108
+ }
2109
+
2110
+ // For embed with <object> tag, check inner HTML as well.
2111
+ if (
2112
+ element.tagName === "object" &&
2113
+ this._allowedVideoRegex.test(element.innerHTML)
2114
+ ) {
2115
+ return false;
2116
+ }
2117
+ }
2118
+
2119
+ return true;
2120
+ });
2121
+ },
2122
+
2123
+ /**
2124
+ * Check if a given node has one of its ancestor tag name matching the
2125
+ * provided one.
2126
+ * @param HTMLElement node
2127
+ * @param String tagName
2128
+ * @param Number maxDepth
2129
+ * @param Function filterFn a filter to invoke to determine whether this node 'counts'
2130
+ * @return Boolean
2131
+ */
2132
+ _hasAncestorTag: function (node, tagName, maxDepth, filterFn) {
2133
+ maxDepth = maxDepth || 3;
2134
+ tagName = tagName.toUpperCase();
2135
+ var depth = 0;
2136
+ while (node.parentNode) {
2137
+ if (maxDepth > 0 && depth > maxDepth) return false;
2138
+ if (
2139
+ node.parentNode.tagName === tagName &&
2140
+ (!filterFn || filterFn(node.parentNode))
2141
+ )
2142
+ return true;
2143
+ node = node.parentNode;
2144
+ depth++;
2145
+ }
2146
+ return false;
2147
+ },
2148
+
2149
+ /**
2150
+ * Return an object indicating how many rows and columns this table has.
2151
+ */
2152
+ _getRowAndColumnCount: function (table) {
2153
+ var rows = 0;
2154
+ var columns = 0;
2155
+ var trs = table.getElementsByTagName("tr");
2156
+ for (var i = 0; i < trs.length; i++) {
2157
+ var rowspan = trs[i].getAttribute("rowspan") || 0;
2158
+ if (rowspan) {
2159
+ rowspan = parseInt(rowspan, 10);
2160
+ }
2161
+ rows += rowspan || 1;
2162
+
2163
+ // Now look for column-related info
2164
+ var columnsInThisRow = 0;
2165
+ var cells = trs[i].getElementsByTagName("td");
2166
+ for (var j = 0; j < cells.length; j++) {
2167
+ var colspan = cells[j].getAttribute("colspan") || 0;
2168
+ if (colspan) {
2169
+ colspan = parseInt(colspan, 10);
2170
+ }
2171
+ columnsInThisRow += colspan || 1;
2172
+ }
2173
+ columns = Math.max(columns, columnsInThisRow);
2174
+ }
2175
+ return { rows: rows, columns: columns };
2176
+ },
2177
+
2178
+ /**
2179
+ * Look for 'data' (as opposed to 'layout') tables, for which we use
2180
+ * similar checks as
2181
+ * https://searchfox.org/mozilla-central/rev/f82d5c549f046cb64ce5602bfd894b7ae807c8f8/accessible/generic/TableAccessible.cpp#19
2182
+ */
2183
+ _markDataTables: function (root) {
2184
+ var tables = root.getElementsByTagName("table");
2185
+ for (var i = 0; i < tables.length; i++) {
2186
+ var table = tables[i];
2187
+ var role = table.getAttribute("role");
2188
+ if (role == "presentation") {
2189
+ table._readabilityDataTable = false;
2190
+ continue;
2191
+ }
2192
+ var datatable = table.getAttribute("datatable");
2193
+ if (datatable == "0") {
2194
+ table._readabilityDataTable = false;
2195
+ continue;
2196
+ }
2197
+ var summary = table.getAttribute("summary");
2198
+ if (summary) {
2199
+ table._readabilityDataTable = true;
2200
+ continue;
2201
+ }
2202
+
2203
+ var caption = table.getElementsByTagName("caption")[0];
2204
+ if (caption && caption.childNodes.length > 0) {
2205
+ table._readabilityDataTable = true;
2206
+ continue;
2207
+ }
2208
+
2209
+ // If the table has a descendant with any of these tags, consider a data table:
2210
+ var dataTableDescendants = ["col", "colgroup", "tfoot", "thead", "th"];
2211
+ var descendantExists = function (tag) {
2212
+ return !!table.getElementsByTagName(tag)[0];
2213
+ };
2214
+ if (dataTableDescendants.some(descendantExists)) {
2215
+ this.log("Data table because found data-y descendant");
2216
+ table._readabilityDataTable = true;
2217
+ continue;
2218
+ }
2219
+
2220
+ // Nested tables indicate a layout table:
2221
+ if (table.getElementsByTagName("table")[0]) {
2222
+ table._readabilityDataTable = false;
2223
+ continue;
2224
+ }
2225
+
2226
+ var sizeInfo = this._getRowAndColumnCount(table);
2227
+ if (sizeInfo.rows >= 10 || sizeInfo.columns > 4) {
2228
+ table._readabilityDataTable = true;
2229
+ continue;
2230
+ }
2231
+ // Now just go by size entirely:
2232
+ table._readabilityDataTable = sizeInfo.rows * sizeInfo.columns > 10;
2233
+ }
2234
+ },
2235
+
2236
+ /* convert images and figures that have properties like data-src into images that can be loaded without JS */
2237
+ _fixLazyImages: function (root) {
2238
+ this._forEachNode(
2239
+ this._getAllNodesWithTag(root, ["img", "picture", "figure"]),
2240
+ function (elem) {
2241
+ // Some sites (e.g. Kotaku) put a 1px square image as a base64 data uri in the src attribute.
2242
+ // So here we check whether the data uri is too short; if it is, we might as well remove it.
2243
+ if (elem.src && this.REGEXPS.b64DataUrl.test(elem.src)) {
2244
+ // Make sure it's not SVG, because SVG can have a meaningful image in under 133 bytes.
2245
+ var parts = this.REGEXPS.b64DataUrl.exec(elem.src);
2246
+ if (parts[1] === "image/svg+xml") {
2247
+ return;
2248
+ }
2249
+
2250
+ // Make sure this element has other attributes which contain an image.
2251
+ // If it doesn't, then this src is important and shouldn't be removed.
2252
+ var srcCouldBeRemoved = false;
2253
+ for (var i = 0; i < elem.attributes.length; i++) {
2254
+ var attr = elem.attributes[i];
2255
+ if (attr.name === "src") {
2256
+ continue;
2257
+ }
2258
+
2259
+ if (/\.(jpg|jpeg|png|webp)/i.test(attr.value)) {
2260
+ srcCouldBeRemoved = true;
2261
+ break;
2262
+ }
2263
+ }
2264
+
2265
+ // Here we assume that if an image is less than 100 bytes (or 133 B after base64 encoding),
2266
+ // it is too small to be meaningful, and is likely a placeholder image.
2267
+ if (srcCouldBeRemoved) {
2268
+ var b64starts = elem.src.search(/base64\s*/i) + 7;
2269
+ var b64length = elem.src.length - b64starts;
2270
+ if (b64length < 133) {
2271
+ elem.removeAttribute("src");
2272
+ }
2273
+ }
2274
+ }
2275
+
2276
+ // also check for "null" to work around https://github.com/jsdom/jsdom/issues/2580
2277
+ if (
2278
+ (elem.src || (elem.srcset && elem.srcset != "null")) &&
2279
+ elem.className.toLowerCase().indexOf("lazy") === -1
2280
+ ) {
2281
+ return;
2282
+ }
2283
+
2284
+ for (var j = 0; j < elem.attributes.length; j++) {
2285
+ attr = elem.attributes[j];
2286
+ if (
2287
+ attr.name === "src" ||
2288
+ attr.name === "srcset" ||
2289
+ attr.name === "alt"
2290
+ ) {
2291
+ continue;
2292
+ }
2293
+ var copyTo = null;
2294
+ if (/\.(jpg|jpeg|png|webp)\s+\d/.test(attr.value)) {
2295
+ copyTo = "srcset";
2296
+ } else if (/^\s*\S+\.(jpg|jpeg|png|webp)\S*\s*$/.test(attr.value)) {
2297
+ copyTo = "src";
2298
+ }
2299
+ if (copyTo) {
2300
+ //if this is an img or picture, set the attribute directly
2301
+ if (elem.tagName === "IMG" || elem.tagName === "PICTURE") {
2302
+ elem.setAttribute(copyTo, attr.value);
2303
+ } else if (
2304
+ elem.tagName === "FIGURE" &&
2305
+ !this._getAllNodesWithTag(elem, ["img", "picture"]).length
2306
+ ) {
2307
+ //if the item is a <figure> that does not contain an image or picture, create one and place it inside the figure
2308
+ //see the nytimes-3 testcase for an example
2309
+ var img = this._doc.createElement("img");
2310
+ img.setAttribute(copyTo, attr.value);
2311
+ elem.appendChild(img);
2312
+ }
2313
+ }
2314
+ }
2315
+ },
2316
+ );
2317
+ },
2318
+
2319
+ _getTextDensity: function (e, tags) {
2320
+ var textLength = this._getInnerText(e, true).length;
2321
+ if (textLength === 0) {
2322
+ return 0;
2323
+ }
2324
+ var childrenLength = 0;
2325
+ var children = this._getAllNodesWithTag(e, tags);
2326
+ this._forEachNode(
2327
+ children,
2328
+ (child) => (childrenLength += this._getInnerText(child, true).length),
2329
+ );
2330
+ return childrenLength / textLength;
2331
+ },
2332
+
2333
+ /**
2334
+ * Clean an element of all tags of type "tag" if they look fishy.
2335
+ * "Fishy" is an algorithm based on content length, classnames, link density, number of images & embeds, etc.
2336
+ *
2337
+ * @return void
2338
+ **/
2339
+ _cleanConditionally: function (e, tag) {
2340
+ if (!this._flagIsActive(this.FLAG_CLEAN_CONDITIONALLY)) return;
2341
+
2342
+ // Gather counts for other typical elements embedded within.
2343
+ // Traverse backwards so we can remove nodes at the same time
2344
+ // without affecting the traversal.
2345
+ //
2346
+ // TODO: Consider taking into account original contentScore here.
2347
+ this._removeNodes(this._getAllNodesWithTag(e, [tag]), function (node) {
2348
+ // First check if this node IS a data table, in which case don't remove it.
2349
+ var isDataTable = function (t) {
2350
+ return t._readabilityDataTable;
2351
+ };
2352
+
2353
+ var isList = tag === "ul" || tag === "ol";
2354
+ if (!isList) {
2355
+ var listLength = 0;
2356
+ var listNodes = this._getAllNodesWithTag(node, ["ul", "ol"]);
2357
+ this._forEachNode(
2358
+ listNodes,
2359
+ (list) => (listLength += this._getInnerText(list).length),
2360
+ );
2361
+ isList = listLength / this._getInnerText(node).length > 0.9;
2362
+ }
2363
+
2364
+ if (tag === "table" && isDataTable(node)) {
2365
+ return false;
2366
+ }
2367
+
2368
+ // Next check if we're inside a data table, in which case we don't remove it either.
2369
+ if (this._hasAncestorTag(node, "table", -1, isDataTable)) {
2370
+ return false;
2371
+ }
2372
+
2373
+ if (this._hasAncestorTag(node, "code")) {
2374
+ return false;
2375
+ }
2376
+
2377
+ var weight = this._getClassWeight(node);
2378
+
2379
+ this.log("Cleaning Conditionally", node);
2380
+
2381
+ var contentScore = 0;
2382
+
2383
+ if (weight + contentScore < 0) {
2384
+ return true;
2385
+ }
2386
+
2387
+ if (this._getCharCount(node, ",") < 10) {
2388
+ // If there are not very many commas, and the number of
2389
+ // non-paragraph elements is more than paragraphs or other
2390
+ // ominous signs, remove the element.
2391
+ var p = node.getElementsByTagName("p").length;
2392
+ var img = node.getElementsByTagName("img").length;
2393
+ var li = node.getElementsByTagName("li").length - 100;
2394
+ var input = node.getElementsByTagName("input").length;
2395
+ var headingDensity = this._getTextDensity(node, [
2396
+ "h1",
2397
+ "h2",
2398
+ "h3",
2399
+ "h4",
2400
+ "h5",
2401
+ "h6",
2402
+ ]);
2403
+
2404
+ var embedCount = 0;
2405
+ var embeds = this._getAllNodesWithTag(node, [
2406
+ "object",
2407
+ "embed",
2408
+ "iframe",
2409
+ ]);
2410
+
2411
+ for (var i = 0; i < embeds.length; i++) {
2412
+ // If this embed has attribute that matches video regex, don't delete it.
2413
+ for (var j = 0; j < embeds[i].attributes.length; j++) {
2414
+ if (this._allowedVideoRegex.test(embeds[i].attributes[j].value)) {
2415
+ return false;
2416
+ }
2417
+ }
2418
+
2419
+ // For embed with <object> tag, check inner HTML as well.
2420
+ if (
2421
+ embeds[i].tagName === "object" &&
2422
+ this._allowedVideoRegex.test(embeds[i].innerHTML)
2423
+ ) {
2424
+ return false;
2425
+ }
2426
+
2427
+ embedCount++;
2428
+ }
2429
+
2430
+ var linkDensity = this._getLinkDensity(node);
2431
+ var contentLength = this._getInnerText(node).length;
2432
+
2433
+ var haveToRemove =
2434
+ (img > 1 && p / img < 0.5 && !this._hasAncestorTag(node, "figure")) ||
2435
+ (!isList && li > p) ||
2436
+ input > Math.floor(p / 3) ||
2437
+ (!isList &&
2438
+ headingDensity < 0.9 &&
2439
+ contentLength < 25 &&
2440
+ (img === 0 || img > 2) &&
2441
+ !this._hasAncestorTag(node, "figure")) ||
2442
+ (!isList && weight < 25 && linkDensity > 0.2) ||
2443
+ (weight >= 25 && linkDensity > 0.5) ||
2444
+ (embedCount === 1 && contentLength < 75) ||
2445
+ embedCount > 1;
2446
+ // Allow simple lists of images to remain in pages
2447
+ if (isList && haveToRemove) {
2448
+ for (var x = 0; x < node.children.length; x++) {
2449
+ let child = node.children[x];
2450
+ // Don't filter in lists with li's that contain more than one child
2451
+ if (child.children.length > 1) {
2452
+ return haveToRemove;
2453
+ }
2454
+ }
2455
+ let li_count = node.getElementsByTagName("li").length;
2456
+ // Only allow the list to remain if every li contains an image
2457
+ if (img == li_count) {
2458
+ return false;
2459
+ }
2460
+ }
2461
+ return haveToRemove;
2462
+ }
2463
+ return false;
2464
+ });
2465
+ },
2466
+
2467
+ /**
2468
+ * Clean out elements that match the specified conditions
2469
+ *
2470
+ * @param Element
2471
+ * @param Function determines whether a node should be removed
2472
+ * @return void
2473
+ **/
2474
+ _cleanMatchedNodes: function (e, filter) {
2475
+ var endOfSearchMarkerNode = this._getNextNode(e, true);
2476
+ var next = this._getNextNode(e);
2477
+ while (next && next != endOfSearchMarkerNode) {
2478
+ if (filter.call(this, next, next.className + " " + next.id)) {
2479
+ next = this._removeAndGetNext(next);
2480
+ } else {
2481
+ next = this._getNextNode(next);
2482
+ }
2483
+ }
2484
+ },
2485
+
2486
+ /**
2487
+ * Clean out spurious headers from an Element.
2488
+ *
2489
+ * @param Element
2490
+ * @return void
2491
+ **/
2492
+ _cleanHeaders: function (e) {
2493
+ let headingNodes = this._getAllNodesWithTag(e, ["h1", "h2"]);
2494
+ this._removeNodes(headingNodes, function (node) {
2495
+ let shouldRemove = this._getClassWeight(node) < 0;
2496
+ if (shouldRemove) {
2497
+ this.log("Removing header with low class weight:", node);
2498
+ }
2499
+ return shouldRemove;
2500
+ });
2501
+ },
2502
+
2503
+ /**
2504
+ * Check if this node is an H1 or H2 element whose content is mostly
2505
+ * the same as the article title.
2506
+ *
2507
+ * @param Element the node to check.
2508
+ * @return boolean indicating whether this is a title-like header.
2509
+ */
2510
+ _headerDuplicatesTitle: function (node) {
2511
+ if (node.tagName != "H1" && node.tagName != "H2") {
2512
+ return false;
2513
+ }
2514
+ var heading = this._getInnerText(node, false);
2515
+ this.log("Evaluating similarity of header:", heading, this._articleTitle);
2516
+ return this._textSimilarity(this._articleTitle, heading) > 0.75;
2517
+ },
2518
+
2519
+ _flagIsActive: function (flag) {
2520
+ return (this._flags & flag) > 0;
2521
+ },
2522
+
2523
+ _removeFlag: function (flag) {
2524
+ this._flags = this._flags & ~flag;
2525
+ },
2526
+
2527
+ _isProbablyVisible: function (node) {
2528
+ // Have to null-check node.style and node.className.indexOf to deal with SVG and MathML nodes.
2529
+ return (
2530
+ (!node.style || node.style.display != "none") &&
2531
+ !node.hasAttribute("hidden") &&
2532
+ //check for "fallback-image" so that wikimedia math images are displayed
2533
+ (!node.hasAttribute("aria-hidden") ||
2534
+ node.getAttribute("aria-hidden") != "true" ||
2535
+ (node.className &&
2536
+ node.className.indexOf &&
2537
+ node.className.indexOf("fallback-image") !== -1))
2538
+ );
2539
+ },
2540
+
2541
+ /**
2542
+ * Runs readability.
2543
+ *
2544
+ * Workflow:
2545
+ * 1. Prep the document by removing script tags, css, etc.
2546
+ * 2. Build readability's DOM tree.
2547
+ * 3. Grab the article content from the current dom tree.
2548
+ * 4. Replace the current DOM tree with the new one.
2549
+ * 5. Read peacefully.
2550
+ *
2551
+ * @return void
2552
+ **/
2553
+ parse: function () {
2554
+ // Avoid parsing too large documents, as per configuration option
2555
+ if (this._maxElemsToParse > 0) {
2556
+ var numTags = this._doc.getElementsByTagName("*").length;
2557
+ if (numTags > this._maxElemsToParse) {
2558
+ throw new Error(
2559
+ "Aborting parsing document; " + numTags + " elements found",
2560
+ );
2561
+ }
2562
+ }
2563
+
2564
+ // Unwrap image from noscript
2565
+ this._unwrapNoscriptImages(this._doc);
2566
+
2567
+ // Extract JSON-LD metadata before removing scripts
2568
+ var jsonLd = this._disableJSONLD ? {} : this._getJSONLD(this._doc);
2569
+
2570
+ // Remove script tags from the document.
2571
+ this._removeScripts(this._doc);
2572
+
2573
+ this._prepDocument();
2574
+
2575
+ var metadata = this._getArticleMetadata(jsonLd);
2576
+ this._articleTitle = metadata.title;
2577
+
2578
+ var articleContent = this._grabArticle();
2579
+ if (!articleContent) return null;
2580
+
2581
+ this.log("Grabbed: " + articleContent.innerHTML);
2582
+
2583
+ this._postProcessContent(articleContent);
2584
+
2585
+ // If we haven't found an excerpt in the article's metadata, use the article's
2586
+ // first paragraph as the excerpt. This is used for displaying a preview of
2587
+ // the article's content.
2588
+ if (!metadata.excerpt) {
2589
+ var paragraphs = articleContent.getElementsByTagName("p");
2590
+ if (paragraphs.length > 0) {
2591
+ metadata.excerpt = paragraphs[0].textContent.trim();
2592
+ }
2593
+ }
2594
+
2595
+ var textContent = articleContent.textContent;
2596
+ return {
2597
+ title: this._articleTitle,
2598
+ byline: metadata.byline || this._articleByline,
2599
+ dir: this._articleDir,
2600
+ lang: this._articleLang,
2601
+ content: this._serializer(articleContent),
2602
+ textContent: textContent,
2603
+ length: textContent.length,
2604
+ excerpt: metadata.excerpt,
2605
+ siteName: metadata.siteName || this._articleSiteName,
2606
+ };
2607
+ },
2608
+ };
2609
+
2610
+ if (typeof module === "object") {
2611
+ /* global module */
2612
+ module.exports = Readability;
2613
+ }
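Note that the vendored parser above is not run directly by Python. As the `readability_web` reader added below shows, the whole script is injected into the live page and evaluated as `new Readability({}, document).parse()`, and the resulting object (`title`, `content`, `textContent`, `length`, `excerpt`, `byline`, `dir`, `siteName`, `lang`) is what the loader turns into `Document` objects.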
llama_index/readers/web/readability_web/__init__.py ADDED
File without changes
llama_index/readers/web/readability_web/base.py ADDED
@@ -0,0 +1,147 @@
1
+ import unicodedata
2
+ from pathlib import Path
3
+ from typing import Any, Callable, Dict, List, Literal, Optional, cast
4
+
5
+ from llama_index.core.node_parser.interface import TextSplitter
6
+ from llama_index.core.readers.base import BaseReader
7
+ from llama_index.core.schema import Document
8
+
9
+ path = Path(__file__).parent / "Readability.js"
10
+
11
+
12
+ def nfkc_normalize(text: str) -> str:
13
+ return unicodedata.normalize("NFKC", text)
14
+
15
+
16
+ class ReadabilityWebPageReader(BaseReader):
17
+ """Readability Webpage Loader.
18
+
19
+ Extracts relevant information from a fully rendered web page.
20
+ During processing, web pages used as data sources are assumed to contain textual content.
21
+
22
+ 1. Load the page and wait for it to render (Playwright).
23
+ 2. Inject Readability.js to extract the main content.
24
+
25
+ Args:
26
+ proxy (Optional[str], optional): Proxy server. Defaults to None.
27
+ wait_until (Optional[Literal["commit", "domcontentloaded", "load", "networkidle"]], optional): Wait until the page is loaded. Defaults to "domcontentloaded".
28
+ text_splitter (TextSplitter, optional): Text splitter. Defaults to None.
29
+ normalize (Optional[Callable[[str], str]], optional): Text normalizer. Defaults to nfkc_normalize.
30
+ """
31
+
32
+ def __init__(
33
+ self,
34
+ proxy: Optional[str] = None,
35
+ wait_until: Optional[
36
+ Literal["commit", "domcontentloaded", "load", "networkidle"]
37
+ ] = "domcontentloaded",
38
+ text_splitter: Optional[TextSplitter] = None,
39
+ normalize: Optional[Callable[[str], str]] = nfkc_normalize,
40
+ ) -> None:
41
+ self._launch_options = {
42
+ "headless": True,
43
+ }
44
+ self._wait_until = wait_until
45
+ if proxy:
46
+ self._launch_options["proxy"] = {
47
+ "server": proxy,
48
+ }
49
+ self._text_splitter = text_splitter
50
+ self._normalize = normalize
51
+ self._readability_js = None
52
+
53
+ def load_data(self, url: str) -> List[Document]:
54
+ """Render and load data content from url.
55
+
56
+ Args:
57
+ url (str): URL to scrape.
58
+
59
+ Returns:
60
+ List[Document]: List of documents.
61
+
62
+ """
63
+ from playwright.sync_api import sync_playwright
64
+
65
+ with sync_playwright() as p:
66
+ browser = p.chromium.launch(**self._launch_options)
67
+
68
+ article = self.scrape_page(
69
+ browser,
70
+ url,
71
+ )
72
+ extra_info = {
73
+ key: article[key]
74
+ for key in [
75
+ "title",
76
+ "length",
77
+ "excerpt",
78
+ "byline",
79
+ "dir",
80
+ "lang",
81
+ "siteName",
82
+ ]
83
+ }
84
+
85
+ if self._normalize is not None:
86
+ article["textContent"] = self._normalize(article["textContent"])
87
+ texts = []
88
+ if self._text_splitter is not None:
89
+ texts = self._text_splitter.split_text(article["textContent"])
90
+ else:
91
+ texts = [article["textContent"]]
92
+
93
+ browser.close()
94
+
95
+ return [Document(text=x, extra_info=extra_info) for x in texts]
96
+
97
+ def scrape_page(
98
+ self,
99
+ browser: Any,
100
+ url: str,
101
+ ) -> Dict[str, str]:
102
+ """Scrape a single article url.
103
+
104
+ Args:
105
+ browser (Any): a Playwright Chromium browser.
106
+ url (str): URL of the article to scrape.
107
+
108
+ Returns:
109
+ Ref: https://github.com/mozilla/readability
110
+ title: article title;
111
+ content: HTML string of processed article content;
112
+ textContent: text content of the article, with all the HTML tags removed;
113
+ length: length of an article, in characters;
114
+ excerpt: article description, or short excerpt from the content;
115
+ byline: author metadata;
116
+ dir: content direction;
117
+ siteName: name of the site.
118
+ lang: content language
119
+
120
+ """
121
+ from playwright.sync_api._generated import Browser
122
+
123
+ if self._readability_js is None:
124
+ with open(path) as f:
125
+ self._readability_js = f.read()
126
+
127
+ inject_readability = f"""
128
+ (function(){{
129
+ {self._readability_js}
130
+ function executor() {{
131
+ return new Readability({{}}, document).parse();
132
+ }}
133
+ return executor();
134
+ }}())
135
+ """
136
+
137
+ browser = cast(Browser, browser)
138
+ page = browser.new_page(ignore_https_errors=True)
139
+ page.set_default_timeout(60000)
140
+ page.goto(url, wait_until=self._wait_until)
141
+
142
+ r = page.evaluate(inject_readability)
143
+
144
+ page.close()
145
+ print("scraped:", url)
146
+
147
+ return r
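A minimal usage sketch for this reader (a non-authoritative example: it assumes Playwright and its Chromium browser are installed, and uses the module path from this commit):

```python
from llama_index.readers.web.readability_web.base import ReadabilityWebPageReader

# Assumes: pip install playwright && playwright install chromium
loader = ReadabilityWebPageReader()
documents = loader.load_data(url="https://en.wikipedia.org/wiki/Readability")

# Each document carries the Readability metadata extracted above.
print(documents[0].metadata["title"], len(documents[0].text))
```

Passing a `text_splitter` to the constructor instead yields one `Document` per chunk, each carrying the same article metadata (`title`, `excerpt`, `byline`, ...).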
llama_index/readers/web/readability_web/requirements.txt ADDED
@@ -0,0 +1 @@
1
+ playwright==1.30.0
llama_index/readers/web/rss/README.md ADDED
@@ -0,0 +1,22 @@
1
+ # RSS Loader
2
+
3
+ This loader allows fetching text from an RSS feed. It uses the `feedparser` module
4
+ to fetch the feed and optionally the `html2text` module to sanitize it.
5
+
6
+ ## Usage
7
+
8
+ To use this loader, pass in an array of URLs.
9
+
10
+ ```python
11
+ from llama_index import download_loader
12
+
13
+ RssReader = download_loader("RssReader")
14
+
15
+ reader = RssReader()
16
+ documents = reader.load_data(
17
+ [
18
+ "https://rss.nytimes.com/services/xml/rss/nyt/HomePage.xml",
19
+ "https://roelofjanelsinga.com/atom.xml",
20
+ ]
21
+ )
22
+ ```
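The `RssReader` added in `base.py` below also exposes an `html_to_text` flag; when `html2text` is installed, entry content is converted to plain text. A small sketch (same setup as above, flag name taken from the reader's fields):

```python
# Convert HTML feed entries to plain text via html2text.
reader = RssReader(html_to_text=True)
documents = reader.load_data(
    ["https://rss.nytimes.com/services/xml/rss/nyt/HomePage.xml"]
)
```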
llama_index/readers/web/rss/__init__.py ADDED
File without changes
llama_index/readers/web/rss/base.py ADDED
@@ -0,0 +1,57 @@
1
+ """Rss reader."""
2
+
3
+ from typing import List
4
+
5
+ from llama_index.core.readers.base import BasePydanticReader
6
+ from llama_index.core.schema import Document
7
+
8
+
9
+ class RssReader(BasePydanticReader):
10
+ """RSS reader.
11
+
12
+ Reads content from an RSS feed.
13
+
14
+ """
15
+
16
+ is_remote: bool = True
17
+ html_to_text: bool = False
18
+
19
+ @classmethod
20
+ def class_name(cls) -> str:
21
+ return "RssReader"
22
+
23
+ def load_data(self, urls: List[str]) -> List[Document]:
24
+ """Load data from RSS feeds.
25
+
26
+ Args:
27
+ urls (List[str]): List of RSS URLs to load.
28
+
29
+ Returns:
30
+ List[Document]: List of documents.
31
+
32
+ """
33
+ import feedparser
34
+
35
+ if not isinstance(urls, list):
36
+ raise ValueError("urls must be a list of strings.")
37
+
38
+ documents = []
39
+
40
+ for url in urls:
41
+ parsed = feedparser.parse(url)
42
+ for entry in parsed.entries:
43
+ doc_id = entry.id or entry.link
44
+ if "content" in entry:
45
+ data = entry.content[0].value
46
+ else:
47
+ data = entry.description or entry.summary
48
+
49
+ if self.html_to_text:
50
+ import html2text
51
+
52
+ data = html2text.html2text(data)
53
+
54
+ extra_info = {"title": entry.title, "link": entry.link}
55
+ documents.append(Document(text=data, id_=doc_id, extra_info=extra_info))
56
+
57
+ return documents
llama_index/readers/web/rss_news/README.md ADDED
@@ -0,0 +1,36 @@
1
+ # RSS News Loader
2
+
3
+ This loader allows fetching text from an RSS feed. It uses the `feedparser` module
4
+ to fetch the feed and the `NewsArticleReader` to load each article.
5
+
6
+ ## Usage
7
+
8
+ To use this loader, pass in an array of URLs of RSS feeds. It will download the pages referenced in each feed and
9
+ combine them:
10
+
11
+ ```python
12
+ from llama_index.readers.web.rss_news.base import RssNewsReader
13
+
14
+ urls = [
15
+ "https://rss.nytimes.com/services/xml/rss/nyt/HomePage.xml",
16
+ "https://roelofjanelsinga.com/atom.xml",
17
+ ]
18
+
19
+ reader = RssNewsReader()
21
+
22
+ documents = reader.load_data(urls=urls)
23
+ ```
24
+
25
+ Or OPML content:
26
+
27
+ ```python
28
+ with open("./sample_rss_feeds.opml", "r") as f:
29
+ documents = reader.load_data(opml=f.read())
30
+ ```
31
+
32
+ We can also pass in args for the `NewsArticleReader`, which parses each article:
33
+
34
+ ```python
35
+ reader = RssNewsReader(use_nlp=True)
+ documents = reader.load_data(urls=urls)
36
+ ```
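Note that these keyword arguments are not consumed by `load_data` itself: as the reader implementation below shows, anything passed to the `RssNewsReader` constructor is stored and forwarded to a fresh `NewsArticleReader` for every article found in the feeds.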
llama_index/readers/web/rss_news/__init__.py ADDED
File without changes
llama_index/readers/web/rss_news/base.py ADDED
@@ -0,0 +1,106 @@
1
+ """RSS feed reader for news - processes each article with NewsArticleReader."""
2
+ import logging
3
+ from typing import Any, List
4
+
5
+ from llama_index.core.readers.base import BaseReader
6
+ from llama_index.core.schema import Document
7
+ from llama_index.readers.web.news.base import NewsArticleReader
8
+
9
+ logger = logging.getLogger(__name__)
10
+
11
+
12
+ class RssNewsReader(BaseReader):
13
+ """RSS news reader.
14
+
15
+ Reads news content from RSS feeds and parses with NewsArticleReader.
16
+
17
+ """
18
+
19
+ def __init__(self, **reader_kwargs: Any) -> None:
20
+ """Initialize with parameters.
21
+
22
+ Args:
23
+ html_to_text (bool): Whether to convert HTML to text.
24
+ Requires `html2text` package.
25
+
26
+ """
27
+ try:
28
+ import feedparser # noqa: F401
29
+ except ImportError:
30
+ raise ImportError(
31
+ "`feedparser` package not found, please run `pip install feedparser`"
32
+ )
33
+
34
+ try:
35
+ import listparser # noqa: F401
36
+ except ImportError:
37
+ raise ImportError(
38
+ "`listparser` package not found, please run `pip install listparser`"
39
+ )
40
+
41
+ self.reader_kwargs = reader_kwargs
42
+
43
+ def load_data(self, urls: List[str] = None, opml: str = None) -> List[Document]:
44
+ """Load data from either RSS feeds or OPML.
45
+
46
+ Args:
47
+ urls (List[str]): List of RSS URLs to load.
48
+ opml (str): URL to OPML file or string or byte OPML content.
49
+
50
+ Returns:
51
+ List[Document]: List of documents.
52
+
53
+ """
54
+ if (urls is None) == (
55
+ opml is None
56
+ ): # This is True if both are None or neither is None
57
+ raise ValueError(
58
+ "Provide either the urls or the opml argument, but not both."
59
+ )
60
+
61
+ import feedparser
62
+
63
+ if urls and not isinstance(urls, list):
64
+ raise ValueError("urls must be a list of strings.")
65
+
66
+ documents = []
67
+
68
+ if not urls and opml:
69
+ try:
70
+ import listparser
71
+ except ImportError as e:
72
+ raise ImportError(
73
+ "Package listparser must be installed if the opml arg is used. "
74
+ "Please install with 'pip install listparser' or use the "
75
+ "urls arg instead."
76
+ ) from e
77
+ rss = listparser.parse(opml)
78
+ urls = [feed.url for feed in rss.feeds]
79
+
80
+ for url in urls:
81
+ try:
82
+ feed = feedparser.parse(url)
83
+ for i, entry in enumerate(feed.entries):
84
+ article = NewsArticleReader(**self.reader_kwargs).load_data(
85
+ urls=[entry.link],
86
+ )[0]
87
+ article.metadata["feed"] = url
88
+
89
+ documents.append(
90
+ Document(text=article.text, metadata=article.metadata)
91
+ )
92
+
93
+ except Exception as e:
94
+ logger.error(f"Error fetching or processing {url}, exception: {e}")
95
+ continue
96
+
97
+ return documents
98
+
99
+
100
+ if __name__ == "__main__":
101
+ reader = RssNewsReader()
102
+ logger.info(reader.load_data(urls=["https://www.engadget.com/rss.xml"]))
103
+
104
+ # Generate keywords and summary for each article
105
+ reader = RssNewsReader(use_nlp=True)
106
+ logger.info(reader.load_data(urls=["https://www.engadget.com/rss.xml"]))
llama_index/readers/web/rss_news/sample_rss_feeds.opml ADDED
@@ -0,0 +1,13 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+
+ <opml version="1.0">
+   <head>
+     <title>Sample RSS feed subscriptions</title>
+   </head>
+   <body>
+     <outline text="Tech" title="Tech">
+       <outline type="rss" text="Engadget" title="Engadget" xmlUrl="http://www.engadget.com/rss-full.xml" htmlUrl="http://www.engadget.com"/>
+       <outline type="rss" text="Ars Technica - All content" title="Ars Technica - All content" xmlUrl="http://feeds.arstechnica.com/arstechnica/index/" htmlUrl="https://arstechnica.com"/>
+     </outline>
+   </body>
+ </opml>
llama_index/readers/web/simple_web/README.md ADDED
@@ -0,0 +1,65 @@
+ # Simple Website Loader
+
+ This loader is a simple web scraper that fetches the text from static websites by converting the HTML to text.
+
+ ## Usage
+
+ To use this loader, you need to pass in an array of URLs.
+
+ ```python
+ from llama_index import download_loader
+
+ SimpleWebPageReader = download_loader("SimpleWebPageReader")
+
+ loader = SimpleWebPageReader()
+ documents = loader.load_data(urls=["https://google.com"])
+ ```
+
+ ## Examples
+
+ This loader is designed to be used as a way to load data into [LlamaIndex](https://github.com/run-llama/llama_index/tree/main/llama_index) and/or subsequently used as a Tool in a [LangChain](https://github.com/hwchase17/langchain) Agent.
+
+ ### LlamaIndex
+
+ ```python
+ from llama_index import VectorStoreIndex, download_loader
+
+ SimpleWebPageReader = download_loader("SimpleWebPageReader")
+
+ loader = SimpleWebPageReader()
+ documents = loader.load_data(urls=["https://google.com"])
+ index = VectorStoreIndex.from_documents(documents)
+ index.query("What language is on this website?")
+ ```
+
+ ### LangChain
+
+ Note: Make sure you change the description of the `Tool` to match your use-case.
+
+ ```python
+ from llama_index import VectorStoreIndex, download_loader
+ from langchain.agents import initialize_agent, Tool
+ from langchain.llms import OpenAI
+ from langchain.chains.conversation.memory import ConversationBufferMemory
+
+ SimpleWebPageReader = download_loader("SimpleWebPageReader")
+
+ loader = SimpleWebPageReader()
+ documents = loader.load_data(urls=["https://google.com"])
+ index = VectorStoreIndex.from_documents(documents)
+
+ tools = [
+     Tool(
+         name="Website Index",
+         func=lambda q: index.query(q),
+         description="Useful for answering questions about the text on websites.",
+     ),
+ ]
+ llm = OpenAI(temperature=0)
+ memory = ConversationBufferMemory(memory_key="chat_history")
+ agent_chain = initialize_agent(
+     tools, llm, agent="zero-shot-react-description", memory=memory
+ )
+
+ output = agent_chain.run(input="What language is on this website?")
+ ```
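+
+ The reader itself (see `base.py` in this directory) also accepts two constructor options: `html_to_text`, which converts the fetched HTML to plain text via `html2text`, and `metadata_fn`, a callable that maps each URL to a metadata dict attached to the resulting document. A small sketch using both:
+
+ ```python
+ loader = SimpleWebPageReader(
+     html_to_text=True,
+     metadata_fn=lambda url: {"source": url},  # merged into each Document's metadata
+ )
+ documents = loader.load_data(urls=["https://google.com"])
+ ```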
llama_index/readers/web/simple_web/__init__.py ADDED
File without changes
llama_index/readers/web/simple_web/base.py ADDED
@@ -0,0 +1,74 @@
+ """Simple Web scraper."""
+ from typing import List, Optional, Dict, Callable
+
+ import requests
+
+ from llama_index.core.bridge.pydantic import PrivateAttr
+ from llama_index.core.readers.base import BasePydanticReader
+ from llama_index.core.schema import Document
+
+
+ class SimpleWebPageReader(BasePydanticReader):
+     """Simple web page reader.
+
+     Reads pages from the web.
+
+     Args:
+         html_to_text (bool): Whether to convert HTML to text.
+             Requires `html2text` package.
+         metadata_fn (Optional[Callable[[str], Dict]]): A function that takes in
+             a URL and returns a dictionary of metadata.
+             Default is None.
+     """
+
+     is_remote: bool = True
+     html_to_text: bool
+
+     _metadata_fn: Optional[Callable[[str], Dict]] = PrivateAttr()
+
+     def __init__(
+         self,
+         html_to_text: bool = False,
+         metadata_fn: Optional[Callable[[str], Dict]] = None,
+     ) -> None:
+         """Initialize with parameters."""
+         try:
+             import html2text  # noqa
+         except ImportError:
+             raise ImportError(
+                 "`html2text` package not found, please run `pip install html2text`"
+             )
+         self._metadata_fn = metadata_fn
+         super().__init__(html_to_text=html_to_text)
+
+     @classmethod
+     def class_name(cls) -> str:
+         return "SimpleWebPageReader"
+
+     def load_data(self, urls: List[str]) -> List[Document]:
+         """Load data from the given URLs.
+
+         Args:
+             urls (List[str]): List of URLs to scrape.
+
+         Returns:
+             List[Document]: List of documents.
+
+         """
+         if not isinstance(urls, list):
+             raise ValueError("urls must be a list of strings.")
+         documents = []
+         for url in urls:
+             response = requests.get(url, headers=None).text
+             if self.html_to_text:
+                 import html2text
+
+                 response = html2text.html2text(response)
+
+             metadata: Optional[Dict] = None
+             if self._metadata_fn is not None:
+                 metadata = self._metadata_fn(url)
+
+             documents.append(Document(text=response, id_=url, metadata=metadata or {}))
+
+         return documents
llama_index/readers/web/simple_web/requirements.txt ADDED
@@ -0,0 +1 @@
+ html2text
llama_index/readers/web/sitemap/README.md ADDED
@@ -0,0 +1,54 @@
+ # Sitemap Loader
+
+ This loader is an asynchronous web scraper that fetches the text from static websites by using its sitemap and optionally converting the HTML to text.
+
+ It is based on the [Async Website Loader](https://llama-hub-ui.vercel.app/l/web-async_web).
+
+ ## Usage
+
+ To use this loader, you just declare the sitemap.xml URL like this:
+
+ ```python
+ from llama_index.readers.web.sitemap.base import SitemapReader
+
+ # for jupyter notebooks uncomment the following two lines of code:
+ # import nest_asyncio
+ # nest_asyncio.apply()
+
+ loader = SitemapReader()
+ documents = loader.load_data(
+     sitemap_url="https://gpt-index.readthedocs.io/sitemap.xml"
+ )
+ ```
+
+ Be sure that the sitemap_url points to a proper [Sitemap](https://www.sitemaps.org/protocol.html).
+
+ ## Filter option
+
+ You can filter which locations from the sitemap are actually crawled by adding the _filter_ argument to the load_data method:
+
+ ```python
+ documents = loader.load_data(
+     sitemap_url="https://gpt-index.readthedocs.io/sitemap.xml",
+     filter="https://gpt-index.readthedocs.io/en/latest/",
+ )
+ ```
+
+ ## Issues with asyncio in Jupyter Notebooks
+
+ If you get a `RuntimeError: asyncio.run() cannot be called from a running event loop`, you might be interested in [this solution](https://saturncloud.io/blog/asynciorun-cannot-be-called-from-a-running-event-loop-a-guide-for-data-scientists-using-jupyter-notebook/#option-3-use-nest_asyncio).
+
+ ### Old Usage
+
+ Use this syntax for earlier versions of llama_index, where llama_hub loaders were loaded via a separate download process:
+
+ ```python
+ from llama_index import download_loader
+
+ SitemapReader = download_loader("SitemapReader")
+
+ loader = SitemapReader()
+ documents = loader.load_data(
+     sitemap_url="https://gpt-index.readthedocs.io/sitemap.xml"
+ )
+ ```
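+
+ The constructor (see `base.py` in this directory) exposes two options that are forwarded to the underlying `AsyncWebPageReader`: `html_to_text` to convert pages to plain text and `limit` to cap the number of concurrent requests. A short sketch:
+
+ ```python
+ # convert pages to text and fetch at most 5 pages concurrently
+ loader = SitemapReader(html_to_text=True, limit=5)
+ documents = loader.load_data(
+     sitemap_url="https://gpt-index.readthedocs.io/sitemap.xml"
+ )
+ ```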
llama_index/readers/web/sitemap/__init__.py ADDED
File without changes
llama_index/readers/web/sitemap/base.py ADDED
@@ -0,0 +1,52 @@
+ import urllib.request
+ import xml.etree.ElementTree as ET
+ from typing import List
+
+ from llama_index.core.readers.base import BaseReader
+ from llama_index.core.schema import Document
+ from llama_index.readers.web.async_web.base import AsyncWebPageReader
+
+
+ class SitemapReader(BaseReader):
+     """Asynchronous sitemap reader for web.
+
+     Reads pages from the web based on their sitemap.xml.
+
+     Args:
+         sitemap_url (string): Path to the sitemap.xml. e.g. https://gpt-index.readthedocs.io/sitemap.xml
+         html_to_text (bool): Whether to convert HTML to text.
+             Requires `html2text` package.
+         limit (int): Maximum number of concurrent requests.
+
+     """
+
+     xml_schema_sitemap = "http://www.sitemaps.org/schemas/sitemap/0.9"
+
+     def __init__(self, html_to_text: bool = False, limit: int = 10) -> None:
+         """Initialize with parameters."""
+         self._async_loader = AsyncWebPageReader(html_to_text=html_to_text, limit=limit)
+         self._html_to_text = html_to_text
+         self._limit = limit
+
+     def _load_sitemap(self, sitemap_url: str) -> str:
+         sitemap_url_request = urllib.request.urlopen(sitemap_url)
+
+         return sitemap_url_request.read()
+
+     def _parse_sitemap(self, raw_sitemap: str, filter_locs: str = None) -> list:
+         sitemap = ET.fromstring(raw_sitemap)
+         sitemap_urls = []
+
+         for url in sitemap.findall(f"{{{self.xml_schema_sitemap}}}url"):
+             location = url.find(f"{{{self.xml_schema_sitemap}}}loc").text
+
+             if filter_locs is None or filter_locs in location:
+                 sitemap_urls.append(location)
+
+         return sitemap_urls
+
+     def load_data(self, sitemap_url: str, filter: str = None) -> List[Document]:
+         sitemap = self._load_sitemap(sitemap_url=sitemap_url)
+         sitemap_urls = self._parse_sitemap(sitemap, filter)
+
+         return self._async_loader.load_data(urls=sitemap_urls)
llama_index/readers/web/sitemap/requirements.txt ADDED
File without changes