diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..368c4d1268055150e695d4e4d745a26933971c24 --- /dev/null +++ b/.gitignore @@ -0,0 +1,168 @@ +# Everything in local +local/* +out/* + +!.gitkeep + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. 
+.idea/ + +# VsCode +.vscode/ diff --git a/README.md b/README.md index eb08c10f82772830e8e36296bbde0acc5a08463f..b57423d11ad8e4fb59d0d76a1eb9d22451f9df0e 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,8 @@ --- -title: Hack -emoji: 📈 -colorFrom: green -colorTo: blue +title: Rizoa Auchan Gaia +emoji: 🔥 +colorFrom: yellow +colorTo: gray sdk: gradio sdk_version: 4.19.2 app_file: app.py diff --git a/app.py b/app.py new file mode 100644 index 0000000000000000000000000000000000000000..fdb562b594c334138bc2a1f7c3b0ffaa345181ee --- /dev/null +++ b/app.py @@ -0,0 +1,126 @@ +import os +import json +import gradio as gr +from llama_index.core import ( + VectorStoreIndex, + download_loader, + StorageContext +) +from dotenv import load_dotenv, find_dotenv + +import chromadb + +from llama_index.llms.mistralai import MistralAI +from llama_index.embeddings.mistralai import MistralAIEmbedding +from llama_index.vector_stores.chroma import ChromaVectorStore +from llama_index.core.indices.service_context import ServiceContext + +TITLE = "RIZOA-AUCHAN Chatbot Demo" +DESCRIPTION = "Example of an assistant with Gradio, coupling with function calling and Mistral AI via its API" +PLACEHOLDER = ( + "Vous pouvez me posez une question sur ce contexte, appuyer sur Entrée pour valider" +) +PLACEHOLDER_URL = "Extract text from this url" +llm_model = "mistral-medium" + +load_dotenv() +env_api_key = os.environ.get("MISTRAL_API_KEY") +query_engine = None + +# Define LLMs +llm = MistralAI(api_key=env_api_key, model=llm_model) +embed_model = MistralAIEmbedding(model_name="mistral-embed", api_key=env_api_key) + +# create client and a new collection +db = chromadb.PersistentClient(path="./chroma_db") +chroma_collection = db.get_or_create_collection("quickstart") + +# set up ChromaVectorStore and load in data +vector_store = ChromaVectorStore(chroma_collection=chroma_collection) +storage_context = StorageContext.from_defaults(vector_store=vector_store) +service_context = ServiceContext.from_defaults( + chunk_size=1024, llm=llm, embed_model=embed_model +) + +PDFReader = download_loader("PDFReader") +loader = PDFReader() + +index = VectorStoreIndex( + [], service_context=service_context, storage_context=storage_context +) +query_engine = index.as_query_engine(similarity_top_k=5) + +with gr.Blocks() as demo: + with gr.Row(): + with gr.Column(scale=1): + gr.Image(value=".\img\logo_rizoa_auchan.jpg", + height=250, + width=250, + container=False, + show_download_button=False + ) + with gr.Column(scale=4): + gr.Markdown( + """ + # Bienvenue au Chatbot FAIR-PLAI + + Ce chatbot est un assistant numérique, médiateur des vendeurs-acheteurs + """ + ) + + # gr.Markdown(""" ### 1 / Extract data from PDF """) + + # with gr.Row(): + # with gr.Column(): + # input_file = gr.File( + # label="Load a pdf", + # file_types=[".pdf"], + # file_count="single", + # type="filepath", + # interactive=True, + # ) + # file_msg = gr.Textbox( + # label="Loaded documents:", container=False, visible=False + # ) + + # input_file.upload( + # fn=load_document, + # inputs=[ + # input_file, + # ], + # outputs=[file_msg], + # concurrency_limit=20, + # ) + + # file_btn = gr.Button(value="Encode file ✅", interactive=True) + # btn_msg = gr.Textbox(container=False, visible=False) + + # with gr.Row(): + # db_list = gr.Markdown(value=get_documents_in_db) + # delete_btn = gr.Button(value="Empty db 🗑️", interactive=True, scale=0) + + # file_btn.click( + # load_file, + # inputs=[input_file], + # outputs=[file_msg, btn_msg, db_list], + # show_progress="full", + # ) + # 
delete_btn.click(empty_db, outputs=[db_list], show_progress="minimal") + + gr.Markdown(""" ### Ask a question """) + + chatbot = gr.Chatbot() + msg = gr.Textbox(placeholder=PLACEHOLDER) + clear = gr.ClearButton([msg, chatbot]) + + def respond(message, chat_history): + response = query_engine.query(message) + chat_history.append((message, str(response))) + return chat_history + + msg.submit(respond, [msg, chatbot], [chatbot]) + +demo.title = TITLE + +if __name__ == "__main__": + demo.launch() diff --git a/chroma_db/chroma.sqlite3 b/chroma_db/chroma.sqlite3 new file mode 100644 index 0000000000000000000000000000000000000000..09937d9cc5bd1ff09548730e1dc9d24644d5e859 Binary files /dev/null and b/chroma_db/chroma.sqlite3 differ diff --git a/img/logo_rizoa_auchan.jpg b/img/logo_rizoa_auchan.jpg new file mode 100644 index 0000000000000000000000000000000000000000..31c348eabea8c70125dc5092c2a5f370218cfa88 Binary files /dev/null and b/img/logo_rizoa_auchan.jpg differ diff --git a/llama_index/embeddings/mistralai/__init__.py b/llama_index/embeddings/mistralai/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a91d4b0cae0187123a17360437acdfca09b40f60 --- /dev/null +++ b/llama_index/embeddings/mistralai/__init__.py @@ -0,0 +1,3 @@ +from llama_index.embeddings.mistralai.base import MistralAIEmbedding + +__all__ = ["MistralAIEmbedding"] diff --git a/llama_index/embeddings/mistralai/base.py b/llama_index/embeddings/mistralai/base.py new file mode 100644 index 0000000000000000000000000000000000000000..80c471c7bad816347d9e64ad1e86d1f95cc2384a --- /dev/null +++ b/llama_index/embeddings/mistralai/base.py @@ -0,0 +1,111 @@ +"""MistralAI embeddings file.""" + +from typing import Any, List, Optional + +from llama_index.core.base.embeddings.base import ( + DEFAULT_EMBED_BATCH_SIZE, + BaseEmbedding, +) +from llama_index.core.bridge.pydantic import PrivateAttr +from llama_index.core.callbacks.base import CallbackManager +from llama_index.core.base.llms.generic_utils import get_from_param_or_env + +from mistralai.async_client import MistralAsyncClient +from mistralai.client import MistralClient + + +class MistralAIEmbedding(BaseEmbedding): + """Class for MistralAI embeddings. + + Args: + model_name (str): Model for embedding. + Defaults to "mistral-embed". + + api_key (Optional[str]): API key to access the model. Defaults to None. + """ + + # Instance variables initialized via Pydantic's mechanism + _mistralai_client: Any = PrivateAttr() + _mistralai_async_client: Any = PrivateAttr() + + def __init__( + self, + model_name: str = "mistral-embed", + api_key: Optional[str] = None, + embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE, + callback_manager: Optional[CallbackManager] = None, + **kwargs: Any, + ): + api_key = get_from_param_or_env("api_key", api_key, "MISTRAL_API_KEY", "") + + if not api_key: + raise ValueError( + "You must provide an API key to use mistralai. " + "You can either pass it in as an argument or set it `MISTRAL_API_KEY`." 
+ ) + self._mistralai_client = MistralClient(api_key=api_key) + self._mistralai_async_client = MistralAsyncClient(api_key=api_key) + super().__init__( + model_name=model_name, + embed_batch_size=embed_batch_size, + callback_manager=callback_manager, + **kwargs, + ) + + @classmethod + def class_name(cls) -> str: + return "MistralAIEmbedding" + + def _get_query_embedding(self, query: str) -> List[float]: + """Get query embedding.""" + return ( + self._mistralai_client.embeddings(model=self.model_name, input=[query]) + .data[0] + .embedding + ) + + async def _aget_query_embedding(self, query: str) -> List[float]: + """The asynchronous version of _get_query_embedding.""" + return ( + ( + await self._mistralai_async_client.embeddings( + model=self.model_name, input=[query] + ) + ) + .data[0] + .embedding + ) + + def _get_text_embedding(self, text: str) -> List[float]: + """Get text embedding.""" + return ( + self._mistralai_client.embeddings(model=self.model_name, input=[text]) + .data[0] + .embedding + ) + + async def _aget_text_embedding(self, text: str) -> List[float]: + """Asynchronously get text embedding.""" + return ( + ( + await self._mistralai_async_client.embeddings( + model=self.model_name, input=[text] + ) + ) + .data[0] + .embedding + ) + + def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]: + """Get text embeddings.""" + embedding_response = self._mistralai_client.embeddings( + model=self.model_name, input=texts + ).data + return [embed.embedding for embed in embedding_response] + + async def _aget_text_embeddings(self, texts: List[str]) -> List[List[float]]: + """Asynchronously get text embeddings.""" + embedding_response = await self._mistralai_async_client.embeddings( + model=self.model_name, input=texts + ) + return [embed.embedding for embed in embedding_response.data] diff --git a/llama_index/llms/mistralai/__init__.py b/llama_index/llms/mistralai/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1c39dc0b926008632a5881c2869572a779e914fb --- /dev/null +++ b/llama_index/llms/mistralai/__init__.py @@ -0,0 +1,3 @@ +from llama_index.llms.mistralai.base import MistralAI + +__all__ = ["MistralAI"] diff --git a/llama_index/llms/mistralai/base.py b/llama_index/llms/mistralai/base.py new file mode 100644 index 0000000000000000000000000000000000000000..05977726e8d1af562d72ca177253092626020542 --- /dev/null +++ b/llama_index/llms/mistralai/base.py @@ -0,0 +1,297 @@ +from typing import Any, Callable, Dict, Optional, Sequence + +# from mistralai.models.chat_completion import ChatMessage +from llama_index.core.base.llms.types import ( + ChatMessage, + ChatResponse, + ChatResponseAsyncGen, + ChatResponseGen, + CompletionResponse, + CompletionResponseAsyncGen, + CompletionResponseGen, + LLMMetadata, + MessageRole, +) +from llama_index.core.bridge.pydantic import Field, PrivateAttr +from llama_index.core.callbacks import CallbackManager +from llama_index.core.constants import DEFAULT_TEMPERATURE +from llama_index.core.llms.callbacks import ( + llm_chat_callback, + llm_completion_callback, +) +from llama_index.core.base.llms.generic_utils import ( + achat_to_completion_decorator, + astream_chat_to_completion_decorator, + chat_to_completion_decorator, + get_from_param_or_env, + stream_chat_to_completion_decorator, +) +from llama_index.core.llms.llm import LLM +from llama_index.core.types import BaseOutputParser, PydanticProgramMode +from llama_index.llms.mistralai.utils import ( + mistralai_modelname_to_contextsize, +) + +from 
mistralai.async_client import MistralAsyncClient +from mistralai.client import MistralClient + +DEFAULT_MISTRALAI_MODEL = "mistral-tiny" +DEFAULT_MISTRALAI_ENDPOINT = "https://api.mistral.ai" +DEFAULT_MISTRALAI_MAX_TOKENS = 512 + + +class MistralAI(LLM): + model: str = Field( + default=DEFAULT_MISTRALAI_MODEL, description="The mistralai model to use." + ) + temperature: float = Field( + default=DEFAULT_TEMPERATURE, + description="The temperature to use for sampling.", + gte=0.0, + lte=1.0, + ) + max_tokens: int = Field( + default=DEFAULT_MISTRALAI_MAX_TOKENS, + description="The maximum number of tokens to generate.", + gt=0, + ) + + timeout: float = Field( + default=120, description="The timeout to use in seconds.", gte=0 + ) + max_retries: int = Field( + default=5, description="The maximum number of API retries.", gte=0 + ) + safe_mode: bool = Field( + default=False, + description="The parameter to enforce guardrails in chat generations.", + ) + random_seed: str = Field( + default=None, description="The random seed to use for sampling." + ) + additional_kwargs: Dict[str, Any] = Field( + default_factory=dict, description="Additional kwargs for the MistralAI API." + ) + + _client: Any = PrivateAttr() + _aclient: Any = PrivateAttr() + + def __init__( + self, + model: str = DEFAULT_MISTRALAI_MODEL, + temperature: float = DEFAULT_TEMPERATURE, + max_tokens: int = DEFAULT_MISTRALAI_MAX_TOKENS, + timeout: int = 120, + max_retries: int = 5, + safe_mode: bool = False, + random_seed: Optional[int] = None, + api_key: Optional[str] = None, + additional_kwargs: Optional[Dict[str, Any]] = None, + callback_manager: Optional[CallbackManager] = None, + system_prompt: Optional[str] = None, + messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None, + completion_to_prompt: Optional[Callable[[str], str]] = None, + pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT, + output_parser: Optional[BaseOutputParser] = None, + ) -> None: + additional_kwargs = additional_kwargs or {} + callback_manager = callback_manager or CallbackManager([]) + + api_key = get_from_param_or_env("api_key", api_key, "MISTRAL_API_KEY", "") + + if not api_key: + raise ValueError( + "You must provide an API key to use mistralai. " + "You can either pass it in as an argument or set it `MISTRAL_API_KEY`." 
+ ) + + self._client = MistralClient( + api_key=api_key, + endpoint=DEFAULT_MISTRALAI_ENDPOINT, + timeout=timeout, + max_retries=max_retries, + ) + self._aclient = MistralAsyncClient( + api_key=api_key, + endpoint=DEFAULT_MISTRALAI_ENDPOINT, + timeout=timeout, + max_retries=max_retries, + ) + + super().__init__( + temperature=temperature, + max_tokens=max_tokens, + additional_kwargs=additional_kwargs, + timeout=timeout, + max_retries=max_retries, + safe_mode=safe_mode, + random_seed=random_seed, + model=model, + callback_manager=callback_manager, + system_prompt=system_prompt, + messages_to_prompt=messages_to_prompt, + completion_to_prompt=completion_to_prompt, + pydantic_program_mode=pydantic_program_mode, + output_parser=output_parser, + ) + + @classmethod + def class_name(cls) -> str: + return "MistralAI_LLM" + + @property + def metadata(self) -> LLMMetadata: + return LLMMetadata( + context_window=mistralai_modelname_to_contextsize(self.model), + num_output=self.max_tokens, + is_chat_model=True, + model_name=self.model, + safe_mode=self.safe_mode, + random_seed=self.random_seed, + ) + + @property + def _model_kwargs(self) -> Dict[str, Any]: + base_kwargs = { + "model": self.model, + "temperature": self.temperature, + "max_tokens": self.max_tokens, + "random_seed": self.random_seed, + "safe_mode": self.safe_mode, + } + return { + **base_kwargs, + **self.additional_kwargs, + } + + def _get_all_kwargs(self, **kwargs: Any) -> Dict[str, Any]: + return { + **self._model_kwargs, + **kwargs, + } + + @llm_chat_callback() + def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse: + # convert messages to mistral ChatMessage + from mistralai.client import ChatMessage as mistral_chatmessage + + messages = [ + mistral_chatmessage(role=x.role, content=x.content) for x in messages + ] + all_kwargs = self._get_all_kwargs(**kwargs) + response = self._client.chat(messages=messages, **all_kwargs) + return ChatResponse( + message=ChatMessage( + role=MessageRole.ASSISTANT, content=response.choices[0].message.content + ), + raw=dict(response), + ) + + @llm_completion_callback() + def complete( + self, prompt: str, formatted: bool = False, **kwargs: Any + ) -> CompletionResponse: + complete_fn = chat_to_completion_decorator(self.chat) + return complete_fn(prompt, **kwargs) + + @llm_chat_callback() + def stream_chat( + self, messages: Sequence[ChatMessage], **kwargs: Any + ) -> ChatResponseGen: + # convert messages to mistral ChatMessage + from mistralai.client import ChatMessage as mistral_chatmessage + + messages = [ + mistral_chatmessage(role=message.role, content=message.content) + for message in messages + ] + all_kwargs = self._get_all_kwargs(**kwargs) + + response = self._client.chat_stream(messages=messages, **all_kwargs) + + def gen() -> ChatResponseGen: + content = "" + role = MessageRole.ASSISTANT + for chunk in response: + content_delta = chunk.choices[0].delta.content + if content_delta is None: + continue + content += content_delta + yield ChatResponse( + message=ChatMessage(role=role, content=content), + delta=content_delta, + raw=chunk, + ) + + return gen() + + @llm_completion_callback() + def stream_complete( + self, prompt: str, formatted: bool = False, **kwargs: Any + ) -> CompletionResponseGen: + stream_complete_fn = stream_chat_to_completion_decorator(self.stream_chat) + return stream_complete_fn(prompt, **kwargs) + + @llm_chat_callback() + async def achat( + self, messages: Sequence[ChatMessage], **kwargs: Any + ) -> ChatResponse: + # convert messages to mistral 
ChatMessage + from mistralai.client import ChatMessage as mistral_chatmessage + + messages = [ + mistral_chatmessage(role=message.role, content=message.content) + for message in messages + ] + all_kwargs = self._get_all_kwargs(**kwargs) + response = await self._aclient.chat(messages=messages, **all_kwargs) + return ChatResponse( + message=ChatMessage( + role=MessageRole.ASSISTANT, content=response.choices[0].message.content + ), + raw=dict(response), + ) + + @llm_completion_callback() + async def acomplete( + self, prompt: str, formatted: bool = False, **kwargs: Any + ) -> CompletionResponse: + acomplete_fn = achat_to_completion_decorator(self.achat) + return await acomplete_fn(prompt, **kwargs) + + @llm_chat_callback() + async def astream_chat( + self, messages: Sequence[ChatMessage], **kwargs: Any + ) -> ChatResponseAsyncGen: + # convert messages to mistral ChatMessage + from mistralai.client import ChatMessage as mistral_chatmessage + + messages = [ + mistral_chatmessage(role=x.role, content=x.content) for x in messages + ] + all_kwargs = self._get_all_kwargs(**kwargs) + + response = await self._aclient.chat_stream(messages=messages, **all_kwargs) + + async def gen() -> ChatResponseAsyncGen: + content = "" + role = MessageRole.ASSISTANT + async for chunk in response: + content_delta = chunk.choices[0].delta.content + if content_delta is None: + continue + content += content_delta + yield ChatResponse( + message=ChatMessage(role=role, content=content), + delta=content_delta, + raw=chunk, + ) + + return gen() + + @llm_completion_callback() + async def astream_complete( + self, prompt: str, formatted: bool = False, **kwargs: Any + ) -> CompletionResponseAsyncGen: + astream_complete_fn = astream_chat_to_completion_decorator(self.astream_chat) + return await astream_complete_fn(prompt, **kwargs) diff --git a/llama_index/llms/mistralai/utils.py b/llama_index/llms/mistralai/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..dece6b324c41b7bc37a1a371c3256b25e5b98301 --- /dev/null +++ b/llama_index/llms/mistralai/utils.py @@ -0,0 +1,17 @@ +from typing import Dict + +MISTRALAI_MODELS: Dict[str, int] = { + "mistral-tiny": 32000, + "mistral-small": 32000, + "mistral-medium": 32000, +} + + +def mistralai_modelname_to_contextsize(modelname: str) -> int: + if modelname not in MISTRALAI_MODELS: + raise ValueError( + f"Unknown model: {modelname}. Please provide a valid MistralAI model name." 
+ "Known models are: " + ", ".join(MISTRALAI_MODELS.keys()) + ) + + return MISTRALAI_MODELS[modelname] diff --git a/llama_index/readers/web/__init__.py b/llama_index/readers/web/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f5270004f76383184a7c0d8de6b884967a21722f --- /dev/null +++ b/llama_index/readers/web/__init__.py @@ -0,0 +1,54 @@ +"""Init file.""" +from llama_index.readers.web.async_web.base import ( + AsyncWebPageReader, +) +from llama_index.readers.web.beautiful_soup_web.base import ( + BeautifulSoupWebReader, +) +from llama_index.readers.web.knowledge_base.base import ( + KnowledgeBaseWebReader, +) +from llama_index.readers.web.main_content_extractor.base import ( + MainContentExtractorReader, +) +from llama_index.readers.web.news.base import NewsArticleReader +from llama_index.readers.web.readability_web.base import ( + ReadabilityWebPageReader, +) +from llama_index.readers.web.rss.base import ( + RssReader, +) +from llama_index.readers.web.rss_news.base import ( + RssNewsReader, +) +from llama_index.readers.web.simple_web.base import ( + SimpleWebPageReader, +) +from llama_index.readers.web.sitemap.base import ( + SitemapReader, +) +from llama_index.readers.web.trafilatura_web.base import ( + TrafilaturaWebReader, +) +from llama_index.readers.web.unstructured_web.base import ( + UnstructuredURLLoader, +) +from llama_index.readers.web.whole_site.base import ( + WholeSiteReader, +) + +__all__ = [ + "AsyncWebPageReader", + "BeautifulSoupWebReader", + "KnowledgeBaseWebReader", + "MainContentExtractorReader", + "NewsArticleReader", + "ReadabilityWebPageReader", + "RssReader", + "RssNewsReader", + "SimpleWebPageReader", + "SitemapReader", + "TrafilaturaWebReader", + "UnstructuredURLLoader", + "WholeSiteReader", +] diff --git a/llama_index/readers/web/async_web/README.md b/llama_index/readers/web/async_web/README.md new file mode 100644 index 0000000000000000000000000000000000000000..44c8985196a367e4e394a91f231099b50ee33391 --- /dev/null +++ b/llama_index/readers/web/async_web/README.md @@ -0,0 +1,35 @@ +# Async Website Loader + +This loader is an asynchronous web scraper that fetches the text from static websites by converting the HTML to text. + +## Usage + +To use this loader, you need to pass in an array of URLs. 
+ +```python +from llama_index.readers.web.async_web.base import AsyncWebPageReader + +# for jupyter notebooks uncomment the following two lines of code: +# import nest_asyncio +# nest_asyncio.apply() + +loader = AsyncWebPageReader() +documents = loader.load_data(urls=["https://google.com"]) +``` + +### Issues Jupyter Notebooks asyncio + +If you get a `RuntimeError: asyncio.run() cannot be called from a running event loop` you might be interested in this (solution here)[https://saturncloud.io/blog/asynciorun-cannot-be-called-from-a-running-event-loop-a-guide-for-data-scientists-using-jupyter-notebook/#option-3-use-nest_asyncio] + +### Old Usage + +use this syntax for earlier versions of llama_index where llama_hub loaders where loaded via separate download process: + +```python +from llama_index import download_loader + +AsyncWebPageReader = download_loader("AsyncWebPageReader") + +loader = AsyncWebPageReader() +documents = loader.load_data(urls=["https://google.com"]) +``` diff --git a/llama_index/readers/web/async_web/__init__.py b/llama_index/readers/web/async_web/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llama_index/readers/web/async_web/base.py b/llama_index/readers/web/async_web/base.py new file mode 100644 index 0000000000000000000000000000000000000000..9005b927a93d4f3c18b8f727dde2e55be6c2e56f --- /dev/null +++ b/llama_index/readers/web/async_web/base.py @@ -0,0 +1,115 @@ +import asyncio +import logging +from typing import List + +from llama_index.core.readers.base import BaseReader +from llama_index.core.schema import Document + +logger = logging.getLogger(__name__) + + +class AsyncWebPageReader(BaseReader): + """Asynchronous web page reader. + + Reads pages from the web asynchronously. + + Args: + html_to_text (bool): Whether to convert HTML to text. + Requires `html2text` package. + limit (int): Maximum number of concurrent requests. + dedupe (bool): to deduplicate urls if there is exact-match within given list + fail_on_error (bool): if requested url does not return status code 200 the routine will raise an ValueError + """ + + def __init__( + self, + html_to_text: bool = False, + limit: int = 10, + dedupe: bool = True, + fail_on_error: bool = False, + ) -> None: + """Initialize with parameters.""" + try: + import html2text # noqa: F401 + except ImportError: + raise ImportError( + "`html2text` package not found, please run `pip install html2text`" + ) + try: + import aiohttp # noqa: F401 + except ImportError: + raise ImportError( + "`aiohttp` package not found, please run `pip install aiohttp`" + ) + self._limit = limit + self._html_to_text = html_to_text + self._dedupe = dedupe + self._fail_on_error = fail_on_error + + def load_data(self, urls: List[str]) -> List[Document]: + """Load data from the input urls. + + Args: + urls (List[str]): List of URLs to scrape. + + Returns: + List[Document]: List of documents. 
+ + """ + if self._dedupe: + urls = list(dict.fromkeys(urls)) + + import aiohttp + + def chunked_http_client(limit: int): + semaphore = asyncio.Semaphore(limit) + + async def http_get(url: str, session: aiohttp.ClientSession): + async with semaphore: + async with session.get(url) as response: + return response, await response.text() + + return http_get + + async def fetch_urls(urls: List[str]): + http_client = chunked_http_client(self._limit) + async with aiohttp.ClientSession() as session: + tasks = [http_client(url, session) for url in urls] + return await asyncio.gather(*tasks, return_exceptions=True) + + if not isinstance(urls, list): + raise ValueError("urls must be a list of strings.") + + documents = [] + responses = asyncio.run(fetch_urls(urls)) + + for i, response_tuple in enumerate(responses): + if not isinstance(response_tuple, tuple): + raise ValueError(f"One of the inputs is not a valid url: {urls[i]}") + + response, raw_page = response_tuple + + if response.status != 200: + logger.warning(f"error fetching page from {urls[i]}") + logger.info(response) + + if self._fail_on_error: + raise ValueError( + f"error fetching page from {urls[i]}. server returned status:" + f" {response.status} and response {raw_page}" + ) + + continue + + if self._html_to_text: + import html2text + + response_text = html2text.html2text(raw_page) + else: + response_text = raw_page + + documents.append( + Document(text=response_text, extra_info={"Source": str(response.url)}) + ) + + return documents diff --git a/llama_index/readers/web/beautiful_soup_web/README.md b/llama_index/readers/web/beautiful_soup_web/README.md new file mode 100644 index 0000000000000000000000000000000000000000..331cd5fce7661492bc15d455ca66d91a0e7a5441 --- /dev/null +++ b/llama_index/readers/web/beautiful_soup_web/README.md @@ -0,0 +1,92 @@ +# Beautiful Soup Website Loader + +This loader is a web scraper that fetches the text from websites using the `Beautiful Soup` (aka `bs4`) Python package. Furthermore, the flexibility of Beautiful Soup allows for custom templates that enable the loader to extract the desired text from specific website designs, such as Substack. Check out the code to see how to add your own. + +## Usage + +To use this loader, you need to pass in an array of URLs. + +```python +from llama_index import download_loader + +BeautifulSoupWebReader = download_loader("BeautifulSoupWebReader") + +loader = BeautifulSoupWebReader() +documents = loader.load_data(urls=["https://google.com"]) +``` + +You can also add your own specific website parsers in `base.py` that automatically get used for certain URLs. Alternatively, you may tell the loader to use a certain parser by passing in the `custom_hostname` argument. For reference, this is what the Beautiful Soup parser looks like for Substack sites: + +```python +def _substack_reader(soup: Any) -> Tuple[str, Dict[str, Any]]: + """Extract text from Substack blog post.""" + extra_info = { + "Title of this Substack post": soup.select_one( + "h1.post-title" + ).getText(), + "Subtitle": soup.select_one("h3.subtitle").getText(), + "Author": soup.select_one("span.byline-names").getText(), + } + text = soup.select_one("div.available-content").getText() + return text, extra_info +``` + +## Examples + +This loader is designed to be used as a way to load data into [LlamaIndex](https://github.com/run-llama/llama_index/tree/main/llama_index) and/or subsequently used as a Tool in a [LangChain](https://github.com/hwchase17/langchain) Agent. 
+ +### LlamaIndex + +```python +from llama_index import VectorStoreIndex, download_loader + +BeautifulSoupWebReader = download_loader("BeautifulSoupWebReader") + +loader = BeautifulSoupWebReader() +documents = loader.load_data(urls=["https://google.com"]) +index = VectorStoreIndex.from_documents(documents) +index.query("What language is on this website?") +``` + +### LangChain + +Note: Make sure you change the description of the `Tool` to match your use-case. + +```python +from llama_index import VectorStoreIndex, download_loader +from langchain.agents import initialize_agent, Tool +from langchain.llms import OpenAI +from langchain.chains.conversation.memory import ConversationBufferMemory + +BeautifulSoupWebReader = download_loader("BeautifulSoupWebReader") + +loader = BeautifulSoupWebReader() +documents = loader.load_data(urls=["https://google.com"]) +index = VectorStoreIndex.from_documents(documents) + +tools = [ + Tool( + name="Website Index", + func=lambda q: index.query(q), + description=f"Useful when you want answer questions about the text on websites.", + ), +] +llm = OpenAI(temperature=0) +memory = ConversationBufferMemory(memory_key="chat_history") +agent_chain = initialize_agent( + tools, llm, agent="zero-shot-react-description", memory=memory +) + +output = agent_chain.run(input="What language is on this website?") +``` + +## Custom hostname example + +To use a custom hostname like readme.co, substack.com or any other commonly-used website template, you can pass in the `custom_hostname` argument to guarantee that a custom parser is used (if it exists). Check out the code to see which ones are currently implemented. + +```python +documents = loader.load_data( + urls=["https://langchain.readthedocs.io/en/latest/"], + custom_hostname="readthedocs.io", +) +``` diff --git a/llama_index/readers/web/beautiful_soup_web/__init__.py b/llama_index/readers/web/beautiful_soup_web/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llama_index/readers/web/beautiful_soup_web/base.py b/llama_index/readers/web/beautiful_soup_web/base.py new file mode 100644 index 0000000000000000000000000000000000000000..384ee2d1814768183245ad9fe4c2dca55001ecc9 --- /dev/null +++ b/llama_index/readers/web/beautiful_soup_web/base.py @@ -0,0 +1,206 @@ +"""Beautiful Soup Web scraper.""" + +import logging +from typing import Any, Callable, Dict, List, Optional, Tuple +from urllib.parse import urljoin + +from llama_index.core.bridge.pydantic import PrivateAttr +from llama_index.core.readers.base import BasePydanticReader +from llama_index.core.schema import Document + +logger = logging.getLogger(__name__) + + +def _substack_reader(soup: Any, **kwargs) -> Tuple[str, Dict[str, Any]]: + """Extract text from Substack blog post.""" + extra_info = { + "Title of this Substack post": soup.select_one("h1.post-title").getText(), + "Subtitle": soup.select_one("h3.subtitle").getText(), + "Author": soup.select_one("span.byline-names").getText(), + } + text = soup.select_one("div.available-content").getText() + return text, extra_info + + +def _readthedocs_reader(soup: Any, url: str, **kwargs) -> Tuple[str, Dict[str, Any]]: + """Extract text from a ReadTheDocs documentation site.""" + import requests + from bs4 import BeautifulSoup + + links = soup.find_all("a", {"class": "reference internal"}) + rtd_links = [] + + for link in links: + rtd_links.append(link["href"]) + for i in range(len(rtd_links)): + if not rtd_links[i].startswith("http"): + 
rtd_links[i] = urljoin(url, rtd_links[i]) + + texts = [] + for doc_link in rtd_links: + page_link = requests.get(doc_link) + soup = BeautifulSoup(page_link.text, "html.parser") + try: + text = soup.find(attrs={"role": "main"}).get_text() + + except IndexError: + text = None + if text: + texts.append("\n".join([t for t in text.split("\n") if t])) + return "\n".join(texts), {} + + +def _readmedocs_reader( + soup: Any, url: str, include_url_in_text: bool = True +) -> Tuple[str, Dict[str, Any]]: + """Extract text from a ReadMe documentation site.""" + import requests + from bs4 import BeautifulSoup + + links = soup.find_all("a") + docs_links = [link["href"] for link in links if "/docs/" in link["href"]] + docs_links = list(set(docs_links)) + for i in range(len(docs_links)): + if not docs_links[i].startswith("http"): + docs_links[i] = urljoin(url, docs_links[i]) + + texts = [] + for doc_link in docs_links: + page_link = requests.get(doc_link) + soup = BeautifulSoup(page_link.text, "html.parser") + try: + text = "" + for element in soup.find_all("article", {"id": "content"}): + for child in element.descendants: + if child.name == "a" and child.has_attr("href"): + if include_url_in_text: + url = child.get("href") + if url is not None and "edit" in url: + text += child.text + else: + text += ( + f"{child.text} (Reference url: {doc_link}{url}) " + ) + elif child.string and child.string.strip(): + text += child.string.strip() + " " + + except IndexError: + text = None + logger.error(f"Could not extract text from {doc_link}") + continue + texts.append("\n".join([t for t in text.split("\n") if t])) + return "\n".join(texts), {} + + +def _gitbook_reader( + soup: Any, url: str, include_url_in_text: bool = True +) -> Tuple[str, Dict[str, Any]]: + """Extract text from a ReadMe documentation site.""" + import requests + from bs4 import BeautifulSoup + + links = soup.find_all("a") + docs_links = [link["href"] for link in links if "/docs/" in link["href"]] + docs_links = list(set(docs_links)) + for i in range(len(docs_links)): + if not docs_links[i].startswith("http"): + docs_links[i] = urljoin(url, docs_links[i]) + + texts = [] + for doc_link in docs_links: + page_link = requests.get(doc_link) + soup = BeautifulSoup(page_link.text, "html.parser") + try: + text = "" + text = soup.find("main") + clean_text = clean_text = ", ".join([tag.get_text() for tag in text]) + except IndexError: + text = None + logger.error(f"Could not extract text from {doc_link}") + continue + texts.append(clean_text) + return "\n".join(texts), {} + + +DEFAULT_WEBSITE_EXTRACTOR: Dict[ + str, Callable[[Any, str], Tuple[str, Dict[str, Any]]] +] = { + "substack.com": _substack_reader, + "readthedocs.io": _readthedocs_reader, + "readme.com": _readmedocs_reader, + "gitbook.io": _gitbook_reader, +} + + +class BeautifulSoupWebReader(BasePydanticReader): + """BeautifulSoup web page reader. + + Reads pages from the web. + Requires the `bs4` and `urllib` packages. + + Args: + website_extractor (Optional[Dict[str, Callable]]): A mapping of website + hostname (e.g. google.com) to a function that specifies how to + extract text from the BeautifulSoup obj. See DEFAULT_WEBSITE_EXTRACTOR. 
+ """ + + is_remote: bool = True + _website_extractor: Dict[str, Callable] = PrivateAttr() + + def __init__(self, website_extractor: Optional[Dict[str, Callable]] = None) -> None: + self._website_extractor = website_extractor or DEFAULT_WEBSITE_EXTRACTOR + super().__init__() + + @classmethod + def class_name(cls) -> str: + """Get the name identifier of the class.""" + return "BeautifulSoupWebReader" + + def load_data( + self, + urls: List[str], + custom_hostname: Optional[str] = None, + include_url_in_text: Optional[bool] = True, + ) -> List[Document]: + """Load data from the urls. + + Args: + urls (List[str]): List of URLs to scrape. + custom_hostname (Optional[str]): Force a certain hostname in the case + a website is displayed under custom URLs (e.g. Substack blogs) + include_url_in_text (Optional[bool]): Include the reference url in the text of the document + + Returns: + List[Document]: List of documents. + + """ + from urllib.parse import urlparse + + import requests + from bs4 import BeautifulSoup + + documents = [] + for url in urls: + try: + page = requests.get(url) + except Exception: + raise ValueError(f"One of the inputs is not a valid url: {url}") + + hostname = custom_hostname or urlparse(url).hostname or "" + + soup = BeautifulSoup(page.content, "html.parser") + + data = "" + extra_info = {"URL": url} + if hostname in self._website_extractor: + data, metadata = self._website_extractor[hostname]( + soup=soup, url=url, include_url_in_text=include_url_in_text + ) + extra_info.update(metadata) + + else: + data = soup.getText() + + documents.append(Document(text=data, id_=url, extra_info=extra_info)) + + return documents diff --git a/llama_index/readers/web/beautiful_soup_web/requirements.txt b/llama_index/readers/web/beautiful_soup_web/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..a7c46586a0b94d5b33b99e0c4e30956ed99daf8f --- /dev/null +++ b/llama_index/readers/web/beautiful_soup_web/requirements.txt @@ -0,0 +1,3 @@ +beautifulsoup4 +requests +urllib3 diff --git a/llama_index/readers/web/knowledge_base/README.md b/llama_index/readers/web/knowledge_base/README.md new file mode 100644 index 0000000000000000000000000000000000000000..816e70f3e7a614573cf6a715a2148c42f614a727 --- /dev/null +++ b/llama_index/readers/web/knowledge_base/README.md @@ -0,0 +1,96 @@ +# Knowledge Base Website Loader + +This loader is a web crawler and scraper that fetches text content from websites hosting public knowledge bases. Examples are the [Intercom help center](https://www.intercom.com/help/en/) or the [Robinhood help center](https://robinhood.com/us/en/support/). Typically these sites have a directory structure with several sections and many articles in each section. This loader crawls and finds all links that match the article path provided, and scrapes the content of each article. This can be used to create bots that answer customer questions based on public documentation. + +It uses [Playwright](https://playwright.dev/python/) to drive a browser. This reduces the chance of getting blocked by Cloudflare or other CDNs, but makes it a bit more challenging to run on cloud services. + +## Usage + +First run + +``` +playwright install +``` + +This installs the browsers that Playwright requires. + +To use this loader, you need to pass in the root URL and the string to search for in the URL to tell if the crawler has reached an article. 
You also need to pass in several CSS selectors so the cralwer knows which links to follow and which elements to extract content from. use + +```python +from llama_index import download_loader + +KnowledgeBaseWebReader = download_loader("KnowledgeBaseWebReader") + +loader = KnowledgeBaseWebReader() +documents = loader.load_data( + root_url="https://www.intercom.com/help", + link_selectors=[".article-list a", ".article-list a"], + article_path="/articles", + body_selector=".article-body", + title_selector=".article-title", + subtitle_selector=".article-subtitle", +) +``` + +## Examples + +This loader is designed to be used as a way to load data into [LlamaIndex](https://github.com/run-llama/llama_index/tree/main/llama_index) and/or subsequently used as a Tool in a [LangChain](https://github.com/hwchase17/langchain) Agent. + +### LlamaIndex + +```python +from llama_index import VectorStoreIndex, download_loader + +KnowledgeBaseWebReader = download_loader("KnowledgeBaseWebReader") + +loader = KnowledgeBaseWebReader() +documents = loader.load_data( + root_url="https://support.intercom.com", + link_selectors=[".article-list a", ".article-list a"], + article_path="/articles", + body_selector=".article-body", + title_selector=".article-title", + subtitle_selector=".article-subtitle", +) +index = VectorStoreIndex.from_documents(documents) +index.query("What languages does Intercom support?") +``` + +### LangChain + +Note: Make sure you change the description of the `Tool` to match your use-case. + +```python +from llama_index import VectorStoreIndex, download_loader +from langchain.agents import initialize_agent, Tool +from langchain.llms import OpenAI +from langchain.chains.conversation.memory import ConversationBufferMemory + +KnowledgeBaseWebReader = download_loader("KnowledgeBaseWebReader") + +loader = KnowledgeBaseWebReader() +documents = loader.load_data( + root_url="https://support.intercom.com", + link_selectors=[".article-list a", ".article-list a"], + article_path="/articles", + body_selector=".article-body", + title_selector=".article-title", + subtitle_selector=".article-subtitle", +) +index = VectorStoreIndex.from_documents(documents) + +tools = [ + Tool( + name="Website Index", + func=lambda q: index.query(q), + description=f"Useful when you want answer questions about a product that has a public knowledge base.", + ), +] +llm = OpenAI(temperature=0) +memory = ConversationBufferMemory(memory_key="chat_history") +agent_chain = initialize_agent( + tools, llm, agent="zero-shot-react-description", memory=memory +) + +output = agent_chain.run(input="What languages does Intercom support?") +``` diff --git a/llama_index/readers/web/knowledge_base/__init__.py b/llama_index/readers/web/knowledge_base/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llama_index/readers/web/knowledge_base/base.py b/llama_index/readers/web/knowledge_base/base.py new file mode 100644 index 0000000000000000000000000000000000000000..c78c7397c6452ddd7ba1a6319ffe93b3b8d365b7 --- /dev/null +++ b/llama_index/readers/web/knowledge_base/base.py @@ -0,0 +1,165 @@ +from typing import Any, Dict, List, Optional + +from llama_index.core.readers.base import BaseReader +from llama_index.core.schema import Document + + +class KnowledgeBaseWebReader(BaseReader): + """Knowledge base reader. + + Crawls and reads articles from a knowledge base/help center with Playwright. + Tested on Zendesk and Intercom CMS, may work on others. 
+ Can be run in headless mode but it may be blocked by Cloudflare. Run it headed to be safe. + Times out occasionally, just increase the default time out if it does. + Requires the `playwright` package. + + Args: + root_url (str): the base url of the knowledge base, with no trailing slash + e.g. 'https://support.intercom.com' + link_selectors (List[str]): list of css selectors to find links to articles while crawling + e.g. ['.article-list a', '.article-list a'] + article_path (str): the url path of articles on this domain so the crawler knows when to stop + e.g. '/articles' + title_selector (Optional[str]): css selector to find the title of the article + e.g. '.article-title' + subtitle_selector (Optional[str]): css selector to find the subtitle/description of the article + e.g. '.article-subtitle' + body_selector (Optional[str]): css selector to find the body of the article + e.g. '.article-body' + """ + + def __init__( + self, + root_url: str, + link_selectors: List[str], + article_path: str, + title_selector: Optional[str] = None, + subtitle_selector: Optional[str] = None, + body_selector: Optional[str] = None, + ) -> None: + """Initialize with parameters.""" + self.root_url = root_url + self.link_selectors = link_selectors + self.article_path = article_path + self.title_selector = title_selector + self.subtitle_selector = subtitle_selector + self.body_selector = body_selector + + def load_data(self) -> List[Document]: + """Load data from the knowledge base.""" + from playwright.sync_api import sync_playwright + + with sync_playwright() as p: + browser = p.chromium.launch(headless=False) + + # Crawl + article_urls = self.get_article_urls( + browser, + self.root_url, + self.root_url, + ) + + # Scrape + documents = [] + for url in article_urls: + article = self.scrape_article( + browser, + url, + ) + extra_info = { + "title": article["title"], + "subtitle": article["subtitle"], + "url": article["url"], + } + documents.append(Document(text=article["body"], extra_info=extra_info)) + + browser.close() + + return documents + + def scrape_article( + self, + browser: Any, + url: str, + ) -> Dict[str, str]: + """Scrape a single article url. + + Args: + browser (Any): a Playwright Chromium browser. + url (str): URL of the article to scrape. + + Returns: + Dict[str, str]: a mapping of article attributes to their values. + + """ + page = browser.new_page(ignore_https_errors=True) + page.set_default_timeout(60000) + page.goto(url, wait_until="domcontentloaded") + + title = ( + ( + page.query_selector(self.title_selector).evaluate( + "node => node.innerText" + ) + ) + if self.title_selector + else "" + ) + subtitle = ( + ( + page.query_selector(self.subtitle_selector).evaluate( + "node => node.innerText" + ) + ) + if self.subtitle_selector + else "" + ) + body = ( + (page.query_selector(self.body_selector).evaluate("node => node.innerText")) + if self.body_selector + else "" + ) + + page.close() + print("scraped:", url) + return {"title": title, "subtitle": subtitle, "body": body, "url": url} + + def get_article_urls( + self, browser: Any, root_url: str, current_url: str + ) -> List[str]: + """Recursively crawl through the knowledge base to find a list of articles. + + Args: + browser (Any): a Playwright Chromium browser. + root_url (str): root URL of the knowledge base. + current_url (str): current URL that is being crawled. + + Returns: + List[str]: a list of URLs of found articles. 
+ + """ + page = browser.new_page(ignore_https_errors=True) + page.set_default_timeout(60000) + page.goto(current_url, wait_until="domcontentloaded") + + # If this is a leaf node aka article page, return itself + if self.article_path in current_url: + print("Found an article: ", current_url) + page.close() + return [current_url] + + # Otherwise crawl this page and find all the articles linked from it + article_urls = [] + links = [] + + for link_selector in self.link_selectors: + ahrefs = page.query_selector_all(link_selector) + links.extend(ahrefs) + + for link in links: + url = root_url + page.evaluate("(node) => node.getAttribute('href')", link) + article_urls.extend(self.get_article_urls(browser, root_url, url)) + + page.close() + + return article_urls diff --git a/llama_index/readers/web/knowledge_base/requirements.txt b/llama_index/readers/web/knowledge_base/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..da0b9b0327e37c2d602b8bdb0d9f636f905b079f --- /dev/null +++ b/llama_index/readers/web/knowledge_base/requirements.txt @@ -0,0 +1 @@ +playwright~=1.30 diff --git a/llama_index/readers/web/main_content_extractor/README.md b/llama_index/readers/web/main_content_extractor/README.md new file mode 100644 index 0000000000000000000000000000000000000000..6fb33b7b5e7b547549e78a515a04b6afdd0fbcd3 --- /dev/null +++ b/llama_index/readers/web/main_content_extractor/README.md @@ -0,0 +1,67 @@ +# MainContentExtractor Website Loader + +This loader is a web scraper that fetches the text from static websites using the `MainContentExtractor` Python package. + +For information on how to extract main content, README in the following github repository + +[HawkClaws/main_content_extractor](https://github.com/HawkClaws/main_content_extractor) + +## Usage + +To use this loader, you need to pass in an array of URLs. + +```python +from llama_index import download_loader + +MainContentExtractorReader = download_loader("MainContentExtractorReader") + +loader = MainContentExtractorReader() +documents = loader.load_data(urls=["https://google.com"]) +``` + +## Examples + +### LlamaIndex + +```python +from llama_index import VectorStoreIndex, download_loader + +MainContentExtractorReader = download_loader("MainContentExtractorReader") + +loader = MainContentExtractorReader() +documents = loader.load_data(urls=["https://google.com"]) +index = VectorStoreIndex.from_documents(documents) +index.query("What language is on this website?") +``` + +### LangChain + +Note: Make sure you change the description of the `Tool` to match your use-case. 
+ +```python +from llama_index import VectorStoreIndex, download_loader +from langchain.agents import initialize_agent, Tool +from langchain.llms import OpenAI +from langchain.chains.conversation.memory import ConversationBufferMemory + +MainContentExtractorReader = download_loader("MainContentExtractorReader") + +loader = MainContentExtractorReader() +documents = loader.load_data(urls=["https://google.com"]) +index = VectorStoreIndex.from_documents(documents) + +tools = [ + Tool( + name="Website Index", + func=lambda q: index.query(q), + description=f"Useful when you want answer questions about the text on websites.", + ), +] +llm = OpenAI(temperature=0) +memory = ConversationBufferMemory(memory_key="chat_history") +agent_chain = initialize_agent( + tools, llm, agent="zero-shot-react-description", memory=memory +) + +output = agent_chain.run(input="What language is on this website?") +``` diff --git a/llama_index/readers/web/main_content_extractor/__init__.py b/llama_index/readers/web/main_content_extractor/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llama_index/readers/web/main_content_extractor/base.py b/llama_index/readers/web/main_content_extractor/base.py new file mode 100644 index 0000000000000000000000000000000000000000..7ae096e00c3773612008668d3e3e5aecdd9fac74 --- /dev/null +++ b/llama_index/readers/web/main_content_extractor/base.py @@ -0,0 +1,47 @@ +from typing import List + +import requests +from llama_index.core.readers.base import BaseReader +from llama_index.core.schema import Document + + +class MainContentExtractorReader(BaseReader): + """MainContentExtractor web page reader. + + Reads pages from the web. + + Args: + text_format (str, optional): The format of the text. Defaults to "markdown". + Requires `MainContentExtractor` package. + + """ + + def __init__(self, text_format: str = "markdown") -> None: + """Initialize with parameters.""" + self.text_format = text_format + + def load_data(self, urls: List[str]) -> List[Document]: + """Load data from the input directory. + + Args: + urls (List[str]): List of URLs to scrape. + + Returns: + List[Document]: List of documents. + + """ + if not isinstance(urls, list): + raise ValueError("urls must be a list of strings.") + + from main_content_extractor import MainContentExtractor + + documents = [] + for url in urls: + response = requests.get(url).text + response = MainContentExtractor.extract( + response, output_format=self.text_format, include_links=False + ) + + documents.append(Document(text=response)) + + return documents diff --git a/llama_index/readers/web/main_content_extractor/requirements.txt b/llama_index/readers/web/main_content_extractor/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..9523ff77b31bc4bf0b4bdb206cd637dc27ac6ff6 --- /dev/null +++ b/llama_index/readers/web/main_content_extractor/requirements.txt @@ -0,0 +1 @@ +MainContentExtractor diff --git a/llama_index/readers/web/news/README.md b/llama_index/readers/web/news/README.md new file mode 100644 index 0000000000000000000000000000000000000000..a20e912379d8073b045b480e7860ebe6670f2576 --- /dev/null +++ b/llama_index/readers/web/news/README.md @@ -0,0 +1,24 @@ +# News Article Loader + +This loader makes use of the `newspaper3k` library to parse web page urls which have news +articles in them. 
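Each returned `Document` also carries the article's metadata — title, link, authors, language, description, and publish date, plus `keywords` and `summary` when the reader is constructed with `use_nlp=True` (see the reader implementation further down in this diff). A minimal sketch of inspecting it, assuming `documents` comes from the Usage example below:

```python
# Assumes `documents` was produced by the NewsArticleReader example in the Usage section.
for doc in documents:
    meta = doc.metadata
    print(meta["title"], meta["link"], meta["authors"])
    # Only populated when the reader was created with use_nlp=True:
    print(meta.get("keywords"), meta.get("summary"))
```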
+ +## Usage + +``` +pip install newspaper3k +``` + +Pass in an array of individual page URLs: + +```python +from llama_index.readers.web.news import NewsArticleReader + +reader = NewsArticleReader(use_nlp=False) +documents = reader.load_data( + [ + "https://www.cnbc.com/2023/08/03/amazon-amzn-q2-earnings-report-2023.html", + "https://www.theverge.com/2023/8/3/23818388/brave-search-image-video-results-privacy-index", + ] +) +``` diff --git a/llama_index/readers/web/news/__init__.py b/llama_index/readers/web/news/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llama_index/readers/web/news/base.py b/llama_index/readers/web/news/base.py new file mode 100644 index 0000000000000000000000000000000000000000..5258519edb5e192cbde4a1c6841e939b793e95ce --- /dev/null +++ b/llama_index/readers/web/news/base.py @@ -0,0 +1,90 @@ +"""News article reader using Newspaper.""" +import logging +from importlib.util import find_spec +from typing import Any, Generator, List + +from llama_index.core.readers.base import BaseReader +from llama_index.core.schema import Document + +logger = logging.getLogger(__name__) + + +class NewsArticleReader(BaseReader): + """Simple news article reader. + + Reads news articles from the web and parses them using the `newspaper` library. + + Args: + text_mode (bool): Whether to load a text version or HTML version of the content (default=True). + use_nlp (bool): Whether to use NLP to extract additional summary and keywords (default=True). + newspaper_kwargs: Additional keyword arguments to pass to newspaper.Article. See + https://newspaper.readthedocs.io/en/latest/user_guide/quickstart.html#article + """ + + def __init__( + self, text_mode: bool = True, use_nlp: bool = True, **newspaper_kwargs: Any + ) -> None: + """Initialize with parameters.""" + if find_spec("newspaper") is None: + raise ImportError( + "`newspaper` package not found, please run `pip install newspaper3k`" + ) + self.load_text = text_mode + self.use_nlp = use_nlp + self.newspaper_kwargs = newspaper_kwargs + + def load_data(self, urls: List[str]) -> List[Document]: + """Load data from the list of news article urls. + + Args: + urls (List[str]): List of URLs to load news articles. + + Returns: + List[Document]: List of documents. 
+ + """ + if not isinstance(urls, list) and not isinstance(urls, Generator): + raise ValueError("urls must be a list or generator.") + documents = [] + for url in urls: + from newspaper import Article + + try: + article = Article(url, **self.newspaper_kwargs) + article.download() + article.parse() + + if self.use_nlp: + article.nlp() + + except Exception as e: + logger.error(f"Error fetching or processing {url}, exception: {e}") + continue + + metadata = { + "title": getattr(article, "title", ""), + "link": getattr(article, "url", getattr(article, "canonical_link", "")), + "authors": getattr(article, "authors", []), + "language": getattr(article, "meta_lang", ""), + "description": getattr(article, "meta_description", ""), + "publish_date": getattr(article, "publish_date", ""), + } + + if self.load_text: + content = article.text + else: + content = article.html + + if self.use_nlp: + metadata["keywords"] = getattr(article, "keywords", []) + metadata["summary"] = getattr(article, "summary", "") + + documents.append(Document(text=content, metadata=metadata)) + + return documents + + +if __name__ == "__main__": + reader = NewsArticleReader() + article = reader.load_data(["https://www.bbc.com/news/world-us-canada-56797998"]) + print(article) diff --git a/llama_index/readers/web/news/requirements.txt b/llama_index/readers/web/news/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..666d1802dac38835cc423297c1a8c531f683a443 --- /dev/null +++ b/llama_index/readers/web/news/requirements.txt @@ -0,0 +1 @@ +newspaper3k diff --git a/llama_index/readers/web/readability_web/README.md b/llama_index/readers/web/readability_web/README.md new file mode 100644 index 0000000000000000000000000000000000000000..75da465912bc2736b2424989baa23dbcfaeb23d5 --- /dev/null +++ b/llama_index/readers/web/readability_web/README.md @@ -0,0 +1,82 @@ +# Readability Webpage Loader + +Extracting relevant information from a fully rendered web page. +During the processing, it is always assumed that web pages used as data sources contain textual content. + +It is particularly effective for websites that use client-side rendering. + +1. Load the page and wait for it rendered. (playwright) +2. Inject Readability.js to extract the main content. + +## Usage + +To use this loader, you need to pass in a single of URL. + +```python +from llama_index import download_loader + +ReadabilityWebPageReader = download_loader("ReadabilityWebPageReader") + +# or set proxy server for playwright: loader = ReadabilityWebPageReader(proxy="http://your-proxy-server:port") +# For some specific web pages, you may need to set "wait_until" to "networkidle". loader = ReadabilityWebPageReader(wait_until="networkidle") +loader = ReadabilityWebPageReader() + +documents = loader.load_data( + url="https://support.squarespace.com/hc/en-us/articles/206795137-Pages-and-content-basics" +) +``` + +## Examples + +This loader is designed to be used as a way to load data into [LlamaIndex](https://github.com/run-llama/llama_index/tree/main/llama_index) and/or subsequently used as a Tool in a [LangChain](https://github.com/hwchase17/langchain) Agent. 
+ +### LlamaIndex + +```python +from llama_index import download_loader + +ReadabilityWebPageReader = download_loader("ReadabilityWebPageReader") + +loader = ReadabilityWebPageReader() +documents = loader.load_data( + url="https://support.squarespace.com/hc/en-us/articles/206795137-Pages-and-content-basics" +) + +index = VectorStoreIndex.from_documents(documents) +print(index.query("What is pages?")) +``` + +### LangChain + +Note: Make sure you change the description of the `Tool` to match your use-case. + +```python +from llama_index import VectorStoreIndex, download_loader +from langchain.agents import initialize_agent, Tool +from langchain.llms import OpenAI +from langchain.chains.conversation.memory import ConversationBufferMemory + +ReadabilityWebPageReader = download_loader("ReadabilityWebPageReader") + +loader = ReadabilityWebPageReader() +documents = loader.load_data( + url="https://support.squarespace.com/hc/en-us/articles/206795137-Pages-and-content-basics" +) + +index = VectorStoreIndex.from_documents(documents) + +tools = [ + Tool( + name="Website Index", + func=lambda q: index.query(q), + description=f"Useful when you want answer questions about the text on websites.", + ), +] +llm = OpenAI(temperature=0) +memory = ConversationBufferMemory(memory_key="chat_history") +agent_chain = initialize_agent( + tools, llm, agent="zero-shot-react-description", memory=memory +) + +output = agent_chain.run(input="What is pages?") +``` diff --git a/llama_index/readers/web/readability_web/Readability.js b/llama_index/readers/web/readability_web/Readability.js new file mode 100644 index 0000000000000000000000000000000000000000..28a5464edd95f6dec398d16f0510a683ae52e126 --- /dev/null +++ b/llama_index/readers/web/readability_web/Readability.js @@ -0,0 +1,2613 @@ +/* + * Copyright (c) 2010 Arc90 Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * This code is heavily based on Arc90's readability.js (1.7.1) script + * available at: http://code.google.com/p/arc90labs-readability + */ + +/** + * Public constructor. + * @param {HTMLDocument} doc The document to parse. + * @param {Object} options The options object. + */ +function Readability(doc, options) { + // In some older versions, people passed a URI as the first argument. 
Cope: + if (options && options.documentElement) { + doc = options; + options = arguments[2]; + } else if (!doc || !doc.documentElement) { + throw new Error( + "First argument to Readability constructor should be a document object.", + ); + } + options = options || {}; + + this._doc = doc; + this._docJSDOMParser = this._doc.firstChild.__JSDOMParser__; + this._articleTitle = null; + this._articleByline = null; + this._articleDir = null; + this._articleSiteName = null; + this._attempts = []; + + // Configurable options + this._debug = !!options.debug; + this._maxElemsToParse = + options.maxElemsToParse || this.DEFAULT_MAX_ELEMS_TO_PARSE; + this._nbTopCandidates = + options.nbTopCandidates || this.DEFAULT_N_TOP_CANDIDATES; + this._charThreshold = options.charThreshold || this.DEFAULT_CHAR_THRESHOLD; + this._classesToPreserve = this.CLASSES_TO_PRESERVE.concat( + options.classesToPreserve || [], + ); + this._keepClasses = !!options.keepClasses; + this._serializer = + options.serializer || + function (el) { + return el.innerHTML; + }; + this._disableJSONLD = !!options.disableJSONLD; + this._allowedVideoRegex = options.allowedVideoRegex || this.REGEXPS.videos; + + // Start with all flags set + this._flags = + this.FLAG_STRIP_UNLIKELYS | + this.FLAG_WEIGHT_CLASSES | + this.FLAG_CLEAN_CONDITIONALLY; + + // Control whether log messages are sent to the console + if (this._debug) { + let logNode = function (node) { + if (node.nodeType == node.TEXT_NODE) { + return `${node.nodeName} ("${node.textContent}")`; + } + let attrPairs = Array.from(node.attributes || [], function (attr) { + return `${attr.name}="${attr.value}"`; + }).join(" "); + return `<${node.localName} ${attrPairs}>`; + }; + this.log = function () { + if (typeof console !== "undefined") { + let args = Array.from(arguments, (arg) => { + if (arg && arg.nodeType == this.ELEMENT_NODE) { + return logNode(arg); + } + return arg; + }); + args.unshift("Reader: (Readability)"); + console.log.apply(console, args); + } else if (typeof dump !== "undefined") { + /* global dump */ + var msg = Array.prototype.map + .call(arguments, function (x) { + return x && x.nodeName ? logNode(x) : x; + }) + .join(" "); + dump("Reader: (Readability) " + msg + "\n"); + } + }; + } else { + this.log = function () {}; + } +} + +Readability.prototype = { + FLAG_STRIP_UNLIKELYS: 0x1, + FLAG_WEIGHT_CLASSES: 0x2, + FLAG_CLEAN_CONDITIONALLY: 0x4, + + // https://developer.mozilla.org/en-US/docs/Web/API/Node/nodeType + ELEMENT_NODE: 1, + TEXT_NODE: 3, + + // Max number of nodes supported by this parser. Default: 0 (no limit) + DEFAULT_MAX_ELEMS_TO_PARSE: 0, + + // The number of top candidates to consider when analysing how + // tight the competition is among candidates. + DEFAULT_N_TOP_CANDIDATES: 5, + + // Element tags to score by default. + DEFAULT_TAGS_TO_SCORE: "section,h2,h3,h4,h5,h6,p,td,pre" + .toUpperCase() + .split(","), + + // The default number of chars an article must have in order to return a result + DEFAULT_CHAR_THRESHOLD: 500, + + // All of the regular expressions in use within readability. + // Defined up here so we don't instantiate them repeatedly in loops. + REGEXPS: { + // NOTE: These two regular expressions are duplicated in + // Readability-readerable.js. Please keep both copies in sync. 
+ unlikelyCandidates: + /-ad-|ai2html|banner|breadcrumbs|combx|comment|community|cover-wrap|disqus|extra|footer|gdpr|header|legends|menu|related|remark|replies|rss|shoutbox|sidebar|skyscraper|social|sponsor|supplemental|ad-break|agegate|pagination|pager|popup|yom-remote/i, + okMaybeItsACandidate: /and|article|body|column|content|main|shadow/i, + + positive: + /article|body|content|entry|hentry|h-entry|main|page|pagination|post|text|blog|story/i, + negative: + /-ad-|hidden|^hid$| hid$| hid |^hid |banner|combx|comment|com-|contact|foot|footer|footnote|gdpr|masthead|media|meta|outbrain|promo|related|scroll|share|shoutbox|sidebar|skyscraper|sponsor|shopping|tags|tool|widget/i, + extraneous: + /print|archive|comment|discuss|e[\-]?mail|share|reply|all|login|sign|single|utility/i, + byline: /byline|author|dateline|writtenby|p-author/i, + replaceFonts: /<(\/?)font[^>]*>/gi, + normalize: /\s{2,}/g, + videos: + /\/\/(www\.)?((dailymotion|youtube|youtube-nocookie|player\.vimeo|v\.qq)\.com|(archive|upload\.wikimedia)\.org|player\.twitch\.tv)/i, + shareElements: /(\b|_)(share|sharedaddy)(\b|_)/i, + nextLink: /(next|weiter|continue|>([^\|]|$)|»([^\|]|$))/i, + prevLink: /(prev|earl|old|new|<|«)/i, + tokenize: /\W+/g, + whitespace: /^\s*$/, + hasContent: /\S$/, + hashUrl: /^#.+/, + srcsetUrl: /(\S+)(\s+[\d.]+[xw])?(\s*(?:,|$))/g, + b64DataUrl: /^data:\s*([^\s;,]+)\s*;\s*base64\s*,/i, + // See: https://schema.org/Article + jsonLdArticleTypes: + /^Article|AdvertiserContentArticle|NewsArticle|AnalysisNewsArticle|AskPublicNewsArticle|BackgroundNewsArticle|OpinionNewsArticle|ReportageNewsArticle|ReviewNewsArticle|Report|SatiricalArticle|ScholarlyArticle|MedicalScholarlyArticle|SocialMediaPosting|BlogPosting|LiveBlogPosting|DiscussionForumPosting|TechArticle|APIReference$/, + }, + + UNLIKELY_ROLES: [ + "menu", + "menubar", + "complementary", + "navigation", + "alert", + "alertdialog", + "dialog", + ], + + DIV_TO_P_ELEMS: new Set([ + "BLOCKQUOTE", + "DL", + "DIV", + "IMG", + "OL", + "P", + "PRE", + "TABLE", + "UL", + ]), + + ALTER_TO_DIV_EXCEPTIONS: ["DIV", "ARTICLE", "SECTION", "P"], + + PRESENTATIONAL_ATTRIBUTES: [ + "align", + "background", + "bgcolor", + "border", + "cellpadding", + "cellspacing", + "frame", + "hspace", + "rules", + "style", + "valign", + "vspace", + ], + + DEPRECATED_SIZE_ATTRIBUTE_ELEMS: ["TABLE", "TH", "TD", "HR", "PRE"], + + // The commented out elements qualify as phrasing content but tend to be + // removed by readability when put into paragraphs, so we ignore them here. + PHRASING_ELEMS: [ + // "CANVAS", "IFRAME", "SVG", "VIDEO", + "ABBR", + "AUDIO", + "B", + "BDO", + "BR", + "BUTTON", + "CITE", + "CODE", + "DATA", + "DATALIST", + "DFN", + "EM", + "EMBED", + "I", + "IMG", + "INPUT", + "KBD", + "LABEL", + "MARK", + "MATH", + "METER", + "NOSCRIPT", + "OBJECT", + "OUTPUT", + "PROGRESS", + "Q", + "RUBY", + "SAMP", + "SCRIPT", + "SELECT", + "SMALL", + "SPAN", + "STRONG", + "SUB", + "SUP", + "TEXTAREA", + "TIME", + "VAR", + "WBR", + ], + + // These are the classes that readability sets itself. + CLASSES_TO_PRESERVE: ["page"], + + // These are the list of HTML entities that need to be escaped. + HTML_ESCAPE_MAP: { + lt: "<", + gt: ">", + amp: "&", + quot: '"', + apos: "'", + }, + + /** + * Run any post-process modifications to article content as necessary. + * + * @param Element + * @return void + **/ + _postProcessContent: function (articleContent) { + // Readability cannot open relative uris so we convert them to absolute uris. 
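+    // For example (illustrative values), href="/about" on a page whose base URI is https://example.com/post resolves to https://example.com/about.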
+ this._fixRelativeUris(articleContent); + + this._simplifyNestedElements(articleContent); + + if (!this._keepClasses) { + // Remove classes. + this._cleanClasses(articleContent); + } + }, + + /** + * Iterates over a NodeList, calls `filterFn` for each node and removes node + * if function returned `true`. + * + * If function is not passed, removes all the nodes in node list. + * + * @param NodeList nodeList The nodes to operate on + * @param Function filterFn the function to use as a filter + * @return void + */ + _removeNodes: function (nodeList, filterFn) { + // Avoid ever operating on live node lists. + if (this._docJSDOMParser && nodeList._isLiveNodeList) { + throw new Error("Do not pass live node lists to _removeNodes"); + } + for (var i = nodeList.length - 1; i >= 0; i--) { + var node = nodeList[i]; + var parentNode = node.parentNode; + if (parentNode) { + if (!filterFn || filterFn.call(this, node, i, nodeList)) { + parentNode.removeChild(node); + } + } + } + }, + + /** + * Iterates over a NodeList, and calls _setNodeTag for each node. + * + * @param NodeList nodeList The nodes to operate on + * @param String newTagName the new tag name to use + * @return void + */ + _replaceNodeTags: function (nodeList, newTagName) { + // Avoid ever operating on live node lists. + if (this._docJSDOMParser && nodeList._isLiveNodeList) { + throw new Error("Do not pass live node lists to _replaceNodeTags"); + } + for (const node of nodeList) { + this._setNodeTag(node, newTagName); + } + }, + + /** + * Iterate over a NodeList, which doesn't natively fully implement the Array + * interface. + * + * For convenience, the current object context is applied to the provided + * iterate function. + * + * @param NodeList nodeList The NodeList. + * @param Function fn The iterate function. + * @return void + */ + _forEachNode: function (nodeList, fn) { + Array.prototype.forEach.call(nodeList, fn, this); + }, + + /** + * Iterate over a NodeList, and return the first node that passes + * the supplied test function + * + * For convenience, the current object context is applied to the provided + * test function. + * + * @param NodeList nodeList The NodeList. + * @param Function fn The test function. + * @return void + */ + _findNode: function (nodeList, fn) { + return Array.prototype.find.call(nodeList, fn, this); + }, + + /** + * Iterate over a NodeList, return true if any of the provided iterate + * function calls returns true, false otherwise. + * + * For convenience, the current object context is applied to the + * provided iterate function. + * + * @param NodeList nodeList The NodeList. + * @param Function fn The iterate function. + * @return Boolean + */ + _someNode: function (nodeList, fn) { + return Array.prototype.some.call(nodeList, fn, this); + }, + + /** + * Iterate over a NodeList, return true if all of the provided iterate + * function calls return true, false otherwise. + * + * For convenience, the current object context is applied to the + * provided iterate function. + * + * @param NodeList nodeList The NodeList. + * @param Function fn The iterate function. + * @return Boolean + */ + _everyNode: function (nodeList, fn) { + return Array.prototype.every.call(nodeList, fn, this); + }, + + /** + * Concat all nodelists passed as arguments. 
+ * + * @return ...NodeList + * @return Array + */ + _concatNodeLists: function () { + var slice = Array.prototype.slice; + var args = slice.call(arguments); + var nodeLists = args.map(function (list) { + return slice.call(list); + }); + return Array.prototype.concat.apply([], nodeLists); + }, + + _getAllNodesWithTag: function (node, tagNames) { + if (node.querySelectorAll) { + return node.querySelectorAll(tagNames.join(",")); + } + return [].concat.apply( + [], + tagNames.map(function (tag) { + var collection = node.getElementsByTagName(tag); + return Array.isArray(collection) ? collection : Array.from(collection); + }), + ); + }, + + /** + * Removes the class="" attribute from every element in the given + * subtree, except those that match CLASSES_TO_PRESERVE and + * the classesToPreserve array from the options object. + * + * @param Element + * @return void + */ + _cleanClasses: function (node) { + var classesToPreserve = this._classesToPreserve; + var className = (node.getAttribute("class") || "") + .split(/\s+/) + .filter(function (cls) { + return classesToPreserve.indexOf(cls) != -1; + }) + .join(" "); + + if (className) { + node.setAttribute("class", className); + } else { + node.removeAttribute("class"); + } + + for (node = node.firstElementChild; node; node = node.nextElementSibling) { + this._cleanClasses(node); + } + }, + + /** + * Converts each and uri in the given element to an absolute URI, + * ignoring #ref URIs. + * + * @param Element + * @return void + */ + _fixRelativeUris: function (articleContent) { + var baseURI = this._doc.baseURI; + var documentURI = this._doc.documentURI; + function toAbsoluteURI(uri) { + // Leave hash links alone if the base URI matches the document URI: + if (baseURI == documentURI && uri.charAt(0) == "#") { + return uri; + } + + // Otherwise, resolve against base URI: + try { + return new URL(uri, baseURI).href; + } catch (ex) { + // Something went wrong, just return the original: + } + return uri; + } + + var links = this._getAllNodesWithTag(articleContent, ["a"]); + this._forEachNode(links, function (link) { + var href = link.getAttribute("href"); + if (href) { + // Remove links with javascript: URIs, since + // they won't work after scripts have been removed from the page. 
+ if (href.indexOf("javascript:") === 0) { + // if the link only contains simple text content, it can be converted to a text node + if ( + link.childNodes.length === 1 && + link.childNodes[0].nodeType === this.TEXT_NODE + ) { + var text = this._doc.createTextNode(link.textContent); + link.parentNode.replaceChild(text, link); + } else { + // if the link has multiple children, they should all be preserved + var container = this._doc.createElement("span"); + while (link.firstChild) { + container.appendChild(link.firstChild); + } + link.parentNode.replaceChild(container, link); + } + } else { + link.setAttribute("href", toAbsoluteURI(href)); + } + } + }); + + var mediaTypes = this._getAllNodesWithTag(articleContent, [ + "img", + "picture", + "figure", + "video", + "audio", + "source", + ]); + + this._forEachNode(mediaTypes, function (media) { + var src = media.getAttribute("src"); + var poster = media.getAttribute("poster"); + var srcset = media.getAttribute("srcset"); + + if (src) { + media.setAttribute("src", toAbsoluteURI(src)); + } + + if (poster) { + media.setAttribute("poster", toAbsoluteURI(poster)); + } + + if (srcset) { + var newSrcset = srcset.replace( + this.REGEXPS.srcsetUrl, + function (_, p1, p2, p3) { + return toAbsoluteURI(p1) + (p2 || "") + p3; + }, + ); + + media.setAttribute("srcset", newSrcset); + } + }); + }, + + _simplifyNestedElements: function (articleContent) { + var node = articleContent; + + while (node) { + if ( + node.parentNode && + ["DIV", "SECTION"].includes(node.tagName) && + !(node.id && node.id.startsWith("readability")) + ) { + if (this._isElementWithoutContent(node)) { + node = this._removeAndGetNext(node); + continue; + } else if ( + this._hasSingleTagInsideElement(node, "DIV") || + this._hasSingleTagInsideElement(node, "SECTION") + ) { + var child = node.children[0]; + for (var i = 0; i < node.attributes.length; i++) { + child.setAttribute( + node.attributes[i].name, + node.attributes[i].value, + ); + } + node.parentNode.replaceChild(child, node); + node = child; + continue; + } + } + + node = this._getNextNode(node); + } + }, + + /** + * Get the article title as an H1. + * + * @return string + **/ + _getArticleTitle: function () { + var doc = this._doc; + var curTitle = ""; + var origTitle = ""; + + try { + curTitle = origTitle = doc.title.trim(); + + // If they had an element with id "title" in their HTML + if (typeof curTitle !== "string") + curTitle = origTitle = this._getInnerText( + doc.getElementsByTagName("title")[0], + ); + } catch (e) { + /* ignore exceptions setting the title. */ + } + + var titleHadHierarchicalSeparators = false; + function wordCount(str) { + return str.split(/\s+/).length; + } + + // If there's a separator in the title, first remove the final part + if (/ [\|\-\\\/>»] /.test(curTitle)) { + titleHadHierarchicalSeparators = / [\\\/>»] /.test(curTitle); + curTitle = origTitle.replace(/(.*)[\|\-\\\/>»] .*/gi, "$1"); + + // If the resulting title is too short (3 words or fewer), remove + // the first part instead: + if (wordCount(curTitle) < 3) + curTitle = origTitle.replace(/[^\|\-\\\/>»]*[\|\-\\\/>»](.*)/gi, "$1"); + } else if (curTitle.indexOf(": ") !== -1) { + // Check if we have an heading containing this exact string, so we + // could assume it's the full title. 
+ var headings = this._concatNodeLists( + doc.getElementsByTagName("h1"), + doc.getElementsByTagName("h2"), + ); + var trimmedTitle = curTitle.trim(); + var match = this._someNode(headings, function (heading) { + return heading.textContent.trim() === trimmedTitle; + }); + + // If we don't, let's extract the title out of the original title string. + if (!match) { + curTitle = origTitle.substring(origTitle.lastIndexOf(":") + 1); + + // If the title is now too short, try the first colon instead: + if (wordCount(curTitle) < 3) { + curTitle = origTitle.substring(origTitle.indexOf(":") + 1); + // But if we have too many words before the colon there's something weird + // with the titles and the H tags so let's just use the original title instead + } else if (wordCount(origTitle.substr(0, origTitle.indexOf(":"))) > 5) { + curTitle = origTitle; + } + } + } else if (curTitle.length > 150 || curTitle.length < 15) { + var hOnes = doc.getElementsByTagName("h1"); + + if (hOnes.length === 1) curTitle = this._getInnerText(hOnes[0]); + } + + curTitle = curTitle.trim().replace(this.REGEXPS.normalize, " "); + // If we now have 4 words or fewer as our title, and either no + // 'hierarchical' separators (\, /, > or ») were found in the original + // title or we decreased the number of words by more than 1 word, use + // the original title. + var curTitleWordCount = wordCount(curTitle); + if ( + curTitleWordCount <= 4 && + (!titleHadHierarchicalSeparators || + curTitleWordCount != + wordCount(origTitle.replace(/[\|\-\\\/>»]+/g, "")) - 1) + ) { + curTitle = origTitle; + } + + return curTitle; + }, + + /** + * Prepare the HTML document for readability to scrape it. + * This includes things like stripping javascript, CSS, and handling terrible markup. + * + * @return void + **/ + _prepDocument: function () { + var doc = this._doc; + + // Remove all style tags in head + this._removeNodes(this._getAllNodesWithTag(doc, ["style"])); + + if (doc.body) { + this._replaceBrs(doc.body); + } + + this._replaceNodeTags(this._getAllNodesWithTag(doc, ["font"]), "SPAN"); + }, + + /** + * Finds the next node, starting from the given node, and ignoring + * whitespace in between. If the given node is an element, the same node is + * returned. + */ + _nextNode: function (node) { + var next = node; + while ( + next && + next.nodeType != this.ELEMENT_NODE && + this.REGEXPS.whitespace.test(next.textContent) + ) { + next = next.nextSibling; + } + return next; + }, + + /** + * Replaces 2 or more successive
<br> elements with a single <p>.
+ * Whitespace between <br> elements are ignored. For example:
+ * <div>foo<br>bar<br> <br><br>abc</div>
+ * will become:
+ * <div>foo<br>bar<p>abc</p></div>
+ */ + _replaceBrs: function (elem) { + this._forEachNode(this._getAllNodesWithTag(elem, ["br"]), function (br) { + var next = br.nextSibling; + + // Whether 2 or more
<br> elements have been found and replaced with a
+ // <p> block.
+ var replaced = false;
+
+ // If we find a <br> chain, remove the <br>s until we hit another node
+ // or non-whitespace. This leaves behind the first <br> in the chain
+ // (which will be replaced with a <p>
later). + while ((next = this._nextNode(next)) && next.tagName == "BR") { + replaced = true; + var brSibling = next.nextSibling; + next.parentNode.removeChild(next); + next = brSibling; + } + + // If we removed a
<br> chain, replace the remaining <br> with a <p>. Add
+ // all sibling nodes as children of the <p> until we hit another <br>
+ // chain. + if (replaced) { + var p = this._doc.createElement("p"); + br.parentNode.replaceChild(p, br); + + next = p.nextSibling; + while (next) { + // If we've hit another
<br><br>, we're done adding children to this <p>
. + if (next.tagName == "BR") { + var nextElem = this._nextNode(next.nextSibling); + if (nextElem && nextElem.tagName == "BR") break; + } + + if (!this._isPhrasingContent(next)) break; + + // Otherwise, make this node a child of the new
<p>
. + var sibling = next.nextSibling; + p.appendChild(next); + next = sibling; + } + + while (p.lastChild && this._isWhitespace(p.lastChild)) { + p.removeChild(p.lastChild); + } + + if (p.parentNode.tagName === "P") this._setNodeTag(p.parentNode, "DIV"); + } + }); + }, + + _setNodeTag: function (node, tag) { + this.log("_setNodeTag", node, tag); + if (this._docJSDOMParser) { + node.localName = tag.toLowerCase(); + node.tagName = tag.toUpperCase(); + return node; + } + + var replacement = node.ownerDocument.createElement(tag); + while (node.firstChild) { + replacement.appendChild(node.firstChild); + } + node.parentNode.replaceChild(replacement, node); + if (node.readability) replacement.readability = node.readability; + + for (var i = 0; i < node.attributes.length; i++) { + try { + replacement.setAttribute( + node.attributes[i].name, + node.attributes[i].value, + ); + } catch (ex) { + /* it's possible for setAttribute() to throw if the attribute name + * isn't a valid XML Name. Such attributes can however be parsed from + * source in HTML docs, see https://github.com/whatwg/html/issues/4275, + * so we can hit them here and then throw. We don't care about such + * attributes so we ignore them. + */ + } + } + return replacement; + }, + + /** + * Prepare the article node for display. Clean out any inline styles, + * iframes, forms, strip extraneous
<p>
tags, etc. + * + * @param Element + * @return void + **/ + _prepArticle: function (articleContent) { + this._cleanStyles(articleContent); + + // Check for data tables before we continue, to avoid removing items in + // those tables, which will often be isolated even though they're + // visually linked to other content-ful elements (text, images, etc.). + this._markDataTables(articleContent); + + this._fixLazyImages(articleContent); + + // Clean out junk from the article content + this._cleanConditionally(articleContent, "form"); + this._cleanConditionally(articleContent, "fieldset"); + this._clean(articleContent, "object"); + this._clean(articleContent, "embed"); + this._clean(articleContent, "footer"); + this._clean(articleContent, "link"); + this._clean(articleContent, "aside"); + + // Clean out elements with little content that have "share" in their id/class combinations from final top candidates, + // which means we don't remove the top candidates even they have "share". + + var shareElementThreshold = this.DEFAULT_CHAR_THRESHOLD; + + this._forEachNode(articleContent.children, function (topCandidate) { + this._cleanMatchedNodes(topCandidate, function (node, matchString) { + return ( + this.REGEXPS.shareElements.test(matchString) && + node.textContent.length < shareElementThreshold + ); + }); + }); + + this._clean(articleContent, "iframe"); + this._clean(articleContent, "input"); + this._clean(articleContent, "textarea"); + this._clean(articleContent, "select"); + this._clean(articleContent, "button"); + this._cleanHeaders(articleContent); + + // Do these last as the previous stuff may have removed junk + // that will affect these + this._cleanConditionally(articleContent, "table"); + this._cleanConditionally(articleContent, "ul"); + this._cleanConditionally(articleContent, "div"); + + // replace H1 with H2 as H1 should be only title that is displayed separately + this._replaceNodeTags( + this._getAllNodesWithTag(articleContent, ["h1"]), + "h2", + ); + + // Remove extra paragraphs + this._removeNodes( + this._getAllNodesWithTag(articleContent, ["p"]), + function (paragraph) { + var imgCount = paragraph.getElementsByTagName("img").length; + var embedCount = paragraph.getElementsByTagName("embed").length; + var objectCount = paragraph.getElementsByTagName("object").length; + // At this point, nasty iframes have been removed, only remain embedded video ones. + var iframeCount = paragraph.getElementsByTagName("iframe").length; + var totalCount = imgCount + embedCount + objectCount + iframeCount; + + return totalCount === 0 && !this._getInnerText(paragraph, false); + }, + ); + + this._forEachNode( + this._getAllNodesWithTag(articleContent, ["br"]), + function (br) { + var next = this._nextNode(br.nextSibling); + if (next && next.tagName == "P") br.parentNode.removeChild(br); + }, + ); + + // Remove single-cell tables + this._forEachNode( + this._getAllNodesWithTag(articleContent, ["table"]), + function (table) { + var tbody = this._hasSingleTagInsideElement(table, "TBODY") + ? table.firstElementChild + : table; + if (this._hasSingleTagInsideElement(tbody, "TR")) { + var row = tbody.firstElementChild; + if (this._hasSingleTagInsideElement(row, "TD")) { + var cell = row.firstElementChild; + cell = this._setNodeTag( + cell, + this._everyNode(cell.childNodes, this._isPhrasingContent) + ? "P" + : "DIV", + ); + table.parentNode.replaceChild(cell, table); + } + } + }, + ); + }, + + /** + * Initialize a node with the readability object. 
Also checks the + * className/id for special names to add to its score. + * + * @param Element + * @return void + **/ + _initializeNode: function (node) { + node.readability = { contentScore: 0 }; + + switch (node.tagName) { + case "DIV": + node.readability.contentScore += 5; + break; + + case "PRE": + case "TD": + case "BLOCKQUOTE": + node.readability.contentScore += 3; + break; + + case "ADDRESS": + case "OL": + case "UL": + case "DL": + case "DD": + case "DT": + case "LI": + case "FORM": + node.readability.contentScore -= 3; + break; + + case "H1": + case "H2": + case "H3": + case "H4": + case "H5": + case "H6": + case "TH": + node.readability.contentScore -= 5; + break; + } + + node.readability.contentScore += this._getClassWeight(node); + }, + + _removeAndGetNext: function (node) { + var nextNode = this._getNextNode(node, true); + node.parentNode.removeChild(node); + return nextNode; + }, + + /** + * Traverse the DOM from node to node, starting at the node passed in. + * Pass true for the second parameter to indicate this node itself + * (and its kids) are going away, and we want the next node over. + * + * Calling this in a loop will traverse the DOM depth-first. + */ + _getNextNode: function (node, ignoreSelfAndKids) { + // First check for kids if those aren't being ignored + if (!ignoreSelfAndKids && node.firstElementChild) { + return node.firstElementChild; + } + // Then for siblings... + if (node.nextElementSibling) { + return node.nextElementSibling; + } + // And finally, move up the parent chain *and* find a sibling + // (because this is depth-first traversal, we will have already + // seen the parent nodes themselves). + do { + node = node.parentNode; + } while (node && !node.nextElementSibling); + return node && node.nextElementSibling; + }, + + // compares second text to first one + // 1 = same text, 0 = completely different text + // works the way that it splits both texts into words and then finds words that are unique in second text + // the result is given by the lower length of unique parts + _textSimilarity: function (textA, textB) { + var tokensA = textA + .toLowerCase() + .split(this.REGEXPS.tokenize) + .filter(Boolean); + var tokensB = textB + .toLowerCase() + .split(this.REGEXPS.tokenize) + .filter(Boolean); + if (!tokensA.length || !tokensB.length) { + return 0; + } + var uniqTokensB = tokensB.filter((token) => !tokensA.includes(token)); + var distanceB = uniqTokensB.join(" ").length / tokensB.join(" ").length; + return 1 - distanceB; + }, + + _checkByline: function (node, matchString) { + if (this._articleByline) { + return false; + } + + if (node.getAttribute !== undefined) { + var rel = node.getAttribute("rel"); + var itemprop = node.getAttribute("itemprop"); + } + + if ( + (rel === "author" || + (itemprop && itemprop.indexOf("author") !== -1) || + this.REGEXPS.byline.test(matchString)) && + this._isValidByline(node.textContent) + ) { + this._articleByline = node.textContent.trim(); + return true; + } + + return false; + }, + + _getNodeAncestors: function (node, maxDepth) { + maxDepth = maxDepth || 0; + var i = 0, + ancestors = []; + while (node.parentNode) { + ancestors.push(node.parentNode); + if (maxDepth && ++i === maxDepth) break; + node = node.parentNode; + } + return ancestors; + }, + + /*** + * grabArticle - Using a variety of metrics (content score, classname, element types), find the content that is + * most likely to be the stuff a user wants to read. Then return it wrapped up in a div. + * + * @param page a document to run upon. 
Needs to be a full document, complete with body. + * @return Element + **/ + _grabArticle: function (page) { + this.log("**** grabArticle ****"); + var doc = this._doc; + var isPaging = page !== null; + page = page ? page : this._doc.body; + + // We can't grab an article if we don't have a page! + if (!page) { + this.log("No body found in document. Abort."); + return null; + } + + var pageCacheHtml = page.innerHTML; + + while (true) { + this.log("Starting grabArticle loop"); + var stripUnlikelyCandidates = this._flagIsActive( + this.FLAG_STRIP_UNLIKELYS, + ); + + // First, node prepping. Trash nodes that look cruddy (like ones with the + // class name "comment", etc), and turn divs into P tags where they have been + // used inappropriately (as in, where they contain no other block level elements.) + var elementsToScore = []; + var node = this._doc.documentElement; + + let shouldRemoveTitleHeader = true; + + while (node) { + if (node.tagName === "HTML") { + this._articleLang = node.getAttribute("lang"); + } + + var matchString = node.className + " " + node.id; + + if (!this._isProbablyVisible(node)) { + this.log("Removing hidden node - " + matchString); + node = this._removeAndGetNext(node); + continue; + } + + // User is not able to see elements applied with both "aria-modal = true" and "role = dialog" + if ( + node.getAttribute("aria-modal") == "true" && + node.getAttribute("role") == "dialog" + ) { + node = this._removeAndGetNext(node); + continue; + } + + // Check to see if this node is a byline, and remove it if it is. + if (this._checkByline(node, matchString)) { + node = this._removeAndGetNext(node); + continue; + } + + if (shouldRemoveTitleHeader && this._headerDuplicatesTitle(node)) { + this.log( + "Removing header: ", + node.textContent.trim(), + this._articleTitle.trim(), + ); + shouldRemoveTitleHeader = false; + node = this._removeAndGetNext(node); + continue; + } + + // Remove unlikely candidates + if (stripUnlikelyCandidates) { + if ( + this.REGEXPS.unlikelyCandidates.test(matchString) && + !this.REGEXPS.okMaybeItsACandidate.test(matchString) && + !this._hasAncestorTag(node, "table") && + !this._hasAncestorTag(node, "code") && + node.tagName !== "BODY" && + node.tagName !== "A" + ) { + this.log("Removing unlikely candidate - " + matchString); + node = this._removeAndGetNext(node); + continue; + } + + if (this.UNLIKELY_ROLES.includes(node.getAttribute("role"))) { + this.log( + "Removing content with role " + + node.getAttribute("role") + + " - " + + matchString, + ); + node = this._removeAndGetNext(node); + continue; + } + } + + // Remove DIV, SECTION, and HEADER nodes without any content(e.g. text, image, video, or iframe). + if ( + (node.tagName === "DIV" || + node.tagName === "SECTION" || + node.tagName === "HEADER" || + node.tagName === "H1" || + node.tagName === "H2" || + node.tagName === "H3" || + node.tagName === "H4" || + node.tagName === "H5" || + node.tagName === "H6") && + this._isElementWithoutContent(node) + ) { + node = this._removeAndGetNext(node); + continue; + } + + if (this.DEFAULT_TAGS_TO_SCORE.indexOf(node.tagName) !== -1) { + elementsToScore.push(node); + } + + // Turn all divs that don't have children block level elements into p's + if (node.tagName === "DIV") { + // Put phrasing content into paragraphs. 
+ var p = null; + var childNode = node.firstChild; + while (childNode) { + var nextSibling = childNode.nextSibling; + if (this._isPhrasingContent(childNode)) { + if (p !== null) { + p.appendChild(childNode); + } else if (!this._isWhitespace(childNode)) { + p = doc.createElement("p"); + node.replaceChild(p, childNode); + p.appendChild(childNode); + } + } else if (p !== null) { + while (p.lastChild && this._isWhitespace(p.lastChild)) { + p.removeChild(p.lastChild); + } + p = null; + } + childNode = nextSibling; + } + + // Sites like http://mobile.slate.com encloses each paragraph with a DIV + // element. DIVs with only a P element inside and no text content can be + // safely converted into plain P elements to avoid confusing the scoring + // algorithm with DIVs with are, in practice, paragraphs. + if ( + this._hasSingleTagInsideElement(node, "P") && + this._getLinkDensity(node) < 0.25 + ) { + var newNode = node.children[0]; + node.parentNode.replaceChild(newNode, node); + node = newNode; + elementsToScore.push(node); + } else if (!this._hasChildBlockElement(node)) { + node = this._setNodeTag(node, "P"); + elementsToScore.push(node); + } + } + node = this._getNextNode(node); + } + + /** + * Loop through all paragraphs, and assign a score to them based on how content-y they look. + * Then add their score to their parent node. + * + * A score is determined by things like number of commas, class names, etc. Maybe eventually link density. + **/ + var candidates = []; + this._forEachNode(elementsToScore, function (elementToScore) { + if ( + !elementToScore.parentNode || + typeof elementToScore.parentNode.tagName === "undefined" + ) + return; + + // If this paragraph is less than 25 characters, don't even count it. + var innerText = this._getInnerText(elementToScore); + if (innerText.length < 25) return; + + // Exclude nodes with no ancestor. + var ancestors = this._getNodeAncestors(elementToScore, 5); + if (ancestors.length === 0) return; + + var contentScore = 0; + + // Add a point for the paragraph itself as a base. + contentScore += 1; + + // Add points for any commas within this paragraph. + contentScore += innerText.split(",").length; + + // For every 100 characters in this paragraph, add another point. Up to 3 points. + contentScore += Math.min(Math.floor(innerText.length / 100), 3); + + // Initialize and score ancestors. + this._forEachNode(ancestors, function (ancestor, level) { + if ( + !ancestor.tagName || + !ancestor.parentNode || + typeof ancestor.parentNode.tagName === "undefined" + ) + return; + + if (typeof ancestor.readability === "undefined") { + this._initializeNode(ancestor); + candidates.push(ancestor); + } + + // Node score divider: + // - parent: 1 (no division) + // - grandparent: 2 + // - great grandparent+: ancestor level * 3 + if (level === 0) var scoreDivider = 1; + else if (level === 1) scoreDivider = 2; + else scoreDivider = level * 3; + ancestor.readability.contentScore += contentScore / scoreDivider; + }); + }); + + // After we've calculated scores, loop through all of the possible + // candidate nodes we found and find the one with the highest score. + var topCandidates = []; + for (var c = 0, cl = candidates.length; c < cl; c += 1) { + var candidate = candidates[c]; + + // Scale the final candidates score based on link density. Good content + // should have a relatively small link density (5% or less) and be mostly + // unaffected by this operation. 
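+        // (Illustrative numbers: a candidate scoring 20 whose text is 25% links keeps 20 * (1 - 0.25) = 15.)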
+ var candidateScore = + candidate.readability.contentScore * + (1 - this._getLinkDensity(candidate)); + candidate.readability.contentScore = candidateScore; + + this.log("Candidate:", candidate, "with score " + candidateScore); + + for (var t = 0; t < this._nbTopCandidates; t++) { + var aTopCandidate = topCandidates[t]; + + if ( + !aTopCandidate || + candidateScore > aTopCandidate.readability.contentScore + ) { + topCandidates.splice(t, 0, candidate); + if (topCandidates.length > this._nbTopCandidates) + topCandidates.pop(); + break; + } + } + } + + var topCandidate = topCandidates[0] || null; + var neededToCreateTopCandidate = false; + var parentOfTopCandidate; + + // If we still have no top candidate, just use the body as a last resort. + // We also have to copy the body node so it is something we can modify. + if (topCandidate === null || topCandidate.tagName === "BODY") { + // Move all of the page's children into topCandidate + topCandidate = doc.createElement("DIV"); + neededToCreateTopCandidate = true; + // Move everything (not just elements, also text nodes etc.) into the container + // so we even include text directly in the body: + while (page.firstChild) { + this.log("Moving child out:", page.firstChild); + topCandidate.appendChild(page.firstChild); + } + + page.appendChild(topCandidate); + + this._initializeNode(topCandidate); + } else if (topCandidate) { + // Find a better top candidate node if it contains (at least three) nodes which belong to `topCandidates` array + // and whose scores are quite closed with current `topCandidate` node. + var alternativeCandidateAncestors = []; + for (var i = 1; i < topCandidates.length; i++) { + if ( + topCandidates[i].readability.contentScore / + topCandidate.readability.contentScore >= + 0.75 + ) { + alternativeCandidateAncestors.push( + this._getNodeAncestors(topCandidates[i]), + ); + } + } + var MINIMUM_TOPCANDIDATES = 3; + if (alternativeCandidateAncestors.length >= MINIMUM_TOPCANDIDATES) { + parentOfTopCandidate = topCandidate.parentNode; + while (parentOfTopCandidate.tagName !== "BODY") { + var listsContainingThisAncestor = 0; + for ( + var ancestorIndex = 0; + ancestorIndex < alternativeCandidateAncestors.length && + listsContainingThisAncestor < MINIMUM_TOPCANDIDATES; + ancestorIndex++ + ) { + listsContainingThisAncestor += Number( + alternativeCandidateAncestors[ancestorIndex].includes( + parentOfTopCandidate, + ), + ); + } + if (listsContainingThisAncestor >= MINIMUM_TOPCANDIDATES) { + topCandidate = parentOfTopCandidate; + break; + } + parentOfTopCandidate = parentOfTopCandidate.parentNode; + } + } + if (!topCandidate.readability) { + this._initializeNode(topCandidate); + } + + // Because of our bonus system, parents of candidates might have scores + // themselves. They get half of the node. There won't be nodes with higher + // scores than our topCandidate, but if we see the score going *up* in the first + // few steps up the tree, that's a decent sign that there might be more content + // lurking in other places that we want to unify in. The sibling stuff + // below does some of that - but only if we've looked high enough up the DOM + // tree. + parentOfTopCandidate = topCandidate.parentNode; + var lastScore = topCandidate.readability.contentScore; + // The scores shouldn't get too low. 
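+      // (Illustrative numbers: if the top candidate scored 30, ancestors scoring below 30 / 3 = 10 end the climb.)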
+ var scoreThreshold = lastScore / 3; + while (parentOfTopCandidate.tagName !== "BODY") { + if (!parentOfTopCandidate.readability) { + parentOfTopCandidate = parentOfTopCandidate.parentNode; + continue; + } + var parentScore = parentOfTopCandidate.readability.contentScore; + if (parentScore < scoreThreshold) break; + if (parentScore > lastScore) { + // Alright! We found a better parent to use. + topCandidate = parentOfTopCandidate; + break; + } + lastScore = parentOfTopCandidate.readability.contentScore; + parentOfTopCandidate = parentOfTopCandidate.parentNode; + } + + // If the top candidate is the only child, use parent instead. This will help sibling + // joining logic when adjacent content is actually located in parent's sibling node. + parentOfTopCandidate = topCandidate.parentNode; + while ( + parentOfTopCandidate.tagName != "BODY" && + parentOfTopCandidate.children.length == 1 + ) { + topCandidate = parentOfTopCandidate; + parentOfTopCandidate = topCandidate.parentNode; + } + if (!topCandidate.readability) { + this._initializeNode(topCandidate); + } + } + + // Now that we have the top candidate, look through its siblings for content + // that might also be related. Things like preambles, content split by ads + // that we removed, etc. + var articleContent = doc.createElement("DIV"); + if (isPaging) articleContent.id = "readability-content"; + + var siblingScoreThreshold = Math.max( + 10, + topCandidate.readability.contentScore * 0.2, + ); + // Keep potential top candidate's parent node to try to get text direction of it later. + parentOfTopCandidate = topCandidate.parentNode; + var siblings = parentOfTopCandidate.children; + + for (var s = 0, sl = siblings.length; s < sl; s++) { + var sibling = siblings[s]; + var append = false; + + this.log( + "Looking at sibling node:", + sibling, + sibling.readability + ? "with score " + sibling.readability.contentScore + : "", + ); + this.log( + "Sibling has score", + sibling.readability ? sibling.readability.contentScore : "Unknown", + ); + + if (sibling === topCandidate) { + append = true; + } else { + var contentBonus = 0; + + // Give a bonus if sibling nodes and top candidates have the example same classname + if ( + sibling.className === topCandidate.className && + topCandidate.className !== "" + ) + contentBonus += topCandidate.readability.contentScore * 0.2; + + if ( + sibling.readability && + sibling.readability.contentScore + contentBonus >= + siblingScoreThreshold + ) { + append = true; + } else if (sibling.nodeName === "P") { + var linkDensity = this._getLinkDensity(sibling); + var nodeContent = this._getInnerText(sibling); + var nodeLength = nodeContent.length; + + if (nodeLength > 80 && linkDensity < 0.25) { + append = true; + } else if ( + nodeLength < 80 && + nodeLength > 0 && + linkDensity === 0 && + nodeContent.search(/\.( |$)/) !== -1 + ) { + append = true; + } + } + } + + if (append) { + this.log("Appending node:", sibling); + + if (this.ALTER_TO_DIV_EXCEPTIONS.indexOf(sibling.nodeName) === -1) { + // We have a node that isn't a common block level element, like a form or td tag. + // Turn it into a div so it doesn't get filtered out later by accident. + this.log("Altering sibling:", sibling, "to div."); + + sibling = this._setNodeTag(sibling, "DIV"); + } + + articleContent.appendChild(sibling); + // Fetch children again to make it compatible + // with DOM parsers without live collection support. 
+ siblings = parentOfTopCandidate.children; + // siblings is a reference to the children array, and + // sibling is removed from the array when we call appendChild(). + // As a result, we must revisit this index since the nodes + // have been shifted. + s -= 1; + sl -= 1; + } + } + + if (this._debug) + this.log("Article content pre-prep: " + articleContent.innerHTML); + // So we have all of the content that we need. Now we clean it up for presentation. + this._prepArticle(articleContent); + if (this._debug) + this.log("Article content post-prep: " + articleContent.innerHTML); + + if (neededToCreateTopCandidate) { + // We already created a fake div thing, and there wouldn't have been any siblings left + // for the previous loop, so there's no point trying to create a new div, and then + // move all the children over. Just assign IDs and class names here. No need to append + // because that already happened anyway. + topCandidate.id = "readability-page-1"; + topCandidate.className = "page"; + } else { + var div = doc.createElement("DIV"); + div.id = "readability-page-1"; + div.className = "page"; + while (articleContent.firstChild) { + div.appendChild(articleContent.firstChild); + } + articleContent.appendChild(div); + } + + if (this._debug) + this.log("Article content after paging: " + articleContent.innerHTML); + + var parseSuccessful = true; + + // Now that we've gone through the full algorithm, check to see if + // we got any meaningful content. If we didn't, we may need to re-run + // grabArticle with different flags set. This gives us a higher likelihood of + // finding the content, and the sieve approach gives us a higher likelihood of + // finding the -right- content. + var textLength = this._getInnerText(articleContent, true).length; + if (textLength < this._charThreshold) { + parseSuccessful = false; + page.innerHTML = pageCacheHtml; + + if (this._flagIsActive(this.FLAG_STRIP_UNLIKELYS)) { + this._removeFlag(this.FLAG_STRIP_UNLIKELYS); + this._attempts.push({ + articleContent: articleContent, + textLength: textLength, + }); + } else if (this._flagIsActive(this.FLAG_WEIGHT_CLASSES)) { + this._removeFlag(this.FLAG_WEIGHT_CLASSES); + this._attempts.push({ + articleContent: articleContent, + textLength: textLength, + }); + } else if (this._flagIsActive(this.FLAG_CLEAN_CONDITIONALLY)) { + this._removeFlag(this.FLAG_CLEAN_CONDITIONALLY); + this._attempts.push({ + articleContent: articleContent, + textLength: textLength, + }); + } else { + this._attempts.push({ + articleContent: articleContent, + textLength: textLength, + }); + // No luck after removing flags, just return the longest text we found during the different loops + this._attempts.sort(function (a, b) { + return b.textLength - a.textLength; + }); + + // But first check if we actually have something + if (!this._attempts[0].textLength) { + return null; + } + + articleContent = this._attempts[0].articleContent; + parseSuccessful = true; + } + } + + if (parseSuccessful) { + // Find out text direction from ancestors of final top candidate. + var ancestors = [parentOfTopCandidate, topCandidate].concat( + this._getNodeAncestors(parentOfTopCandidate), + ); + this._someNode(ancestors, function (ancestor) { + if (!ancestor.tagName) return false; + var articleDir = ancestor.getAttribute("dir"); + if (articleDir) { + this._articleDir = articleDir; + return true; + } + return false; + }); + return articleContent; + } + } + }, + + /** + * Check whether the input string could be a byline. 
+ * This verifies that the input is a string, and that the length + * is less than 100 chars. + * + * @param possibleByline {string} - a string to check whether its a byline. + * @return Boolean - whether the input string is a byline. + */ + _isValidByline: function (byline) { + if (typeof byline == "string" || byline instanceof String) { + byline = byline.trim(); + return byline.length > 0 && byline.length < 100; + } + return false; + }, + + /** + * Converts some of the common HTML entities in string to their corresponding characters. + * + * @param str {string} - a string to unescape. + * @return string without HTML entity. + */ + _unescapeHtmlEntities: function (str) { + if (!str) { + return str; + } + + var htmlEscapeMap = this.HTML_ESCAPE_MAP; + return str + .replace(/&(quot|amp|apos|lt|gt);/g, function (_, tag) { + return htmlEscapeMap[tag]; + }) + .replace( + /&#(?:x([0-9a-z]{1,4})|([0-9]{1,4}));/gi, + function (_, hex, numStr) { + var num = parseInt(hex || numStr, hex ? 16 : 10); + return String.fromCharCode(num); + }, + ); + }, + + /** + * Try to extract metadata from JSON-LD object. + * For now, only Schema.org objects of type Article or its subtypes are supported. + * @return Object with any metadata that could be extracted (possibly none) + */ + _getJSONLD: function (doc) { + var scripts = this._getAllNodesWithTag(doc, ["script"]); + + var metadata; + + this._forEachNode(scripts, function (jsonLdElement) { + if ( + !metadata && + jsonLdElement.getAttribute("type") === "application/ld+json" + ) { + try { + // Strip CDATA markers if present + var content = jsonLdElement.textContent.replace( + /^\s*\s*$/g, + "", + ); + var parsed = JSON.parse(content); + if ( + !parsed["@context"] || + !parsed["@context"].match(/^https?\:\/\/schema\.org$/) + ) { + return; + } + + if (!parsed["@type"] && Array.isArray(parsed["@graph"])) { + parsed = parsed["@graph"].find(function (it) { + return (it["@type"] || "").match(this.REGEXPS.jsonLdArticleTypes); + }); + } + + if ( + !parsed || + !parsed["@type"] || + !parsed["@type"].match(this.REGEXPS.jsonLdArticleTypes) + ) { + return; + } + + metadata = {}; + + if ( + typeof parsed.name === "string" && + typeof parsed.headline === "string" && + parsed.name !== parsed.headline + ) { + // we have both name and headline element in the JSON-LD. They should both be the same but some websites like aktualne.cz + // put their own name into "name" and the article title to "headline" which confuses Readability. So we try to check if either + // "name" or "headline" closely matches the html title, and if so, use that one. If not, then we use "name" by default. 
+ + var title = this._getArticleTitle(); + var nameMatches = this._textSimilarity(parsed.name, title) > 0.75; + var headlineMatches = + this._textSimilarity(parsed.headline, title) > 0.75; + + if (headlineMatches && !nameMatches) { + metadata.title = parsed.headline; + } else { + metadata.title = parsed.name; + } + } else if (typeof parsed.name === "string") { + metadata.title = parsed.name.trim(); + } else if (typeof parsed.headline === "string") { + metadata.title = parsed.headline.trim(); + } + if (parsed.author) { + if (typeof parsed.author.name === "string") { + metadata.byline = parsed.author.name.trim(); + } else if ( + Array.isArray(parsed.author) && + parsed.author[0] && + typeof parsed.author[0].name === "string" + ) { + metadata.byline = parsed.author + .filter(function (author) { + return author && typeof author.name === "string"; + }) + .map(function (author) { + return author.name.trim(); + }) + .join(", "); + } + } + if (typeof parsed.description === "string") { + metadata.excerpt = parsed.description.trim(); + } + if (parsed.publisher && typeof parsed.publisher.name === "string") { + metadata.siteName = parsed.publisher.name.trim(); + } + return; + } catch (err) { + this.log(err.message); + } + } + }); + return metadata ? metadata : {}; + }, + + /** + * Attempts to get excerpt and byline metadata for the article. + * + * @param {Object} jsonld — object containing any metadata that + * could be extracted from JSON-LD object. + * + * @return Object with optional "excerpt" and "byline" properties + */ + _getArticleMetadata: function (jsonld) { + var metadata = {}; + var values = {}; + var metaElements = this._doc.getElementsByTagName("meta"); + + // property is a space-separated list of values + var propertyPattern = + /\s*(dc|dcterm|og|twitter)\s*:\s*(author|creator|description|title|site_name)\s*/gi; + + // name is a single value + var namePattern = + /^\s*(?:(dc|dcterm|og|twitter|weibo:(article|webpage))\s*[\.:]\s*)?(author|creator|description|title|site_name)\s*$/i; + + // Find description tags. + this._forEachNode(metaElements, function (element) { + var elementName = element.getAttribute("name"); + var elementProperty = element.getAttribute("property"); + var content = element.getAttribute("content"); + if (!content) { + return; + } + var matches = null; + var name = null; + + if (elementProperty) { + matches = elementProperty.match(propertyPattern); + if (matches) { + // Convert to lowercase, and remove any whitespace + // so we can match below. + name = matches[0].toLowerCase().replace(/\s/g, ""); + // multiple authors + values[name] = content.trim(); + } + } + if (!matches && elementName && namePattern.test(elementName)) { + name = elementName; + if (content) { + // Convert to lowercase, remove any whitespace, and convert dots + // to colons so we can match below. 
+ name = name.toLowerCase().replace(/\s/g, "").replace(/\./g, ":"); + values[name] = content.trim(); + } + } + }); + + // get title + metadata.title = + jsonld.title || + values["dc:title"] || + values["dcterm:title"] || + values["og:title"] || + values["weibo:article:title"] || + values["weibo:webpage:title"] || + values["title"] || + values["twitter:title"]; + + if (!metadata.title) { + metadata.title = this._getArticleTitle(); + } + + // get author + metadata.byline = + jsonld.byline || + values["dc:creator"] || + values["dcterm:creator"] || + values["author"]; + + // get description + metadata.excerpt = + jsonld.excerpt || + values["dc:description"] || + values["dcterm:description"] || + values["og:description"] || + values["weibo:article:description"] || + values["weibo:webpage:description"] || + values["description"] || + values["twitter:description"]; + + // get site name + metadata.siteName = jsonld.siteName || values["og:site_name"]; + + // in many sites the meta value is escaped with HTML entities, + // so here we need to unescape it + metadata.title = this._unescapeHtmlEntities(metadata.title); + metadata.byline = this._unescapeHtmlEntities(metadata.byline); + metadata.excerpt = this._unescapeHtmlEntities(metadata.excerpt); + metadata.siteName = this._unescapeHtmlEntities(metadata.siteName); + + return metadata; + }, + + /** + * Check if node is image, or if node contains exactly only one image + * whether as a direct child or as its descendants. + * + * @param Element + **/ + _isSingleImage: function (node) { + if (node.tagName === "IMG") { + return true; + } + + if (node.children.length !== 1 || node.textContent.trim() !== "") { + return false; + } + + return this._isSingleImage(node.children[0]); + }, + + /** + * Find all