Skip to content

Commit

Permalink
Merge pull request #88 from SylphAI-Inc/li
Browse files Browse the repository at this point in the history
[Improve doc deployment]
  • Loading branch information
liyin2015 authored Jul 6, 2024
2 parents caed402 + d264c54 commit 86c8bf1
Show file tree
Hide file tree
Showing 15 changed files with 1,763 additions and 2,743 deletions.
18 changes: 10 additions & 8 deletions .github/workflows/documentation.yml
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ name: Documentation
on:
push:
branches:
- release # Trigger the workflow when changes are pushed to the release branch
- li # Trigger the workflow when changes are pushed to the li branch

permissions:
contents: write
Expand All @@ -29,22 +29,23 @@ jobs:
curl -sSL https://install.python-poetry.org | python3 -
echo "$HOME/.local/bin" >> $GITHUB_PATH # Ensure Poetry's bin directory is in PATH
- name: Install dependencies using Poetry
- name: Install documentation dependencies using Poetry
run: |
cd docs
poetry config virtualenvs.create false # Avoid creating a virtual environment
poetry install # Install dependencies as specified in pyproject.toml
poetry install # Install only the doc dependencies as specified in pyproject.toml
- name: Build documentation using Makefile
run: |
echo "Building documentation from: $(pwd)"
ls -l # Debug: List current directory contents
poetry run make -C docs html # Run Makefile in docs directory to build HTML docs
working-directory: ${{ github.workspace }}
poetry run make html # Run Makefile in docs directory to build HTML docs
working-directory: ${{ github.workspace }}/docs

- name: List built documentation
run: |
find ./build/ -type f # List all files in the build directory
working-directory: ${{ github.workspace }}/docs
find ./docs/build/ -type f # List all files in the build directory
working-directory: ${{ github.workspace }}

- name: Create .nojekyll file
run: |
Expand All @@ -53,7 +54,8 @@ jobs:

- name: Copy CNAME file
run: |
cp ${{ github.workspace }}/CNAME ${{ github.workspace }}/docs/build/CNAME
cp ${{ github.workspace }}/CNAME ${{ github.workspace }}/docs/build/CNAME || true
working-directory: ${{ github.workspace }}

- name: Deploy to GitHub Pages
uses: peaceiris/actions-gh-pages@v3
Expand Down
1 change: 1 addition & 0 deletions _lightrag/lightrag/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
"""Dummy file to make this directory a package."""
1 change: 1 addition & 0 deletions docs/_dummy/dummy/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
"""Dummy file to make this directory a package."""
2,037 changes: 1,561 additions & 476 deletions docs/poetry.lock

Large diffs are not rendered by default.

111 changes: 16 additions & 95 deletions docs/pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,106 +1,27 @@
# [tool.poetry]
# name = "lightrag"

# packages = [
# { include = "core", from = "." },
# { include = "components", from = "." },
# { include = "prompts", from = "." },
# { include = "eval", from = "." },
# { include = "utils", from = "." },
# { include = "tracing", from = "." },
# ]
# version = "0.1.0"
# description = "The 'PyTorch' library for LLM applications. RAG=Retriever-Agent-Generator."
# authors = ["Li Yin <[email protected]>"]
# readme = "README.md"
# license = "MIT"
# classifiers = [
# "Topic :: Software Development :: Build Tools",
# "Topic :: Software Development :: Libraries :: Python Modules",
# ]

# # [[tool.poetry.packages]]
# # include = "lightrag"

# [tool.poetry.dependencies]
# python = ">=3.11, <4.0"
# python-dotenv = "^1.0.1"
# backoff = "^2.2.1"
# jinja2 = "^3.1.3"
# # TODO: decide if we need people to install faiss, or openai, or groq separately
# openai = "^1.12.0"
# groq = "^0.5.0" # should only be installed if groq client is used
# faiss-cpu = "^1.8.0"
# torchviz = "^0.0.2"
# matplotlib = "^3.8.4"
# colorama = "^0.4.6"
# jsonlines = "^4.0.0"


# [tool.poetry.group.test.dependencies]
# pytest = "^8.1.1"
# pytest-mock = "^3.14.0"

# [tool.poetry.group.typing.dependencies]
# mypy = "^1"

# [tool.poetry.group.doc.dependencies]
# datasets = ">=2.14.6, <=2.19.1"
# sphinx = "^7.3.7"
# sphinx-rtd-theme = "^2.0.0"
# pydata-sphinx-theme = "0.15.2"
# sphinx-design = "^0.6.0"


# [tool.poetry.group.dev.dependencies]
# langchain = "^0.1.16"
# llama-index = "^0.10.30"
# pre-commit = "^3.7.0"
# litellm = "^1.35.34"
# haystack-ai = "^2.0.1"
# torchvision = "^0.18.0"
# torch = "^2.3.0"
# langsmith = "^0.1.56"
# langchain-openai = "^0.1.6"
# pyvis = "^0.3.2"
# llama-index-llms-ollama = "^0.1.3"
# anthropic = "^0.26.0"
# google-generativeai = "^0.5.4"
# transformers = "^4.41.0"
# torchmetrics = "^1.4.0.post0"
# lightning = "^2.2.4"
# dspy-ai = "^2.4.9"
# jupyter = "^1.0.0"
# ipykernel = "^6.29.4"
# colorama = "^0.4.6"


# [tool.ruff]
# exclude = ["images"]


# [build-system]
# requires = ["poetry-core"]
# build-backend = "poetry.core.masonry.api"

[tool.poetry]
name = "lightrag-docs"
version = "0.1.0"
description = "A project to develop and test the lightrag library"
description = "Documentation for the lightrag project"
authors = ["Your Name <[email protected]>"]
license = "MIT"

packages = [{ include = "../docs", from = "." }]

packages = [{ from = "_dummy", include = "dummy" }] # empty packages

[tool.poetry.dependencies]
python = ">=3.11, <4.0"
lightrag = { path = "../lightrag", develop = true }


[tool.poetry.group.dev.dependencies]
torch = "^2.3.0"

lightrag = "^0.0.0a13" # Assuming this is the main library you are developing


[tool.poetry.group.doc.dependencies]
pydata-sphinx-theme = "^0.15.3"
sphinx-design = "^0.6.0"
sphinx-copybutton = "^0.5.2"
sphinx = "^7.3.7"
nbsphinx = "^0.9.4"
nbconvert = "^7.16.4"
pandoc = "^2.3"
readthedocs-sphinx-search = "^0.3.2"
sqlalchemy = "^2.0.31"
google-generativeai = "^0.7.1"

[build-system]
requires = ["poetry-core>=1.0.0"]
Expand Down
29 changes: 19 additions & 10 deletions docs/source/index.rst
Original file line number Diff line number Diff line change
Expand Up @@ -7,16 +7,6 @@
:alt: LightRAG Logo


.. raw:: html

<h1 style="text-align: center; font-size: 2em; margin-top: 10px;">⚡ The PyTorch Library for Large Language Model Applications ⚡</h1>

*LightRAG* helps developers with both building and optimizing *Retriever-Agent-Generator (RAG)* pipelines.
It is *light*, *modular*, and *robust*.




.. |License| image:: https://img.shields.io/github/license/SylphAI-Inc/LightRAG
:target: https://opensource.org/license/MIT

Expand Down Expand Up @@ -44,6 +34,25 @@ It is *light*, *modular*, and *robust*.
<a href="https://discord.gg/zt2mTPcu"><img src="https://dcbadge.vercel.app/api/server/zt2mTPcu?compact=true&style=flat" alt="Discord"></a>
</div>

.. raw:: html

<h1 style="text-align: center; font-size: 2em; margin-top: 10px;">⚡ The PyTorch Library for Large Language Model Applications ⚡</h1>

<div style="text-align: center;">
<p>
<em>LightRAG</em> helps developers with both building and optimizing <em>Retriever-Agent-Generator (RAG)</em> pipelines.<br>
It is <em>light</em>, <em>modular</em>, and <em>robust</em>.
</p>
</div>

.. *LightRAG* helps developers with both building and optimizing *Retriever-Agent-Generator (RAG)* pipelines.
.. It is *light*, *modular*, and *robust*.
.. grid:: 1
:gutter: 1

Expand Down
14 changes: 10 additions & 4 deletions docs/source/insert_autosummary.py
Original file line number Diff line number Diff line change
Expand Up @@ -80,10 +80,16 @@ def generate_autosummary_docs(src_dir, dest_dir):
print(f"module_full_name: {module_full_name}")
module_dir = dest_dir
# spec and load the module
spec = importlib.util.spec_from_file_location(module_full_name, code_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
generate_rst_for_module(module_full_name, module, module_dir)
try:
spec = importlib.util.spec_from_file_location(
module_full_name, code_path
)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
generate_rst_for_module(module_full_name, module, module_dir)
except Exception as e:
print(f"Error loading module {module_full_name}: {e}")
continue


if __name__ == "__main__":
Expand Down
19 changes: 11 additions & 8 deletions lightrag/lightrag/components/model_client/anthropic_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,17 @@
import backoff
import logging

import anthropic

from lightrag.core.model_client import ModelClient
from lightrag.core.types import ModelType

# optional import
from lightrag.utils.lazy_import import safe_import, OptionalPackages

anthropic = safe_import(
OptionalPackages.ANTHROPIC.value[0], OptionalPackages.ANTHROPIC.value[1]
)

from anthropic import (
RateLimitError,
APITimeoutError,
Expand All @@ -15,15 +25,8 @@
)
from anthropic.types import Message


from lightrag.core.model_client import ModelClient
from lightrag.core.types import ModelType
from lightrag.utils.lazy_import import safe_import, OptionalPackages

log = logging.getLogger(__name__)

safe_import(OptionalPackages.ANTHROPIC.value[0], OptionalPackages.ANTHROPIC.value[1])


def get_first_message_content(completion: Message) -> str:
r"""When we only need the content of the first message.
Expand Down
3 changes: 1 addition & 2 deletions lightrag/lightrag/components/model_client/cohere_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,7 @@
import backoff
from lightrag.utils.lazy_import import safe_import, OptionalPackages

safe_import(OptionalPackages.COHERE.value[0], OptionalPackages.COHERE.value[1])
import cohere
cohere = safe_import(OptionalPackages.COHERE.value[0], OptionalPackages.COHERE.value[1])
from cohere import (
BadRequestError,
InternalServerError,
Expand Down
19 changes: 9 additions & 10 deletions lightrag/lightrag/components/model_client/google_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,24 +5,23 @@
import backoff


import google.generativeai as genai
from google.api_core.exceptions import (
InternalServerError,
BadRequest,
GoogleAPICallError,
)
from google.generativeai.types import GenerateContentResponse


from lightrag.core.model_client import ModelClient
from lightrag.core.types import ModelType

from lightrag.utils.lazy_import import safe_import, OptionalPackages

safe_import(
# optional import
google = safe_import(
OptionalPackages.GOOGLE_GENERATIVEAI.value[0],
OptionalPackages.GOOGLE_GENERATIVEAI.value[1],
)
import google.generativeai as genai
from google.api_core.exceptions import (
InternalServerError,
BadRequest,
GoogleAPICallError,
)
from google.generativeai.types import GenerateContentResponse


class GoogleGenAIClient(ModelClient):
Expand Down
14 changes: 7 additions & 7 deletions lightrag/lightrag/components/model_client/groq_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,14 @@
import os
from typing import Dict, Sequence, Optional, Any
import backoff
from lightrag.core.model_client import ModelClient
from lightrag.core.types import ModelType


from lightrag.utils.lazy_import import safe_import, OptionalPackages

# optional import
groq = safe_import(OptionalPackages.GROQ.value[0], OptionalPackages.GROQ.value[1])

from groq import Groq, AsyncGroq
from groq import (
Expand All @@ -14,13 +21,6 @@
)


from lightrag.core.model_client import ModelClient
from lightrag.core.types import ModelType
from lightrag.utils.lazy_import import safe_import, OptionalPackages

safe_import(OptionalPackages.GROQ.value[0], OptionalPackages.GROQ.value[1])


class GroqAPIClient(ModelClient):
__doc__ = r"""A component wrapper for the Groq API client.
Expand Down
26 changes: 12 additions & 14 deletions lightrag/lightrag/components/model_client/openai_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,32 +4,30 @@
from typing import Dict, Sequence, Optional, List, Any, TypeVar, Callable

import logging

from openai import OpenAI, AsyncOpenAI
from openai import (
APITimeoutError,
InternalServerError,
RateLimitError,
UnprocessableEntityError,
BadRequestError,
)

from lightrag.utils.lazy_import import safe_import, OptionalPackages
import backoff


from lightrag.core.model_client import ModelClient
from lightrag.core.types import ModelType, EmbedderOutput, TokenLogProb
from lightrag.components.model_client.utils import parse_embedding_response

# optional import
from lightrag.utils.lazy_import import safe_import, OptionalPackages

safe_import(OptionalPackages.OPENAI.value[0], OptionalPackages.OPENAI.value[1])

openai = safe_import(OptionalPackages.OPENAI.value[0], OptionalPackages.OPENAI.value[1])

from openai import OpenAI, AsyncOpenAI
from openai import (
APITimeoutError,
InternalServerError,
RateLimitError,
UnprocessableEntityError,
BadRequestError,
)
from openai.types import Completion, CreateEmbeddingResponse


import backoff

log = logging.getLogger(__name__)
T = TypeVar("T")

Expand Down
Loading

0 comments on commit 86c8bf1

Please sign in to comment.