Skip to content

Commit

Permalink
Merge pull request #195 from raspawar/raspawar/nvidia_integration
Browse files Browse the repository at this point in the history
NVIDIA NIM CrewAI Integration
  • Loading branch information
bhancockio authored Dec 3, 2024
2 parents f5c5b29 + 8069997 commit e6bc346
Show file tree
Hide file tree
Showing 21 changed files with 834 additions and 0 deletions.
2 changes: 2 additions & 0 deletions nvidia_models/intro/.env.example
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
NVIDIA_API_KEY=
MODEL=meta/llama-3.1-8b-instruct
6 changes: 6 additions & 0 deletions nvidia_models/intro/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
.env
.DS_Store
__pycache__
.venv
poetry.lock
.ruff_cache
40 changes: 40 additions & 0 deletions nvidia_models/intro/Makefile
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
.PHONY: all format lint test tests integration_tests help

# Default target executed when no arguments are given to make.
all: help

install: ## Install the poetry environment and dependencies
	poetry install --no-root

clean: ## Clean up cache directories and build artifacts
	find . -type d -name "__pycache__" -exec rm -rf {} +
	# .pyc files are regular files, not directories: use -type f (the
	# original -type d variant never matched anything).
	find . -type f -name "*.pyc" -delete
	find . -type d -name ".ruff_cache" -exec rm -rf {} +
	find . -type d -name ".pytest_cache" -exec rm -rf {} +
	# .coverage is a regular file as well.
	find . -type f -name ".coverage" -delete
	rm -rf dist/
	rm -rf build/

######################
# LINTING AND FORMATTING
######################

# Define a variable for Python and notebook files.
PYTHON_FILES=.
MYPY_CACHE=.mypy_cache
lint: ## Run code quality tools
	poetry run ruff check $(PYTHON_FILES)
	poetry run ruff format $(PYTHON_FILES) --check

format: ## Format code using ruff
	poetry run ruff format $(PYTHON_FILES)
	poetry run ruff check $(PYTHON_FILES) --fix

######################
# HELP
######################

help:
	@echo '----'
	@echo 'install - install poetry environment and dependencies'
	@echo 'clean - remove caches and build artifacts'
	@echo 'format - run code formatters'
	@echo 'lint - run linters'
15 changes: 15 additions & 0 deletions nvidia_models/intro/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
# AI Crew using NVIDIA NIM Endpoint

## Introduction
This is a simple example using the CrewAI framework with an NVIDIA endpoint and langchain-nvidia-ai-endpoints integration.

## Running the Script
This example uses the NVIDIA NIM API (via langchain-nvidia-ai-endpoints) to call a model.

- **Configure Environment**: Set NVIDIA_API_KEY to appropriate api key.
Set MODEL to select appropriate model
- **Install Dependencies**: Run `make install`.
- **Execute the Script**: Run `python main.py` to kick off the crew and print the research task's output.

## Details & Explanation
- **Running the Script**: Execute `python main.py`. The script will leverage the CrewAI framework to run the research task and print the crew's result.
152 changes: 152 additions & 0 deletions nvidia_models/intro/main.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,152 @@
import logging
import os
from typing import Any, Dict, List, Optional, Union

import litellm
from crewai import LLM, Agent, Crew, Process, Task
from crewai.utilities.exceptions.context_window_exceeding_exception import (
LLMContextLengthExceededException,
)
from dotenv import load_dotenv
from langchain_nvidia_ai_endpoints import ChatNVIDIA

load_dotenv()


class nvllm(LLM):
    """CrewAI ``LLM`` adapter that routes completion calls through a
    langchain ``ChatNVIDIA`` client (NVIDIA NIM endpoints).

    The lowercase class name is kept as-is for backward compatibility
    with existing callers.
    """

    def __init__(
        self,
        llm: ChatNVIDIA,
        model_str: str,
        timeout: Optional[Union[float, int]] = None,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        n: Optional[int] = None,
        stop: Optional[Union[str, List[str]]] = None,
        max_completion_tokens: Optional[int] = None,
        max_tokens: Optional[int] = None,
        presence_penalty: Optional[float] = None,
        frequency_penalty: Optional[float] = None,
        logit_bias: Optional[Dict[int, float]] = None,
        response_format: Optional[Dict[str, Any]] = None,
        seed: Optional[int] = None,
        logprobs: Optional[bool] = None,
        top_logprobs: Optional[int] = None,
        base_url: Optional[str] = None,
        api_version: Optional[str] = None,
        api_key: Optional[str] = None,
        callbacks: Optional[List[Any]] = None,  # fixed annotation: was `List[Any] = None`
        **kwargs,
    ):
        """Store generation parameters and register litellm callbacks.

        Args:
            llm: Pre-configured ``ChatNVIDIA`` client used for every call.
            model_str: Model identifier exposed to crewai/litellm, e.g.
                ``"nvidia_nim/meta/llama-3.1-8b-instruct"``.
            callbacks: Optional litellm callback handlers; defaults to an
                empty list (never a shared mutable default).
            The remaining keyword arguments mirror litellm completion
            parameters and are forwarded on each call.
        """
        # NOTE(review): deliberately does not call super().__init__();
        # this adapter manages all attributes itself — confirm against the
        # installed crewai ``LLM`` base class.
        self.model = model_str
        self.timeout = timeout
        self.temperature = temperature
        self.top_p = top_p
        self.n = n
        self.stop = stop
        self.max_completion_tokens = max_completion_tokens
        self.max_tokens = max_tokens
        self.presence_penalty = presence_penalty
        self.frequency_penalty = frequency_penalty
        self.logit_bias = logit_bias
        self.response_format = response_format
        self.seed = seed
        self.logprobs = logprobs
        self.top_logprobs = top_logprobs
        self.base_url = base_url
        self.api_version = api_version
        self.api_key = api_key
        self.kwargs = kwargs
        self.llm = llm
        # Normalize None to a fresh list so every instance owns its own
        # callback list.
        self.callbacks = [] if callbacks is None else callbacks
        self.set_callbacks(self.callbacks)

    def call(
        self,
        messages: List[Dict[str, str]],
        callbacks: Optional[List[Any]] = None,
    ) -> str:
        """Send chat ``messages`` to the NVIDIA endpoint and return the
        response text.

        Args:
            messages: Chat messages in the usual ``{"role", "content"}`` form.
            callbacks: Optional per-call litellm callbacks; when provided,
                they replace the currently registered handlers.

        Returns:
            The model's reply content as a string.

        Raises:
            Exception: Re-raises whatever ``ChatNVIDIA.invoke`` raised,
                after logging non-context-window errors.
        """
        if callbacks:  # re-register only when new handlers are supplied
            self.set_callbacks(callbacks)

        try:
            params = {
                "model": self.llm.model,
                "input": messages,
                "timeout": self.timeout,
                "temperature": self.temperature,
                "top_p": self.top_p,
                "n": self.n,
                "stop": self.stop,
                "max_tokens": self.max_tokens or self.max_completion_tokens,
                "presence_penalty": self.presence_penalty,
                "frequency_penalty": self.frequency_penalty,
                "logit_bias": self.logit_bias,
                "response_format": self.response_format,
                "seed": self.seed,
                "logprobs": self.logprobs,
                "top_logprobs": self.top_logprobs,
                "api_key": self.api_key,
                **self.kwargs,
            }

            response = self.llm.invoke(**params)
            return response.content
        except Exception as e:
            # Context-window errors are handled upstream by crewai; only
            # log everything else. Lazy %-style args avoid building the
            # message when logging is disabled.
            if not LLMContextLengthExceededException(str(e))._is_context_limit_error(
                str(e)
            ):
                logging.error("NVIDIA NIM call failed: %s", e)

            raise  # Re-raise the exception after logging

    def set_callbacks(self, callbacks: List[Any]):
        """Register ``callbacks`` with litellm, replacing any previously
        registered handlers of the same types."""
        callback_types = [type(callback) for callback in callbacks]
        # Iterate over copies: we mutate the litellm lists while scanning.
        for callback in litellm.success_callback[:]:
            if type(callback) in callback_types:
                litellm.success_callback.remove(callback)

        for callback in litellm._async_success_callback[:]:
            if type(callback) in callback_types:
                litellm._async_success_callback.remove(callback)

        litellm.callbacks = callbacks


# Model id used for both the langchain client and the crewai LLM name;
# the default matches .env.example.
model = os.environ.get("MODEL", "meta/llama-3.1-8b-instruct")

# Mirror the NVIDIA key under the name litellm expects for NIM *before*
# any client is constructed. Fail fast with a clear error instead of the
# opaque TypeError that `os.environ[...] = None` raises when the key is
# missing.
_api_key = os.environ.get("NVIDIA_API_KEY")
if _api_key is None:
    raise RuntimeError(
        "NVIDIA_API_KEY is not set; copy .env.example to .env and fill it in."
    )
os.environ["NVIDIA_NIM_API_KEY"] = _api_key

llm = ChatNVIDIA(model=model)
default_llm = nvllm(model_str="nvidia_nim/" + model, llm=llm)

# Create a researcher agent
researcher = Agent(
    role="Senior Researcher",
    goal="Discover groundbreaking technologies",
    verbose=True,
    llm=default_llm,
    backstory=(
        "A curious mind fascinated by cutting-edge innovation and the potential "
        "to change the world, you know everything about tech."
    ),
)

# Task for the researcher
research_task = Task(
    description="Identify the next big trend in AI",
    agent=researcher,  # Assigning the task to the researcher
    expected_output="Data Insights",
)


# Instantiate your crew
tech_crew = Crew(
    agents=[researcher],
    tasks=[research_task],
    process=Process.sequential,  # Tasks will be executed one after the other
)

# Begin the task execution
tech_crew.kickoff()
38 changes: 38 additions & 0 deletions nvidia_models/intro/pyproject.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
# Poetry project metadata for the NVIDIA NIM + CrewAI intro example.
[tool.poetry]
name = "nvidia-intro-crewai-example"
version = "0.1.0"
description = ""
authors = ["raspawar <[email protected]>"]

# Runtime dependencies; python is capped below 3.12 — presumably for
# crewai/litellm compatibility at the time of writing (TODO confirm).
[tool.poetry.dependencies]
python = ">=3.10.0,<3.12"
python-dotenv = "1.0.0"
litellm = "^1.52.10"
langchain-nvidia-ai-endpoints = "^0.3.5"
crewai = "^0.80.0"

[tool.pyright]
# https://github.com/microsoft/pyright/blob/main/docs/configuration.md
useLibraryCodeForTypes = true
exclude = [".cache"]

# Ruff lint rule selection.
[tool.ruff.lint]
select = [
  "E",  # pycodestyle
  "F",  # pyflakes
  "I",  # isort
  "B",  # flake8-bugbear
  "C4",  # flake8-comprehensions
  "ARG",  # flake8-unused-arguments
  "SIM",  # flake8-simplify
  "T201",  # print
]
# NOTE(review): the W-prefixed rules below are not in `select`, so these
# ignores are inert unless "W" (pycodestyle warnings) is enabled — confirm
# whether "W" was meant to be selected.
ignore = [
  "W291",  # trailing whitespace
  "W292",  # no newline at end of file
  "W293",  # blank line contains whitespace
]

[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"
27 changes: 27 additions & 0 deletions nvidia_models/intro/scripts/check_pydantic.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
#!/bin/bash
#
# Scan a Git repository's tracked files for direct pydantic imports
# ("import pydantic" / "from pydantic") and fail if any are found.
#
# Usage: ./scripts/check_pydantic.sh /path/to/repository

# Require exactly one argument: the repository path.
if [ "$#" -ne 1 ]; then
  echo "Usage: $0 /path/to/repository"
  exit 1
fi

repo_dir="$1"

# Collect every tracked line that imports pydantic directly.
matches=$(git -C "$repo_dir" grep -E '^import pydantic|^from pydantic')

# Non-empty output means at least one offending import exists.
if [ -n "$matches" ]; then
  echo "ERROR: The following lines need to be updated:"
  echo "$matches"
  echo "Please replace the code with an import from langchain_core.pydantic_v1."
  echo "For example, replace 'from pydantic import BaseModel'"
  echo "with 'from langchain_core.pydantic_v1 import BaseModel'"
  exit 1
fi
17 changes: 17 additions & 0 deletions nvidia_models/intro/scripts/lint_imports.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
#!/bin/bash

set -eu

# Count forbidden imports; non-zero means the check fails.
errors=0

# Tracked files must not import from langchain or langchain_experimental.
# (grep exits non-zero on no match, which is the *good* case here, so the
# increment only runs when a match is found.)
if git --no-pager grep '^from langchain\.' .; then
  errors=$((errors + 1))
fi
if git --no-pager grep '^from langchain_experimental\.' .; then
  errors=$((errors + 1))
fi

# Succeed only when no forbidden import was seen.
if [ "$errors" -eq 0 ]; then
  exit 0
fi
exit 1
3 changes: 3 additions & 0 deletions nvidia_models/marketing_strategy/.env.example
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
SERPER_API_KEY=
NVIDIA_API_KEY=
MODEL=meta/llama-3.1-8b-instruct
6 changes: 6 additions & 0 deletions nvidia_models/marketing_strategy/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
.env
.DS_Store
__pycache__
.venv
poetry.lock
.ruff_cache
40 changes: 40 additions & 0 deletions nvidia_models/marketing_strategy/Makefile
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
.PHONY: all format lint test tests integration_tests help

# Default target executed when no arguments are given to make.
all: help

install: ## Install the poetry environment and dependencies
	poetry install --no-root

clean: ## Clean up cache directories and build artifacts
	find . -type d -name "__pycache__" -exec rm -rf {} +
	# .pyc files are regular files, not directories: use -type f (the
	# original -type d variant never matched anything).
	find . -type f -name "*.pyc" -delete
	find . -type d -name ".ruff_cache" -exec rm -rf {} +
	find . -type d -name ".pytest_cache" -exec rm -rf {} +
	# .coverage is a regular file as well.
	find . -type f -name ".coverage" -delete
	rm -rf dist/
	rm -rf build/

######################
# LINTING AND FORMATTING
######################

# Define a variable for Python and notebook files.
PYTHON_FILES=.
MYPY_CACHE=.mypy_cache
lint: ## Run code quality tools
	poetry run ruff check $(PYTHON_FILES)
	poetry run ruff format $(PYTHON_FILES) --check

format: ## Format code using ruff
	poetry run ruff format $(PYTHON_FILES)
	poetry run ruff check $(PYTHON_FILES) --fix

######################
# HELP
######################

help:
	@echo '----'
	@echo 'install - install poetry environment and dependencies'
	@echo 'clean - remove caches and build artifacts'
	@echo 'format - run code formatters'
	@echo 'lint - run linters'
Loading

0 comments on commit e6bc346

Please sign in to comment.