Merge pull request #17 from ApeWorX/feat/ninjagod/claude
feat: added claude
Ninjagod1251 authored Apr 10, 2024
2 parents f7c2212 + c46afb7 commit 3bbfa8b
Showing 3 changed files with 47 additions and 27 deletions.
44 changes: 25 additions & 19 deletions bot.py
@@ -4,15 +4,19 @@
from threading import Lock
from telegram import Update, ParseMode
from telegram.ext import Updater, CommandHandler, CallbackContext, MessageHandler, Filters
import openai
import requests
import anthropic


# Load API keys and the Telegram token from environment variables or direct string assignment
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
CLAUDE_KEY = os.getenv('CLAUDE_KEY')
TELEGRAM_TOKEN = os.getenv('TELEGRAM_TOKEN')

# Initialize OpenAI client
openai.api_key = OPENAI_API_KEY
# Initialize Claude client
client = anthropic.Anthropic(
# defaults to os.environ.get("ANTHROPIC_API_KEY")
api_key=CLAUDE_KEY
)

# Initialize a lock for thread-safe file writing
file_lock = Lock()
@@ -120,21 +124,26 @@ def preaudit(update: Update, context: CallbackContext) -> None:
"role": "user",
"content": prompt
},
{
"role": "assistant",
"content": ":"
},
{
"role": "user",
"content": code_content
}
]

openai_response = openai.chat.completions.create(
model="gpt-4-turbo",
response = client.messages.create(
model="claude-3-opus-20240229",
max_tokens=4000,
temperature=0,
messages=messages,
messages=messages
)

bot_response = openai_response.choices[0].message.content
bot_response = response.content[0].text
# Split the message into chunks that fit Telegram's 4096-character limit
max_length = 4096
max_length = 4000
messages = [bot_response[i:i+max_length] for i in range(0, len(bot_response), max_length)]
for msg in messages:
update.message.reply_text(msg)
@@ -198,13 +207,14 @@ def handle_message(update: Update, context: CallbackContext) -> None:
})

try:
response = openai.chat.completions.create(
model="gpt-4-1106-preview",
response = client.messages.create(
model="claude-3-opus-20240229",
max_tokens=4000,
temperature=0,
messages=messages,
messages=messages
)

bot_response = response.choices[0].message.content
bot_response = response.content[0].text
# Split the message into chunks of 4096 characters
max_length = 4096
messages = [bot_response[i:i+max_length] for i in range(0, len(bot_response), max_length)]
@@ -215,12 +225,8 @@ def handle_message(update: Update, context: CallbackContext) -> None:
if not admins.get(str(update.message.from_user.id)):
groups[group_id]['messages_today'] += 1
save_data()
except openai.error.OpenAIError as e:
# Log the error for debugging purposes
context.logger.error(f"OpenAIError: {e}")

# Inform the user that the service is currently unavailable
update.message.reply_text(f"OpenAIError: {e}")
except Exception as e:
context.logger.error(f"Claude Error: {e}")



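For reference, a minimal sketch of the call pattern bot.py moves to in this commit: the Anthropic Messages API plus chunking replies to Telegram's 4096-character limit. The helper names (ask_claude, chunk_for_telegram) and the anthropic.APIError handler are illustrative assumptions on my part; the commit itself only catches a bare Exception and reads the same CLAUDE_KEY environment variable shown above.

import os
import anthropic

# Same environment variable the commit reads
client = anthropic.Anthropic(api_key=os.getenv("CLAUDE_KEY"))

def ask_claude(messages):
    # The Messages API expects roles to alternate, starting with "user"
    response = client.messages.create(
        model="claude-3-opus-20240229",
        max_tokens=4000,
        temperature=0,
        messages=messages,
    )
    return response.content[0].text

def chunk_for_telegram(text, max_length=4096):
    # Telegram rejects messages longer than 4096 characters
    return [text[i:i + max_length] for i in range(0, len(text), max_length)]

try:
    reply = ask_claude([{"role": "user", "content": "what is apeworx"}])
    for chunk in chunk_for_telegram(reply):
        print(chunk)
except anthropic.APIError as e:
    # anthropic.APIError is the SDK's base error type; the commit logs and
    # swallows a generic Exception instead
    print(f"Claude Error: {e}")
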
28 changes: 21 additions & 7 deletions request.py
@@ -1,33 +1,47 @@
import os
import openai
import anthropic

openai.api_key = os.getenv('OPENAI_API_KEY')
CLAUDE_KEY = os.getenv('CLAUDE_KEY')

client = anthropic.Anthropic(
# defaults to os.environ.get("ANTHROPIC_API_KEY")
api_key=CLAUDE_KEY
)

knowledge_base = ''
with open('knowledge-base.txt', 'r', encoding="utf-8") as file:
knowledge_base = file.read()

response = openai.chat.completions.create(
model="gpt-4-1106-preview",
response = client.messages.create(
model="claude-3-opus-20240229",
max_tokens = 4000,
temperature=0,
messages=[
{
"role": "user",
# example for ygenius
"content": "default instructions here"
"content": "Answer the question using the knowledge base"
},
{
"role": "assistant",
"content": ":"
},
{
"role": "user",
"content": knowledge_base
},
{
"role": "assistant",
"content": ":"
},
{
"role": "user",
# add your question here
"content": "question here"
"content": "what is apeworx"
}
],
)

bot_response = response.choices[0].message.content
bot_response = response.content[0].text

print(bot_response)
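
The ":" assistant turns in request.py exist because the Messages API requires user and assistant messages to strictly alternate, so the knowledge base and the question cannot be sent as back-to-back user turns. A variant sketch (my own rearrangement, not part of this commit) passes the knowledge base through the system parameter, which the Anthropic SDK also supports:

import os
import anthropic

client = anthropic.Anthropic(api_key=os.getenv("CLAUDE_KEY"))

with open("knowledge-base.txt", "r", encoding="utf-8") as file:
    knowledge_base = file.read()

response = client.messages.create(
    model="claude-3-opus-20240229",
    max_tokens=4000,
    temperature=0,
    # The knowledge base rides along as a system prompt instead of a user turn
    system=f"Answer the question using this knowledge base:\n{knowledge_base}",
    messages=[{"role": "user", "content": "what is apeworx"}],
)

print(response.content[0].text)

Either shape works; the system-prompt form just avoids the placeholder assistant messages.
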
2 changes: 1 addition & 1 deletion requirements.txt
@@ -1,4 +1,4 @@
openai==1.1.1
anthropic
python-telegram-bot==13.7
PyYAML==6.0.1
requests
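
Note that the new anthropic entry is left unpinned while the other dependencies are pinned. If reproducible installs matter, pinning it would keep the file consistent; the version below is an assumption for illustration, not taken from the commit:

anthropic==0.21.3  # example pin only; use whichever release was actually tested
python-telegram-bot==13.7
PyYAML==6.0.1
requests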
