Fixing some errors #157

Open · wants to merge 4 commits into base: main

Changes from all commits
11 changes: 8 additions & 3 deletions AUTOGPT.py
@@ -8,7 +8,7 @@
 from dotenv import load_dotenv
 from pathlib import Path
 from json import JSONDecodeError
-from langchain.experimental.autonomous_agents.autogpt.agent import AutoGPT
+from langchain_experimental.autonomous_agents import AutoGPT
 from FreeLLM import ChatGPTAPI # FREE CHATGPT API
 from FreeLLM import HuggingChatAPI # FREE HUGGINGCHAT API
 from FreeLLM import BingChatAPI # FREE BINGCHAT API
@@ -84,15 +84,20 @@

 elif select_model == "4":
     GB_TOKEN = os.getenv("BARDCHAT_TOKEN", "your-googlebard-token")
+    GB_PSIDTS = os.getenv("BARDCHAT_1PSITS", "your-googlebard-1PSITS")

     if GB_TOKEN != "your-googlebard-token":
         os.environ["BARDCHAT_TOKEN"] = GB_TOKEN
     else:
         raise ValueError(
             "GoogleBard Token EMPTY. Edit the .env file and put your GoogleBard token"
         )
-    cookie_path = os.environ["BARDCHAT_TOKEN"]
-    llm = BardChatAPI.BardChat(cookie=cookie_path)
+    if GB_PSIDTS != "your-googlebard-1PSITS":
+        os.environ["BARDCHAT_1PSITS"] = GB_PSIDTS
+
+    secure_1psid = os.environ["BARDCHAT_TOKEN"]
+    secure_1psidts = os.environ["BARDCHAT_1PSITS"]
+    llm = BardChatAPI.BardChat(secure_1psid=secure_1psid, secure_1psidts=secure_1psidts)


 HF_TOKEN = os.getenv("HUGGINGFACE_TOKEN", "your-huggingface-token")
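A minimal sketch (not part of the PR) of how the two Bard cookies are expected to reach the code above, assuming they are supplied through a .env file loaded with python-dotenv; mapping BARDCHAT_TOKEN to the browser's __Secure-1PSID cookie and BARDCHAT_1PSITS to __Secure-1PSIDTS is an assumption here, inferred from how the values are passed on.

# Illustrative .env contents (placeholders, not real credentials):
#   BARDCHAT_TOKEN=<value of the __Secure-1PSID cookie>
#   BARDCHAT_1PSITS=<value of the __Secure-1PSIDTS cookie>
import os
from dotenv import load_dotenv

load_dotenv()  # AUTOGPT.py already calls this near the top of the file
secure_1psid = os.getenv("BARDCHAT_TOKEN")
secure_1psidts = os.getenv("BARDCHAT_1PSITS")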
11 changes: 8 additions & 3 deletions BABYAGI.py
@@ -17,7 +17,7 @@
 from langchain.vectorstores.base import VectorStore
 from pydantic import BaseModel, Field
 from langchain.chains.base import Chain
-from langchain.experimental import BabyAGI
+from langchain_experimental.autonomous_agents import BabyAGI
 from BabyAgi import BabyAGIMod

 import faiss
@@ -83,15 +83,20 @@

 elif select_model == "4":
     GB_TOKEN = os.getenv("BARDCHAT_TOKEN", "your-googlebard-token")
+    GB_PSIDTS = os.getenv("BARDCHAT_1PSITS", "your-googlebard-1PSITS")

     if GB_TOKEN != "your-googlebard-token":
         os.environ["BARDCHAT_TOKEN"] = GB_TOKEN
     else:
         raise ValueError(
             "GoogleBard Token EMPTY. Edit the .env file and put your GoogleBard token"
         )
-    cookie_path = os.environ["BARDCHAT_TOKEN"]
-    llm = BardChatAPI.BardChat(cookie=cookie_path)
+    if GB_PSIDTS != "your-googlebard-1PSITS":
+        os.environ["BARDCHAT_1PSITS"] = GB_PSIDTS
+
+    secure_1psid = os.environ["BARDCHAT_TOKEN"]
+    secure_1psidts = os.environ["BARDCHAT_1PSITS"]
+    llm = BardChatAPI.BardChat(secure_1psid=secure_1psid, secure_1psidts=secure_1psidts)

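For context, a short sketch (illustrative, not part of the PR) of the import move both files perform; the old dotted paths under langchain.experimental were removed from recent langchain releases, which is why requirements.txt below gains the standalone langchain_experimental package.

# Old locations (no longer available in recent langchain releases):
#   from langchain.experimental.autonomous_agents.autogpt.agent import AutoGPT
#   from langchain.experimental import BabyAGI
# New locations, provided by the separate langchain_experimental package:
from langchain_experimental.autonomous_agents import AutoGPT, BabyAGI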
9 changes: 5 additions & 4 deletions FreeLLM/BardChatAPI.py
@@ -14,7 +14,8 @@
 class BardChat(LLM):

     history_data: Optional[List] = []
-    cookie : Optional[str]
+    secure_1psid : Optional[str]
+    secure_1psidts : Optional[str]
     chatbot : Optional[Chatbot] = None

@@ -28,11 +29,11 @@ async def call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
         #raise ValueError("stop kwargs are not permitted.")
         #cookie is a must check
         if self.chatbot is None:
-            if self.cookie is None:
+            if self.secure_1psid is None or self.secure_1psidts is None:
                 raise ValueError("Need a COOKIE, check https://github.com/acheong08/EdgeGPT/tree/master#getting-authentication-required to get your COOKIE and save it")
             else:
                 #if self.chatbot == None:
-                self.chatbot = Chatbot(self.cookie)
+                self.chatbot = Chatbot(self.secure_1psid, self.secure_1psidts)

         response = self.chatbot.ask(prompt)
         #print(response)
@@ -48,7 +49,7 @@ def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
     @property
     def _identifying_params(self) -> Mapping[str, Any]:
         """Get the identifying parameters."""
-        return {"model": "BardCHAT", "cookie": self.cookie}
+        return {"model": "BardCHAT", "secure_1psid": self.secure_1psid}
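A minimal usage sketch of the updated wrapper (illustrative; the cookie values are placeholders, and the positional Chatbot(secure_1psid, secure_1psidts) call is taken from the diff above rather than verified against the underlying Bard library):

from FreeLLM import BardChatAPI

# Values come from the browser's __Secure-1PSID and __Secure-1PSIDTS cookies
# for bard.google.com (an assumption; the diff only shows the field names).
llm = BardChatAPI.BardChat(
    secure_1psid="<your __Secure-1PSID cookie>",
    secure_1psidts="<your __Secure-1PSIDTS cookie>",
)
print(llm("Hello!"))  # LangChain LLM subclasses are callable with a prompt string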
2 changes: 1 addition & 1 deletion FreeLLM/BingChatAPI.py
@@ -1,4 +1,4 @@
-from EdgeGPT import Chatbot, ConversationStyle
+from EdgeGPT.EdgeGPT import Chatbot, ConversationStyle
 import asyncio

 import requests
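A rough sketch of using the Chatbot class from its new import path, assuming EdgeGPT's async API (Chatbot.create, ask, close) as described in that project's README; the cookie file path and format are placeholders and exact signatures may vary between EdgeGPT versions.

import asyncio
import json
from EdgeGPT.EdgeGPT import Chatbot, ConversationStyle  # new import path

async def demo():
    # cookies.json exported from a logged-in bing.com session (placeholder path)
    with open("./cookies.json") as f:
        cookies = json.load(f)
    bot = await Chatbot.create(cookies=cookies)
    reply = await bot.ask(prompt="Hello", conversation_style=ConversationStyle.creative)
    print(reply)
    await bot.close()

asyncio.run(demo())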
2 changes: 1 addition & 1 deletion FreeLLM/HuggingChatAPI.py
@@ -35,7 +35,7 @@ def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
         cookies = sign.login()

         # Save cookies to usercookies/<email>.json
-        sign.saveCookies()
+        sign.saveCookiesToDir("usercookies/")

         # Create a ChatBot
         self.chatbot = hugchat.ChatBot(cookies=cookies.get_dict())
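A standalone sketch of the same login flow outside the wrapper class (credentials are placeholders; Login, login, saveCookiesToDir, and ChatBot mirror the calls visible above, while chat() is assumed from the hugchat README):

from hugchat import hugchat
from hugchat.login import Login

sign = Login("you@example.com", "your-password")  # placeholder credentials
cookies = sign.login()
sign.saveCookiesToDir("usercookies/")             # replaces the removed saveCookies()

chatbot = hugchat.ChatBot(cookies=cookies.get_dict())
print(chatbot.chat("Hello"))                      # assumed single-prompt entry point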
5 changes: 3 additions & 2 deletions requirements.txt
@@ -1,5 +1,6 @@
 requests
 langchain
+langchain_experimental
 streamlit
 streamlit-chat-media
 numpy
@@ -8,8 +9,8 @@ duckduckgo-search
 transformers
 tabulate
 wikipedia
-faiss-gpu # if u have a GPU
-#faiss-cpu # if u dont have a GPU
+#faiss-gpu # if u have a GPU
+faiss-cpu # if u dont have a GPU
 nest_asyncio
 torch
 # tensorflow >= 2.0 -- For other future models
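Since the default dependency is now faiss-cpu, a small sanity-check sketch (illustrative data; 64-dimensional random vectors) that runs on the CPU-only build:

import faiss
import numpy as np

d = 64                                               # vector dimensionality
xb = np.random.random((1000, d)).astype("float32")   # database vectors
xq = np.random.random((5, d)).astype("float32")      # query vectors

index = faiss.IndexFlatL2(d)          # exact L2 index; works without a GPU
index.add(xb)
distances, ids = index.search(xq, 4)  # 4 nearest neighbours per query
print(ids)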