diff --git a/__init__.py b/__init__.py
index 1f605c8..8155b1d 100644
--- a/__init__.py
+++ b/__init__.py
@@ -11,14 +11,6 @@
This file is the init point for the project-wide structure.
"""
-# Engines
-from .openai_api.src.openai_api.chatgpt import ChatGPT # pylint: disable=unused-import
-from .openai_api.src.openai_api.dalle import DALLE # pylint: disable=unused-import
-from .leonardo_api.src.leonardo_api.leonardo_sync import Leonardo # pylint: disable=unused-import
-from .leonardo_api.src.leonardo_api.leonardo_async import Leonardo as LeonardoAsync # pylint: disable=unused-import
-
-
-# Utils
from .utils.tts import CustomTTS # pylint: disable=unused-import
from .utils.transcriptors import CustomTranscriptor # pylint: disable=unused-import
from .utils.translators import CustomTranslator # pylint: disable=unused-import
diff --git a/__main__.py b/__main__.py
index a86a2ee..5b08585 100644
--- a/__main__.py
+++ b/__main__.py
@@ -11,13 +11,6 @@
This file is the entry point for the project-wide structure.
"""
-# Engines
-from .openai_api.src.openai_api.chatgpt import ChatGPT # pylint: disable=unused-import
-from .openai_api.src.openai_api.dalle import DALLE # pylint: disable=unused-import
-from .leonardo_api.src.leonardo_api.leonardo_sync import Leonardo # pylint: disable=unused-import
-from .leonardo_api.src.leonardo_api.leonardo_async import Leonardo as LeonardoAsync # pylint: disable=unused-import
-
-# Utils
from .utils.tts import CustomTTS # pylint: disable=unused-import
from .utils.transcriptors import CustomTranscriptor # pylint: disable=unused-import
from .utils.translators import CustomTranslator # pylint: disable=unused-import
diff --git a/examples/image_generation/dalle_test.py b/examples/image_generation/dalle_test.py
index 53709c2..5435499 100644
--- a/examples/image_generation/dalle_test.py
+++ b/examples/image_generation/dalle_test.py
@@ -11,9 +11,9 @@
This file contains testing procedures for DALLE experiments
"""
import asyncio
+from openai_python_api import DALLE
from examples.creds import oai_token, oai_organization
-from openai_api.src.openai_api import DALLE
dalle = DALLE(auth_token=oai_token, organization=oai_organization)
diff --git a/examples/image_generation/gpt_functions.py b/examples/image_generation/gpt_functions.py
index 7cf1f41..9bdee77 100644
--- a/examples/image_generation/gpt_functions.py
+++ b/examples/image_generation/gpt_functions.py
@@ -15,10 +15,10 @@
import requests
from PIL import Image
+from leonardo_api import Leonardo
+from openai_python_api import DALLE
from examples.creds import oai_token, oai_organization
-from leonardo_api.src.leonardo_api.leonardo_sync import Leonardo
-from openai_api.src.openai_api.dalle import DALLE
def get_weather(city, units):
@@ -65,7 +65,7 @@ def draw_image_using_dalle(prompt):
dalle = DALLE(auth_token=oai_token, organization=oai_organization)
image = dalle.create_image_url(prompt)
url_dict = {"image_url": image[0]}
- response = requests.get(image[0])
+ response = requests.get(image[0], timeout=30)
img = Image.open(BytesIO(response.content))
img.show()
return json.dumps(url_dict)
@@ -90,7 +90,7 @@ def draw_image(prompt):
)
response = leonardo.wait_for_image_generation(generation_id=response["sdGenerationJob"]["generationId"])
url_dict = {"image_url": response[0]["url"]}
- response = requests.get(url_dict["image_url"])
+ response = requests.get(url_dict["image_url"], timeout=30)
img = Image.open(BytesIO(response.content))
img.show()
return json.dumps(url_dict)
diff --git a/examples/image_generation/test_leonardo.py b/examples/image_generation/test_leonardo.py
index 4b1f8e5..2478014 100644
--- a/examples/image_generation/test_leonardo.py
+++ b/examples/image_generation/test_leonardo.py
@@ -10,10 +10,10 @@
Description:
This file contains testing procedures for Leonardo experiments
"""
-import asyncio
import json
-from leonardo_api.src.leonardo_api.leonardo_async import Leonardo
+import asyncio
+from leonardo_api.leonardo_async import Leonardo
async def main():
diff --git a/examples/speak_and_hear/test_gpt.py b/examples/speak_and_hear/test_gpt.py
index 8290fe8..ccd84d5 100644
--- a/examples/speak_and_hear/test_gpt.py
+++ b/examples/speak_and_hear/test_gpt.py
@@ -11,15 +11,16 @@
This file contains testing procedures for ChatGPT experiments
"""
-import asyncio
import string
import sys
+import asyncio
+from openai_python_api import ChatGPT
+
+from examples.creds import oai_token, oai_organization
from utils.audio_recorder import AudioRecorder
from utils.transcriptors import CustomTranscriptor
from utils.tts import CustomTTS
-from ..creds import oai_token, oai_organization
-from ...openai_api import ChatGPT
gpt = ChatGPT(auth_token=oai_token, organization=oai_organization, model="gpt-3.5-turbo")
gpt.max_tokens = 200
diff --git a/examples/speak_and_hear/test_gpt_orig.py b/examples/speak_and_hear/test_gpt_orig.py
deleted file mode 100644
index 660c0bb..0000000
--- a/examples/speak_and_hear/test_gpt_orig.py
+++ /dev/null
@@ -1,153 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-Filename: chatgpt.py
-Author: Iliya Vereshchagin
-Copyright (c) 2023. All rights reserved.
-
-Created: 25.08.2023
-Last Modified: 25.08.2023
-
-Description:
-This file contains testing procedures for ChatGPt experiments
-"""
-
-import string
-import sys
-
-import asyncio
-
-from creds import oai_token, oai_organization
-from openai_api.src.openai_api import ChatGPT
-
-from utils.audio_recorder import AudioRecorder
-from utils.transcriptors import CustomTranscriptor
-from utils.tts import CustomTTS
-
-from gpt_functions import gpt_functions, gpt_functions_dict
-
-
-gpt = ChatGPT(auth_token=oai_token, organization=oai_organization, model="gpt-3.5-turbo-0613")
-gpt.max_tokens = 200
-gpt.stream = True
-gpt.functions = gpt_functions
-gpt.function_dict = gpt_functions_dict
-gpt.function_call = 'auto'
-
-tts = CustomTTS(method="google", lang="en")
-
-# queues
-prompt_queue = asyncio.Queue()
-tts_queue = asyncio.Queue()
-
-
-async def ask_chat(user_input):
- full_response = ""
- word = ""
- async for response in gpt.str_chat(user_input):
- for char in response:
- word += char
- if char in string.whitespace or char in string.punctuation:
- if word:
- await prompt_queue.put(word)
- word = ""
- sys.stdout.write(char)
- sys.stdout.flush()
- full_response += char
- print("\n")
- return full_response
-
-
-async def tts_task():
- limit = 5
- empty_counter = 0
- while True:
- if prompt_queue.empty():
- empty_counter += 1
- if empty_counter >= 3:
- limit = 5
- empty_counter = 0
- words = []
- # Get all available words
- limit_counter = 0
- while len(words) < limit:
- try:
- word = await asyncio.wait_for(prompt_queue.get(), timeout=0.5)
- words.extend(word.split())
- if len(words) >= limit:
- break
- except asyncio.TimeoutError:
- limit_counter += 1
- if limit_counter >= 10:
- limit = 1
-
- # If we have at least limit words or queue was empty 3 times, process them
- if len(words) >= limit:
- text = " ".join(words)
- await tts.process(text)
- limit = 1
-
-
-async def tts_sentence_task():
- punctuation_marks = ".?!,;:"
- sentence = ""
- while True:
- try:
- word = await asyncio.wait_for(prompt_queue.get(), timeout=0.5)
- sentence += " " + word
- # If the last character is a punctuation mark, process the sentence
- if sentence[-1] in punctuation_marks:
- await tts_queue.put(sentence)
- sentence = ""
- except Exception as error:
- pass
-
-
-async def tts_worker():
- while True:
- try:
- sentence = await tts_queue.get()
- if sentence:
- await tts.process(sentence)
- tts_queue.task_done()
- except Exception as error:
- pass
-
-
-async def get_user_input():
- while True:
- try:
- user_input = input()
- if user_input.lower() == "[done]":
- break
- else:
- await ask_chat(user_input)
- except KeyboardInterrupt:
- break
-
-
-async def main():
- asyncio.create_task(tts_sentence_task())
- asyncio.create_task(tts_worker())
- method = "google"
-
- while True:
- try:
- #if "google" not in method:
- # file_path = AudioRecorder().listen()
- # with open(file_path, "rb") as f:
- # transcript = await gpt.transcript(file=f, language="en")
- #else:
- # transcript = CustomTranscriptor(language="en-US").transcript()
- # pass
- #if transcript:
- # print(f"User: {transcript}")
- # #translate = CustomTranslator(source='ru', target='en').translate(transcript)
- # #print(translate)
- # response = await ask_chat(transcript)
- print("John Connor:" "Hello, my name is John Connor!")
- response = await ask_chat("Hello, my name is John Connor!")
- except KeyboardInterrupt:
- break
-
- asyncio.run(main())
-
diff --git a/examples/test_generator/generator_test.py b/examples/test_generator/generator_test.py
index 8693fa3..ef44cf1 100644
--- a/examples/test_generator/generator_test.py
+++ b/examples/test_generator/generator_test.py
@@ -11,21 +11,22 @@
This file contains testing procedures for ChatGPT experiments
"""
-import asyncio
import json
import logging
+import asyncio
+from openai_python_api import ChatGPT
+
from examples.creds import oai_token, oai_organization
from examples.test_generator.gpt_functions import gpt_functions, gpt_functions_dict
from examples.test_generator.pom_case_generator import PomTestCaseGenerator
-from openai_api.src.openai_api import ChatGPT
-from openai_api.src.openai_api.logger_config import setup_logger
+from utils.logger_config import setup_logger
generator = PomTestCaseGenerator(url="https://www.saucedemo.com/")
# generator = PomTestCaseGenerator(url='https://automationintesting.com/selenium/testpage/')
-system_instructions = """
+SYSTEM_INSTRUCTIONS = """
You're a bot responsible for QA automation testing. Your tech stack is selenium + pytest. I will provide you a URL for testing.
1) You may obtain the page code by calling the "get_page_code" function. It will return you:
@@ -62,12 +63,12 @@ def calculate_button(self):\\n
def setup_gpt():
"""Setup GPT bot with appropriate functions and settings"""
gpt = ChatGPT(auth_token=oai_token, organization=oai_organization, model="gpt-4-0613")
- gpt.logger = setup_logger("gpt", "gpt.log", logging.INFO)
+    gpt.logger = setup_logger("gpt", "gpt.log", logging.INFO)  # suppress DEBUG output of ChatGPT
gpt.system_settings = ""
gpt.function_dict = gpt_functions_dict
gpt.function_call = "auto"
gpt.functions = gpt_functions
- gpt.system_settings = system_instructions
+ gpt.system_settings = SYSTEM_INSTRUCTIONS
return gpt
diff --git a/leonardo_api b/leonardo_api
deleted file mode 160000
index 44c3744..0000000
--- a/leonardo_api
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 44c3744c86489134b4dea30706c212e7d3790538
diff --git a/openai_api b/openai_api
deleted file mode 160000
index 5804e33..0000000
--- a/openai_api
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 5804e33fb22f449ef6b2feb1a6b2f113a359e2d9
diff --git a/requirements.txt b/requirements.txt
index 724f626..76d6e0b 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -19,10 +19,13 @@ pillow==10.1.0
readability==0.3.1
# Testing
webdriver_manager==4.0.1
-selenium==4.14.0
+selenium==4.15.1
pytest==7.4.3
pytest-json-report==1.5.0
pytest-xdist==3.3.1
# Third-party-test
cohere==4.32
llamaapi==0.1.36
+# My AI APIs
+leonardo-api==0.0.7
+openai-python-api==0.0.5
diff --git a/utils/page_retriever.py b/utils/page_retriever.py
index 850dcf3..94766bf 100644
--- a/utils/page_retriever.py
+++ b/utils/page_retriever.py
@@ -74,7 +74,6 @@ def get_body_without_scripts(self, url=None):
Get the body content of the page without script tags.
:param url: (str) URL of the page.
-
:return: (str) Body content of the page without script tags.
"""
if url:
@@ -86,7 +85,6 @@ def get_page_content(self, url):
Get the page content from the url.
:param url: (str) URL of the page.
-
:return: (str) HTML content of the page.
"""
self.driver.get(url)
@@ -114,7 +112,6 @@ def extract_body_content(html_content):
Extract the body content from the html_content.
:param html_content: (str) HTML content of the page.
-
:return: (str) Body content of the page.
"""
soup = BeautifulSoup(html_content, "html.parser")
@@ -128,7 +125,6 @@ def remove_script_tags(input_content):
Remove all script tags from the input_content.
:param input_content: (str) HTML content of the page.
-
:return: (str) Body content of the page without script tags.
"""
pattern_1 = re.compile(r"<script.*?>.*?</script>", re.DOTALL)