-
Notifications
You must be signed in to change notification settings - Fork 0
/
main.py
103 lines (81 loc) · 2.56 KB
/
main.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
"""
Menu :
- manage chat_list/params/credentials/prompt
- chat
- open/search logs
Comparatif des offres :
- Google : 1er modele a priori gratuit pour usage raisonnable
- OpenAI : min 5€
- Mistral : 1 mois gratuit, puis abonnement
TODO :
- logger
"""
import os
import subprocess as sp
import yaml
import streamlit as st
import ollama
from utils.ollama_utils import (
preprocess_stream,
)
from config.config import ROOT_DIR
from utils.logger import custom_logger
if "logger" not in st.session_state:
st.session_state["logger"] = custom_logger()
logger = st.session_state["logger"]
if "config.yaml" not in os.listdir(
os.path.join(ROOT_DIR, "config")
):
sp.run(args=["ollama", "pull", "llama3:latest"])
config={"initialized":True}
with open(os.path.join(ROOT_DIR, "config", "config.yaml"),"w") as f:
yaml.dump(config, f)
print(f"model_list :")
try:
print([model["model"] for model in ollama.list()["models"]])
model_list = sorted(
model["model"] for model in ollama.list()["models"]
)
except:
model_list = ["llama3", ]
with st.sidebar:
    # Model picker over the locally installed models.
    st.session_state["ai_model"] = st.selectbox(
        label="Model to use",
        options=model_list,
        index=0,
        placeholder="Choose an option",
    )
    st.text(body="get a new model :")
    st.page_link(
        label="available models",
        page="https://ollama.com/library",
    )
    # Fire-and-forget download: Popen keeps the UI responsive while the
    # model is pulled in the background.
    if new_model := st.text_input(
        label="download new model",
        placeholder="indiquer ici le modele à télécharger"
    ):
        sp.Popen(args=["ollama", "pull", new_model])
        # Bug fix: log message said "olama pull"; also use lazy %-args.
        logger.info("sending 'ollama pull %s' command", new_model)
st.title("ChatGPT-like clone")
if "messages" not in st.session_state:
st.session_state.messages = []
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
if prompt := st.chat_input("Posez votre question ici..."):
logger.info(msg=f'role: user, content: {prompt}')
st.session_state.messages.append(
{"role": "user", "content": prompt}
)
with st.chat_message("user"):
st.markdown(prompt)
with st.chat_message("assistant"):
stream = ollama.chat(
model=st.session_state["ai_model"],
messages=[
{'role': 'user', 'content': prompt}
],
stream=True,
)
response = st.write_stream(preprocess_stream(stream))
st.session_state.messages.append({"role": "assistant", "content": response})
logger.info(msg=f'role: assistant, content: {response}')