forked from WaitThatShouldntWork/Infer
-
Notifications
You must be signed in to change notification settings - Fork 0
/
.env.example
54 lines (44 loc) · 1.59 KB
/
.env.example
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
# neo4j authentication
NEO4J_USERNAME=neo4j
NEO4J_PASSWORD=change-me!
# neo4j graph database URI used by the backend to connect to neo4j
# use "bolt://localhost" when the backend and neo4j are both running locally outside Docker
# use "bolt://host.docker.internal" when the backend is running within Docker but neo4j is running locally (outside Docker)
# URI will be set to the neo4j container's host if using Docker Compose
NEO4J_URI=bolt://localhost:7687
# port configuration is optional
# used with Docker Compose to expose neo4j on non-default ports
NEO4J_HTTP_PORT=7474
NEO4J_BOLT_PORT=7687
# backend LLM properties
MISTRAL_KEY=my-api-key
# OpenAI LLM properties
OPENAI_KEY=my-openai-api-key
# frontend host - used to configure backend CORS
FRONTEND_URL=http://localhost:8650
# what backend URL should be used by frontend API requests
BACKEND_URL=http://localhost:8250
# websockets url to connect to backend websocket endpoint
WS_URL=ws://localhost:8250/ws
# Azure
AZURE_STORAGE_CONNECTION_STRING="my-connection-string"
AZURE_STORAGE_CONTAINER_NAME=my-container-name
AZURE_INITIAL_DATA_FILENAME=test-data.json
# llm config
ANSWER_AGENT_LLM="openai"
INTENT_AGENT_LLM="openai"
VALIDATOR_AGENT_LLM="mistral"
DATASTORE_AGENT_LLM="openai"
MATHS_AGENT_LLM="openai"
WEB_AGENT_LLM="openai"
CHART_GENERATOR_LLM="openai"
ROUTER_LLM="openai"
# llm model
ANSWER_AGENT_MODEL="gpt-4o-mini"
INTENT_AGENT_MODEL="gpt-4o-mini"
VALIDATOR_AGENT_MODEL="mistral-large-latest"
DATASTORE_AGENT_MODEL="gpt-4o-mini"
MATHS_AGENT_MODEL="gpt-4o-mini"
WEB_AGENT_MODEL="gpt-4o-mini"
CHART_GENERATOR_MODEL="gpt-4o-mini"
ROUTER_MODEL="gpt-4o-mini"