[General]
# If you deploy this service on a remote server,
# it is recommended to set mode to "remote" (this value may be used by the program in the future),
# and you **should** set host to "0.0.0.0", or you can't access the service from outside
# (a commented example follows at the end of this section)
mode = "local"
port = 3000
host = "0.0.0.0"
# If you run into problems, you can set debug and logger to true
debug = false
# Fastify Logger
logger = false
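
# A commented sketch of a remote deployment, using only the keys above
# (illustrative values):
# mode = "remote"
# host = "0.0.0.0"
# port = 3000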

[AI]
default = "copilot"
# If a parameter is not set in a specific AI service,
# this value will be used
# For example:
# if temperature is not set in AI.OpenAI, this value is used;
# if temperature is set in AI.Copilot, the value in AI.Copilot is used instead
# (a commented sketch of this fallback follows below)
temperature = 0.5
max_tokens = 100
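
# A commented sketch of the fallback described above (illustrative values only):
# [AI]
# temperature = 0.5   # global fallback
# [AI.Copilot]
# temperature = 0.8   # overrides the global value for Copilot requests only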

[AI.OpenAI]
# If the default model is not set here,
# or it is set but this service does not define that model,
# the default model written in the code will be used
# default = "gpt-3.5-turbo-16k.legacy"
# You can edit the base_url if you want to use the custom OpenAI server
# base_url = "https://api.openai.com/v1"
api_key = "YOUR_API_KEY"
# If you'd like to use Azure OpenAI
# is_azure = true
# base_url = "https://<resource_name>.openai.azure.com"
# azure_deployment_name = "YOUR_AZURE_DEPLOYMENT_ID" # if not provided, use req.body.model
# If a parameter is set in a specific AI service,
# that value will be used; it has the highest priority
temperature = 0.5
max_tokens = 100
# Custom OpenAI Model
# You can add your own OpenAI model just like the following:
# # [NOTICE] Don't use dots in the section key; TOML parses them as nested sections
# [AI.OpenAI.Models.gpt-3_5-turbo-16k-legacy]
# id = "gpt-3.5-turbo-16k.legacy" # if it's not provided, it will be generated from the Object key. For example, here it will be "gpt-3.5-turbo-16k-legacy"
# model = "gpt-3.5-turbo-16k.legacy" # if it's not provided, it will be generated from the Object key.
# name = "Legacy GPT-3.5 Turbo" # if it's not provided, it will be generated from the Object key.

[AI.Gemini]
api_key = ""
temperature = 0.5
max_tokens = 100

[AI.Copilot]
# You can choose the model from the following: gpt-3.5-turbo, gpt-4
default = "gpt-4"
api_key = ""
temperature = 0.5
max_tokens = 100

[Translate]
# You can choose the default translation service from the following:
# shortcut, deeplx, ai, libretranslate
# Default: deeplx
default = "deeplx"
# Maybe one day there will be a [Translate.Shortcuts] configuration here...
# [Translate.Shortcuts]

[Translate.DeepLX]
# proxy_endpoint = ""
# access_token = ""
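# A commented example pointing DeepLX at a self-hosted instance
# (the endpoint and token below are assumptions; adjust them to your deployment):
# proxy_endpoint = "http://127.0.0.1:1188/translate"
# access_token = "YOUR_ACCESS_TOKEN"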

[Translate.AI]
# The AI service used for translation
# If it is not set, the default service written in the code will be used
# Default: openai
default = "openai"
# The model used by the AI service
# (only effective for openai)
# Default: gpt-3.5-turbo
model = "gpt-3.5-turbo"

[Translate.LibreTranslate]
base_url = "https://libretranslate.com"
# You can choose the type from the following:
# reserve, api
# Default: reserve
type = "reserve"
# If you choose api, you should set the api_key
api_key = ""

# The following options are the legacy configuration
# They will be removed in the future
# YOU SHOULD NOT USE THEM
# THEY WILL CAUSE WARNINGS IN THE LOGS
# [Legacy]
# ai_type = "OpenAI"
# ai_api_key = ""
# openai_base_url = "https://api.openai.com"
# ai_max_tokens = 100
# ai_temperature = 0.5