[Settings]
# Temperature controls how random the AI's output is.
# Close to 0 means it always picks the most likely next word:
# usually short, boring responses that easily get stuck in loops.
# 1 makes it as "statistically unpredictable" as real text.
# The AI isn't a perfect model of human writing, so 1 results in very random behavior.
# Lots of folk wisdom is being spread about the best choice.
# Some claim that somewhere between 0.1 and 0.3, or even lower, is optimal for coomers.
# Ok coomers.
temp = 0.9
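# A minimal sketch of how temperature is typically applied during sampling
# (illustrative Python using PyTorch, not this project's actual code):
#   import torch
#   probs = torch.softmax(logits / temp, dim=-1)          # temp < 1 sharpens, temp > 1 flattens
#   next_token = torch.multinomial(probs, num_samples=1)  # sample from the adjusted distribution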
# Repetition Penalty
# Controls how repetitive the AI's output is allowed to be.
# <1 encourages repeats (no one wants this).
# 1 is no penalty/off.
# >1 penalizes repeats, e.g. 1.2 is a 20% penalty.
# 1.2 is a common value as it's the default from the CTRL paper, which introduced the technique: https://arxiv.org/abs/1909.05858
rep-pen = 1.2
# Repetition Penalty Range
# Controls how far back (in tokens) repeats are penalized.
rep-pen-range = 512
# Repetition Penalty Slope
# Controls the slope of the penalty curve, i.e. how quickly the penalty fades out for older tokens within the range.
rep-pen-slope = 3.33
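# Roughly how a CTRL-style penalty works, here limited to the last
# rep-pen-range tokens (illustrative Python, not this project's exact code;
# the slope additionally tapers the penalty for older tokens, omitted here):
#   for token_id in set(generated_ids[-rep_pen_range:]):
#       if logits[token_id] > 0:
#           logits[token_id] /= rep_pen   # shrink positive logits
#       else:
#           logits[token_id] *= rep_pen   # push negative logits further down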
# The number of words the AI has to choose from.
# It always keeps the "top k" most likely next words, then randomly picks one according to temperature.
# Low values reduce the randomness of the AI, similar to temp.
# Won't change generation speed. 0 is off.
# Many projects turn this off and use top-p instead. Original AI Dungeon used 40.
top-keks = 0
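# A minimal top-k filtering sketch (illustrative Python using PyTorch):
#   import torch
#   if top_k > 0:
#       kth_best = torch.topk(logits, top_k).values[-1]
#       logits[logits < kth_best] = float("-inf")  # mask everything outside the top k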
# Another limit on the number of words the AI has to choose from.
# top-p, also called nucleus filtering, keeps the smallest set of top tokens whose cumulative probability is >= top_p (see https://arxiv.org/pdf/1904.09751.pdf).
# Similar to top k but probably better. They can be used together, or you can use this instead.
# 0.9 is used as a default in a wide range of projects and papers.
# Low values reduce the randomness of the AI, similar to temp.
# Won't change generation speed.
top-p = 0.9
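# A minimal nucleus-filtering sketch for a 1-D logits tensor
# (illustrative Python using PyTorch, not this project's actual code):
#   sorted_logits, sorted_idx = torch.sort(logits, descending=True)
#   cum_probs = torch.cumsum(torch.softmax(sorted_logits, dim=-1), dim=-1)
#   cutoff = cum_probs > top_p
#   cutoff[1:] = cutoff[:-1].clone()   # shift right so the first token past the threshold survives
#   cutoff[0] = False
#   logits[sorted_idx[cutoff]] = float("-inf")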
# How long should the longest suggested actions be? Higher is slower.
# More technically, this is the number of Byte Pair Encoding tokens
# (which are usually whole words) the AI generates for each story response.
generate-num = 40
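# To get a feel for BPE token counts, you can count them with the GPT-2
# tokenizer (illustrative Python using the transformers library):
#   from transformers import GPT2TokenizerFast
#   tok = GPT2TokenizerFast.from_pretrained("gpt2")
#   len(tok.encode("You draw your sword and charge."))  # a handful of tokens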
# Dings the console bell when the AI responds.
# If this doesn't work, check your terminal emulator's support for console bells; it typically buzzes the PC speaker.
# Betcha didn't know ASCII supported sound.
console-bell = off
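# The bell is just the ASCII BEL control character (0x07); e.g. in Python:
#   print("\a", end="", flush=True)   # rings the terminal bell if supported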
# Maximum width of lines.
# Set to 0 to disable.
# Text wrapping has been much requested since I disabled it from vanilla.
# In principle this should be a function of your terminal emulator and not an issue.
# Not sure of a good default, but 80 was long considered the standard number of columns on old PCs.
text-wrap-width = 120
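# Wrapping like this can be done with Python's standard library, e.g.:
#   import textwrap
#   print(textwrap.fill(long_story_text, width=120))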
# On forces use of the CPU even when you have a graphics card; off tries to use the GPU if you have one.
force-cpu = off
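# Roughly equivalent device selection in PyTorch (illustrative, not this
# project's exact code):
#   import torch
#   use_gpu = torch.cuda.is_available() and not force_cpu
#   device = torch.device("cuda" if use_gpu else "cpu")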
# 30 won't spam you with console log messages; values below 30 produce developer-level spam.
log-level = 30
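# These numbers follow Python's standard logging levels, e.g.:
#   import logging
#   logging.basicConfig(level=30)   # 30 == logging.WARNING; 20 == INFO, 10 == DEBUG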
# Use a d20 roll to decide whether actions succeed, e.g. rolling a 1 means "You fail to X".
action-d20 = off
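# The roll itself is just a random integer from 1 to 20, e.g. in Python:
#   import random
#   roll = random.randint(1, 20)   # e.g. a 1 turns "You X" into "You fail to X"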
# How many action suggestions to generate; higher is slower.
# TODO: Change this back to 4 once gpt2_generator.py is refactored
action-sugg = 0
# How weird (and potentially blank and loopy) should the suggested actions be?
# 0.15 is very conservative,
# 0.4 is conservative,
# 1.0 is weird (default),
# 1.5 is glitchy.
action-temp = 0.65
# Experimental setting, ignore it for now
top-p-first = on
# Leave "off" unless in Google Colab
colab-mode = off
# Try to enable Python Prompt Toolkit. If problems are detected, it's disabled regardless of the setting
prompt-toolkit = on
# If on, saves after every action, and prompts the user when starting a story what to save it as.
autosave = on
# Color scheme that is used if Python Prompt Toolkit is available. A classic-type color scheme can still be used here.
color-scheme = interface/colors-full.ini
# Backup color scheme in case Python Prompt Toolkit isn't available
backup-color-scheme = interface/colors-classic.ini
# Use experimental gpt2 (may be slightly faster, but buggy)
gpt2-experimental = off
# Max number of tokens of history for GPT-2 models to use; more = more VRAM but a more coherent story.
# Do not set it higher than 1024.
history-gpt-2 = 1024
# Max number of tokens of history for GPT-Neo models to use; more = more VRAM but a more coherent story.
# Some people claim setting this a little lower (~2000) is more stable on 8GB VRAM GPUs.
# Do not set it higher than 2048.
history-gpt-neo = 2048
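# Conceptually, the story history gets trimmed to this token budget before
# each generation (illustrative Python, assuming a transformers tokenizer
# named tok; not this project's exact code):
#   ids = tok.encode(full_story_text)
#   ids = ids[-(history_tokens - generate_num):]  # keep the most recent tokens, leaving room for new ones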