Skip to content

Commit

Permalink
chore: updated config template
Browse files Browse the repository at this point in the history
  • Loading branch information
tazlin committed Oct 4, 2023
1 parent 2e94c53 commit caeb854
Showing 1 changed file with 28 additions and 14 deletions.
42 changes: 28 additions & 14 deletions bridgeData_template.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,11 @@ max_threads: 1
# We will keep this many requests in the queue so we can start working as soon as a thread is available
# Recommended to keep no higher than 1
queue_size: 0

# Enable this on cards with 12gb or more VRAM to increase the rate you complete jobs
# You can enable this on cards with less VRAM if you do not load SD2.0 or SDXL models, and keep your max_power low (<32)
safety_on_gpu: false

# If set to True, this worker will only pick up jobs where the user has the required kudos upfront.
# Effectively this will exclude all anonymous accounts, and registered accounts who haven't contributed.
# Users in priority_usernames and trusted users will bypass this restriction
Expand All @@ -42,6 +47,7 @@ censor_nsfw: false
blacklist: []
# A list of words for which you always want to allow the NSFW censor filter, even when this worker is in NSFW mode
censorlist: []

# If set to False, this worker will no longer pick img2img jobs
allow_img2img: true
# If set to True, this worker can pick inpainting jobs
Expand All @@ -59,43 +65,50 @@ allow_controlnet: false # needs at least 12G VRAM
# Your worker will download the top 10Gb of non-character LoRas
# and then will ad-hoc download any LoRa requested which you do not have, and cache that for a number of days
allow_lora: false

# Use this setting to control how much extra space LoRas can take after you downloaded the Top
# If a new Lora would exceed this space, an old lora you've downloaded previously will be deleted
# Note THIS IS ON TOP OF THE CURATED LORAS, so plan around +5G more than this
max_lora_cache_size: 10 # In gigabytes. Min is 10.

# Set to False to prevent this worker from reading the Horde model queue and loading models which are under load
dynamic_models: false
dynamic_models: false # Currently unused in reGen
# Adjust how many models to load into memory. In future this will likely be an argument for memory size or may disappear, but for right now, I'm lazy
number_of_dynamic_models: 0
number_of_dynamic_models: 0 # Currently unused in reGen
# The maximum amount of models to download dynamically for this worker. Increase this amount if you have plenty of space. Keep it low if you do not
# When the amount of models downloaded reaches this amount, the dynamic list will only use dynamic models already downloaded
# Therefore make sure you put some generalist and popular models in your models_to_load list if this number is small!
max_models_to_download: 10
max_models_to_download: 10 # Currently unused in reGen

# The frequency (in seconds) to output worker summary stats, such as kudos per hour.
# Set to zero to disable stats output completely.
stats_output_frequency: 30

# The location in which stable diffusion ckpt models are stored
cache_home: "./"
# Always download models when required without prompting
always_download: true
# The location of the temp directory, also used for the model cache
temp_dir: "./tmp"
temp_dir: "./tmp" # Currently unused in reGen


# Always download models when required without prompting
always_download: true # Currently unused in reGen
# Disable the terminal GUI, which displays information about the worker and the horde.
disable_terminal_ui: false
disable_terminal_ui: false # Currently unused in reGen

# VRAM to leave unused, as a percentage or in MB. VRAM the worker can use will be used to load and cache models.
# Note this NOT the amount of VRAM to use, it's the amount to KEEP FREE. So if something else starts using
# VRAM the worker will attempt to release it to allow the other software to use it.
# Don't set this too high, or you will run out of vram when it can't be released fast enough.
vram_to_leave_free: "80%"
vram_to_leave_free: "80%" # Currently unused in reGen
# RAM to leave unused, as a percentage or in MB. RAM the worker can use will be used to cache models. Same
# notes as for VRAM.
# Don't set this too high or your OS will likely start using lots of swap space and everything will slow down.
ram_to_leave_free: "80%"
ram_to_leave_free: "80%" # Currently unused in reGen
# Disable the disk cache. By default if RAM and VRAM are filled (up to the limits above) then models will
# spill over in to a disk cache. If you don't want this to happen you can disable it here. Note that if you
# disable disk cache and specify more models to load than will fit in memory your worker will endlessly cycle
# loading and unloading models.
disable_disk_cache: false
disable_disk_cache: false # Currently unused in reGen


# The models to use. You can select a different main model, or select more than one.
Expand All @@ -110,9 +123,10 @@ disable_disk_cache: false
# Instead of a model name you may use of any of the following magic constants:
# "ALL MODELS" - means load all possible models. Expect this to take over 1TB of space!
# "TOP n" - load the top "N" most popular models, use for example, "top 5" or "top 3", etc.
# "ALL <style> MODELS" - For example, "all anime models", styles are: generalist, artistic, realistic, anime, furry, other
# "ALL SFW MODELS" - All models marked as being SFW
# "ALL NSFW MODELS" - All models marked as being NSFW
#   "BOTTOM n" - load the bottom "N" least popular models, use for example, "bottom 5" or "bottom 3", etc.
# (not currently supported) "ALL <style> MODELS" - For example, "all anime models", styles are: generalist, artistic, realistic, anime, furry, other
# (not currently supported) "ALL SFW MODELS" - All models marked as being SFW
# (not currently supported) "ALL NSFW MODELS" - All models marked as being NSFW
models_to_load:
- "top 2"
#- "ALL MODELS"
Expand Down Expand Up @@ -146,7 +160,7 @@ models_to_skip:
# If you are getting messages about jobs taking too long, you can change this to true if you no longer want to see them
# Please note, that if you *are* getting these messages, you are serving jobs substantially slower than is ideal,
# and you very likely would get more kudos/hr if you just lower your max_power.
suppress_speed_warnings: false
suppress_speed_warnings: false # Currently unused in reGen

## Scribe (LLM Worker)

Expand Down

0 comments on commit caeb854

Please sign in to comment.