tabbyAPI-ollama/config_sample.yml

# Sample YAML file for configuration.
# Comment out values as needed. Every value has a default within the application.
# Unless specified in the comments, DO NOT put these options in quotes!
# You can use https://www.yamllint.com/ if you want to check your YAML formatting.

# Options for networking
network:
  # The IP to host on (default: 127.0.0.1).
  # Use 0.0.0.0 to expose on all network adapters
  host: 127.0.0.1
  # The port to host on (default: 5000)
  port: 5000
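
# Example (illustration only, not a config option): with the defaults above, and assuming
# the usual OpenAI-compatible endpoints, the server should be reachable at
# http://127.0.0.1:5000, so a quick smoke test could be:
#   curl http://127.0.0.1:5000/v1/models
# Depending on your auth settings, an API key header may also be required.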

# Options for logging
logging:
  # Enable prompt logging (default: False)
  prompt: False
  # Enable generation parameter logging (default: False)
  generation_params: False

# Options for model overrides and loading
model:
  # Overrides the directory to look for models (default: models)
  # Windows users, DO NOT put this path in quotes! This directory will be invalid otherwise.
  model_dir: models
  # An initial model to load. Make sure the model is located in the model directory!
  # A model can be loaded later via the API.
  model_name: A model name
  # Sends dummy model names when the models endpoint is queried
  # Enable this if the program is looking for a specific OAI model
  use_dummy_models: False
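  # Example (illustration only): some OpenAI clients refuse to connect unless a familiar
  # model id such as "gpt-3.5-turbo" appears in the models list; enabling use_dummy_models
  # makes the endpoint return placeholder names for such clients (the exact dummy names
  # are decided by the application, not by this file).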
  # The below parameters apply only if model_name is set
  # Max sequence length (default: None)
  # Fetched from the model's base sequence length in config.json by default
  max_seq_len:
  # Overrides base model context length (default: None)
  # WARNING: Don't set this unless you know what you're doing!
  # Only use this if the model's base sequence length in config.json is incorrect (ex. Mistral/Mixtral models)
  override_base_seq_len:
  # Automatically allocate resources to GPUs (default: True)
  gpu_split_auto: True
  # An array of VRAM amounts (in GB) to split between GPUs (default: [])
  gpu_split: [20.6, 24]
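  # Example (illustration only): on a two-GPU machine the split above reserves roughly
  # 20.6 GB on the first GPU and 24 GB on the second. As an assumption about the loader's
  # behaviour, this list is only consulted when gpu_split_auto is False.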
  # Rope scale (default: 1.0)
  # Same thing as compress_pos_emb
  # Only use if your model was trained on long context with rope (check config.json)
  rope_scale: 1.0
  # Rope alpha (default: 1.0)
  # Same thing as alpha_value
  # Leave blank to automatically calculate alpha
  rope_alpha: 1.0
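  # Worked example (rule of thumb, not from this file): a model fine-tuned with linear
  # rope scaling (compress_pos_emb) of 4 on top of a 4096-token base would normally be
  # run with rope_scale: 4.0 and max_seq_len: 16384, i.e. base length times the scale factor.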
  # Disable Flash-attention 2. Set to True for GPUs older than Nvidia's 3000 series. (default: False)
  no_flash_attention: False
  # Enable 8 bit cache mode for VRAM savings (slight performance hit). Possible values: FP16, FP8. (default: FP16)
  cache_mode: FP16
  # Set the prompt template for this model. If empty, chat completions will be disabled. (default: None)
  # NOTE: Only works with chat completion message lists!
  prompt_template:
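  # Example (assumption about template naming): the application loads Jinja2 chat templates
  # from its templates directory, so something like `prompt_template: chatml` would select a
  # chatml.jinja file if one exists there. Check your installation for the available names.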
  # Number of experts to use per token. Loads from the model's config.json if not specified (default: None)
  # WARNING: Don't set this unless you know what you're doing!
  # NOTE: For MoE models (ex. Mixtral) only!
  num_experts_per_token:
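  # Example (based on Mixtral's published config.json): Mixtral-8x7B routes 2 experts per
  # token, so leaving this blank keeps the model's own setting.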

  # Options for draft models (speculative decoding). This will use more VRAM!
  draft:
    # Overrides the directory to look for draft models (default: models)
    draft_model_dir: models
    # An initial draft model to load. Make sure this model is located in the model directory!
    # A draft model can be loaded later via the API.
    draft_model_name: A model name
    # Rope scale for draft models (default: 1.0)
    # Same thing as compress_pos_emb
    # Only use if your draft model was trained on long context with rope (check config.json)
    draft_rope_scale: 1.0
    # Rope alpha for draft model (default: 1.0)
    # Same thing as alpha_value
    # Leave blank to automatically calculate alpha value
    draft_rope_alpha: 1.0
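    # Example (general guidance, an assumption rather than a requirement of this file):
    # speculative decoding works best when the draft model is a much smaller model that
    # shares the target model's tokenizer, e.g. pairing a large main model with a small
    # draft model from the same family.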

  # Options for loras
  lora:
    # Overrides the directory to look for loras (default: loras)
    lora_dir: loras
    # List of loras to load and associated scaling factors (default: 1.0).
    # Comment out unused entries or add more rows as needed.
    loras:
    - name: lora1
      scaling: 1.0
    - name: lora2
      scaling: 0.9
    - name: lora3
      scaling: 0.5
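    # Note (assumption about how scaling is applied): scaling acts as a weight multiplier
    # on each lora's effect, so 1.0 applies lora1 at full strength while 0.5 blends lora3
    # in at half strength.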