From 7c6a05374786ee44f216feab337fc145723f5a08 Mon Sep 17 00:00:00 2001 From: kingbri <8082010+kingbri1@users.noreply.github.com> Date: Sun, 27 Apr 2025 22:27:26 -0400 Subject: [PATCH 01/45] Model: Add option to select backend Changing the backend switches the container that's used. Signed-off-by: kingbri <8082010+kingbri1@users.noreply.github.com> --- common/config_models.py | 7 +++++++ common/model.py | 28 +++++++++++++++++++++++----- config_sample.yml | 4 ++++ endpoints/core/types/model.py | 5 ++++- 4 files changed, 38 insertions(+), 6 deletions(-) diff --git a/common/config_models.py b/common/config_models.py index a31bd3e..bb09e57 100644 --- a/common/config_models.py +++ b/common/config_models.py @@ -163,6 +163,13 @@ class ModelConfig(BaseConfigModel): "Example: ['max_seq_len', 'cache_mode']." ), ) + backend: Optional[str] = Field( + "exllamav2", + description=( + "Backend to use for this model (default: exllamav2)\n" + "Options: exllamav2, exllamav3", + ), + ) max_seq_len: Optional[int] = Field( None, description=( diff --git a/common/model.py b/common/model.py index cc26b43..96d45f6 100644 --- a/common/model.py +++ b/common/model.py @@ -23,10 +23,14 @@ from common.utils import unwrap container: Optional[BaseModelContainer] = None embeddings_container = None -# FIXME: Possibly use this solely when creating the model + +_BACKEND_REGISTRY = {} + if dependencies.exllamav2: from backends.exllamav2.model import ExllamaV2Container + _BACKEND_REGISTRY["exllamav2"] = ExllamaV2Container + if dependencies.extras: from backends.infinity.model import InfinityContainer @@ -113,10 +117,24 @@ async def load_model_gen(model_path: pathlib.Path, **kwargs): kwargs = {**config.model_defaults, **kwargs} kwargs = await apply_inline_overrides(model_path, **kwargs) - # Create a new container - new_container = await ExllamaV2Container.create( - model_path.resolve(), False, **kwargs - ) + # Create a new container and check if the right dependencies are installed + backend_name = unwrap(kwargs.get("backend"), "exllamav2").lower() + container_class = _BACKEND_REGISTRY.get(backend_name) + + if not container_class: + available_backends = list(_BACKEND_REGISTRY.keys()) + if backend_name in available_backends: + raise ValueError( + f"Backend '{backend_name}' selected, but required dependencies " + "are not installed." + ) + else: + raise ValueError( + f"Invalid backend '{backend_name}'. " + "Available backends: {available_backends}" + ) + + new_container = await container_class.create(model_path.resolve(), False, **kwargs) # Add possible types of models that can be loaded model_type = [ModelType.MODEL] diff --git a/config_sample.yml b/config_sample.yml index b6f362d..a743c2c 100644 --- a/config_sample.yml +++ b/config_sample.yml @@ -74,6 +74,10 @@ model: # Example: ['max_seq_len', 'cache_mode']. use_as_default: [] + # Backend to use for the model (default: exllamav2) + # Options: exllamav2, exllamav3 + backend: exllamav2 + # Max sequence length (default: Empty). # Fetched from the model's base sequence length in config.json by default. 
max_seq_len: diff --git a/endpoints/core/types/model.py b/endpoints/core/types/model.py index 02213f9..6855108 100644 --- a/endpoints/core/types/model.py +++ b/endpoints/core/types/model.py @@ -81,7 +81,10 @@ class ModelLoadRequest(BaseModel): ) # Config arguments - + backend: Optional[str] = Field( + description="Backend to use", + default="exllamav2", + ) max_seq_len: Optional[int] = Field( description="Leave this blank to use the model's base sequence length", default=None, From 0c1d794390be3a15b582374a7404d6ca599c6615 Mon Sep 17 00:00:00 2001 From: kingbri <8082010+kingbri1@users.noreply.github.com> Date: Mon, 28 Apr 2025 23:54:55 -0400 Subject: [PATCH 02/45] Model: Add exl3 and associated load functions Initial exl3 compat and loading functionality. Signed-off-by: kingbri <8082010+kingbri1@users.noreply.github.com> --- backends/base_model_container.py | 108 ++++++------ backends/exllamav2/model.py | 22 +-- backends/exllamav3/model.py | 275 +++++++++++++++++++++++++++++++ common/model.py | 16 +- common/optional_dependencies.py | 3 +- 5 files changed, 357 insertions(+), 67 deletions(-) create mode 100644 backends/exllamav3/model.py diff --git a/backends/base_model_container.py b/backends/base_model_container.py index 5c79867..10eae0d 100644 --- a/backends/base_model_container.py +++ b/backends/base_model_container.py @@ -25,6 +25,10 @@ class BaseModelContainer(abc.ABC): prompt_template: Optional[PromptTemplate] = None generation_config: Optional[GenerationConfig] = None + # Optional features + use_draft_model: bool = False + use_vision: bool = False + # Load synchronization # The bool is a master switch for accepting requests # The lock keeps load tasks sequential @@ -65,7 +69,7 @@ class BaseModelContainer(abc.ABC): # NOTE: Might be an optional method @abc.abstractmethod - async def load_gen(self, progress_callback=None, **kwargs) -> AsyncIterator[Any]: + async def load_gen(self, progress_callback=None, **kwargs): """ Loads the model into memory, yielding progress updates. @@ -134,57 +138,6 @@ class BaseModelContainer(abc.ABC): pass - @abc.abstractmethod - async def generate( - self, - request_id: str, - prompt: str, - params: BaseSamplerRequest, - abort_event: Optional[asyncio.Event] = None, - mm_embeddings: Optional[MultimodalEmbeddingWrapper] = None, - ) -> Dict[str, Any]: - """ - Generates a complete response for a given prompt and parameters. - - Args: - request_id: Unique identifier for the generation request. - prompt: The input prompt string. - params: Sampling and generation parameters. - abort_event: An asyncio Event to signal cancellation. - mm_embeddings: Optional multimodal embeddings. - - Returns: - A dictionary containing the generation info - """ - - pass - - @abc.abstractmethod - async def stream_generate( - self, - request_id: str, - prompt: str, - params: BaseSamplerRequest, - abort_event: Optional[asyncio.Event] = None, - mm_embeddings: Optional[MultimodalEmbeddingWrapper] = None, - ) -> AsyncIterator[Dict[str, Any]]: - """ - Generates a response iteratively (streaming) for a given prompt. - - Args: - request_id: Unique identifier for the generation request. - prompt: The input prompt string. - params: Sampling and generation parameters. - abort_event: An asyncio Event to signal cancellation. - mm_embeddings: Optional multimodal embeddings. 
- - Yields: - Generation chunks - """ - - if False: - yield - @abc.abstractmethod def model_info(self) -> ModelCard: """ @@ -239,3 +192,54 @@ class BaseModelContainer(abc.ABC): """ return [] + + @abc.abstractmethod + async def generate( + self, + request_id: str, + prompt: str, + params: BaseSamplerRequest, + abort_event: Optional[asyncio.Event] = None, + mm_embeddings: Optional[MultimodalEmbeddingWrapper] = None, + ) -> Dict[str, Any]: + """ + Generates a complete response for a given prompt and parameters. + + Args: + request_id: Unique identifier for the generation request. + prompt: The input prompt string. + params: Sampling and generation parameters. + abort_event: An asyncio Event to signal cancellation. + mm_embeddings: Optional multimodal embeddings. + + Returns: + A dictionary containing the generation info + """ + + pass + + @abc.abstractmethod + async def stream_generate( + self, + request_id: str, + prompt: str, + params: BaseSamplerRequest, + abort_event: Optional[asyncio.Event] = None, + mm_embeddings: Optional[MultimodalEmbeddingWrapper] = None, + ) -> AsyncIterator[Dict[str, Any]]: + """ + Generates a response iteratively (streaming) for a given prompt. + + Args: + request_id: Unique identifier for the generation request. + prompt: The input prompt string. + params: Sampling and generation parameters. + abort_event: An asyncio Event to signal cancellation. + mm_embeddings: Optional multimodal embeddings. + + Yields: + Generation chunks + """ + + if False: + yield diff --git a/backends/exllamav2/model.py b/backends/exllamav2/model.py index b821d1a..fcf4f3c 100644 --- a/backends/exllamav2/model.py +++ b/backends/exllamav2/model.py @@ -64,16 +64,19 @@ class ExllamaV2Container(BaseModelContainer): # Exl2 vars config: Optional[ExLlamaV2Config] = None - draft_config: Optional[ExLlamaV2Config] = None model: Optional[ExLlamaV2] = None - draft_model: Optional[ExLlamaV2] = None cache: Optional[ExLlamaV2Cache] = None - draft_cache: Optional[ExLlamaV2Cache] = None tokenizer: Optional[ExLlamaV2Tokenizer] = None generator: Optional[ExLlamaV2DynamicGeneratorAsync] = None prompt_template: Optional[PromptTemplate] = None paged: bool = True + # Draft model vars + use_draft_model: bool = False + draft_config: Optional[ExLlamaV2Config] = None + draft_model: Optional[ExLlamaV2] = None + draft_cache: Optional[ExLlamaV2Cache] = None + # Internal config vars cache_size: int = None cache_mode: str = "FP16" @@ -100,7 +103,7 @@ class ExllamaV2Container(BaseModelContainer): load_condition: asyncio.Condition = asyncio.Condition() @classmethod - async def create(cls, model_directory: pathlib.Path, quiet=False, **kwargs): + async def create(cls, model_directory: pathlib.Path, **kwargs): """ Primary asynchronous initializer for model container. 
@@ -110,8 +113,6 @@ class ExllamaV2Container(BaseModelContainer): # Create a new instance as a "fake self" self = cls() - self.quiet = quiet - # Initialize config self.config = ExLlamaV2Config() self.model_dir = model_directory @@ -122,6 +123,7 @@ class ExllamaV2Container(BaseModelContainer): self.config.max_seq_len = 4096 self.config.prepare() + print(self.config.max_seq_len) # Check if the model arch is compatible with various exl2 features self.config.arch_compat_overrides() @@ -162,7 +164,7 @@ class ExllamaV2Container(BaseModelContainer): # Prepare the draft model config if necessary draft_args = unwrap(kwargs.get("draft_model"), {}) draft_model_name = draft_args.get("draft_model_name") - enable_draft = draft_args and draft_model_name + self.use_draft_model = draft_args and draft_model_name # Always disable draft if params are incorrectly configured if draft_args and draft_model_name is None: @@ -170,9 +172,9 @@ class ExllamaV2Container(BaseModelContainer): "Draft model is disabled because a model name " "wasn't provided. Please check your config.yml!" ) - enable_draft = False + self.use_draft_model = False - if enable_draft: + if self.use_draft_model: self.draft_config = ExLlamaV2Config() draft_model_path = pathlib.Path( unwrap(draft_args.get("draft_model_dir"), "models") @@ -365,7 +367,7 @@ class ExllamaV2Container(BaseModelContainer): self.config.max_attention_size = chunk_size**2 # Set user-configured draft model values - if enable_draft: + if self.use_draft_model: self.draft_config.max_seq_len = self.config.max_seq_len self.draft_config.scale_pos_emb = unwrap( diff --git a/backends/exllamav3/model.py b/backends/exllamav3/model.py new file mode 100644 index 0000000..76360df --- /dev/null +++ b/backends/exllamav3/model.py @@ -0,0 +1,275 @@ +import asyncio +import gc +import pathlib +from loguru import logger +from typing import ( + Any, + AsyncIterator, + Dict, + List, + Optional, +) + +import torch + +from backends.base_model_container import BaseModelContainer +from common.concurrency import iterate_in_threadpool +from common.multimodal import MultimodalEmbeddingWrapper +from common.sampling import BaseSamplerRequest +from common.templating import PromptTemplate +from common.transformers_utils import GenerationConfig +from endpoints.core.types.model import ModelCard + +from exllamav3 import Config, Model, Cache, Tokenizer + + +class ExllamaV3Container(BaseModelContainer): + """Abstract base class for model containers.""" + + # Exposed model information + model_dir: pathlib.Path = pathlib.Path("models") + prompt_template: Optional[PromptTemplate] = None + generation_config: Optional[GenerationConfig] = None + + # Load synchronization + # The bool is a master switch for accepting requests + # The lock keeps load tasks sequential + # The condition notifies any waiting tasks + active_job_ids: Dict[str, Any] = {} + loaded: bool = False + load_lock: asyncio.Lock = asyncio.Lock() + load_condition: asyncio.Condition = asyncio.Condition() + + # Exl3 vars + model: Model + cache: Cache + tokenizer: Tokenizer + config: Config + + # Required methods + @classmethod + async def create(cls, model_directory: pathlib.Path, **kwargs): + """ + Asynchronously creates and initializes a model container instance. + + Args: + model_directory: Path to the model files. + **kwargs: Backend-specific configuration options. + + Returns: + An instance of the implementing class. + """ + + self = cls() + + logger.warning( + "ExllamaV3 is currently in an alpha state. 
" + "Please note that all config options may not work." + ) + + self.config = Config.from_directory(model_directory.resolve()) + self.model = Model.from_config(self.config) + self.tokenizer = Tokenizer.from_config(self.config) + + max_seq_len = kwargs.get("max_seq_len") + self.cache = Cache(self.model, max_num_tokens=max_seq_len) + + return self + + async def load(self, progress_callback=None, **kwargs): + """ + Loads the model into memory. + + Args: + progress_callback: Optional callback for progress updates. + **kwargs: Additional loading options. + """ + + async for _ in self.load_gen(progress_callback): + pass + + async def load_gen(self, progress_callback=None, **kwargs): + """ + Loads the model into memory, yielding progress updates. + + Args: + progress_callback: Optional callback for progress updates. + **kwargs: Additional loading options. + + Yields: + Progress updates + """ + + try: + await self.load_lock.acquire() + + # Wait for existing generation jobs to finish + await self.wait_for_jobs(kwargs.get("skip_wait")) + + generator = self.load_model_sync(progress_callback) + async for module, modules in iterate_in_threadpool(generator): + yield module, modules + + # Clean up any extra vram usage from torch and cuda + # (Helps reduce VRAM bottlenecking on Windows) + gc.collect() + torch.cuda.empty_cache() + + # Cleanup and update model load state + self.loaded = True + logger.info("Model successfully loaded.") + finally: + self.load_lock.release() + + async with self.load_condition: + self.load_condition.notify_all() + + # TODO: Add draft loading + @torch.inference_mode() + def load_model_sync(self, progress_callback=None): + for value in self.model.load_gen(callback=progress_callback): + if value: + yield value + + async def unload(self, loras_only: bool = False, **kwargs): + """ + Unloads the model and associated resources from memory. + + Args: + loras_only: If True, only unload LoRAs. + **kwargs: Additional unloading options (e.g., shutdown). + """ + + try: + await self.load_lock.acquire() + + # Wait for other jobs to finish + await self.wait_for_jobs(kwargs.get("skip_wait")) + + self.model.unload() + self.model = None + + self.config = None + self.cache = None + self.tokenizer = None + + gc.collect() + torch.cuda.empty_cache() + + logger.info("Model unloaded.") + finally: + self.load_lock.release() + + async with self.load_condition: + self.load_condition.notify_all() + + def encode_tokens(self, text: str, **kwargs) -> List[int]: + """ + Encodes a string of text into a list of token IDs. + + Args: + text: The input text string. + **kwargs: Backend-specific encoding options (e.g., add_bos_token). + + Returns: + A list of integer token IDs. + """ + + pass + + def decode_tokens(self, ids: List[int], **kwargs) -> str: + """ + Decodes a list of token IDs back into a string. + + Args: + ids: A list of integer token IDs. + **kwargs: Backend-specific decoding options (e.g., decode_special_tokens). + + Returns: + The decoded text string. + """ + + pass + + def get_special_tokens(self, **kwargs) -> Dict[str, Any]: + """ + Gets special tokens used by the model/tokenizer. + + Args: + **kwargs: Options like add_bos_token, ban_eos_token. + + Returns: + A dictionary mapping special token names (e.g., 'bos_token', 'eos_token') + to their string or ID representation. + """ + + pass + + def model_info(self) -> ModelCard: + """ + Returns a dictionary of the current model's configuration parameters. 
+ + Returns: + Model parameters provided by the backend + """ + + pass + + async def wait_for_jobs(self, skip_wait: bool = False): + """ + Waits for any active generation jobs to complete. + + Args: + skip_wait: If True, cancel jobs immediately instead of waiting. + """ + + pass + + async def generate( + self, + request_id: str, + prompt: str, + params: BaseSamplerRequest, + abort_event: Optional[asyncio.Event] = None, + mm_embeddings: Optional[MultimodalEmbeddingWrapper] = None, + ) -> Dict[str, Any]: + """ + Generates a complete response for a given prompt and parameters. + + Args: + request_id: Unique identifier for the generation request. + prompt: The input prompt string. + params: Sampling and generation parameters. + abort_event: An asyncio Event to signal cancellation. + mm_embeddings: Optional multimodal embeddings. + + Returns: + A dictionary containing the generation info + """ + + pass + + async def stream_generate( + self, + request_id: str, + prompt: str, + params: BaseSamplerRequest, + abort_event: Optional[asyncio.Event] = None, + mm_embeddings: Optional[MultimodalEmbeddingWrapper] = None, + ) -> AsyncIterator[Dict[str, Any]]: + """ + Generates a response iteratively (streaming) for a given prompt. + + Args: + request_id: Unique identifier for the generation request. + prompt: The input prompt string. + params: Sampling and generation parameters. + abort_event: An asyncio Event to signal cancellation. + mm_embeddings: Optional multimodal embeddings. + + Yields: + Generation chunks + """ + + if False: + yield diff --git a/common/model.py b/common/model.py index 96d45f6..9de86f0 100644 --- a/common/model.py +++ b/common/model.py @@ -10,7 +10,7 @@ from enum import Enum from fastapi import HTTPException from loguru import logger from ruamel.yaml import YAML -from typing import Optional +from typing import Dict, Optional from backends.base_model_container import BaseModelContainer from common.logger import get_loading_progress_bar @@ -24,7 +24,7 @@ container: Optional[BaseModelContainer] = None embeddings_container = None -_BACKEND_REGISTRY = {} +_BACKEND_REGISTRY: Dict[str, BaseModelContainer] = {} if dependencies.exllamav2: from backends.exllamav2.model import ExllamaV2Container @@ -32,6 +32,12 @@ if dependencies.exllamav2: _BACKEND_REGISTRY["exllamav2"] = ExllamaV2Container +if dependencies.exllamav3: + from backends.exllamav3.model import ExllamaV3Container + + _BACKEND_REGISTRY["exllamav3"] = ExllamaV3Container + + if dependencies.extras: from backends.infinity.model import InfinityContainer @@ -134,7 +140,9 @@ async def load_model_gen(model_path: pathlib.Path, **kwargs): "Available backends: {available_backends}" ) - new_container = await container_class.create(model_path.resolve(), False, **kwargs) + new_container: BaseModelContainer = await container_class.create( + model_path.resolve(), **kwargs + ) # Add possible types of models that can be loaded model_type = [ModelType.MODEL] @@ -142,7 +150,7 @@ async def load_model_gen(model_path: pathlib.Path, **kwargs): if new_container.use_vision: model_type.insert(0, ModelType.VISION) - if new_container.draft_config: + if new_container.use_draft_model: model_type.insert(0, ModelType.DRAFT) load_status = new_container.load_gen(load_progress, **kwargs) diff --git a/common/optional_dependencies.py b/common/optional_dependencies.py index 06b1286..0c1e7ff 100644 --- a/common/optional_dependencies.py +++ b/common/optional_dependencies.py @@ -13,6 +13,7 @@ class DependenciesModel(BaseModel): torch: bool exllamav2: bool + exllamav3: 
bool flash_attn: bool infinity_emb: bool sentence_transformers: bool @@ -25,7 +26,7 @@ class DependenciesModel(BaseModel): @computed_field @property def inference(self) -> bool: - return self.torch and self.exllamav2 and self.flash_attn + return self.torch and (self.exllamav2 or self.exllamav3) and self.flash_attn def is_installed(package_name: str) -> bool: From b4ff2f23cff4a27ab3994728cc3e1794c77155b0 Mon Sep 17 00:00:00 2001 From: kingbri <8082010+kingbri1@users.noreply.github.com> Date: Fri, 2 May 2025 21:32:53 -0400 Subject: [PATCH 03/45] Exl3: Add token encode, decode, and special token fetch Base class methods Signed-off-by: kingbri <8082010+kingbri1@users.noreply.github.com> --- backends/exllamav3/model.py | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/backends/exllamav3/model.py b/backends/exllamav3/model.py index 76360df..a9781ec 100644 --- a/backends/exllamav3/model.py +++ b/backends/exllamav3/model.py @@ -18,6 +18,7 @@ from common.multimodal import MultimodalEmbeddingWrapper from common.sampling import BaseSamplerRequest from common.templating import PromptTemplate from common.transformers_utils import GenerationConfig +from common.utils import unwrap from endpoints.core.types.model import ModelCard from exllamav3 import Config, Model, Cache, Tokenizer @@ -175,7 +176,11 @@ class ExllamaV3Container(BaseModelContainer): A list of integer token IDs. """ - pass + return self.tokenizer.encode( + text, + add_bos=unwrap(kwargs.get("add_bos_token"), True), + encode_special_tokens=unwrap(kwargs.get("encode_special_tokens"), True), + ).flatten().tolist() def decode_tokens(self, ids: List[int], **kwargs) -> str: """ @@ -189,9 +194,15 @@ class ExllamaV3Container(BaseModelContainer): The decoded text string. """ - pass + ids = torch.tensor([ids]) + return self.tokenizer.decode( + ids, + decode_special_tokens=unwrap(kwargs.get("decode_special_tokens"), True), + )[0] - def get_special_tokens(self, **kwargs) -> Dict[str, Any]: + def get_special_tokens( + self, add_bos_token: bool = True, ban_eos_token: bool = False + ): """ Gets special tokens used by the model/tokenizer. @@ -203,7 +214,12 @@ class ExllamaV3Container(BaseModelContainer): to their string or ID representation. """ - pass + return { + "bos_token": self.tokenizer.bos_token if add_bos_token else "", + "eos_token": self.tokenizer.eos_token if not ban_eos_token else "", + "pad_token": self.tokenizer.pad_token, + "unk_token": self.tokenizer.unk_token, + } def model_info(self) -> ModelCard: """ From daae9ec43d0ed1b489e03f012021702f0b010e5a Mon Sep 17 00:00:00 2001 From: randoentity <137087500+randoentity@users.noreply.github.com> Date: Tue, 29 Apr 2025 23:57:53 +0200 Subject: [PATCH 04/45] Exl3: Couldn't wait Just copied some stuff around and it ended up working for basic use. 
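For reference, the streaming pattern copied here boils down to roughly the
following (names are taken from the diff below; the exact exllamav3
signatures are an assumption, not verified against the library):

    # Rough shape of the exl3 loop added to ExllamaV3Container.generate_gen()
    generator = AsyncGenerator(model=model, cache=cache, tokenizer=tokenizer)
    job = AsyncJob(
        generator,
        input_ids=tokenizer.encode(prompt, add_bos=False),
        max_new_tokens=max_tokens,
        stop_conditions=stop_conditions,
    )
    async for result in job:
        chunk = result.get("text", "")   # incremental text chunk
        if result.get("eos"):            # finish info arrives on the eos result
            break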
--- backends/exllamav2/model.py | 6 +- backends/exllamav3/model.py | 225 +++++++++++++++++++++++++++++++++++- 2 files changed, 221 insertions(+), 10 deletions(-) diff --git a/backends/exllamav2/model.py b/backends/exllamav2/model.py index fcf4f3c..09da9a2 100644 --- a/backends/exllamav2/model.py +++ b/backends/exllamav2/model.py @@ -533,8 +533,7 @@ class ExllamaV2Container(BaseModelContainer): # Load draft model if a config is present if self.draft_config: self.draft_model = ExLlamaV2(self.draft_config) - if not self.quiet: - logger.info("Loading draft model: " + self.draft_config.model_dir) + logger.info("Loading draft model: " + self.draft_config.model_dir) # Draft uses the autosplit loader, so create a cache that reflects this draft_cache_class = self.get_cache_class(self.draft_cache_mode) @@ -587,8 +586,7 @@ class ExllamaV2Container(BaseModelContainer): yield value self.model = ExLlamaV2(self.config) - if not self.quiet: - logger.info("Loading model: " + self.config.model_dir) + logger.info("Loading model: " + self.config.model_dir) # Get class of the model cache cache_class = self.get_cache_class(self.cache_mode) diff --git a/backends/exllamav3/model.py b/backends/exllamav3/model.py index a9781ec..fc4f198 100644 --- a/backends/exllamav3/model.py +++ b/backends/exllamav3/model.py @@ -16,12 +16,12 @@ from backends.base_model_container import BaseModelContainer from common.concurrency import iterate_in_threadpool from common.multimodal import MultimodalEmbeddingWrapper from common.sampling import BaseSamplerRequest -from common.templating import PromptTemplate +from common.templating import PromptTemplate, find_prompt_template from common.transformers_utils import GenerationConfig from common.utils import unwrap from endpoints.core.types.model import ModelCard -from exllamav3 import Config, Model, Cache, Tokenizer +from exllamav3 import AsyncGenerator, AsyncJob, Config, Model, Cache, Tokenizer class ExllamaV3Container(BaseModelContainer): @@ -46,6 +46,8 @@ class ExllamaV3Container(BaseModelContainer): cache: Cache tokenizer: Tokenizer config: Config + gpu_split: List[float] = [] + max_seq_len: int = 2048 # Required methods @classmethod @@ -74,6 +76,16 @@ class ExllamaV3Container(BaseModelContainer): max_seq_len = kwargs.get("max_seq_len") self.cache = Cache(self.model, max_num_tokens=max_seq_len) + gpu_split = unwrap(kwargs.get("gpu_split"), []) + + # Set GPU split options + # Enable manual GPU split if provided + if gpu_split: + self.gpu_split = gpu_split + # Try to set prompt template + self.prompt_template = await find_prompt_template( + kwargs.get("prompt_template"), model_directory + ) return self @@ -128,7 +140,10 @@ class ExllamaV3Container(BaseModelContainer): # TODO: Add draft loading @torch.inference_mode() def load_model_sync(self, progress_callback=None): - for value in self.model.load_gen(callback=progress_callback): + for value in self.model.load_gen( + use_per_device=self.gpu_split, + callback=progress_callback + ): if value: yield value @@ -263,7 +278,58 @@ class ExllamaV3Container(BaseModelContainer): A dictionary containing the generation info """ - pass + generations = [] + async for generation in self.stream_generate( + request_id, + prompt, + params, + abort_event, + mm_embeddings, + ): + generations.append(generation) + + joined_generation = { + "text": "", + "prompt_tokens": 0, + "generation_tokens": 0, + "tool_calls": None, + "offset": [], + "token_probs": {}, + "logprobs": [], + } + + if generations: + # Get finish_reason first and then shift where -1 points to + 
if "finish_reason" in generations[-1]: + finish_reason_gen = generations.pop() + joined_generation["finish_reason"] = finish_reason_gen.get( + "finish_reason" + ) + joined_generation["stop_str"] = finish_reason_gen.get("stop_str") + else: + joined_generation["finish_reason"] = "stop" + + if len(generations) > 0: + for generation in generations: + joined_generation["text"] += unwrap(generation.get("text"), "") + joined_generation["offset"].append(unwrap(generation.get("offset"), -1)) + joined_generation["token_probs"].update( + unwrap(generation.get("token_probs"), {}) + ) + + # Include empty logprob dicts for index preservation + joined_generation["logprobs"].append( + unwrap(generation.get("logprobs"), {}) + ) + + joined_generation["prompt_tokens"] = unwrap( + generations[-1].get("prompt_tokens"), 0 + ) + joined_generation["generated_tokens"] = unwrap( + generations[-1].get("generated_tokens"), 0 + ) + + return joined_generation async def stream_generate( self, @@ -287,5 +353,152 @@ class ExllamaV3Container(BaseModelContainer): Generation chunks """ - if False: - yield + try: + # Wait for load lock to be freed before processing + # Mainly used for loras and other operations where the class is available + async with self.load_condition: + await self.load_condition.wait_for(lambda: not self.load_lock.locked()) + + # If the model is being unloaded, don't accept new requests + if not self.loaded: + raise RuntimeError( + "Model is being unloaded. Cannot process new generation requests." + ) + + # Mark that the job is running + self.active_job_ids[request_id] = None + + # Yield from the internal generator + async for generation_chunk in self.generate_gen( + request_id=request_id, + prompt=prompt, + params=params, + abort_event=abort_event, + mm_embeddings=mm_embeddings, + ): + yield generation_chunk + finally: + # Clean up and remove the job from active IDs + del self.active_job_ids[request_id] + + def handle_finish_chunk(self, result: dict, generation: dict): + eos_reason = result.get("eos_reason") + + stop_str = None + if eos_reason == "max_new_tokens": + finish_reason = "length" + else: + finish_reason = "stop" + # Grab stop string if stop was the reason + if eos_reason == "stop_token": + stop_str = result.get("eos_triggering_token_str") + elif eos_reason == "stop_string": + stop_str = result.get("eos_triggering_string") + + finish_chunk = { + "prompt_tokens": generation.get("prompt_tokens"), + "generated_tokens": generation.get("generated_tokens"), + "finish_reason": finish_reason, + "stop_str": stop_str, + } + + return finish_chunk + + async def generate_gen( + self, + request_id: str, + prompt: str, + params: BaseSamplerRequest, + abort_event: Optional[asyncio.Event] = None, + mm_embeddings: Optional[MultimodalEmbeddingWrapper] = None, + ): + """ + Create generator function for prompt completion. 
+ + for kwargs, check common/sampling.py + """ + chunk_tokens: torch.Tensor | tuple[torch.Tensor, torch.Tensor] + + prompts = [prompt] + stop_conditions = params.stop + add_bos_token = params.add_bos_token + + # Fetch EOS tokens from generation_config if they exist + eos_tokens = ( + self.generation_config.eos_tokens() + if self.generation_config + else [self.tokenizer.eos_token_id] + ) + + stop_conditions += eos_tokens + + input_ids = [ + self.tokenizer.encode( + prompt, + add_bos=add_bos_token, + encode_special_tokens=True, + ) + for prompt in prompts + ] + + # The first index will always be the positive prompt + context_len = input_ids[0].size(dim=-1) + + # Automatically set max_tokens to fill up the context + # This should be an OK default, but may be changed in the future + max_tokens = unwrap( + params.max_tokens, + self.max_seq_len - context_len, + ) + if max_tokens < 1: + logger.warning("max_tokens must be a positive integer, setting to 1.") + max_tokens = 1 + + # Determine if the negative context or the context length is bigger + context_to_check = context_len + + # Check total length of prompt against max context length + if context_to_check > self.max_seq_len: + preamble = "Prompt" + + raise ValueError( + f"{preamble} length {context_to_check} is greater than " + f"max_seq_len {self.max_seq_len}" + ) + + self.generator = AsyncGenerator( + model=self.model, + cache=self.cache, + tokenizer=self.tokenizer, + ) + + generation = {} + print(max_tokens) + job = AsyncJob( + self.generator, + input_ids=self.tokenizer.encode(prompt, add_bos=False), + max_new_tokens=max_tokens, + stop_conditions=stop_conditions, + ) + generated_tokens = 0 + full_response = "" + async for result in job: + chunk = unwrap(result.get("text"), "") + if chunk: + chunk_tokens = result.get("token_ids", self.tokenizer.encode(chunk)) + full_response += chunk + if isinstance(chunk_tokens, torch.Tensor): + generated_tokens += chunk_tokens.size(dim=0) + generation = { + "text": chunk, + "prompt_tokens": context_len, + "generated_tokens": generated_tokens, + "offset": len(full_response), + } + yield generation + + if result.get("eos"): + generation = self.handle_finish_chunk(result, generation) + yield generation + # Assign the active job to the request ID + self.active_job_ids[request_id] = job From 14fb57337189f5b60a3f3db64f827bfc0f50deb0 Mon Sep 17 00:00:00 2001 From: randoentity <137087500+randoentity@users.noreply.github.com> Date: Wed, 30 Apr 2025 00:23:25 +0200 Subject: [PATCH 05/45] fixup: max_seq_len Whoops --- backends/exllamav3/model.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/backends/exllamav3/model.py b/backends/exllamav3/model.py index fc4f198..6bec250 100644 --- a/backends/exllamav3/model.py +++ b/backends/exllamav3/model.py @@ -47,7 +47,7 @@ class ExllamaV3Container(BaseModelContainer): tokenizer: Tokenizer config: Config gpu_split: List[float] = [] - max_seq_len: int = 2048 + max_seq_len: int # Required methods @classmethod @@ -74,8 +74,8 @@ class ExllamaV3Container(BaseModelContainer): self.model = Model.from_config(self.config) self.tokenizer = Tokenizer.from_config(self.config) - max_seq_len = kwargs.get("max_seq_len") - self.cache = Cache(self.model, max_num_tokens=max_seq_len) + self.max_seq_len = kwargs.get("max_seq_len") + self.cache = Cache(self.model, max_num_tokens=self.max_seq_len) gpu_split = unwrap(kwargs.get("gpu_split"), []) # Set GPU split options From acb3adb9536b3e7e55f39bb10a074ef2894700cf Mon Sep 17 00:00:00 2001 From: randoentity 
<137087500+randoentity@users.noreply.github.com> Date: Wed, 30 Apr 2025 08:43:26 +0200 Subject: [PATCH 06/45] fixup: auto split --- backends/exllamav3/model.py | 42 +++++++++++++++++++++++++++++++------ 1 file changed, 36 insertions(+), 6 deletions(-) diff --git a/backends/exllamav3/model.py b/backends/exllamav3/model.py index 6bec250..793ec35 100644 --- a/backends/exllamav3/model.py +++ b/backends/exllamav3/model.py @@ -1,5 +1,6 @@ import asyncio import gc +import math import pathlib from loguru import logger from typing import ( @@ -46,8 +47,11 @@ class ExllamaV3Container(BaseModelContainer): cache: Cache tokenizer: Tokenizer config: Config - gpu_split: List[float] = [] + gpu_split: List[float] | None = None + gpu_split_auto: bool = True + autosplit_reserve: List[float] = [96 * 1024**2] max_seq_len: int + use_tp: bool = False # Required methods @classmethod @@ -76,17 +80,43 @@ class ExllamaV3Container(BaseModelContainer): self.max_seq_len = kwargs.get("max_seq_len") self.cache = Cache(self.model, max_num_tokens=self.max_seq_len) - gpu_split = unwrap(kwargs.get("gpu_split"), []) - # Set GPU split options - # Enable manual GPU split if provided - if gpu_split: - self.gpu_split = gpu_split # Try to set prompt template self.prompt_template = await find_prompt_template( kwargs.get("prompt_template"), model_directory ) + # Turn off GPU split if the user is using 1 GPU + gpu_count = torch.cuda.device_count() + gpu_split_auto = unwrap(kwargs.get("gpu_split_auto"), True) + gpu_split = unwrap(kwargs.get("gpu_split"), None) + + # Set GPU split options + if gpu_count == 1: + self.gpu_split_auto = False + logger.info("Disabling GPU split because one GPU is in use.") + else: + # TODO: Set tensor parallel + + # Set GPU split options + # Enable manual GPU split if provided + if gpu_split: + self.gpu_split = gpu_split + elif gpu_split_auto and not self.use_tp: + # Otherwise fallback to autosplit settings + self.gpu_split_auto = gpu_split_auto + + autosplit_reserve_megabytes = unwrap( + kwargs.get("autosplit_reserve"), [96] + ) + + # Reserve VRAM for each GPU + self.autosplit_reserve = [ + int(math.ceil(value * 1024**2)) + for value in autosplit_reserve_megabytes + ] + # TODO: speculative decoding + return self async def load(self, progress_callback=None, **kwargs): From 306fc7cd153185012dc758b6655b670309830bb5 Mon Sep 17 00:00:00 2001 From: randoentity <137087500+randoentity@users.noreply.github.com> Date: Wed, 30 Apr 2025 09:43:33 +0200 Subject: [PATCH 07/45] fixup: autosplit reserve this probably breaks v2 support --- backends/exllamav3/model.py | 5 +++-- common/config_models.py | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/backends/exllamav3/model.py b/backends/exllamav3/model.py index 793ec35..f2dfd4f 100644 --- a/backends/exllamav3/model.py +++ b/backends/exllamav3/model.py @@ -49,7 +49,7 @@ class ExllamaV3Container(BaseModelContainer): config: Config gpu_split: List[float] | None = None gpu_split_auto: bool = True - autosplit_reserve: List[float] = [96 * 1024**2] + autosplit_reserve: List[float] = [96 / 1024] max_seq_len: int use_tp: bool = False @@ -112,7 +112,7 @@ class ExllamaV3Container(BaseModelContainer): # Reserve VRAM for each GPU self.autosplit_reserve = [ - int(math.ceil(value * 1024**2)) + int(math.ceil(value/1024)) for value in autosplit_reserve_megabytes ] # TODO: speculative decoding @@ -171,6 +171,7 @@ class ExllamaV3Container(BaseModelContainer): @torch.inference_mode() def load_model_sync(self, progress_callback=None): for value in self.model.load_gen( + 
reserve_per_device=self.autosplit_reserve, use_per_device=self.gpu_split, callback=progress_callback ): diff --git a/common/config_models.py b/common/config_models.py index bb09e57..d2af39e 100644 --- a/common/config_models.py +++ b/common/config_models.py @@ -193,7 +193,7 @@ class ModelConfig(BaseConfigModel): "Not parsed for single GPU users." ), ) - autosplit_reserve: List[int] = Field( + autosplit_reserve: List[float] = Field( [96], description=( "Reserve VRAM used for autosplit loading (default: 96 MB on GPU 0).\n" From c0f268f33e0bbc4c829c93fb5b447d915fb2be17 Mon Sep 17 00:00:00 2001 From: randoentity <137087500+randoentity@users.noreply.github.com> Date: Wed, 30 Apr 2025 11:10:03 +0200 Subject: [PATCH 08/45] fixup: autosplit, start work on metrics --- backends/exllamav3/model.py | 34 ++++++++++++++++++++++++++++------ 1 file changed, 28 insertions(+), 6 deletions(-) diff --git a/backends/exllamav3/model.py b/backends/exllamav3/model.py index f2dfd4f..54c3547 100644 --- a/backends/exllamav3/model.py +++ b/backends/exllamav3/model.py @@ -1,8 +1,6 @@ import asyncio import gc -import math import pathlib -from loguru import logger from typing import ( Any, AsyncIterator, @@ -12,9 +10,14 @@ from typing import ( ) import torch +from exllamav3 import AsyncGenerator, AsyncJob, Cache, Config, Model, Tokenizer +from loguru import logger from backends.base_model_container import BaseModelContainer from common.concurrency import iterate_in_threadpool +from common.gen_logging import ( + log_metrics, +) from common.multimodal import MultimodalEmbeddingWrapper from common.sampling import BaseSamplerRequest from common.templating import PromptTemplate, find_prompt_template @@ -22,8 +25,6 @@ from common.transformers_utils import GenerationConfig from common.utils import unwrap from endpoints.core.types.model import ModelCard -from exllamav3 import AsyncGenerator, AsyncJob, Config, Model, Cache, Tokenizer - class ExllamaV3Container(BaseModelContainer): """Abstract base class for model containers.""" @@ -112,7 +113,7 @@ class ExllamaV3Container(BaseModelContainer): # Reserve VRAM for each GPU self.autosplit_reserve = [ - int(math.ceil(value/1024)) + value/1024 for value in autosplit_reserve_megabytes ] # TODO: speculative decoding @@ -504,15 +505,17 @@ class ExllamaV3Container(BaseModelContainer): ) generation = {} - print(max_tokens) job = AsyncJob( self.generator, input_ids=self.tokenizer.encode(prompt, add_bos=False), max_new_tokens=max_tokens, stop_conditions=stop_conditions, ) + generated_tokens = 0 full_response = "" + metrics_result = {} + async for result in job: chunk = unwrap(result.get("text"), "") if chunk: @@ -530,6 +533,25 @@ class ExllamaV3Container(BaseModelContainer): if result.get("eos"): generation = self.handle_finish_chunk(result, generation) + + # Save the final result for metrics logging + metrics_result = result + yield generation + break # Assign the active job to the request ID self.active_job_ids[request_id] = job + + # Log the metrics if present + if metrics_result: + log_metrics( + request_id, + metrics_result.get("time_enqueued"), + metrics_result.get("prompt_tokens"), + metrics_result.get("cached_tokens"), + metrics_result.get("time_prefill"), + metrics_result.get("new_tokens"), + metrics_result.get("time_generate"), + context_len, + self.max_seq_len, + ) From b35c48da377810187594eab92891d7646b5926a0 Mon Sep 17 00:00:00 2001 From: randoentity <137087500+randoentity@users.noreply.github.com> Date: Wed, 30 Apr 2025 11:56:24 +0200 Subject: [PATCH 09/45] fixup: some metrics 
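Wraps the exl3 job loop in try/except/finally: on an unexpected error the
generator is rebuilt and the failure is reported to HealthManager, and
metrics are now logged from the final (eos) result in the finally block.
The fields read for log_metrics are sketched below (assumed to be populated
by exllamav3 on the last result; not verified beyond this patch):

    # Metrics call moved into the finally block in this fixup
    log_metrics(
        request_id,
        metrics_result.get("time_enqueued"),
        metrics_result.get("prompt_tokens"),
        metrics_result.get("cached_tokens"),
        metrics_result.get("time_prefill"),
        metrics_result.get("new_tokens"),
        metrics_result.get("time_generate"),
        context_len,
        self.max_seq_len,
    )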
--- backends/exllamav3/model.py | 118 +++++++++++++++++++++++++----------- 1 file changed, 84 insertions(+), 34 deletions(-) diff --git a/backends/exllamav3/model.py b/backends/exllamav3/model.py index 54c3547..82f3045 100644 --- a/backends/exllamav3/model.py +++ b/backends/exllamav3/model.py @@ -18,6 +18,7 @@ from common.concurrency import iterate_in_threadpool from common.gen_logging import ( log_metrics, ) +from common.health import HealthManager from common.multimodal import MultimodalEmbeddingWrapper from common.sampling import BaseSamplerRequest from common.templating import PromptTemplate, find_prompt_template @@ -436,6 +437,37 @@ class ExllamaV3Container(BaseModelContainer): return finish_chunk + async def create_generator(self): + """Create and save a Exllama generator class.""" + + try: + # Don't acquire locks unless a model is loaded + if self.loaded: + await self.load_lock.acquire() + + # Immediately cancel all jobs + await self.wait_for_jobs(skip_wait=True) + + # Create new generator + self.generator = AsyncGenerator( + model=self.model, + cache=self.cache, + tokenizer=self.tokenizer, + max_batch_size=self.max_batch_size, + ) + + # Update the state of the container var + if self.max_batch_size is None: + self.max_batch_size = self.generator.generator.max_batch_size + finally: + # This means the generator is being recreated + # The load lock is already released in the load function + if self.loaded: + self.load_lock.release() + + async with self.load_condition: + self.load_condition.notify_all() + async def generate_gen( self, request_id: str, @@ -516,42 +548,60 @@ class ExllamaV3Container(BaseModelContainer): full_response = "" metrics_result = {} - async for result in job: - chunk = unwrap(result.get("text"), "") - if chunk: - chunk_tokens = result.get("token_ids", self.tokenizer.encode(chunk)) - full_response += chunk - if isinstance(chunk_tokens, torch.Tensor): - generated_tokens += chunk_tokens.size(dim=0) - generation = { - "text": chunk, - "prompt_tokens": context_len, - "generated_tokens": generated_tokens, - "offset": len(full_response), - } - yield generation + # Get the generation status once it's ready + try: + async for result in job: + chunk = unwrap(result.get("text"), "") + if chunk: + chunk_tokens = result.get("token_ids", self.tokenizer.encode(chunk)) + full_response += chunk + if isinstance(chunk_tokens, torch.Tensor): + generated_tokens += chunk_tokens.size(dim=0) + generation = { + "text": chunk, + "prompt_tokens": context_len, + "generated_tokens": generated_tokens, + "offset": len(full_response), + } + yield generation - if result.get("eos"): - generation = self.handle_finish_chunk(result, generation) + if result.get("eos"): + generation = self.handle_finish_chunk(result, generation) - # Save the final result for metrics logging - metrics_result = result + # Save the final result for metrics logging + metrics_result = result - yield generation - break - # Assign the active job to the request ID - self.active_job_ids[request_id] = job + yield generation + break + # Assign the active job to the request ID + self.active_job_ids[request_id] = job - # Log the metrics if present - if metrics_result: - log_metrics( - request_id, - metrics_result.get("time_enqueued"), - metrics_result.get("prompt_tokens"), - metrics_result.get("cached_tokens"), - metrics_result.get("time_prefill"), - metrics_result.get("new_tokens"), - metrics_result.get("time_generate"), - context_len, - self.max_seq_len, + except asyncio.CancelledError: + await job.cancel() + except Exception 
as ex: + # Create a new generator since the current state is broken + # No need to wait for this to finish + logger.error( + "FATAL ERROR with generation. " + "Attempting to recreate the generator. " + "If this fails, please restart the server.\n" ) + asyncio.ensure_future(self.create_generator()) + + await HealthManager.add_unhealthy_event(ex) + + raise ex + finally: + # Log the metrics if present + if metrics_result: + log_metrics( + request_id, + metrics_result.get("time_enqueued"), + metrics_result.get("prompt_tokens"), + metrics_result.get("cached_tokens"), + metrics_result.get("time_prefill"), + metrics_result.get("new_tokens"), + metrics_result.get("time_generate"), + context_len, + self.max_seq_len, + ) From c744790f142f6f83061e92242ac675790aa7348d Mon Sep 17 00:00:00 2001 From: randoentity <137087500+randoentity@users.noreply.github.com> Date: Wed, 30 Apr 2025 13:14:34 +0200 Subject: [PATCH 10/45] fixup: add sampler logs Also passing sampler to job with this, no idea if this is correct --- backends/exllamav3/model.py | 53 ++++++++++++++++++++++++++++++++++++- 1 file changed, 52 insertions(+), 1 deletion(-) diff --git a/backends/exllamav3/model.py b/backends/exllamav3/model.py index 82f3045..b4f6fdc 100644 --- a/backends/exllamav3/model.py +++ b/backends/exllamav3/model.py @@ -10,12 +10,21 @@ from typing import ( ) import torch -from exllamav3 import AsyncGenerator, AsyncJob, Cache, Config, Model, Tokenizer +from exllamav3 import ( + AsyncGenerator, + AsyncJob, + Cache, + ComboSampler, + Config, + Model, + Tokenizer, +) from loguru import logger from backends.base_model_container import BaseModelContainer from common.concurrency import iterate_in_threadpool from common.gen_logging import ( + log_generation_params, log_metrics, ) from common.health import HealthManager @@ -483,6 +492,30 @@ class ExllamaV3Container(BaseModelContainer): """ chunk_tokens: torch.Tensor | tuple[torch.Tensor, torch.Tensor] + # FIXME: this is probably not right + base_sampler = BaseSamplerRequest() + sampler = ComboSampler( + rep_p=base_sampler.repetition_penalty, + pres_p=base_sampler.presence_penalty, + freq_p=base_sampler.frequency_penalty, + rep_sustain_range=base_sampler.penalty_range, + rep_decay_range=base_sampler.penalty_range, + temperature=base_sampler.temperature, + min_p=base_sampler.min_p, + top_k=base_sampler.top_k, + top_p=base_sampler.top_p, + temp_last=base_sampler.temperature_last, + ) + + # Dynamically scale penalty range to output tokens + # Only do this if freq/pres pen is enabled + # and the repetition range is -1 + # TODO: + # auto_scale_penalty_range = ( + # gen_settings.token_frequency_penalty != 0 + # or gen_settings.token_presence_penalty != 0 + # ) and gen_settings.token_repetition_range == -1 + prompts = [prompt] stop_conditions = params.stop add_bos_token = params.add_bos_token @@ -539,6 +572,7 @@ class ExllamaV3Container(BaseModelContainer): generation = {} job = AsyncJob( self.generator, + sampler=sampler, input_ids=self.tokenizer.encode(prompt, add_bos=False), max_new_tokens=max_tokens, stop_conditions=stop_conditions, @@ -557,6 +591,12 @@ class ExllamaV3Container(BaseModelContainer): full_response += chunk if isinstance(chunk_tokens, torch.Tensor): generated_tokens += chunk_tokens.size(dim=0) + + # Increase penalty range to generated token amount + # TODO: + # if auto_scale_penalty_range: + # gen_settings.token_repetition_range = generated_tokens + generation = { "text": chunk, "prompt_tokens": context_len, @@ -592,6 +632,17 @@ class ExllamaV3Container(BaseModelContainer): 
raise ex finally: + # Log generation options to console + # Some options are too large, so log the args instead + log_generation_params( + request_id=request_id, + bos_token_id=self.tokenizer.bos_token_id, + eos_token_id=eos_tokens, + prompt=prompt, + **params.model_dump(exclude={"prompt"}), + # auto_scale_penalty_range=auto_scale_penalty_range, # TODO + ) + # Log the metrics if present if metrics_result: log_metrics( From 303e2dde126722aed5b58b0747e32a4808e13cbc Mon Sep 17 00:00:00 2001 From: kingbri <8082010+kingbri1@users.noreply.github.com> Date: Wed, 30 Apr 2025 22:59:25 -0400 Subject: [PATCH 11/45] Model: Correct exl3 generation, add concurrency, and cleanup Fixes application of sampler parameters by adding a new sampler builder interface. Also expose the generator class-wide and add wait_for_jobs. Finally, allow inline loading to specify the backend. Signed-off-by: kingbri <8082010+kingbri1@users.noreply.github.com> --- backends/exllamav3/model.py | 236 +++++++++++++++++++++++------------- common/config_models.py | 4 +- 2 files changed, 155 insertions(+), 85 deletions(-) diff --git a/backends/exllamav3/model.py b/backends/exllamav3/model.py index b4f6fdc..06d3b29 100644 --- a/backends/exllamav3/model.py +++ b/backends/exllamav3/model.py @@ -14,7 +14,6 @@ from exllamav3 import ( AsyncGenerator, AsyncJob, Cache, - ComboSampler, Config, Model, Tokenizer, @@ -22,6 +21,7 @@ from exllamav3 import ( from loguru import logger from backends.base_model_container import BaseModelContainer +from backends.exllamav3.sampler import ExllamaV3SamplerBuilder from common.concurrency import iterate_in_threadpool from common.gen_logging import ( log_generation_params, @@ -32,7 +32,7 @@ from common.multimodal import MultimodalEmbeddingWrapper from common.sampling import BaseSamplerRequest from common.templating import PromptTemplate, find_prompt_template from common.transformers_utils import GenerationConfig -from common.utils import unwrap +from common.utils import coalesce, unwrap from endpoints.core.types.model import ModelCard @@ -58,6 +58,7 @@ class ExllamaV3Container(BaseModelContainer): cache: Cache tokenizer: Tokenizer config: Config + generator: Optional[AsyncGenerator] = None gpu_split: List[float] | None = None gpu_split_auto: bool = True autosplit_reserve: List[float] = [96 / 1024] @@ -123,13 +124,47 @@ class ExllamaV3Container(BaseModelContainer): # Reserve VRAM for each GPU self.autosplit_reserve = [ - value/1024 - for value in autosplit_reserve_megabytes + value / 1024 for value in autosplit_reserve_megabytes ] # TODO: speculative decoding return self + def model_info(self) -> ModelCard: + """ + Returns a dictionary of the current model's configuration parameters. + + Returns: + Model parameters provided by the backend + """ + + pass + + async def wait_for_jobs(self, skip_wait: bool = False): + """ + Polling to wait for any active generation jobs to complete. + + Args: + skip_wait: If True, cancel jobs immediately instead of waiting. + """ + + if not self.generator: + return + + # Immediately abort all jobs if asked + if skip_wait: + logger.warning( + "Immediately terminating all jobs. " + "Clients will have their requests cancelled.\n" + ) + + for job in self.active_job_ids.values(): + if job: + await job.cancel() + + while len(self.active_job_ids) > 0: + await asyncio.sleep(0.01) + async def load(self, progress_callback=None, **kwargs): """ Loads the model into memory. 
@@ -161,8 +196,8 @@ class ExllamaV3Container(BaseModelContainer): await self.wait_for_jobs(kwargs.get("skip_wait")) generator = self.load_model_sync(progress_callback) - async for module, modules in iterate_in_threadpool(generator): - yield module, modules + async for value in iterate_in_threadpool(generator): + yield value # Clean up any extra vram usage from torch and cuda # (Helps reduce VRAM bottlenecking on Windows) @@ -184,11 +219,42 @@ class ExllamaV3Container(BaseModelContainer): for value in self.model.load_gen( reserve_per_device=self.autosplit_reserve, use_per_device=self.gpu_split, - callback=progress_callback + callback=progress_callback, ): if value: yield value + async def create_generator(self): + """Create and save a Exllama generator class.""" + + try: + # Don't acquire locks unless a model is loaded + if self.loaded: + await self.load_lock.acquire() + + # Immediately cancel all jobs + await self.wait_for_jobs(skip_wait=True) + + # Create new generator + self.generator = AsyncGenerator( + model=self.model, + cache=self.cache, + tokenizer=self.tokenizer, + max_batch_size=self.max_batch_size, + ) + + # Update the state of the container var + if self.max_batch_size is None: + self.max_batch_size = self.generator.generator.max_batch_size + finally: + # This means the generator is being recreated + # The load lock is already released in the load function + if self.loaded: + self.load_lock.release() + + async with self.load_condition: + self.load_condition.notify_all() + async def unload(self, loras_only: bool = False, **kwargs): """ Unloads the model and associated resources from memory. @@ -198,11 +264,15 @@ class ExllamaV3Container(BaseModelContainer): **kwargs: Additional unloading options (e.g., shutdown). """ - try: - await self.load_lock.acquire() + # Used when shutting down the server + do_shutdown = kwargs.get("shutdown") - # Wait for other jobs to finish - await self.wait_for_jobs(kwargs.get("skip_wait")) + try: + if not do_shutdown: + await self.load_lock.acquire() + + # Wait for other jobs to finish + await self.wait_for_jobs(kwargs.get("skip_wait")) self.model.unload() self.model = None @@ -211,15 +281,21 @@ class ExllamaV3Container(BaseModelContainer): self.cache = None self.tokenizer = None + # Cleanup the generator from any pending jobs + if self.generator is not None: + await self.generator.close() + self.generator = None + gc.collect() torch.cuda.empty_cache() logger.info("Model unloaded.") finally: - self.load_lock.release() + if not do_shutdown: + self.load_lock.release() - async with self.load_condition: - self.load_condition.notify_all() + async with self.load_condition: + self.load_condition.notify_all() def encode_tokens(self, text: str, **kwargs) -> List[int]: """ @@ -233,11 +309,15 @@ class ExllamaV3Container(BaseModelContainer): A list of integer token IDs. """ - return self.tokenizer.encode( - text, - add_bos=unwrap(kwargs.get("add_bos_token"), True), - encode_special_tokens=unwrap(kwargs.get("encode_special_tokens"), True), - ).flatten().tolist() + return ( + self.tokenizer.encode( + text, + add_bos=unwrap(kwargs.get("add_bos_token"), True), + encode_special_tokens=unwrap(kwargs.get("encode_special_tokens"), True), + ) + .flatten() + .tolist() + ) def decode_tokens(self, ids: List[int], **kwargs) -> str: """ @@ -278,26 +358,6 @@ class ExllamaV3Container(BaseModelContainer): "unk_token": self.tokenizer.unk_token, } - def model_info(self) -> ModelCard: - """ - Returns a dictionary of the current model's configuration parameters. 
- - Returns: - Model parameters provided by the backend - """ - - pass - - async def wait_for_jobs(self, skip_wait: bool = False): - """ - Waits for any active generation jobs to complete. - - Args: - skip_wait: If True, cancel jobs immediately instead of waiting. - """ - - pass - async def generate( self, request_id: str, @@ -446,37 +506,6 @@ class ExllamaV3Container(BaseModelContainer): return finish_chunk - async def create_generator(self): - """Create and save a Exllama generator class.""" - - try: - # Don't acquire locks unless a model is loaded - if self.loaded: - await self.load_lock.acquire() - - # Immediately cancel all jobs - await self.wait_for_jobs(skip_wait=True) - - # Create new generator - self.generator = AsyncGenerator( - model=self.model, - cache=self.cache, - tokenizer=self.tokenizer, - max_batch_size=self.max_batch_size, - ) - - # Update the state of the container var - if self.max_batch_size is None: - self.max_batch_size = self.generator.generator.max_batch_size - finally: - # This means the generator is being recreated - # The load lock is already released in the load function - if self.loaded: - self.load_lock.release() - - async with self.load_condition: - self.load_condition.notify_all() - async def generate_gen( self, request_id: str, @@ -492,25 +521,58 @@ class ExllamaV3Container(BaseModelContainer): """ chunk_tokens: torch.Tensor | tuple[torch.Tensor, torch.Tensor] - # FIXME: this is probably not right - base_sampler = BaseSamplerRequest() - sampler = ComboSampler( - rep_p=base_sampler.repetition_penalty, - pres_p=base_sampler.presence_penalty, - freq_p=base_sampler.frequency_penalty, - rep_sustain_range=base_sampler.penalty_range, - rep_decay_range=base_sampler.penalty_range, - temperature=base_sampler.temperature, - min_p=base_sampler.min_p, - top_k=base_sampler.top_k, - top_p=base_sampler.top_p, - temp_last=base_sampler.temperature_last, + sampler_builder = ExllamaV3SamplerBuilder() + + # Penalties + + # Set penalty range + penalty_range = unwrap(params.penalty_range, self.max_seq_len) + + # Exl3's version of including the entire context + if penalty_range < 0: + penalty_range = 10e7 + + # Always make sure the fallback is 0 if range < 0 + # It's technically fine to use -1, but this just validates the passed + # fallback + # Always default to 0 if something goes wrong + if params.penalty_range < 0: + fallback_decay = 0 + else: + fallback_decay = params.penalty_range + + repetition_decay = coalesce(params.repetition_decay, fallback_decay, 0) + + # Apply penalties to builder + sampler_builder.penalties( + params.repetition_penalty, + params.frequency_penalty, + params.presence_penalty, + penalty_range, + repetition_decay, ) + # Apply temperature first to builder + if not params.temperature_last: + sampler_builder.temperature(params.temperature) + + # Apply alphabet samplers to builder + sampler_builder.top_k(params.top_k) + sampler_builder.top_p(params.top_p) + sampler_builder.min_p(params.min_p) + + # Apply temperature last to builder + if params.temperature_last: + sampler_builder.temperature(params.temperature) + + # Build the sampler + # Set greedy if temperature is 0 + sampler = sampler_builder.build(params.temperature == 0) + # Dynamically scale penalty range to output tokens # Only do this if freq/pres pen is enabled # and the repetition range is -1 - # TODO: + # TODO: This currently does not work in exl3 # auto_scale_penalty_range = ( # gen_settings.token_frequency_penalty != 0 # or gen_settings.token_presence_penalty != 0 @@ -576,6 +638,7 @@ class 
ExllamaV3Container(BaseModelContainer): input_ids=self.tokenizer.encode(prompt, add_bos=False), max_new_tokens=max_tokens, stop_conditions=stop_conditions, + banned_strings=params.banned_strings, ) generated_tokens = 0 @@ -585,6 +648,11 @@ class ExllamaV3Container(BaseModelContainer): # Get the generation status once it's ready try: async for result in job: + # Abort if the event is set while streaming + if abort_event and abort_event.is_set(): + await job.cancel() + break + chunk = unwrap(result.get("text"), "") if chunk: chunk_tokens = result.get("token_ids", self.tokenizer.encode(chunk)) diff --git a/common/config_models.py b/common/config_models.py index d2af39e..2965fe0 100644 --- a/common/config_models.py +++ b/common/config_models.py @@ -163,8 +163,10 @@ class ModelConfig(BaseConfigModel): "Example: ['max_seq_len', 'cache_mode']." ), ) + + # Defaults to exllamav2 in common/model.py backend: Optional[str] = Field( - "exllamav2", + None, description=( "Backend to use for this model (default: exllamav2)\n" "Options: exllamav2, exllamav3", From bdc5189a4b460061386e24d53ae514edecc01b43 Mon Sep 17 00:00:00 2001 From: kingbri <8082010+kingbri1@users.noreply.github.com> Date: Wed, 30 Apr 2025 23:58:27 -0400 Subject: [PATCH 12/45] Exl3: Add chunk size, cache size, and model info Use the same algorithm for estimating and adjusting cache size based on multiples of 256 and above max seq len. Same applies for chunk size. Signed-off-by: kingbri <8082010+kingbri1@users.noreply.github.com> --- backends/exllamav2/model.py | 26 +++++--- backends/exllamav2/utils.py | 68 --------------------- backends/exllamav3/model.py | 119 ++++++++++++++++++++++++++++++++++-- 3 files changed, 130 insertions(+), 83 deletions(-) diff --git a/backends/exllamav2/model.py b/backends/exllamav2/model.py index 09da9a2..4745241 100644 --- a/backends/exllamav2/model.py +++ b/backends/exllamav2/model.py @@ -33,11 +33,7 @@ from backends.exllamav2.grammar import ( ExLlamaV2Grammar, clear_grammar_func_cache, ) -from backends.exllamav2.utils import ( - exllama_disabled_flash_attn, - hardware_supports_flash_attn, - supports_paged_attn, -) +from backends.exllamav2.utils import exllama_disabled_flash_attn from backends.exllamav2.vision import clear_image_embedding_cache from common.concurrency import iterate_in_threadpool from common.gen_logging import ( @@ -46,6 +42,7 @@ from common.gen_logging import ( log_prompt, log_response, ) +from common.hardware import hardware_supports_flash_attn from common.health import HealthManager from common.multimodal import MultimodalEmbeddingWrapper from common.sampling import BaseSamplerRequest @@ -278,11 +275,20 @@ class ExllamaV2Container(BaseModelContainer): # Check whether the user's configuration supports flash/paged attention # Also check if exl2 has disabled flash attention - if ( - exllama_disabled_flash_attn(self.config.no_flash_attn) - or not hardware_supports_flash_attn(gpu_device_list) - or not supports_paged_attn() - ): + if exllama_disabled_flash_attn( + self.config.no_flash_attn + ) or not hardware_supports_flash_attn(gpu_device_list): + gpu_unsupported_message = ( + "An unsupported GPU is found in this configuration. " + "Switching to compatibility mode. \n" + "This disables parallel batching " + "and features that rely on it (ex. CFG). \n" + "To disable compatability mode, all GPUs must be ampere " + "(30 series) or newer. AMD GPUs are not supported." 
+ ) + + logger.warning(gpu_unsupported_message) + self.config.no_flash_attn = True if self.draft_config: self.draft_config.no_flash_attn = True diff --git a/backends/exllamav2/utils.py b/backends/exllamav2/utils.py index 0fd1fcc..1648c62 100644 --- a/backends/exllamav2/utils.py +++ b/backends/exllamav2/utils.py @@ -1,74 +1,6 @@ -import platform -import torch -from packaging import version -from importlib.metadata import PackageNotFoundError, version as package_version from loguru import logger -def hardware_supports_flash_attn(gpu_device_list: list[int]): - """ - Check whether all GPUs in list support FA2 - - Compute capability < 8 is not supported by FA2 - AMD is also unsupported until ROCm updates its FA2 fork - """ - - # Logged message if unsupported - unsupported_message = ( - "An unsupported GPU is found in this configuration. " - "Switching to compatibility mode. \n" - "This disables parallel batching " - "and features that rely on it (ex. CFG). \n" - "To disable compatability mode, all GPUs must be ampere " - "(30 series) or newer. AMD GPUs are not supported." - ) - - min_compute_capability = min( - torch.cuda.get_device_capability(device=device_idx)[0] - for device_idx in gpu_device_list - ) - - if torch.version.hip or min_compute_capability < 8: - logger.warning(unsupported_message) - return False - else: - return True - - -def supports_paged_attn(): - """Check whether the user's flash-attn version supports paged mode""" - - # Logged message if unsupported - unsupported_message = ( - "Flash attention version >=2.5.7 " - "is required to use paged attention. " - "Switching to compatibility mode. \n" - "This disables parallel batching " - "and features that rely on it (ex. CFG). \n" - "Please upgrade your environment by running an update script " - "(update_scripts/" - f"update_deps.{'bat' if platform.system() == 'Windows' else 'sh'})\n\n" - "Or you can manually run a requirements update " - "using the following command:\n\n" - "For CUDA 12.1:\n" - "pip install --upgrade .[cu121]\n\n" - "NOTE: Windows users must use CUDA 12.x to use flash-attn." - ) - - required_version = version.parse("2.5.7") - try: - current_version = version.parse(package_version("flash-attn").split("+")[0]) - except PackageNotFoundError: - logger.warning(unsupported_message) - return False - - if current_version < required_version: - logger.warning(unsupported_message) - return False - else: - return True - - def exllama_disabled_flash_attn(no_flash_attn: bool): unsupported_message = ( "ExllamaV2 has disabled Flash Attention. 
\n" diff --git a/backends/exllamav3/model.py b/backends/exllamav3/model.py index 06d3b29..ce27a85 100644 --- a/backends/exllamav3/model.py +++ b/backends/exllamav3/model.py @@ -27,13 +27,14 @@ from common.gen_logging import ( log_generation_params, log_metrics, ) +from common.hardware import hardware_supports_flash_attn from common.health import HealthManager from common.multimodal import MultimodalEmbeddingWrapper from common.sampling import BaseSamplerRequest from common.templating import PromptTemplate, find_prompt_template from common.transformers_utils import GenerationConfig from common.utils import coalesce, unwrap -from endpoints.core.types.model import ModelCard +from endpoints.core.types.model import ModelCard, ModelCardParameters class ExllamaV3Container(BaseModelContainer): @@ -59,11 +60,16 @@ class ExllamaV3Container(BaseModelContainer): tokenizer: Tokenizer config: Config generator: Optional[AsyncGenerator] = None + + # Class-specific vars gpu_split: List[float] | None = None gpu_split_auto: bool = True autosplit_reserve: List[float] = [96 / 1024] - max_seq_len: int use_tp: bool = False + max_seq_len: int = 4096 + cache_size: int = 4096 + chunk_size: int = 2048 + max_batch_size: Optional[int] = None # Required methods @classmethod @@ -90,8 +96,8 @@ class ExllamaV3Container(BaseModelContainer): self.model = Model.from_config(self.config) self.tokenizer = Tokenizer.from_config(self.config) - self.max_seq_len = kwargs.get("max_seq_len") - self.cache = Cache(self.model, max_num_tokens=self.max_seq_len) + # Fallback to 4096 since exl3 can't fetch from HF's config.json + self.max_seq_len = unwrap(kwargs.get("max_seq_len"), 4096) # Try to set prompt template self.prompt_template = await find_prompt_template( @@ -102,6 +108,7 @@ class ExllamaV3Container(BaseModelContainer): gpu_count = torch.cuda.device_count() gpu_split_auto = unwrap(kwargs.get("gpu_split_auto"), True) gpu_split = unwrap(kwargs.get("gpu_split"), None) + gpu_device_list = list(range(0, gpu_count)) # Set GPU split options if gpu_count == 1: @@ -114,6 +121,12 @@ class ExllamaV3Container(BaseModelContainer): # Enable manual GPU split if provided if gpu_split: self.gpu_split = gpu_split + + gpu_device_list = [ + device_idx + for device_idx, memory in enumerate(self.gpu_split) + if memory > 0 + ] elif gpu_split_auto and not self.use_tp: # Otherwise fallback to autosplit settings self.gpu_split_auto = gpu_split_auto @@ -126,10 +139,87 @@ class ExllamaV3Container(BaseModelContainer): self.autosplit_reserve = [ value / 1024 for value in autosplit_reserve_megabytes ] + + if not hardware_supports_flash_attn(gpu_device_list): + gpu_unsupported_message = ( + "Unable to run ExllamaV3 because an unsupported GPU is " + "found in this configuration. \n" + "All GPUs must be ampere " + "(30 series) or newer. AMD GPUs are not supported." 
+ ) + + logger.warning(gpu_unsupported_message) + + raise RuntimeError(gpu_unsupported_message) + + # Cache + user_cache_size = unwrap(kwargs.get("cache_size"), self.max_seq_len) + self.cache_size = self.adjust_cache_size(user_cache_size) + self.cache = Cache(self.model, max_num_tokens=self.cache_size) + + # Max batch size + self.max_batch_size = kwargs.get("max_batch_size") + + # Make sure chunk size is >= 256, keep near or below max seq len + user_chunk_size = unwrap(kwargs.get("chunk_size"), 2048) + self.chunk_size = self.adjust_chunk_size(user_chunk_size) + # TODO: speculative decoding return self + def adjust_cache_size(self, cache_size): + if cache_size < self.max_seq_len: + logger.warning( + f"The given cache_size ({cache_size}) is smaller than the " + "desired context length.\n" + "Overriding cache_size to max_seq_len. " + ) + + cache_size = self.max_seq_len + + # Enforce a multiple of 256 for cache size + # Overestimate to ensure that the cache isn't below max_seq_len + cache_remainder = cache_size % 256 + if cache_remainder != 0: + rounded_cache_size = int(256 * ((cache_size - cache_remainder) / 256 + 1)) + + logger.warning( + f"The given cache size ({cache_size}) is " + "not a multiple of 256.\n" + "Overriding cache_size with an overestimated value of " + f"{rounded_cache_size} tokens." + ) + + cache_size = rounded_cache_size + + # Warn user if cache size may be inadequate for CFG + if cache_size < 2 * self.max_seq_len: + logger.warning( + f"The given cache_size ({cache_size}) is less than 2 * max_seq_len " + "and may be too small for requests using CFG. \n" + "Ignore this warning if you do not plan on using CFG." + ) + + return cache_size + + def adjust_chunk_size(self, user_chunk_size: int): + chunk_size = sorted((256, user_chunk_size, self.max_seq_len))[1] + chunk_remainder = chunk_size % 256 + if chunk_remainder != 0: + rounded_chunk_size = int(256 * ((chunk_size - chunk_remainder) / 256 + 1)) + + logger.warning( + f"The given chunk size ({chunk_size}) is " + "not a multiple of 256.\n" + "Overriding chunk_size with an overestimated value of " + f"{rounded_chunk_size} tokens." + ) + + chunk_size = rounded_chunk_size + + return chunk_size + def model_info(self) -> ModelCard: """ Returns a dictionary of the current model's configuration parameters. @@ -138,7 +228,25 @@ class ExllamaV3Container(BaseModelContainer): Model parameters provided by the backend """ - pass + model_params = ModelCardParameters( + max_seq_len=self.max_seq_len, + cache_size=self.cache_size, + max_batch_size=self.max_batch_size, + # cache_mode=self.cache_mode, + chunk_size=self.chunk_size, + use_vision=self.use_vision, + ) + + if self.prompt_template: + model_params.prompt_template = self.prompt_template.name + model_params.prompt_template_content = self.prompt_template.raw_template + + model_card = ModelCard( + id=self.model_dir.name, + parameters=model_params, + ) + + return model_card async def wait_for_jobs(self, skip_wait: bool = False): """ @@ -241,6 +349,7 @@ class ExllamaV3Container(BaseModelContainer): cache=self.cache, tokenizer=self.tokenizer, max_batch_size=self.max_batch_size, + max_chunk_size=self.chunk_size, ) # Update the state of the container var From eca403a0e4877e46431709a052d12035aa933cb7 Mon Sep 17 00:00:00 2001 From: kingbri <8082010+kingbri1@users.noreply.github.com> Date: Thu, 1 May 2025 18:23:45 -0400 Subject: [PATCH 13/45] Model: Add Exllamav3 sampler File was not included in previous commit. 
Signed-off-by: kingbri <8082010+kingbri1@users.noreply.github.com> --- backends/exllamav3/sampler.py | 54 +++++++++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) create mode 100644 backends/exllamav3/sampler.py diff --git a/backends/exllamav3/sampler.py b/backends/exllamav3/sampler.py new file mode 100644 index 0000000..7b08a9b --- /dev/null +++ b/backends/exllamav3/sampler.py @@ -0,0 +1,54 @@ +from dataclasses import dataclass, field +from typing import List +from exllamav3.generator.sampler import ( + CustomSampler, + SS_Temperature, + SS_RepP, + SS_PresFreqP, + SS_Argmax, + SS_MinP, + SS_TopK, + SS_TopP, + SS_Sample, + SS_Base, +) + + +@dataclass +class ExllamaV3SamplerBuilder: + """ + Custom sampler chain/stack for TabbyAPI + """ + + stack: List[SS_Base] = field(default_factory=list) + + def penalties(self, rep_p, freq_p, pres_p, penalty_range, rep_decay): + self.stack += [ + SS_RepP(rep_p, penalty_range, rep_decay), + SS_PresFreqP(pres_p, freq_p, penalty_range, rep_decay), + ] + + def temperature(self, temp): + self.stack.append(SS_Temperature(temp)) + + def top_k(self, top_k): + self.stack.append(SS_TopK(top_k)) + + def top_p(self, top_p): + self.stack.append(SS_TopP(top_p)) + + def min_p(self, min_p): + self.stack.append(SS_MinP(min_p)) + + def greedy(self): + self.stack.append(SS_Argmax()) + + def build(self, greedy): + """Builds the final sampler from stack.""" + + # Use greedy if temp is 0 + if greedy: + return CustomSampler([SS_Argmax()]) + else: + self.stack.append(SS_Sample()) + return CustomSampler(self.stack) From 59d081fe83fc6acc23545220d6028da9cf8d7f60 Mon Sep 17 00:00:00 2001 From: kingbri <8082010+kingbri1@users.noreply.github.com> Date: Thu, 1 May 2025 22:39:32 -0400 Subject: [PATCH 14/45] Common: Add hardware file Removed from a commit as well. 
Signed-off-by: kingbri <8082010+kingbri1@users.noreply.github.com> --- common/hardware.py | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) create mode 100644 common/hardware.py diff --git a/common/hardware.py b/common/hardware.py new file mode 100644 index 0000000..10723c5 --- /dev/null +++ b/common/hardware.py @@ -0,0 +1,20 @@ +import torch + + +def hardware_supports_flash_attn(gpu_device_list: list[int]): + """ + Check whether all GPUs in list support FA2 + + Compute capability < 8 is not supported by FA2 + AMD is also unsupported until ROCm updates its FA2 fork + """ + + min_compute_capability = min( + torch.cuda.get_device_capability(device=device_idx)[0] + for device_idx in gpu_device_list + ) + + if torch.version.hip or min_compute_capability < 8: + return False + else: + return True From e8f00412f6d45f69d178dd06da56653ddba0d380 Mon Sep 17 00:00:00 2001 From: kingbri <8082010+kingbri1@users.noreply.github.com> Date: Fri, 2 May 2025 00:16:11 -0400 Subject: [PATCH 15/45] Model: Fetch from generation_config and tokenizer_config in Exl3 Signed-off-by: kingbri <8082010+kingbri1@users.noreply.github.com> --- backends/exllamav2/model.py | 1 - backends/exllamav3/model.py | 33 +++++++++++++++++++++++++++++++-- 2 files changed, 31 insertions(+), 3 deletions(-) diff --git a/backends/exllamav2/model.py b/backends/exllamav2/model.py index 4745241..27fe33f 100644 --- a/backends/exllamav2/model.py +++ b/backends/exllamav2/model.py @@ -120,7 +120,6 @@ class ExllamaV2Container(BaseModelContainer): self.config.max_seq_len = 4096 self.config.prepare() - print(self.config.max_seq_len) # Check if the model arch is compatible with various exl2 features self.config.arch_compat_overrides() diff --git a/backends/exllamav3/model.py b/backends/exllamav3/model.py index ce27a85..ee409d3 100644 --- a/backends/exllamav3/model.py +++ b/backends/exllamav3/model.py @@ -1,6 +1,7 @@ import asyncio import gc import pathlib +import traceback from typing import ( Any, AsyncIterator, @@ -32,7 +33,7 @@ from common.health import HealthManager from common.multimodal import MultimodalEmbeddingWrapper from common.sampling import BaseSamplerRequest from common.templating import PromptTemplate, find_prompt_template -from common.transformers_utils import GenerationConfig +from common.transformers_utils import GenerationConfig, TokenizerConfig from common.utils import coalesce, unwrap from endpoints.core.types.model import ModelCard, ModelCardParameters @@ -60,6 +61,8 @@ class ExllamaV3Container(BaseModelContainer): tokenizer: Tokenizer config: Config generator: Optional[AsyncGenerator] = None + generation_config: Optional[GenerationConfig] = None + tokenizer_config: Optional[TokenizerConfig] = None # Class-specific vars gpu_split: List[float] | None = None @@ -96,6 +99,30 @@ class ExllamaV3Container(BaseModelContainer): self.model = Model.from_config(self.config) self.tokenizer = Tokenizer.from_config(self.config) + # Load generation config overrides + generation_config_path = model_directory / "generation_config.json" + if generation_config_path.exists(): + try: + self.generation_config = await GenerationConfig.from_file( + model_directory + ) + except Exception: + logger.error(traceback.format_exc()) + logger.warning( + "Skipping generation config load because of an unexpected error." 
+ ) + + # Load tokenizer config overrides + tokenizer_config_path = model_directory / "tokenizer_config.json" + if tokenizer_config_path.exists(): + try: + self.tokenizer_config = await TokenizerConfig.from_file(model_directory) + except Exception: + logger.error(traceback.format_exc()) + logger.warning( + "Skipping tokenizer config load because of an unexpected error." + ) + # Fallback to 4096 since exl3 can't fetch from HF's config.json self.max_seq_len = unwrap(kwargs.get("max_seq_len"), 4096) @@ -689,7 +716,9 @@ class ExllamaV3Container(BaseModelContainer): prompts = [prompt] stop_conditions = params.stop - add_bos_token = params.add_bos_token + add_bos_token = unwrap( + params.add_bos_token, self.tokenizer_config.add_bos_token, True + ) # Fetch EOS tokens from generation_config if they exist eos_tokens = ( From c89bea030e0a9fcf9b1129e61280b72aa12720e8 Mon Sep 17 00:00:00 2001 From: kingbri <8082010+kingbri1@users.noreply.github.com> Date: Fri, 2 May 2025 00:22:34 -0400 Subject: [PATCH 16/45] Model: Add template fetching to Exl3 Use the same functionality as exl2's loader. Signed-off-by: kingbri <8082010+kingbri1@users.noreply.github.com> --- backends/exllamav3/model.py | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/backends/exllamav3/model.py b/backends/exllamav3/model.py index ee409d3..6558ddd 100644 --- a/backends/exllamav3/model.py +++ b/backends/exllamav3/model.py @@ -126,11 +126,6 @@ class ExllamaV3Container(BaseModelContainer): # Fallback to 4096 since exl3 can't fetch from HF's config.json self.max_seq_len = unwrap(kwargs.get("max_seq_len"), 4096) - # Try to set prompt template - self.prompt_template = await find_prompt_template( - kwargs.get("prompt_template"), model_directory - ) - # Turn off GPU split if the user is using 1 GPU gpu_count = torch.cuda.device_count() gpu_split_auto = unwrap(kwargs.get("gpu_split_auto"), True) @@ -191,6 +186,22 @@ class ExllamaV3Container(BaseModelContainer): user_chunk_size = unwrap(kwargs.get("chunk_size"), 2048) self.chunk_size = self.adjust_chunk_size(user_chunk_size) + # Template setup + self.prompt_template = await find_prompt_template( + kwargs.get("prompt_template"), model_directory + ) + + # Catch all for template lookup errors + if self.prompt_template: + logger.info( + f'Using template "{self.prompt_template.name}" for chat completions.' + ) + else: + logger.warning( + "Chat completions are disabled because a prompt " + "template wasn't provided or auto-detected." + ) + # TODO: speculative decoding return self From 0d02af3c81a055ce70c690a8d3618539adf29010 Mon Sep 17 00:00:00 2001 From: kingbri <8082010+kingbri1@users.noreply.github.com> Date: Fri, 2 May 2025 00:26:40 -0400 Subject: [PATCH 17/45] Model: Set model_dir on init Signed-off-by: kingbri <8082010+kingbri1@users.noreply.github.com> --- backends/exllamav3/model.py | 1 + 1 file changed, 1 insertion(+) diff --git a/backends/exllamav3/model.py b/backends/exllamav3/model.py index 6558ddd..48ef1cc 100644 --- a/backends/exllamav3/model.py +++ b/backends/exllamav3/model.py @@ -95,6 +95,7 @@ class ExllamaV3Container(BaseModelContainer): "Please note that all config options may not work." 
) + self.model_dir = model_directory self.config = Config.from_directory(model_directory.resolve()) self.model = Model.from_config(self.config) self.tokenizer = Tokenizer.from_config(self.config) From d8a8ccfc2a865e780a6bc6d6424862f9e326794d Mon Sep 17 00:00:00 2001 From: randoentity <137087500+randoentity@users.noreply.github.com> Date: Fri, 2 May 2025 14:53:44 +0200 Subject: [PATCH 18/45] Model: fix add_bos_token --- backends/exllamav3/model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backends/exllamav3/model.py b/backends/exllamav3/model.py index 48ef1cc..725c486 100644 --- a/backends/exllamav3/model.py +++ b/backends/exllamav3/model.py @@ -728,7 +728,7 @@ class ExllamaV3Container(BaseModelContainer): prompts = [prompt] stop_conditions = params.stop - add_bos_token = unwrap( + add_bos_token = coalesce( params.add_bos_token, self.tokenizer_config.add_bos_token, True ) From 15cc480cb07030059ee81d17132618b345cadccb Mon Sep 17 00:00:00 2001 From: kingbri <8082010+kingbri1@users.noreply.github.com> Date: Fri, 2 May 2025 21:44:36 -0400 Subject: [PATCH 19/45] Exl3: Simplify add_bos_token handling Signed-off-by: kingbri <8082010+kingbri1@users.noreply.github.com> --- backends/exllamav3/model.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/backends/exllamav3/model.py b/backends/exllamav3/model.py index 725c486..5eaf755 100644 --- a/backends/exllamav3/model.py +++ b/backends/exllamav3/model.py @@ -728,8 +728,8 @@ class ExllamaV3Container(BaseModelContainer): prompts = [prompt] stop_conditions = params.stop - add_bos_token = coalesce( - params.add_bos_token, self.tokenizer_config.add_bos_token, True + add_bos_token = unwrap( + params.add_bos_token, self.tokenizer_config.add_bos_token ) # Fetch EOS tokens from generation_config if they exist From 8c75b299235481f3c3f4ee5a803b566aadcc356e Mon Sep 17 00:00:00 2001 From: turboderp <11859846+turboderp@users.noreply.github.com> Date: Sat, 3 May 2025 18:31:14 +0200 Subject: [PATCH 20/45] Model: Fix some warnings --- backends/exllamav3/model.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/backends/exllamav3/model.py b/backends/exllamav3/model.py index 5eaf755..554a82d 100644 --- a/backends/exllamav3/model.py +++ b/backends/exllamav3/model.py @@ -56,12 +56,11 @@ class ExllamaV3Container(BaseModelContainer): load_condition: asyncio.Condition = asyncio.Condition() # Exl3 vars - model: Model - cache: Cache - tokenizer: Tokenizer - config: Config + model: Optional[Model] + cache: Optional[Cache] + tokenizer: Optional[Tokenizer] + config: Optional[Config] generator: Optional[AsyncGenerator] = None - generation_config: Optional[GenerationConfig] = None tokenizer_config: Optional[TokenizerConfig] = None # Class-specific vars @@ -96,7 +95,7 @@ class ExllamaV3Container(BaseModelContainer): ) self.model_dir = model_directory - self.config = Config.from_directory(model_directory.resolve()) + self.config = Config.from_directory(str(model_directory.resolve())) self.model = Model.from_config(self.config) self.tokenizer = Tokenizer.from_config(self.config) From 0d949d00b919f8781ce63fcb00382bf55d60c01b Mon Sep 17 00:00:00 2001 From: turboderp <11859846+turboderp@users.noreply.github.com> Date: Sat, 3 May 2025 18:32:30 +0200 Subject: [PATCH 21/45] Model: Set default max_batch_size --- backends/exllamav3/model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backends/exllamav3/model.py b/backends/exllamav3/model.py index 554a82d..e430c04 100644 --- 
a/backends/exllamav3/model.py +++ b/backends/exllamav3/model.py @@ -180,7 +180,7 @@ class ExllamaV3Container(BaseModelContainer): self.cache = Cache(self.model, max_num_tokens=self.cache_size) # Max batch size - self.max_batch_size = kwargs.get("max_batch_size") + self.max_batch_size = unwrap(kwargs.get("max_batch_size"), 256) # Make sure chunk size is >= 256, keep near or below max seq len user_chunk_size = unwrap(kwargs.get("chunk_size"), 2048) From 58c380b8ca649aa1670fa9730b7935fc0143d2bd Mon Sep 17 00:00:00 2001 From: turboderp <11859846+turboderp@users.noreply.github.com> Date: Sat, 3 May 2025 18:32:51 +0200 Subject: [PATCH 22/45] Model: Create generator on load --- backends/exllamav3/model.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/backends/exllamav3/model.py b/backends/exllamav3/model.py index e430c04..09e5585 100644 --- a/backends/exllamav3/model.py +++ b/backends/exllamav3/model.py @@ -345,6 +345,9 @@ class ExllamaV3Container(BaseModelContainer): async for value in iterate_in_threadpool(generator): yield value + # Create async generator + await self.create_generator() + # Clean up any extra vram usage from torch and cuda # (Helps reduce VRAM bottlenecking on Windows) gc.collect() @@ -774,12 +777,6 @@ class ExllamaV3Container(BaseModelContainer): f"max_seq_len {self.max_seq_len}" ) - self.generator = AsyncGenerator( - model=self.model, - cache=self.cache, - tokenizer=self.tokenizer, - ) - generation = {} job = AsyncJob( self.generator, From 0405a94a89a5ac4f3113d26d6fd59c354642576b Mon Sep 17 00:00:00 2001 From: turboderp <11859846+turboderp@users.noreply.github.com> Date: Sat, 3 May 2025 22:28:36 +0200 Subject: [PATCH 23/45] Model: Cast penalty range to int --- backends/exllamav3/model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backends/exllamav3/model.py b/backends/exllamav3/model.py index 09e5585..ab308e5 100644 --- a/backends/exllamav3/model.py +++ b/backends/exllamav3/model.py @@ -680,7 +680,7 @@ class ExllamaV3Container(BaseModelContainer): # Exl3's version of including the entire context if penalty_range < 0: - penalty_range = 10e7 + penalty_range = int(10e7) # Always make sure the fallback is 0 if range < 0 # It's technically fine to use -1, but this just validates the passed From 1db2cb99cbde47c3c339a4dfe3efedd2e93aea1d Mon Sep 17 00:00:00 2001 From: turboderp <11859846+turboderp@users.noreply.github.com> Date: Sun, 4 May 2025 01:26:42 +0200 Subject: [PATCH 24/45] Model: Avoid initializing class variables --- backends/exllamav3/model.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/backends/exllamav3/model.py b/backends/exllamav3/model.py index ab308e5..ca8412f 100644 --- a/backends/exllamav3/model.py +++ b/backends/exllamav3/model.py @@ -62,6 +62,8 @@ class ExllamaV3Container(BaseModelContainer): config: Optional[Config] generator: Optional[AsyncGenerator] = None tokenizer_config: Optional[TokenizerConfig] = None + generator: Optional[AsyncGenerator] + tokenizer_config: Optional[TokenizerConfig] # Class-specific vars gpu_split: List[float] | None = None @@ -89,6 +91,13 @@ class ExllamaV3Container(BaseModelContainer): self = cls() + self.model = None + self.cache = None + self.tokenizer = None + self.config = None + self.generator = None + self.tokenizer_config = None + logger.warning( "ExllamaV3 is currently in an alpha state. " "Please note that all config options may not work." 
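[PATCH 24/45] above stops relying on class-level defaults for the optional container attributes and instead leaves bare annotations on the class while assigning None inside create(). The distinction matters in Python: a default written in the class body is stored once, on the class object, and is read through every container that has not shadowed it, whereas an assignment in create() gives each container its own state. A minimal illustration of that behaviour, using hypothetical class and attribute names rather than the real container fields:

from typing import Optional


class SharedState:
    # A default in the class body is stored once, on the class object,
    # and is visible through every instance that has not shadowed it.
    generator: Optional[str] = None


class InstanceState:
    # Bare annotation: no class attribute is created here at all.
    generator: Optional[str]

    @classmethod
    def create(cls) -> "InstanceState":
        self = cls()
        self.generator = None  # per-instance attribute, assigned on creation
        return self


a, b = SharedState(), SharedState()
SharedState.generator = "stale"               # mutating the class default...
assert a.generator == b.generator == "stale"  # ...shows through both instances

c, d = InstanceState.create(), InstanceState.create()
c.generator = "fresh"
assert c.generator == "fresh" and d.generator is None  # containers stay independent
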
From 92ea7ee7cd1b56b48df10a1391bfd797173f15cb Mon Sep 17 00:00:00 2001 From: turboderp <11859846+turboderp@users.noreply.github.com> Date: Sun, 4 May 2025 01:27:42 +0200 Subject: [PATCH 25/45] Model: Add draft model/speculative decoding --- backends/exllamav3/model.py | 61 +++++++++++++++++++++++++++++++++---- 1 file changed, 55 insertions(+), 6 deletions(-) diff --git a/backends/exllamav3/model.py b/backends/exllamav3/model.py index ca8412f..fc10a3d 100644 --- a/backends/exllamav3/model.py +++ b/backends/exllamav3/model.py @@ -58,10 +58,11 @@ class ExllamaV3Container(BaseModelContainer): # Exl3 vars model: Optional[Model] cache: Optional[Cache] + draft_model: Optional[Model] + draft_cache: Optional[Cache] tokenizer: Optional[Tokenizer] config: Optional[Config] - generator: Optional[AsyncGenerator] = None - tokenizer_config: Optional[TokenizerConfig] = None + draft_config: Optional[Config] generator: Optional[AsyncGenerator] tokenizer_config: Optional[TokenizerConfig] @@ -93,8 +94,11 @@ class ExllamaV3Container(BaseModelContainer): self.model = None self.cache = None + self.draft_model = None + self.draft_cache = None self.tokenizer = None self.config = None + self.draft_config = None self.generator = None self.tokenizer_config = None @@ -135,6 +139,35 @@ class ExllamaV3Container(BaseModelContainer): # Fallback to 4096 since exl3 can't fetch from HF's config.json self.max_seq_len = unwrap(kwargs.get("max_seq_len"), 4096) + # Prepare the draft model config if necessary + draft_args = unwrap(kwargs.get("draft_model"), {}) + draft_model_name = draft_args.get("draft_model_name") + self.use_draft_model = draft_args and draft_model_name + + # Always disable draft if params are incorrectly configured + if draft_args and draft_model_name is None: + logger.warning( + "Draft model is disabled because a model name " + "wasn't provided. Please check your config.yml!" + ) + self.use_draft_model = False + + if self.use_draft_model: + draft_model_path = pathlib.Path( + unwrap(draft_args.get("draft_model_dir"), "models") + ) + draft_model_path = draft_model_path / draft_model_name + self.draft_gpu_split = unwrap(draft_args.get("draft_gpu_split"), []) + self.draft_model_dir = draft_model_path + self.draft_config = Config.from_directory(str(draft_model_path.resolve())) + self.draft_model = Model.from_config(self.draft_config) + logger.info( + f'Using draft model: {str(draft_model_path.resolve())}' + ) + else: + self.draft_model = None + self.craft_cache = None + # Turn off GPU split if the user is using 1 GPU gpu_count = torch.cuda.device_count() gpu_split_auto = unwrap(kwargs.get("gpu_split_auto"), True) @@ -188,6 +221,10 @@ class ExllamaV3Container(BaseModelContainer): self.cache_size = self.adjust_cache_size(user_cache_size) self.cache = Cache(self.model, max_num_tokens=self.cache_size) + # Draft cache + if self.use_draft_model: + self.draft_cache = Cache(self.draft_model, max_num_tokens = self.cache_size) + # Max batch size self.max_batch_size = unwrap(kwargs.get("max_batch_size"), 256) @@ -211,8 +248,6 @@ class ExllamaV3Container(BaseModelContainer): "template wasn't provided or auto-detected." 
) - # TODO: speculative decoding - return self def adjust_cache_size(self, cache_size): @@ -371,9 +406,16 @@ class ExllamaV3Container(BaseModelContainer): async with self.load_condition: self.load_condition.notify_all() - # TODO: Add draft loading @torch.inference_mode() def load_model_sync(self, progress_callback=None): + if self.use_draft_model: + for value in self.draft_model.load_gen( + reserve_per_device=self.autosplit_reserve, + callback=progress_callback, + ): + if value: + yield value + for value in self.model.load_gen( reserve_per_device=self.autosplit_reserve, use_per_device=self.gpu_split, @@ -397,6 +439,8 @@ class ExllamaV3Container(BaseModelContainer): self.generator = AsyncGenerator( model=self.model, cache=self.cache, + draft_model=self.draft_model, + draft_cache=self.draft_cache, tokenizer=self.tokenizer, max_batch_size=self.max_batch_size, max_chunk_size=self.chunk_size, @@ -435,11 +479,16 @@ class ExllamaV3Container(BaseModelContainer): self.model.unload() self.model = None - self.config = None self.cache = None self.tokenizer = None + if self.use_draft_model: + self.draft_model.unload() + self.draft_model = None + self.draft_config = None + self.draft_cache = None + # Cleanup the generator from any pending jobs if self.generator is not None: await self.generator.close() From 036af02bf66448691686c118eba95d3d3a8cfe95 Mon Sep 17 00:00:00 2001 From: turboderp <11859846+turboderp@users.noreply.github.com> Date: Sun, 4 May 2025 05:25:58 +0200 Subject: [PATCH 26/45] Common: No default add_bos_token value for chat completion requests --- endpoints/OAI/types/chat_completion.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/endpoints/OAI/types/chat_completion.py b/endpoints/OAI/types/chat_completion.py index d1209a6..51695c2 100644 --- a/endpoints/OAI/types/chat_completion.py +++ b/endpoints/OAI/types/chat_completion.py @@ -82,10 +82,13 @@ class ChatCompletionRequest(CommonCompletionRequest): tool_call_end: SkipJsonSchema[Optional[str]] = None tool_call_schema: SkipJsonSchema[Optional[dict]] = tool_call_schema + # Chat completions requests do not have a BOS token preference. Backend + # respects the tokenization config for the individual model. 
+ add_bos_token: Optional[bool] = Field(default = None) + @field_validator("add_bos_token", mode="after") def force_bos_token(cls, v): """Always disable add_bos_token with chat completions.""" - return None From 68a660bdb3689460d87430a9f277fde835e5c685 Mon Sep 17 00:00:00 2001 From: DocShotgun <126566557+DocShotgun@users.noreply.github.com> Date: Thu, 1 May 2025 22:55:51 -0700 Subject: [PATCH 27/45] Model: Initial Exl3 cache quantization support --- backends/exllamav2/model.py | 1 + backends/exllamav3/model.py | 32 ++++++++++++++++++++++++++++++-- common/config_models.py | 9 +++++++-- config_sample.yml | 6 ++++-- 4 files changed, 42 insertions(+), 6 deletions(-) diff --git a/backends/exllamav2/model.py b/backends/exllamav2/model.py index 27fe33f..5d79e4f 100644 --- a/backends/exllamav2/model.py +++ b/backends/exllamav2/model.py @@ -185,6 +185,7 @@ class ExllamaV2Container(BaseModelContainer): # MARK: User configuration # Get cache mode + # TODO: Separate validation for Exl2 and Exl3 q-cache options self.cache_mode = unwrap(kwargs.get("cache_mode"), "FP16") # Turn off GPU split if the user is using 1 GPU diff --git a/backends/exllamav3/model.py b/backends/exllamav3/model.py index fc10a3d..18e04e8 100644 --- a/backends/exllamav3/model.py +++ b/backends/exllamav3/model.py @@ -1,6 +1,7 @@ import asyncio import gc import pathlib +import re import traceback from typing import ( Any, @@ -19,6 +20,7 @@ from exllamav3 import ( Model, Tokenizer, ) +from exllamav3.cache import CacheLayer_fp16, CacheLayer_quant from loguru import logger from backends.base_model_container import BaseModelContainer @@ -73,6 +75,7 @@ class ExllamaV3Container(BaseModelContainer): use_tp: bool = False max_seq_len: int = 4096 cache_size: int = 4096 + cache_mode: str = "FP16" chunk_size: int = 2048 max_batch_size: Optional[int] = None @@ -219,7 +222,32 @@ class ExllamaV3Container(BaseModelContainer): # Cache user_cache_size = unwrap(kwargs.get("cache_size"), self.max_seq_len) self.cache_size = self.adjust_cache_size(user_cache_size) - self.cache = Cache(self.model, max_num_tokens=self.cache_size) + self.cache_mode = unwrap(kwargs.get("cache_mode"), "FP16") + + # Alias Exl2 q-cache settings + match self.cache_mode: + case "Q4": + self.cache_mode = "4,4" + case "Q6": + self.cache_mode = "6,6" + case "Q8": + self.cache_mode = "8,8" + + split_cache_mode = re.search(r"^([2-8]),([2-8])$", self.cache_mode) + if split_cache_mode: + k_bits = int(split_cache_mode.group(1)) + v_bits = int(split_cache_mode.group(2)) + self.cache = Cache( + self.model, + max_num_tokens=self.cache_size, + layer_type=CacheLayer_quant, + k_bits=k_bits, + v_bits=v_bits, + ) + else: + self.cache = Cache( + self.model, max_num_tokens=self.cache_size, layer_type=CacheLayer_fp16 + ) # Draft cache if self.use_draft_model: @@ -314,7 +342,7 @@ class ExllamaV3Container(BaseModelContainer): max_seq_len=self.max_seq_len, cache_size=self.cache_size, max_batch_size=self.max_batch_size, - # cache_mode=self.cache_mode, + cache_mode=self.cache_mode, chunk_size=self.chunk_size, use_vision=self.use_vision, ) diff --git a/common/config_models.py b/common/config_models.py index 2965fe0..8ee4ff7 100644 --- a/common/config_models.py +++ b/common/config_models.py @@ -1,6 +1,7 @@ from pydantic import ( BaseModel, ConfigDict, + constr, Field, PrivateAttr, field_validator, @@ -9,6 +10,7 @@ from typing import List, Literal, Optional, Union CACHE_SIZES = Literal["FP16", "Q8", "Q6", "Q4"] +CACHE_TYPE = Union[CACHE_SIZES, constr(pattern=r"^[2-8],[2-8]$")] class Metadata(BaseModel): @@ 
-227,11 +229,14 @@ class ModelConfig(BaseConfigModel): "or auto-calculate." ), ) - cache_mode: Optional[CACHE_SIZES] = Field( + # TODO: Separate validation for Exl2 and Exl3 q-cache options + cache_mode: Optional[CACHE_TYPE] = Field( "FP16", description=( "Enable different cache modes for VRAM savings (default: FP16).\n" - f"Possible values: {str(CACHE_SIZES)[15:-1]}." + f"Possible values for exllamav2: {str(CACHE_SIZES)[15:-1]}.\n" + "For exllamav3, specify the pair k_bits,v_bits where k_bits and v_bits " + "are integers from 2-8 (i.e. 8,8)." ), ) cache_size: Optional[int] = Field( diff --git a/config_sample.yml b/config_sample.yml index a743c2c..045db51 100644 --- a/config_sample.yml +++ b/config_sample.yml @@ -114,7 +114,8 @@ model: rope_alpha: # Enable different cache modes for VRAM savings (default: FP16). - # Possible values: 'FP16', 'Q8', 'Q6', 'Q4'. + # Possible values for exllamav2: 'FP16', 'Q8', 'Q6', 'Q4'. + # For exllamav3, specify the pair k_bits,v_bits where k_bits and v_bits are integers from 2-8 (i.e. 8,8). cache_mode: FP16 # Size of the prompt cache to allocate (default: max_seq_len). @@ -164,7 +165,8 @@ draft_model: draft_rope_alpha: # Cache mode for draft models to save VRAM (default: FP16). - # Possible values: 'FP16', 'Q8', 'Q6', 'Q4'. + # Possible values for exllamav2: 'FP16', 'Q8', 'Q6', 'Q4'. + # For exllamav3, specify the pair k_bits,v_bits where k_bits and v_bits are integers from 2-8 (i.e. 8,8). draft_cache_mode: FP16 # An integer array of GBs of VRAM to split between GPUs (default: []). From 58e34ba4c5ca38cfba4bbced01e85c69c5c7c07f Mon Sep 17 00:00:00 2001 From: DocShotgun <126566557+DocShotgun@users.noreply.github.com> Date: Thu, 1 May 2025 23:05:41 -0700 Subject: [PATCH 28/45] Model: Exl3 cache quant settings lenient with whitespace --- backends/exllamav3/model.py | 2 +- common/config_models.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/backends/exllamav3/model.py b/backends/exllamav3/model.py index 18e04e8..330c4e1 100644 --- a/backends/exllamav3/model.py +++ b/backends/exllamav3/model.py @@ -233,7 +233,7 @@ class ExllamaV3Container(BaseModelContainer): case "Q8": self.cache_mode = "8,8" - split_cache_mode = re.search(r"^([2-8]),([2-8])$", self.cache_mode) + split_cache_mode = re.search(r"^([2-8])\s*,\s*([2-8])$", self.cache_mode) if split_cache_mode: k_bits = int(split_cache_mode.group(1)) v_bits = int(split_cache_mode.group(2)) diff --git a/common/config_models.py b/common/config_models.py index 8ee4ff7..b4a245e 100644 --- a/common/config_models.py +++ b/common/config_models.py @@ -10,7 +10,7 @@ from typing import List, Literal, Optional, Union CACHE_SIZES = Literal["FP16", "Q8", "Q6", "Q4"] -CACHE_TYPE = Union[CACHE_SIZES, constr(pattern=r"^[2-8],[2-8]$")] +CACHE_TYPE = Union[CACHE_SIZES, constr(pattern=r"^[2-8]\s*,\s*[2-8]$")] class Metadata(BaseModel): From a635a719d73d0eb52e29890870a1029568353a78 Mon Sep 17 00:00:00 2001 From: DocShotgun <126566557+DocShotgun@users.noreply.github.com> Date: Sat, 3 May 2025 20:59:36 -0700 Subject: [PATCH 29/45] Model: Enable draft model q-cache in Exl3 * Remove unneeded default fp16 cache layer import --- backends/exllamav3/model.py | 33 ++++++++++++++++++++++++++++----- 1 file changed, 28 insertions(+), 5 deletions(-) diff --git a/backends/exllamav3/model.py b/backends/exllamav3/model.py index 330c4e1..1026faf 100644 --- a/backends/exllamav3/model.py +++ b/backends/exllamav3/model.py @@ -20,7 +20,7 @@ from exllamav3 import ( Model, Tokenizer, ) -from exllamav3.cache import CacheLayer_fp16, 
CacheLayer_quant +from exllamav3.cache import CacheLayer_quant from loguru import logger from backends.base_model_container import BaseModelContainer @@ -76,6 +76,7 @@ class ExllamaV3Container(BaseModelContainer): max_seq_len: int = 4096 cache_size: int = 4096 cache_mode: str = "FP16" + draft_cache_mode: str = "FP16" chunk_size: int = 2048 max_batch_size: Optional[int] = None @@ -245,13 +246,35 @@ class ExllamaV3Container(BaseModelContainer): v_bits=v_bits, ) else: - self.cache = Cache( - self.model, max_num_tokens=self.cache_size, layer_type=CacheLayer_fp16 - ) + self.cache = Cache(self.model, max_num_tokens=self.cache_size) # Draft cache if self.use_draft_model: - self.draft_cache = Cache(self.draft_model, max_num_tokens = self.cache_size) + # Set draft cache mode + self.draft_cache_mode = unwrap(draft_args.get("draft_cache_mode"), "FP16") + + # Alias Exl2 q-cache settings + match self.draft_cache_mode: + case "Q4": + self.draft_cache_mode = "4,4" + case "Q6": + self.draft_cache_mode = "6,6" + case "Q8": + self.draft_cache_mode = "8,8" + + split_draft_cache_mode = re.search(r"^([2-8])\s*,\s*([2-8])$", self.draft_cache_mode) + if split_draft_cache_mode: + draft_k_bits = int(split_draft_cache_mode.group(1)) + draft_v_bits = int(split_draft_cache_mode.group(2)) + self.draft_cache = Cache( + self.draft_model, + max_num_tokens=self.cache_size, + layer_type=CacheLayer_quant, + k_bits=draft_k_bits, + v_bits=draft_v_bits, + ) + else: + self.draft_cache = Cache(self.draft_model, max_num_tokens = self.cache_size) # Max batch size self.max_batch_size = unwrap(kwargs.get("max_batch_size"), 256) From 45b966363ec78d5a667a9b38cb571e4c2da6da37 Mon Sep 17 00:00:00 2001 From: DocShotgun <126566557+DocShotgun@users.noreply.github.com> Date: Sat, 3 May 2025 21:01:03 -0700 Subject: [PATCH 30/45] Tree: Format --- backends/exllamav3/model.py | 12 +++++++----- endpoints/OAI/types/chat_completion.py | 2 +- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/backends/exllamav3/model.py b/backends/exllamav3/model.py index 1026faf..61986c1 100644 --- a/backends/exllamav3/model.py +++ b/backends/exllamav3/model.py @@ -165,9 +165,7 @@ class ExllamaV3Container(BaseModelContainer): self.draft_model_dir = draft_model_path self.draft_config = Config.from_directory(str(draft_model_path.resolve())) self.draft_model = Model.from_config(self.draft_config) - logger.info( - f'Using draft model: {str(draft_model_path.resolve())}' - ) + logger.info(f"Using draft model: {str(draft_model_path.resolve())}") else: self.draft_model = None self.craft_cache = None @@ -262,7 +260,9 @@ class ExllamaV3Container(BaseModelContainer): case "Q8": self.draft_cache_mode = "8,8" - split_draft_cache_mode = re.search(r"^([2-8])\s*,\s*([2-8])$", self.draft_cache_mode) + split_draft_cache_mode = re.search( + r"^([2-8])\s*,\s*([2-8])$", self.draft_cache_mode + ) if split_draft_cache_mode: draft_k_bits = int(split_draft_cache_mode.group(1)) draft_v_bits = int(split_draft_cache_mode.group(2)) @@ -274,7 +274,9 @@ class ExllamaV3Container(BaseModelContainer): v_bits=draft_v_bits, ) else: - self.draft_cache = Cache(self.draft_model, max_num_tokens = self.cache_size) + self.draft_cache = Cache( + self.draft_model, max_num_tokens=self.cache_size + ) # Max batch size self.max_batch_size = unwrap(kwargs.get("max_batch_size"), 256) diff --git a/endpoints/OAI/types/chat_completion.py b/endpoints/OAI/types/chat_completion.py index 51695c2..fb73eb9 100644 --- a/endpoints/OAI/types/chat_completion.py +++ b/endpoints/OAI/types/chat_completion.py @@ -84,7 
+84,7 @@ class ChatCompletionRequest(CommonCompletionRequest): # Chat completions requests do not have a BOS token preference. Backend # respects the tokenization config for the individual model. - add_bos_token: Optional[bool] = Field(default = None) + add_bos_token: Optional[bool] = Field(default=None) @field_validator("add_bos_token", mode="after") def force_bos_token(cls, v): From ff383051459ee06b39d854b2607938f9908672f0 Mon Sep 17 00:00:00 2001 From: turboderp <11859846+turboderp@users.noreply.github.com> Date: Mon, 5 May 2025 02:01:16 +0200 Subject: [PATCH 31/45] Common: Fix exception f-string --- common/model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/model.py b/common/model.py index 9de86f0..2c7bd65 100644 --- a/common/model.py +++ b/common/model.py @@ -137,7 +137,7 @@ async def load_model_gen(model_path: pathlib.Path, **kwargs): else: raise ValueError( f"Invalid backend '{backend_name}'. " - "Available backends: {available_backends}" + f"Available backends: {available_backends}" ) new_container: BaseModelContainer = await container_class.create( From b683545d0e4a9b757fdb2bc74e59f2077681bfd2 Mon Sep 17 00:00:00 2001 From: kingbri <8082010+kingbri1@users.noreply.github.com> Date: Mon, 5 May 2025 21:52:30 -0400 Subject: [PATCH 32/45] Config: Fix argparse help Adding a comma in the description converts the string to a tuple, which isn't parseable by argparse's help. Signed-off-by: kingbri <8082010+kingbri1@users.noreply.github.com> --- common/config_models.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/common/config_models.py b/common/config_models.py index 2965fe0..9d88eb1 100644 --- a/common/config_models.py +++ b/common/config_models.py @@ -163,13 +163,11 @@ class ModelConfig(BaseConfigModel): "Example: ['max_seq_len', 'cache_mode']." ), ) - - # Defaults to exllamav2 in common/model.py backend: Optional[str] = Field( None, description=( "Backend to use for this model (default: exllamav2)\n" - "Options: exllamav2, exllamav3", + "Options: exllamav2, exllamav3" ), ) max_seq_len: Optional[int] = Field( From bc0a84241a1819d3b0f87b343295df69b602b0cf Mon Sep 17 00:00:00 2001 From: kingbri <8082010+kingbri1@users.noreply.github.com> Date: Mon, 5 May 2025 22:11:21 -0400 Subject: [PATCH 33/45] API: Patch kobold generation call Calling the model requires different args now. 
Signed-off-by: kingbri <8082010+kingbri1@users.noreply.github.com> --- endpoints/Kobold/utils/generation.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/endpoints/Kobold/utils/generation.py b/endpoints/Kobold/utils/generation.py index 39ddb9f..0d345d8 100644 --- a/endpoints/Kobold/utils/generation.py +++ b/endpoints/Kobold/utils/generation.py @@ -53,8 +53,12 @@ async def _stream_collector(data: GenerateRequest, request: Request): logger.info(f"Received Kobold generation request {data.genkey}") generator = model.container.stream_generate( - request_id=data.genkey, abort_event=abort_event, **data.model_dump() + request_id=data.genkey, + prompt=data.prompt, + params=data, + abort_event=abort_event, ) + async for generation in generator: if disconnect_task.done(): abort_event.set() From 9dcde59c57e2afa7d7c19afb60f51ec25f3ccf36 Mon Sep 17 00:00:00 2001 From: DocShotgun <126566557+DocShotgun@users.noreply.github.com> Date: Tue, 6 May 2025 01:18:15 -0700 Subject: [PATCH 34/45] Model: Check for unsupported cache mode in exllamav2 --- backends/exllamav2/model.py | 19 ++++++++++++++++++- common/config_models.py | 1 - 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/backends/exllamav2/model.py b/backends/exllamav2/model.py index 5d79e4f..22200ec 100644 --- a/backends/exllamav2/model.py +++ b/backends/exllamav2/model.py @@ -185,9 +185,17 @@ class ExllamaV2Container(BaseModelContainer): # MARK: User configuration # Get cache mode - # TODO: Separate validation for Exl2 and Exl3 q-cache options self.cache_mode = unwrap(kwargs.get("cache_mode"), "FP16") + # Catch exllamav3 cache_mode + if not self.cache_mode.startswith("Q"): + logger.warning( + f"Provided cache mode '{self.cache_mode}' is not a " + "valid choice for exllamav2, please check your settings. " + "Defaulting to FP16." + ) + self.cache_mode = "FP16" + # Turn off GPU split if the user is using 1 GPU gpu_count = torch.cuda.device_count() gpu_split_auto = unwrap(kwargs.get("gpu_split_auto"), True) @@ -393,6 +401,15 @@ class ExllamaV2Container(BaseModelContainer): # Set draft cache mode self.draft_cache_mode = unwrap(draft_args.get("draft_cache_mode"), "FP16") + # Catch exllamav3 draft_cache_mode + if not self.draft_cache_mode.startswith("Q"): + logger.warning( + f"Provided draft cache mode '{self.draft_cache_mode}' is not a " + "valid choice for exllamav2, please check your settings. " + "Defaulting to FP16." + ) + self.draft_cache_mode = "FP16" + # Edit the draft config size if chunk_size: self.draft_config.max_input_len = chunk_size diff --git a/common/config_models.py b/common/config_models.py index b4a245e..e228552 100644 --- a/common/config_models.py +++ b/common/config_models.py @@ -229,7 +229,6 @@ class ModelConfig(BaseConfigModel): "or auto-calculate." 
), ) - # TODO: Separate validation for Exl2 and Exl3 q-cache options cache_mode: Optional[CACHE_TYPE] = Field( "FP16", description=( From f8070e77077f360631179c896808e6fb4658297f Mon Sep 17 00:00:00 2001 From: DocShotgun <126566557+DocShotgun@users.noreply.github.com> Date: Tue, 6 May 2025 18:51:58 -0700 Subject: [PATCH 35/45] Model: Auto detect model backend from config * Use exllamav3 for exl3 models, exllamav2 otherwise --- backends/exllamav3/model.py | 6 ++---- common/config_models.py | 2 +- common/model.py | 19 ++++++++++++++++++- common/transformers_utils.py | 12 +++++++++++- config_sample.yml | 4 ++-- endpoints/OAI/types/chat_completion.py | 2 +- 6 files changed, 35 insertions(+), 10 deletions(-) diff --git a/backends/exllamav3/model.py b/backends/exllamav3/model.py index fc10a3d..f04c218 100644 --- a/backends/exllamav3/model.py +++ b/backends/exllamav3/model.py @@ -161,9 +161,7 @@ class ExllamaV3Container(BaseModelContainer): self.draft_model_dir = draft_model_path self.draft_config = Config.from_directory(str(draft_model_path.resolve())) self.draft_model = Model.from_config(self.draft_config) - logger.info( - f'Using draft model: {str(draft_model_path.resolve())}' - ) + logger.info(f"Using draft model: {str(draft_model_path.resolve())}") else: self.draft_model = None self.craft_cache = None @@ -223,7 +221,7 @@ class ExllamaV3Container(BaseModelContainer): # Draft cache if self.use_draft_model: - self.draft_cache = Cache(self.draft_model, max_num_tokens = self.cache_size) + self.draft_cache = Cache(self.draft_model, max_num_tokens=self.cache_size) # Max batch size self.max_batch_size = unwrap(kwargs.get("max_batch_size"), 256) diff --git a/common/config_models.py b/common/config_models.py index 9d88eb1..de1f803 100644 --- a/common/config_models.py +++ b/common/config_models.py @@ -166,7 +166,7 @@ class ModelConfig(BaseConfigModel): backend: Optional[str] = Field( None, description=( - "Backend to use for this model (default: exllamav2)\n" + "Backend to use for this model (auto-detect if not specified)\n" "Options: exllamav2, exllamav3" ), ) diff --git a/common/model.py b/common/model.py index 2c7bd65..161c7dc 100644 --- a/common/model.py +++ b/common/model.py @@ -17,6 +17,7 @@ from common.logger import get_loading_progress_bar from common.networking import handle_request_error from common.tabby_config import config from common.optional_dependencies import dependencies +from common.transformers_utils import HuggingFaceConfig from common.utils import unwrap # Global variables for model container @@ -123,8 +124,24 @@ async def load_model_gen(model_path: pathlib.Path, **kwargs): kwargs = {**config.model_defaults, **kwargs} kwargs = await apply_inline_overrides(model_path, **kwargs) + # Read config.json and detect the quant method + hf_config_path = model_path / "config.json" + if hf_config_path.exists(): + try: + hf_config = await HuggingFaceConfig.from_file(model_path) + except Exception as exc: + raise ValueError( + "Failed to read the model's config.json. " + f"Please check your model directory at {model_path}." 
+ ) from exc + quant_method = hf_config.quant_method() + if quant_method == "exl3": + backend_name = "exllamav3" + else: + backend_name = "exllamav2" + # Create a new container and check if the right dependencies are installed - backend_name = unwrap(kwargs.get("backend"), "exllamav2").lower() + backend_name = unwrap(kwargs.get("backend"), backend_name).lower() container_class = _BACKEND_REGISTRY.get(backend_name) if not container_class: diff --git a/common/transformers_utils.py b/common/transformers_utils.py index 045312c..cd79f00 100644 --- a/common/transformers_utils.py +++ b/common/transformers_utils.py @@ -1,7 +1,7 @@ import aiofiles import json import pathlib -from typing import List, Optional, Union +from typing import Dict, List, Optional, Union from pydantic import BaseModel @@ -42,6 +42,8 @@ class HuggingFaceConfig(BaseModel): Will be expanded as needed. """ + quantization_config: Optional[Dict] = None + @classmethod async def from_file(cls, model_directory: pathlib.Path): """Create an instance from a generation config file.""" @@ -54,6 +56,14 @@ class HuggingFaceConfig(BaseModel): hf_config_dict = json.loads(contents) return cls.model_validate(hf_config_dict) + def quant_method(self): + """Wrapper method to fetch quant type""" + + if isinstance(self.quantization_config, Dict): + return self.quantization_config.get("quant_method") + else: + return None + class TokenizerConfig(BaseModel): """ diff --git a/config_sample.yml b/config_sample.yml index a743c2c..025d2ee 100644 --- a/config_sample.yml +++ b/config_sample.yml @@ -74,9 +74,9 @@ model: # Example: ['max_seq_len', 'cache_mode']. use_as_default: [] - # Backend to use for the model (default: exllamav2) + # Backend to use for this model (auto-detect if not specified) # Options: exllamav2, exllamav3 - backend: exllamav2 + backend: # Max sequence length (default: Empty). # Fetched from the model's base sequence length in config.json by default. diff --git a/endpoints/OAI/types/chat_completion.py b/endpoints/OAI/types/chat_completion.py index 51695c2..fb73eb9 100644 --- a/endpoints/OAI/types/chat_completion.py +++ b/endpoints/OAI/types/chat_completion.py @@ -84,7 +84,7 @@ class ChatCompletionRequest(CommonCompletionRequest): # Chat completions requests do not have a BOS token preference. Backend # respects the tokenization config for the individual model. - add_bos_token: Optional[bool] = Field(default = None) + add_bos_token: Optional[bool] = Field(default=None) @field_validator("add_bos_token", mode="after") def force_bos_token(cls, v): From 638eef401acf02e1460e1bb2c01b277089b22378 Mon Sep 17 00:00:00 2001 From: kingbri <8082010+kingbri1@users.noreply.github.com> Date: Thu, 8 May 2025 23:10:03 -0400 Subject: [PATCH 36/45] Model: Move cache creation to a common function Prevents repetitiveness while also creating a Cache class. 
Signed-off-by: kingbri <8082010+kingbri1@users.noreply.github.com> --- backends/exllamav3/model.py | 81 ++++++++++++++----------------------- 1 file changed, 31 insertions(+), 50 deletions(-) diff --git a/backends/exllamav3/model.py b/backends/exllamav3/model.py index 61986c1..c386a6e 100644 --- a/backends/exllamav3/model.py +++ b/backends/exllamav3/model.py @@ -168,7 +168,7 @@ class ExllamaV3Container(BaseModelContainer): logger.info(f"Using draft model: {str(draft_model_path.resolve())}") else: self.draft_model = None - self.craft_cache = None + self.draft_cache = None # Turn off GPU split if the user is using 1 GPU gpu_count = torch.cuda.device_count() @@ -222,61 +222,15 @@ class ExllamaV3Container(BaseModelContainer): user_cache_size = unwrap(kwargs.get("cache_size"), self.max_seq_len) self.cache_size = self.adjust_cache_size(user_cache_size) self.cache_mode = unwrap(kwargs.get("cache_mode"), "FP16") - - # Alias Exl2 q-cache settings - match self.cache_mode: - case "Q4": - self.cache_mode = "4,4" - case "Q6": - self.cache_mode = "6,6" - case "Q8": - self.cache_mode = "8,8" - - split_cache_mode = re.search(r"^([2-8])\s*,\s*([2-8])$", self.cache_mode) - if split_cache_mode: - k_bits = int(split_cache_mode.group(1)) - v_bits = int(split_cache_mode.group(2)) - self.cache = Cache( - self.model, - max_num_tokens=self.cache_size, - layer_type=CacheLayer_quant, - k_bits=k_bits, - v_bits=v_bits, - ) - else: - self.cache = Cache(self.model, max_num_tokens=self.cache_size) + self.cache = self.create_cache(self.cache_mode, self.model) # Draft cache if self.use_draft_model: # Set draft cache mode self.draft_cache_mode = unwrap(draft_args.get("draft_cache_mode"), "FP16") - - # Alias Exl2 q-cache settings - match self.draft_cache_mode: - case "Q4": - self.draft_cache_mode = "4,4" - case "Q6": - self.draft_cache_mode = "6,6" - case "Q8": - self.draft_cache_mode = "8,8" - - split_draft_cache_mode = re.search( - r"^([2-8])\s*,\s*([2-8])$", self.draft_cache_mode + self.draft_cache = self.create_cache( + self.draft_cache_mode, self.draft_model ) - if split_draft_cache_mode: - draft_k_bits = int(split_draft_cache_mode.group(1)) - draft_v_bits = int(split_draft_cache_mode.group(2)) - self.draft_cache = Cache( - self.draft_model, - max_num_tokens=self.cache_size, - layer_type=CacheLayer_quant, - k_bits=draft_k_bits, - v_bits=draft_v_bits, - ) - else: - self.draft_cache = Cache( - self.draft_model, max_num_tokens=self.cache_size - ) # Max batch size self.max_batch_size = unwrap(kwargs.get("max_batch_size"), 256) @@ -355,6 +309,33 @@ class ExllamaV3Container(BaseModelContainer): return chunk_size + def create_cache(self, raw_cache_mode: str, model: Model): + # Cast exl2 types to exl3 + match raw_cache_mode: + case "Q4": + raw_cache_mode = "4,4" + case "Q6": + raw_cache_mode = "6,6" + case "Q8": + raw_cache_mode = "8,8" + + split_cache_mode = re.search(r"^([2-8])\s*,\s*([2-8])$", raw_cache_mode) + + if split_cache_mode: + draft_k_bits = int(split_cache_mode.group(1)) + draft_v_bits = int(split_cache_mode.group(2)) + cache = Cache( + model, + max_num_tokens=self.cache_size, + layer_type=CacheLayer_quant, + k_bits=draft_k_bits, + v_bits=draft_v_bits, + ) + else: + cache = Cache(model, max_num_tokens=self.cache_size) + + return cache + def model_info(self) -> ModelCard: """ Returns a dictionary of the current model's configuration parameters. 
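The create_cache() helper consolidated in [PATCH 36/45] folds together the cache-mode handling introduced in [PATCH 27/45] and [PATCH 29/45]: exllamav2-style aliases (Q4, Q6, Q8) are mapped onto exllamav3 k_bits,v_bits pairs, and the pair form is parsed with the same 2-8 pattern used by the config validator. A standalone sketch of just that string handling, with a hypothetical parse_cache_mode helper that returns the bit pair or None for FP16 (the real method builds a Cache with layer_type=CacheLayer_quant instead of returning a tuple):

import re
from typing import Optional, Tuple

# Exl2-style aliases expressed as exl3 k_bits,v_bits pairs
_ALIASES = {"Q4": "4,4", "Q6": "6,6", "Q8": "8,8"}

# Two integers from 2-8, whitespace around the comma tolerated,
# mirroring the pattern used by the config validator and create_cache()
_PAIR_RE = re.compile(r"^([2-8])\s*,\s*([2-8])$")


def parse_cache_mode(raw_cache_mode: str) -> Optional[Tuple[int, int]]:
    """Return (k_bits, v_bits) for a quantized cache, or None for FP16."""

    mode = _ALIASES.get(raw_cache_mode, raw_cache_mode)
    match = _PAIR_RE.match(mode)
    if match:
        return int(match.group(1)), int(match.group(2))
    return None  # any other value (e.g. "FP16") means a full-precision cache


assert parse_cache_mode("Q6") == (6, 6)
assert parse_cache_mode("8, 4") == (8, 4)
assert parse_cache_mode("FP16") is None

In the container itself the parsed pair is forwarded to Cache(model, max_num_tokens=self.cache_size, layer_type=CacheLayer_quant, k_bits=..., v_bits=...), while the fallback path constructs a plain FP16 Cache without a layer_type argument.
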
From cfee16905bf63d7a6fa2bb33f12c4e878ab251d6 Mon Sep 17 00:00:00 2001 From: kingbri <8082010+kingbri1@users.noreply.github.com> Date: Thu, 8 May 2025 23:42:39 -0400 Subject: [PATCH 37/45] Model: Migrate backend detection to a separate function Seemed out of place in the common load function. In addition, rename the transformers utils signature which actually takes a directory instead of a file. Signed-off-by: kingbri <8082010+kingbri1@users.noreply.github.com> --- common/model.py | 39 ++++++++++++++++++++---------------- common/transformers_utils.py | 2 +- 2 files changed, 23 insertions(+), 18 deletions(-) diff --git a/common/model.py b/common/model.py index 161c7dc..67af212 100644 --- a/common/model.py +++ b/common/model.py @@ -57,6 +57,24 @@ def load_progress(module, modules): yield module, modules +async def detect_backend(model_path: pathlib.Path) -> str: + """Determine the appropriate backend based on model files and configuration.""" + + try: + hf_config = await HuggingFaceConfig.from_directory(model_path) + quant_method = hf_config.quant_method() + + if quant_method == "exl3": + return "exllamav3" + else: + return "exllamav2" + except Exception as exc: + raise ValueError( + "Failed to read the model's config.json. " + f"Please check your model directory at {model_path}." + ) from exc + + async def apply_inline_overrides(model_dir: pathlib.Path, **kwargs): """Sets overrides from a model folder's config yaml.""" @@ -124,24 +142,11 @@ async def load_model_gen(model_path: pathlib.Path, **kwargs): kwargs = {**config.model_defaults, **kwargs} kwargs = await apply_inline_overrides(model_path, **kwargs) - # Read config.json and detect the quant method - hf_config_path = model_path / "config.json" - if hf_config_path.exists(): - try: - hf_config = await HuggingFaceConfig.from_file(model_path) - except Exception as exc: - raise ValueError( - "Failed to read the model's config.json. " - f"Please check your model directory at {model_path}." 
- ) from exc - quant_method = hf_config.quant_method() - if quant_method == "exl3": - backend_name = "exllamav3" - else: - backend_name = "exllamav2" - # Create a new container and check if the right dependencies are installed - backend_name = unwrap(kwargs.get("backend"), backend_name).lower() + backend_name = unwrap( + kwargs.get("backend"), await detect_backend(model_path) + ).lower() + print(backend_name) container_class = _BACKEND_REGISTRY.get(backend_name) if not container_class: diff --git a/common/transformers_utils.py b/common/transformers_utils.py index cd79f00..d1e5ac1 100644 --- a/common/transformers_utils.py +++ b/common/transformers_utils.py @@ -45,7 +45,7 @@ class HuggingFaceConfig(BaseModel): quantization_config: Optional[Dict] = None @classmethod - async def from_file(cls, model_directory: pathlib.Path): + async def from_directory(cls, model_directory: pathlib.Path): """Create an instance from a generation config file.""" hf_config_path = model_directory / "config.json" From d5963007f026c959d5d3a8ecc86d55b02b4d07b4 Mon Sep 17 00:00:00 2001 From: kingbri <8082010+kingbri1@users.noreply.github.com> Date: Thu, 8 May 2025 23:45:04 -0400 Subject: [PATCH 38/45] Model: Add backend print Signed-off-by: kingbri <8082010+kingbri1@users.noreply.github.com> --- common/model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/model.py b/common/model.py index 67af212..9cdfdeb 100644 --- a/common/model.py +++ b/common/model.py @@ -146,7 +146,6 @@ async def load_model_gen(model_path: pathlib.Path, **kwargs): backend_name = unwrap( kwargs.get("backend"), await detect_backend(model_path) ).lower() - print(backend_name) container_class = _BACKEND_REGISTRY.get(backend_name) if not container_class: @@ -162,6 +161,7 @@ async def load_model_gen(model_path: pathlib.Path, **kwargs): f"Available backends: {available_backends}" ) + logger.info(f"Using backend {backend_name}") new_container: BaseModelContainer = await container_class.create( model_path.resolve(), **kwargs ) From 33ac016023e1c3b076ebff1f1edc3d730f7c26cc Mon Sep 17 00:00:00 2001 From: kingbri <8082010+kingbri1@users.noreply.github.com> Date: Fri, 9 May 2025 21:42:07 -0400 Subject: [PATCH 39/45] Dependencies: Add ExllamaV3 v0.0.1 Signed-off-by: kingbri <8082010+kingbri1@users.noreply.github.com> --- pyproject.toml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index b16a22c..cee9673 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -77,6 +77,16 @@ cu121 = [ "exllamav2 @ https://github.com/turboderp-org/exllamav2/releases/download/v0.2.9/exllamav2-0.2.9+cu128.torch2.7.0-cp311-cp311-linux_x86_64.whl ; platform_system == 'Linux' and platform_machine == 'x86_64' and python_version == '3.11'", "exllamav2 @ https://github.com/turboderp-org/exllamav2/releases/download/v0.2.9/exllamav2-0.2.9+cu128.torch2.7.0-cp310-cp310-linux_x86_64.whl ; platform_system == 'Linux' and platform_machine == 'x86_64' and python_version == '3.10'", + # Exl3 + "exllamav3 @ https://github.com/turboderp-org/exllamav3/releases/download/v0.0.1/exllamav3-0.0.1+cu128.torch2.7.0-cp313-cp313-win_amd64.whl ; platform_system == 'Windows' and python_version == '3.13'", + "exllamav3 @ https://github.com/turboderp-org/exllamav3/releases/download/v0.0.1/exllamav3-0.0.1+cu128.torch2.7.0-cp312-cp312-win_amd64.whl ; platform_system == 'Windows' and python_version == '3.12'", + "exllamav3 @ 
https://github.com/turboderp-org/exllamav3/releases/download/v0.0.1/exllamav3-0.0.1+cu128.torch2.7.0-cp311-cp311-win_amd64.whl ; platform_system == 'Windows' and python_version == '3.11'", + "exllamav3 @ https://github.com/turboderp-org/exllamav3/releases/download/v0.0.1/exllamav3-0.0.1+cu128.torch2.7.0-cp310-cp310-win_amd64.whl ; platform_system == 'Windows' and python_version == '3.10'", + "exllamav3 @ https://github.com/turboderp-org/exllamav3/releases/download/v0.0.1/exllamav3-0.0.1+cu128.torch2.7.0-cp313-cp313-linux_x86_64.whl ; platform_system == 'Linux' and platform_machine == 'x86_64' and python_version == '3.13'", + "exllamav3 @ https://github.com/turboderp-org/exllamav3/releases/download/v0.0.1/exllamav3-0.0.1+cu128.torch2.7.0-cp312-cp312-linux_x86_64.whl ; platform_system == 'Linux' and platform_machine == 'x86_64' and python_version == '3.12'", + "exllamav3 @ https://github.com/turboderp-org/exllamav3/releases/download/v0.0.1/exllamav3-0.0.1+cu128.torch2.7.0-cp311-cp311-linux_x86_64.whl ; platform_system == 'Linux' and platform_machine == 'x86_64' and python_version == '3.11'", + "exllamav3 @ https://github.com/turboderp-org/exllamav3/releases/download/v0.0.1/exllamav3-0.0.1+cu128.torch2.7.0-cp310-cp310-linux_x86_64.whl ; platform_system == 'Linux' and platform_machine == 'x86_64' and python_version == '3.10'", + # Windows FA2 from https://github.com/kingbri1/flash-attention/releases "flash_attn @ https://github.com/kingbri1/flash-attention/releases/download/v2.7.4.post1/flash_attn-2.7.4.post1+cu128torch2.7.0cxx11abiFALSE-cp313-cp313-win_amd64.whl ; platform_system == 'Windows' and python_version == '3.13'", "flash_attn @ https://github.com/kingbri1/flash-attention/releases/download/v2.7.4.post1/flash_attn-2.7.4.post1+cu128torch2.7.0cxx11abiFALSE-cp312-cp312-win_amd64.whl ; platform_system == 'Windows' and python_version == '3.12'", From 48ea1737cfa3b9b092fa49d0335fcf810c06cd41 Mon Sep 17 00:00:00 2001 From: kingbri <8082010+kingbri1@users.noreply.github.com> Date: Fri, 9 May 2025 21:59:00 -0400 Subject: [PATCH 40/45] Startup: Check agnostically for inference deps If an inference dep isn't present, force exit the application. This occurs after all subcommands have been appropriately processed. Signed-off-by: kingbri <8082010+kingbri1@users.noreply.github.com> --- common/optional_dependencies.py | 2 +- main.py | 18 ++++++++++++++++-- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/common/optional_dependencies.py b/common/optional_dependencies.py index 0c1e7ff..b449c2e 100644 --- a/common/optional_dependencies.py +++ b/common/optional_dependencies.py @@ -26,7 +26,7 @@ class DependenciesModel(BaseModel): @computed_field @property def inference(self) -> bool: - return self.torch and (self.exllamav2 or self.exllamav3) and self.flash_attn + return self.torch and (self.exllamav2 or (self.exllamav3 and self.flash_attn)) def is_installed(package_name: str) -> bool: diff --git a/main.py b/main.py index df4e472..0115421 100644 --- a/main.py +++ b/main.py @@ -15,6 +15,7 @@ from common.auth import load_auth_keys from common.actions import run_subcommand from common.logger import setup_logger from common.networking import is_port_in_use +from common.optional_dependencies import dependencies from common.signals import signal_handler from common.tabby_config import config from endpoints.server import start_api @@ -139,8 +140,21 @@ def entrypoint( "UNSAFE: Skipping ExllamaV2 version check.\n" "If you aren't a developer, please keep this off!" 
) - else: - check_exllama_version() + elif not dependencies.inference: + install_message = ( + f"ERROR: Inference dependencies for TabbyAPI are not installed.\n" + "Please update your environment by running an update script " + "(update_scripts/" + f"update_deps.{'bat' if platform.system() == 'Windows' else 'sh'})\n\n" + "Or you can manually run a requirements update " + "using the following command:\n\n" + "For CUDA 12.1:\n" + "pip install --upgrade .[cu121]\n\n" + "For ROCm:\n" + "pip install --upgrade .[amd]\n\n" + ) + + raise SystemExit(install_message) # Enable CUDA malloc backend if config.developer.cuda_malloc_backend: From 25c77ebf7792bdd65fcfa2ed96cd19e5d8ff4759 Mon Sep 17 00:00:00 2001 From: kingbri <8082010+kingbri1@users.noreply.github.com> Date: Fri, 9 May 2025 22:08:15 -0400 Subject: [PATCH 41/45] Model: Remove exllamav2-specific version check No longer necessary thanks to the agnostic check. Signed-off-by: kingbri <8082010+kingbri1@users.noreply.github.com> --- backends/exllamav2/version.py | 37 ----------------------------------- 1 file changed, 37 deletions(-) delete mode 100644 backends/exllamav2/version.py diff --git a/backends/exllamav2/version.py b/backends/exllamav2/version.py deleted file mode 100644 index 08d0bda..0000000 --- a/backends/exllamav2/version.py +++ /dev/null @@ -1,37 +0,0 @@ -import platform -from packaging import version -from importlib.metadata import version as package_version -from loguru import logger -from common.optional_dependencies import dependencies - - -def check_exllama_version(): - """Verifies the exllama version""" - - install_message = ( - "Please update your environment by running an update script " - "(update_scripts/" - f"update_deps.{'bat' if platform.system() == 'Windows' else 'sh'})\n\n" - "Or you can manually run a requirements update " - "using the following command:\n\n" - "For CUDA 12.1:\n" - "pip install --upgrade .[cu121]\n\n" - "For ROCm:\n" - "pip install --upgrade .[amd]\n\n" - ) - - if not dependencies.exllamav2: - raise SystemExit(("Exllamav2 is not installed.\n" + install_message)) - - required_version = version.parse("0.2.8") - current_version = version.parse(package_version("exllamav2").split("+")[0]) - - unsupported_message = ( - f"ERROR: TabbyAPI requires ExLlamaV2 {required_version} " - f"or greater. Your current version is {current_version}.\n" + install_message - ) - - if current_version < required_version: - raise SystemExit(unsupported_message) - else: - logger.info(f"ExllamaV2 version: {current_version}") From 42346c6b39077aeadd29147f7b783418d409127e Mon Sep 17 00:00:00 2001 From: kingbri <8082010+kingbri1@users.noreply.github.com> Date: Fri, 9 May 2025 22:11:05 -0400 Subject: [PATCH 42/45] Sampling: Remove skip_special_tokens This parameter is way too confusing and does not make sense in the modern LLM space. Change approved by all maintainers. 
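For API clients the migration is simply to stop sending the field. A
hypothetical request body shown as a Python dict (only skip_special_tokens
comes from this patch; the other keys are illustrative):

    # Before: explicit opt-out of special-token skipping
    payload = {
        "prompt": "Hello",
        "max_tokens": 32,
        "skip_special_tokens": False,
    }

    # After: the field no longer exists on BaseSamplerRequest; special-token
    # handling is left to the backend (see the decode_special_tokens change
    # later in this series).
    payload = {
        "prompt": "Hello",
        "max_tokens": 32,
    }

Existing sampler override presets should likewise drop their
skip_special_tokens block, as the sample preset change below does.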
Signed-off-by: kingbri <8082010+kingbri1@users.noreply.github.com> --- backends/exllamav2/model.py | 1 - common/sampling.py | 5 ----- sampler_overrides/sample_preset.yml | 3 --- 3 files changed, 9 deletions(-) diff --git a/backends/exllamav2/model.py b/backends/exllamav2/model.py index 22200ec..c2a084b 100644 --- a/backends/exllamav2/model.py +++ b/backends/exllamav2/model.py @@ -1373,7 +1373,6 @@ class ExllamaV2Container(BaseModelContainer): min_new_tokens=params.min_tokens, gen_settings=gen_settings, stop_conditions=stop_conditions, - decode_special_tokens=not params.skip_special_tokens, filters=grammar_handler.filters, filter_prefer_eos=bool(grammar_handler.filters), return_probs=params.logprobs > 0, diff --git a/common/sampling.py b/common/sampling.py index fc9f9bc..7e1d810 100644 --- a/common/sampling.py +++ b/common/sampling.py @@ -215,11 +215,6 @@ class BaseSamplerRequest(BaseModel): examples=[False], ) - skip_special_tokens: Optional[bool] = Field( - default_factory=lambda: get_default_sampler_value("skip_special_tokens", True), - examples=[True], - ) - logit_bias: Optional[Dict[int, float]] = Field( default_factory=lambda: get_default_sampler_value("logit_bias"), examples=[{"1": 10, "2": 50}], diff --git a/sampler_overrides/sample_preset.yml b/sampler_overrides/sample_preset.yml index 9225976..907bbdd 100644 --- a/sampler_overrides/sample_preset.yml +++ b/sampler_overrides/sample_preset.yml @@ -136,9 +136,6 @@ add_bos_token: ban_eos_token: override: false force: false -skip_special_tokens: - override: true - force: false logit_bias: override: force: false From 83826b56bee25e3a7db3fddff6aeb0e8062d6d4f Mon Sep 17 00:00:00 2001 From: kingbri <8082010+kingbri1@users.noreply.github.com> Date: Fri, 9 May 2025 22:14:11 -0400 Subject: [PATCH 43/45] Main: Remove unnecessary import Signed-off-by: kingbri <8082010+kingbri1@users.noreply.github.com> --- main.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/main.py b/main.py index 0115421..2159508 100644 --- a/main.py +++ b/main.py @@ -20,8 +20,6 @@ from common.signals import signal_handler from common.tabby_config import config from endpoints.server import start_api -from backends.exllamav2.version import check_exllama_version - async def entrypoint_async(): """Async entry function for program startup""" From 656af41b5d24520f37ceec5147ba36c3ae469f01 Mon Sep 17 00:00:00 2001 From: kingbri <8082010+kingbri1@users.noreply.github.com> Date: Fri, 9 May 2025 22:25:50 -0400 Subject: [PATCH 44/45] Model: Always enable decode_special_tokens The frontend should handle the special tokens if they get emitted. Signed-off-by: kingbri <8082010+kingbri1@users.noreply.github.com> --- backends/exllamav2/model.py | 1 + 1 file changed, 1 insertion(+) diff --git a/backends/exllamav2/model.py b/backends/exllamav2/model.py index c2a084b..65689f4 100644 --- a/backends/exllamav2/model.py +++ b/backends/exllamav2/model.py @@ -1373,6 +1373,7 @@ class ExllamaV2Container(BaseModelContainer): min_new_tokens=params.min_tokens, gen_settings=gen_settings, stop_conditions=stop_conditions, + decode_special_tokens=True, filters=grammar_handler.filters, filter_prefer_eos=bool(grammar_handler.filters), return_probs=params.logprobs > 0, From 6379081dd83eef22db5966ae224c61753e365af3 Mon Sep 17 00:00:00 2001 From: kingbri <8082010+kingbri1@users.noreply.github.com> Date: Sat, 10 May 2025 19:07:35 -0400 Subject: [PATCH 45/45] Sampling: Make add_bos_token override concise Also set the default to None so text completions follows the same pattern. 
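The intended fallback behavior can be sketched as follows (a rough
illustration, not code from this diff; resolve_add_bos_token and its
arguments are hypothetical names):

    from typing import Optional

    def resolve_add_bos_token(
        request_value: Optional[bool],
        tokenizer_default: bool,
    ) -> bool:
        # An explicit True/False from the request (or a forced sampler
        # override) still wins; None now means "no preference", so the
        # model's own tokenization config decides.
        if request_value is not None:
            return request_value
        return tokenizer_default

    assert resolve_add_bos_token(None, tokenizer_default=True) is True
    assert resolve_add_bos_token(False, tokenizer_default=True) is False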
Signed-off-by: kingbri <8082010+kingbri1@users.noreply.github.com> --- common/sampling.py | 2 +- endpoints/OAI/types/chat_completion.py | 2 +- sampler_overrides/sample_preset.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/common/sampling.py b/common/sampling.py index 7e1d810..49be5b9 100644 --- a/common/sampling.py +++ b/common/sampling.py @@ -205,7 +205,7 @@ class BaseSamplerRequest(BaseModel): ) add_bos_token: Optional[bool] = Field( - default_factory=lambda: get_default_sampler_value("add_bos_token", True) + default_factory=lambda: get_default_sampler_value("add_bos_token") ) ban_eos_token: Optional[bool] = Field( diff --git a/endpoints/OAI/types/chat_completion.py b/endpoints/OAI/types/chat_completion.py index fb73eb9..36934a8 100644 --- a/endpoints/OAI/types/chat_completion.py +++ b/endpoints/OAI/types/chat_completion.py @@ -84,7 +84,7 @@ class ChatCompletionRequest(CommonCompletionRequest): # Chat completions requests do not have a BOS token preference. Backend # respects the tokenization config for the individual model. - add_bos_token: Optional[bool] = Field(default=None) + add_bos_token: Optional[bool] = None @field_validator("add_bos_token", mode="after") def force_bos_token(cls, v): diff --git a/sampler_overrides/sample_preset.yml b/sampler_overrides/sample_preset.yml index 907bbdd..0a2d05c 100644 --- a/sampler_overrides/sample_preset.yml +++ b/sampler_overrides/sample_preset.yml @@ -131,7 +131,7 @@ mirostat_eta: # MARK: Token options add_bos_token: - override: true + override: force: false ban_eos_token: override: false