OAI: Add cache_mode parameter to model
The user can choose which cache mode to use when loading a model, but this was previously overlooked. The cache mode is now also reported when fetching model info.

Signed-off-by: kingbri <bdashore3@proton.me>
parent ed868fd262
commit 1a331afe3a

2 changed files with 3 additions and 0 deletions
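In practice, the new parameter flows through the API roughly like this. A hedged sketch: the routes, port, and the "name" field are assumptions not shown in this commit; only cache_mode itself comes from the diff below.

```python
import requests

BASE_URL = "http://localhost:5000"  # assumed host/port

# Ask for the 8-bit cache instead of the FP16 default when loading.
requests.post(f"{BASE_URL}/v1/model/load",
              json={"name": "my-model", "cache_mode": "FP8"})

# Fetching model info should now echo the active cache mode.
info = requests.get(f"{BASE_URL}/v1/model").json()
print(info["parameters"]["cache_mode"])  # "FP8"
```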
@@ -8,6 +8,7 @@ class ModelCardParameters(BaseModel):
     rope_scale: Optional[float] = 1.0
     rope_alpha: Optional[float] = 1.0
     prompt_template: Optional[str] = None
+    cache_mode: Optional[str] = "FP16"
     draft: Optional['ModelCard'] = None

 class ModelCard(BaseModel):
@@ -37,6 +38,7 @@ class ModelLoadRequest(BaseModel):
     rope_alpha: Optional[float] = 1.0
     no_flash_attention: Optional[bool] = False
     # low_mem: Optional[bool] = False
+    cache_mode: Optional[str] = "FP16"
     prompt_template: Optional[str] = None
     draft: Optional[DraftModelLoadRequest] = None
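Since cache_mode is an optional Pydantic field, omitting it keeps the FP16 default while clients opt into FP8 explicitly. A minimal, self-contained reconstruction of just the fields touched above:

```python
from typing import Optional
from pydantic import BaseModel

class ModelLoadRequest(BaseModel):
    # Abbreviated reconstruction; only fields relevant to this commit.
    rope_alpha: Optional[float] = 1.0
    no_flash_attention: Optional[bool] = False
    cache_mode: Optional[str] = "FP16"  # added by this commit
    prompt_template: Optional[str] = None

print(ModelLoadRequest().cache_mode)                  # FP16
print(ModelLoadRequest(cache_mode="FP8").cache_mode)  # FP8
```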
main.py (1 addition)
@@ -82,6 +82,7 @@ async def get_current_model():
             rope_scale = model_container.config.scale_pos_emb,
             rope_alpha = model_container.config.scale_alpha_value,
             max_seq_len = model_container.config.max_seq_len,
+            cache_mode = "FP8" if model_container.cache_fp8 else "FP16",
             prompt_template = unwrap(model_container.prompt_template, "auto")
         ),
         logging = gen_logging.config
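The container only tracks a boolean cache_fp8 flag at this point, so the reported string is derived with a conditional. Going the other direction, a loader might map the incoming cache_mode string onto a cache implementation; a hypothetical sketch, not part of this diff, where the create_cache helper and the exllamav2 class names are assumptions:

```python
# Hypothetical helper: choose a KV cache implementation from the
# request's cache_mode string. exllamav2 ships an FP16 cache and an
# 8-bit variant; any unrecognized value falls back to FP16.
from exllamav2 import ExLlamaV2Cache, ExLlamaV2Cache_8bit

def create_cache(model, cache_mode: str = "FP16"):
    if cache_mode == "FP8":
        return ExLlamaV2Cache_8bit(model)
    return ExLlamaV2Cache(model)
```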