diff --git a/endpoints/OAI/utils/completion.py b/endpoints/OAI/utils/completion.py
index df4bf19..c8b02c8 100644
--- a/endpoints/OAI/utils/completion.py
+++ b/endpoints/OAI/utils/completion.py
@@ -130,7 +130,7 @@ async def load_inline_model(model_name: str, request: Request):
         raise HTTPException(401, error_message)
 
-    if not unwrap(config.model.get("inline_model_loading"), False):
+    if not config.model.inline_model_loading:
         logger.warning(
             f"Unable to switch model to {model_name} because "
             '"inline_model_loading" is not True in config.yml.'
@@ -138,7 +138,7 @@ async def load_inline_model(model_name: str, request: Request):
 
         return
 
-    model_path = pathlib.Path(unwrap(config.model.get("model_dir"), "models"))
+    model_path = pathlib.Path(config.model.model_dir)
     model_path = model_path / model_name
 
     # Model path doesn't exist
diff --git a/endpoints/core/types/model.py b/endpoints/core/types/model.py
index dc5da0d..eb2d431 100644
--- a/endpoints/core/types/model.py
+++ b/endpoints/core/types/model.py
@@ -118,11 +118,7 @@ class EmbeddingModelLoadRequest(BaseModel):
     name: str
 
     # Set default from the config
-    embeddings_device: Optional[str] = Field(
-        default_factory=lambda: unwrap(
-            config.embeddings.get("embeddings_device"), "cpu"
-        )
-    )
+    embeddings_device: Optional[str] = Field(config.embeddings.embeddings_device)
 
 
 class ModelLoadResponse(BaseModel):