API: Split into separate folder

Moving the API into its own directory compartmentalizes it and pares
the main file down to just bootstrapping and the entry point.

Signed-off-by: kingbri <bdashore3@proton.me>
kingbri 2024-03-11 22:45:30 -04:00 committed by Brian Dashore
parent 5a2de30066
commit 104a6121cb
13 changed files with 635 additions and 621 deletions
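
With this layout, the entry point only has to import and call start_api from the new endpoints/OAI/app.py shown below. A minimal sketch of what the slimmed-down main file could look like (the real main.py is not part of this diff; host and port are placeholders for whatever config.yml provides):

from endpoints.OAI.app import start_api

if __name__ == "__main__":
    # Hypothetical bootstrap; the actual entry point also handles config
    # loading, argument parsing, and initial model loading.
    start_api("127.0.0.1", 5000)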

endpoints/OAI/app.py (new file)
@@ -0,0 +1,620 @@
import pathlib
from sse_starlette import EventSourceResponse
import uvicorn
from asyncio import CancelledError
from uuid import uuid4
from jinja2 import TemplateError
from fastapi import FastAPI, Depends, HTTPException, Request
from fastapi.concurrency import run_in_threadpool
from fastapi.middleware.cors import CORSMiddleware
from functools import partial
from loguru import logger
from common import config, model, gen_logging, sampling
from common.auth import check_admin_key, check_api_key
from common.generators import (
call_with_semaphore,
generate_with_semaphore,
release_semaphore,
)
from common.logger import UVICORN_LOG_CONFIG
from common.templating import (
get_all_templates,
get_prompt_from_template,
get_template_from_file,
)
from common.utils import (
get_generator_error,
handle_request_error,
unwrap,
)
from endpoints.OAI.types.completion import CompletionRequest
from endpoints.OAI.types.chat_completion import ChatCompletionRequest
from endpoints.OAI.types.lora import (
LoraCard,
LoraList,
LoraLoadRequest,
LoraLoadResponse,
)
from endpoints.OAI.types.model import (
ModelCard,
ModelLoadRequest,
ModelLoadResponse,
ModelCardParameters,
)
from endpoints.OAI.types.sampler_overrides import SamplerOverrideSwitchRequest
from endpoints.OAI.types.template import TemplateList, TemplateSwitchRequest
from endpoints.OAI.types.token import (
TokenEncodeRequest,
TokenEncodeResponse,
TokenDecodeRequest,
TokenDecodeResponse,
)
from endpoints.OAI.utils.completion import (
create_completion_response,
create_chat_completion_response,
create_chat_completion_stream_chunk,
)
from endpoints.OAI.utils.model import get_model_list
from endpoints.OAI.utils.lora import get_lora_list
app = FastAPI(
title="TabbyAPI",
summary="An OAI compatible exllamav2 API that's both lightweight and fast",
description=(
"This docs page is not meant to send requests! Please use a service "
"like Postman or a frontend UI."
),
)
# Allow CORS requests
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
async def check_model_container():
"""FastAPI depends that checks if a model isn't loaded or currently loading."""
if model.container is None or not (
model.container.model_is_loading or model.container.model_loaded
):
error_message = handle_request_error(
"No models are currently loaded.",
exc_info=False,
).error.message
raise HTTPException(400, error_message)
# Model list endpoint
@app.get("/v1/models", dependencies=[Depends(check_api_key)])
@app.get("/v1/model/list", dependencies=[Depends(check_api_key)])
async def list_models():
"""Lists all models in the model directory."""
model_config = config.model_config()
model_dir = unwrap(model_config.get("model_dir"), "models")
model_path = pathlib.Path(model_dir)
draft_model_dir = config.draft_model_config().get("draft_model_dir")
models = get_model_list(model_path.resolve(), draft_model_dir)
if unwrap(model_config.get("use_dummy_models"), False):
models.data.insert(0, ModelCard(id="gpt-3.5-turbo"))
return models
# Currently loaded model endpoint
@app.get(
"/v1/model",
dependencies=[Depends(check_api_key), Depends(check_model_container)],
)
async def get_current_model():
"""Returns the currently loaded model."""
model_params = model.container.get_model_parameters()
draft_model_params = model_params.pop("draft", {})
if draft_model_params:
model_params["draft"] = ModelCard(
id=unwrap(draft_model_params.get("name"), "unknown"),
parameters=ModelCardParameters.model_validate(draft_model_params),
)
else:
draft_model_params = None
model_card = ModelCard(
id=unwrap(model_params.pop("name", None), "unknown"),
parameters=ModelCardParameters.model_validate(model_params),
logging=gen_logging.PREFERENCES,
)
if draft_model_params:
draft_card = ModelCard(
id=unwrap(draft_model_params.pop("name", None), "unknown"),
parameters=ModelCardParameters.model_validate(draft_model_params),
)
model_card.parameters.draft = draft_card
return model_card
@app.get("/v1/model/draft/list", dependencies=[Depends(check_api_key)])
async def list_draft_models():
"""Lists all draft models in the model directory."""
draft_model_dir = unwrap(
config.draft_model_config().get("draft_model_dir"), "models"
)
draft_model_path = pathlib.Path(draft_model_dir)
models = get_model_list(draft_model_path.resolve())
return models
# Load model endpoint
@app.post("/v1/model/load", dependencies=[Depends(check_admin_key)])
async def load_model(request: Request, data: ModelLoadRequest):
"""Loads a model into the model container."""
# Verify request parameters
if not data.name:
raise HTTPException(400, "A model name was not provided.")
model_path = pathlib.Path(unwrap(config.model_config().get("model_dir"), "models"))
model_path = model_path / data.name
load_data = data.model_dump()
if data.draft:
if not data.draft.draft_model_name:
raise HTTPException(
400, "draft_model_name was not found inside the draft object."
)
load_data["draft"]["draft_model_dir"] = unwrap(
config.draft_model_config().get("draft_model_dir"), "models"
)
if not model_path.exists():
raise HTTPException(400, "model_path does not exist. Check model_name?")
async def generator():
"""Request generation wrapper for the loading process."""
load_status = model.load_model_gen(model_path, **load_data)
try:
async for module, modules, model_type in load_status:
if await request.is_disconnected():
release_semaphore()
logger.error(
"Model load cancelled by user. "
"Please make sure to run unload to free up resources."
)
return
if module != 0:
response = ModelLoadResponse(
model_type=model_type,
module=module,
modules=modules,
status="processing",
)
yield response.model_dump_json()
if module == modules:
response = ModelLoadResponse(
model_type=model_type,
module=module,
modules=modules,
status="finished",
)
yield response.model_dump_json()
except CancelledError:
logger.error(
"Model load cancelled by user. "
"Please make sure to run unload to free up resources."
)
except Exception as exc:
yield get_generator_error(str(exc))
# Determine whether to use or skip the queue
if data.skip_queue:
logger.warning(
"Model load request is skipping the completions queue. "
"Unexpected results may occur."
)
generator_callback = generator
else:
generator_callback = partial(generate_with_semaphore, generator)
return EventSourceResponse(generator_callback())
# Unload model endpoint
@app.post(
"/v1/model/unload",
dependencies=[Depends(check_admin_key), Depends(check_model_container)],
)
async def unload_model():
"""Unloads the currently loaded model."""
await model.unload_model()
@app.get("/v1/templates", dependencies=[Depends(check_api_key)])
@app.get("/v1/template/list", dependencies=[Depends(check_api_key)])
async def get_templates():
templates = get_all_templates()
template_strings = list(map(lambda template: template.stem, templates))
return TemplateList(data=template_strings)
@app.post(
"/v1/template/switch",
dependencies=[Depends(check_admin_key), Depends(check_model_container)],
)
async def switch_template(data: TemplateSwitchRequest):
"""Switch the currently loaded template"""
if not data.name:
raise HTTPException(400, "New template name not found.")
try:
template = get_template_from_file(data.name)
model.container.prompt_template = template
except FileNotFoundError as e:
raise HTTPException(400, "Template does not exist. Check the name?") from e
@app.post(
"/v1/template/unload",
dependencies=[Depends(check_admin_key), Depends(check_model_container)],
)
async def unload_template():
"""Unloads the currently selected template"""
model.container.prompt_template = None
# Sampler override endpoints
@app.get("/v1/sampling/overrides", dependencies=[Depends(check_api_key)])
@app.get("/v1/sampling/override/list", dependencies=[Depends(check_api_key)])
async def list_sampler_overrides():
"""API wrapper to list all currently applied sampler overrides"""
return sampling.overrides
@app.post(
"/v1/sampling/override/switch",
dependencies=[Depends(check_admin_key)],
)
async def switch_sampler_override(data: SamplerOverrideSwitchRequest):
"""Switch the currently loaded override preset"""
if data.preset:
try:
sampling.overrides_from_file(data.preset)
except FileNotFoundError as e:
raise HTTPException(
400, "Sampler override preset does not exist. Check the name?"
) from e
elif data.overrides:
sampling.overrides_from_dict(data.overrides)
else:
raise HTTPException(
400, "A sampler override preset or dictionary wasn't provided."
)
@app.post(
"/v1/sampling/override/unload",
dependencies=[Depends(check_admin_key)],
)
async def unload_sampler_override():
"""Unloads the currently selected override preset"""
sampling.overrides_from_dict({})
# Lora list endpoint
@app.get("/v1/loras", dependencies=[Depends(check_api_key)])
@app.get("/v1/lora/list", dependencies=[Depends(check_api_key)])
async def get_all_loras():
"""Lists all LoRAs in the lora directory."""
lora_path = pathlib.Path(unwrap(config.lora_config().get("lora_dir"), "loras"))
loras = get_lora_list(lora_path.resolve())
return loras
# Currently loaded loras endpoint
@app.get(
"/v1/lora",
dependencies=[Depends(check_api_key), Depends(check_model_container)],
)
async def get_active_loras():
"""Returns the currently loaded loras."""
active_loras = LoraList(
data=list(
map(
lambda lora: LoraCard(
id=pathlib.Path(lora.lora_path).parent.name,
scaling=lora.lora_scaling * lora.lora_r / lora.lora_alpha,
),
model.container.active_loras,
)
)
)
return active_loras
# Load lora endpoint
@app.post(
"/v1/lora/load",
dependencies=[Depends(check_admin_key), Depends(check_model_container)],
)
async def load_lora(data: LoraLoadRequest):
"""Loads a LoRA into the model container."""
if not data.loras:
raise HTTPException(400, "List of loras to load is not found.")
lora_dir = pathlib.Path(unwrap(config.lora_config().get("lora_dir"), "loras"))
if not lora_dir.exists():
raise HTTPException(
400,
"A parent lora directory does not exist. Check your config.yml?",
)
# Clean-up existing loras if present
def load_loras_internal():
if len(model.container.active_loras) > 0:
unload_loras()
result = model.container.load_loras(lora_dir, **data.model_dump())
return LoraLoadResponse(
success=unwrap(result.get("success"), []),
failure=unwrap(result.get("failure"), []),
)
internal_callback = partial(run_in_threadpool, load_loras_internal)
# Determine whether to skip the queue
if data.skip_queue:
logger.warning(
"Lora load request is skipping the completions queue. "
"Unexpected results may occur."
)
return await internal_callback()
else:
return await call_with_semaphore(internal_callback)
# Unload lora endpoint
@app.post(
"/v1/lora/unload",
dependencies=[Depends(check_admin_key), Depends(check_model_container)],
)
async def unload_loras():
"""Unloads the currently loaded loras."""
model.container.unload(loras_only=True)
# Encode tokens endpoint
@app.post(
"/v1/token/encode",
dependencies=[Depends(check_api_key), Depends(check_model_container)],
)
async def encode_tokens(data: TokenEncodeRequest):
"""Encodes a string into tokens."""
raw_tokens = model.container.encode_tokens(data.text, **data.get_params())
tokens = unwrap(raw_tokens, [])
response = TokenEncodeResponse(tokens=tokens, length=len(tokens))
return response
# Decode tokens endpoint
@app.post(
"/v1/token/decode",
dependencies=[Depends(check_api_key), Depends(check_model_container)],
)
async def decode_tokens(data: TokenDecodeRequest):
"""Decodes tokens into a string."""
message = model.container.decode_tokens(data.tokens, **data.get_params())
response = TokenDecodeResponse(text=unwrap(message, ""))
return response
# Completions endpoint
@app.post(
"/v1/completions",
dependencies=[Depends(check_api_key), Depends(check_model_container)],
)
async def generate_completion(request: Request, data: CompletionRequest):
"""Generates a completion from a prompt."""
model_path = model.container.get_model_path()
if isinstance(data.prompt, list):
data.prompt = "\n".join(data.prompt)
disable_request_streaming = unwrap(
config.developer_config().get("disable_request_streaming"), False
)
if data.stream and not disable_request_streaming:
async def generator():
try:
new_generation = model.container.generate_gen(
data.prompt, **data.to_gen_params()
)
for generation in new_generation:
# Get out if the request gets disconnected
if await request.is_disconnected():
release_semaphore()
logger.error("Completion generation cancelled by user.")
return
response = create_completion_response(generation, model_path.name)
yield response.model_dump_json()
# Yield a finish response on successful generation
yield "[DONE]"
except Exception:
yield get_generator_error(
"Completion aborted. Please check the server console."
)
return EventSourceResponse(generate_with_semaphore(generator))
try:
generation = await call_with_semaphore(
partial(
run_in_threadpool,
model.container.generate,
data.prompt,
**data.to_gen_params(),
)
)
response = create_completion_response(generation, model_path.name)
return response
except Exception as exc:
error_message = handle_request_error(
"Completion aborted. Maybe the model was unloaded? "
"Please check the server console."
).error.message
# Server error if there's a generation exception
raise HTTPException(503, error_message) from exc
# Chat completions endpoint
@app.post(
"/v1/chat/completions",
dependencies=[Depends(check_api_key), Depends(check_model_container)],
)
async def generate_chat_completion(request: Request, data: ChatCompletionRequest):
"""Generates a chat completion from a prompt."""
if model.container.prompt_template is None:
raise HTTPException(
422,
"This endpoint is disabled because a prompt template is not set.",
)
model_path = model.container.get_model_path()
if isinstance(data.messages, str):
prompt = data.messages
else:
try:
special_tokens_dict = model.container.get_special_tokens(
unwrap(data.add_bos_token, True),
unwrap(data.ban_eos_token, False),
)
prompt = get_prompt_from_template(
data.messages,
model.container.prompt_template,
data.add_generation_prompt,
special_tokens_dict,
)
except KeyError as exc:
raise HTTPException(
400,
"Could not find a Conversation from prompt template "
f"'{model.container.prompt_template.name}'. "
"Check your spelling?",
) from exc
except TemplateError as exc:
raise HTTPException(
400,
f"TemplateError: {str(exc)}",
) from exc
disable_request_streaming = unwrap(
config.developer_config().get("disable_request_streaming"), False
)
if data.stream and not disable_request_streaming:
const_id = f"chatcmpl-{uuid4().hex}"
async def generator():
"""Generator for the generation process."""
try:
new_generation = model.container.generate_gen(
prompt, **data.to_gen_params()
)
for generation in new_generation:
# Get out if the request gets disconnected
if await request.is_disconnected():
release_semaphore()
logger.error("Chat completion generation cancelled by user.")
return
response = create_chat_completion_stream_chunk(
const_id, generation, model_path.name
)
yield response.model_dump_json()
# Yield a finish response on successful generation
finish_response = create_chat_completion_stream_chunk(
const_id, finish_reason="stop"
)
yield finish_response.model_dump_json()
except Exception:
yield get_generator_error(
"Chat completion aborted. Please check the server console."
)
return EventSourceResponse(generate_with_semaphore(generator))
try:
generation = await call_with_semaphore(
partial(
run_in_threadpool,
model.container.generate,
prompt,
**data.to_gen_params(),
)
)
response = create_chat_completion_response(generation, model_path.name)
return response
except Exception as exc:
error_message = handle_request_error(
"Chat completion aborted. Maybe the model was unloaded? "
"Please check the server console."
).error.message
# Server error if there's a generation exception
raise HTTPException(503, error_message) from exc
def start_api(host: str, port: int):
"""Isolated function to start the API server"""
# TODO: Move OAI API to a separate folder
logger.info(f"Developer documentation: http://{host}:{port}/redoc")
logger.info(f"Completions: http://{host}:{port}/v1/completions")
logger.info(f"Chat completions: http://{host}:{port}/v1/chat/completions")
uvicorn.run(
app,
host=host,
port=port,
log_config=UVICORN_LOG_CONFIG,
)
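
Taken together, these routes expose an OpenAI-style HTTP surface. Below is a hedged client-side sketch of hitting /v1/completions, plain and streamed. The base URL, API key value, and "x-api-key" header name are assumptions here; the actual key check lives in common/auth.py, outside this diff.

import json

import requests  # third-party: pip install requests

BASE = "http://127.0.0.1:5000"  # placeholder host/port
HEADERS = {"x-api-key": "example-key"}  # header name is an assumption

# Plain (non-streaming) completion
body = {"prompt": "Once upon a time", "stream": False}
resp = requests.post(f"{BASE}/v1/completions", headers=HEADERS, json=body)
print(resp.json()["choices"][0]["text"])

# Streamed completion: the endpoint emits Server-Sent Events, one
# CompletionResponse JSON per "data:" line, ended by the "[DONE]" sentinel
# yielded in the generator above.
with requests.post(
    f"{BASE}/v1/completions",
    headers=HEADERS,
    json={"prompt": "Once upon a time", "stream": True},
    stream=True,
) as resp:
    for line in resp.iter_lines(decode_unicode=True):
        if not line or not line.startswith("data:"):
            continue
        payload = line[len("data:"):].strip()
        if payload == "[DONE]":
            break
        print(json.loads(payload)["choices"][0]["text"], end="", flush=True)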

endpoints/OAI/types/chat_completion.py (new file)
@@ -0,0 +1,63 @@
from pydantic import BaseModel, Field
from time import time
from typing import Union, List, Optional, Dict
from uuid import uuid4
from endpoints.OAI.types.common import UsageStats, CommonCompletionRequest
class ChatCompletionLogprob(BaseModel):
token: str
logprob: float
top_logprobs: Optional[List["ChatCompletionLogprob"]] = None
class ChatCompletionLogprobs(BaseModel):
content: List[ChatCompletionLogprob] = Field(default_factory=list)
class ChatCompletionMessage(BaseModel):
role: Optional[str] = None
content: Optional[str] = None
class ChatCompletionRespChoice(BaseModel):
# Index is 0 since we aren't using multiple choices
index: int = 0
finish_reason: str
message: ChatCompletionMessage
logprobs: Optional[ChatCompletionLogprobs] = None
class ChatCompletionStreamChoice(BaseModel):
# Index is 0 since we aren't using multiple choices
index: int = 0
finish_reason: Optional[str]
delta: Union[ChatCompletionMessage, dict] = {}
logprobs: Optional[ChatCompletionLogprobs] = None
# Inherited from common request
class ChatCompletionRequest(CommonCompletionRequest):
# Messages
# Take in a string as well even though it's not part of the OAI spec
messages: Union[str, List[Dict[str, str]]]
prompt_template: Optional[str] = None
add_generation_prompt: Optional[bool] = True
class ChatCompletionResponse(BaseModel):
id: str = Field(default_factory=lambda: f"chatcmpl-{uuid4().hex}")
choices: List[ChatCompletionRespChoice]
created: int = Field(default_factory=lambda: int(time()))
model: str
object: str = "chat.completion"
usage: Optional[UsageStats] = None
class ChatCompletionStreamChunk(BaseModel):
id: str = Field(default_factory=lambda: f"chatcmpl-{uuid4().hex}")
choices: List[ChatCompletionStreamChoice]
created: int = Field(default_factory=lambda: int(time()))
model: str
object: str = "chat.completion.chunk"

endpoints/OAI/types/common.py (new file)
@@ -0,0 +1,47 @@
""" Common types for OAI. """
from pydantic import BaseModel, Field
from typing import Optional
from common.sampling import BaseSamplerRequest
class UsageStats(BaseModel):
"""Represents usage stats."""
prompt_tokens: int
completion_tokens: int
total_tokens: int
class CommonCompletionRequest(BaseSamplerRequest):
"""Represents a common completion request."""
# Model information
# This parameter is not used, the loaded model is used instead
model: Optional[str] = None
# Generation info (remainder is in BaseSamplerRequest superclass)
stream: Optional[bool] = False
logprobs: Optional[int] = 0
# Extra OAI request stuff
best_of: Optional[int] = Field(
description="Not parsed. Only used for OAI compliance.", default=None
)
echo: Optional[bool] = Field(
description="Not parsed. Only used for OAI compliance.", default=False
)
n: Optional[int] = Field(
description="Not parsed. Only used for OAI compliance.", default=1
)
suffix: Optional[str] = Field(
description="Not parsed. Only used for OAI compliance.", default=None
)
user: Optional[str] = Field(
description="Not parsed. Only used for OAI compliance.", default=None
)
def to_gen_params(self):
extra_gen_params = {"logprobs": self.logprobs}
return super().to_gen_params(**extra_gen_params)
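
Assuming BaseSamplerRequest (defined in common/sampling.py, outside this diff) gives all of its sampler fields defaults, a bare request exposes the OAI-compliance stubs like so. A sketch only, not verified against the real superclass:

req = CommonCompletionRequest()
print(req.stream, req.logprobs, req.n)  # -> False 0 1
# to_gen_params folds logprobs into whatever the superclass produces
print(req.to_gen_params())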

endpoints/OAI/types/completion.py (new file)
@@ -0,0 +1,46 @@
""" Completion API protocols """
from pydantic import BaseModel, Field
from time import time
from typing import Dict, List, Optional, Union
from uuid import uuid4
from endpoints.OAI.types.common import CommonCompletionRequest, UsageStats
class CompletionLogProbs(BaseModel):
"""Represents log probabilities for a completion request."""
text_offset: List[int] = Field(default_factory=list)
token_logprobs: List[Optional[float]] = Field(default_factory=list)
tokens: List[str] = Field(default_factory=list)
top_logprobs: List[Optional[Dict[str, float]]] = Field(default_factory=list)
class CompletionRespChoice(BaseModel):
"""Represents a single choice in a completion response."""
# Index is 0 since we aren't using multiple choices
index: int = 0
finish_reason: str
logprobs: Optional[CompletionLogProbs] = None
text: str
# Inherited from common request
class CompletionRequest(CommonCompletionRequest):
"""Represents a completion request."""
# Prompt can also contain token ids, but that's out of scope
# for this project.
prompt: Union[str, List[str]]
class CompletionResponse(BaseModel):
"""Represents a completion response."""
id: str = Field(default_factory=lambda: f"cmpl-{uuid4().hex}")
choices: List[CompletionRespChoice]
created: int = Field(default_factory=lambda: int(time()))
model: str
object: str = "text_completion"
usage: Optional[UsageStats] = None
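
The legacy text-completion shape mirrors the chat one. Assembled by hand for illustration:

choice = CompletionRespChoice(finish_reason="stop", text="Hello!")
response = CompletionResponse(choices=[choice], model="example-model")
print(response.model_dump_json())  # object is "text_completion"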

endpoints/OAI/types/lora.py (new file)
@@ -0,0 +1,42 @@
""" Lora types """
from pydantic import BaseModel, Field
from time import time
from typing import Optional, List
class LoraCard(BaseModel):
"""Represents a single Lora card."""
id: str = "test"
object: str = "lora"
created: int = Field(default_factory=lambda: int(time()))
owned_by: str = "tabbyAPI"
scaling: Optional[float] = None
class LoraList(BaseModel):
"""Represents a list of Lora cards."""
object: str = "list"
data: List[LoraCard] = Field(default_factory=list)
class LoraLoadInfo(BaseModel):
"""Represents a single Lora load info."""
name: str
scaling: Optional[float] = 1.0
class LoraLoadRequest(BaseModel):
"""Represents a Lora load request."""
loras: List[LoraLoadInfo]
skip_queue: bool = False
class LoraLoadResponse(BaseModel):
"""Represents a Lora load response."""
success: List[str] = Field(default_factory=list)
failure: List[str] = Field(default_factory=list)
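
For reference, a /v1/lora/load body under these models (the lora name is hypothetical):

request = LoraLoadRequest(loras=[LoraLoadInfo(name="example-lora", scaling=0.8)])
print(request.model_dump_json())
# -> {"loras":[{"name":"example-lora","scaling":0.8}],"skip_queue":false}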

endpoints/OAI/types/model.py (new file)
@@ -0,0 +1,109 @@
""" Contains model card types. """
from pydantic import BaseModel, Field, ConfigDict
from time import time
from typing import List, Optional
from common.gen_logging import GenLogPreferences
class ModelCardParameters(BaseModel):
"""Represents model card parameters."""
# Safe to do this since it's guaranteed to fetch a max seq len
# from model_container
max_seq_len: Optional[int] = None
rope_scale: Optional[float] = 1.0
rope_alpha: Optional[float] = 1.0
cache_mode: Optional[str] = "FP16"
prompt_template: Optional[str] = None
num_experts_per_token: Optional[int] = None
use_cfg: Optional[bool] = None
# Draft is another model, so include it in the card params
draft: Optional["ModelCard"] = None
class ModelCard(BaseModel):
"""Represents a single model card."""
id: str = "test"
object: str = "model"
created: int = Field(default_factory=lambda: int(time()))
owned_by: str = "tabbyAPI"
logging: Optional[GenLogPreferences] = None
parameters: Optional[ModelCardParameters] = None
class ModelList(BaseModel):
"""Represents a list of model cards."""
object: str = "list"
data: List[ModelCard] = Field(default_factory=list)
class DraftModelLoadRequest(BaseModel):
"""Represents a draft model load request."""
draft_model_name: str
draft_rope_scale: Optional[float] = 1.0
draft_rope_alpha: Optional[float] = Field(
description="Automatically calculated if not present",
default=None,
examples=[1.0],
)
class ModelLoadRequest(BaseModel):
"""Represents a model load request."""
name: str
# Max seq len is fetched from config.json of the model by default
max_seq_len: Optional[int] = Field(
description="Leave this blank to use the model's base sequence length",
default=None,
examples=[4096],
)
override_base_seq_len: Optional[int] = Field(
description=(
"Overrides the model's base sequence length. " "Leave blank if unsure"
),
default=None,
examples=[4096],
)
gpu_split_auto: Optional[bool] = True
autosplit_reserve: Optional[List[float]] = [96]
gpu_split: Optional[List[float]] = Field(
default_factory=list, examples=[[24.0, 20.0]]
)
rope_scale: Optional[float] = Field(
description="Automatically pulled from the model's config if not present",
default=None,
examples=[1.0],
)
rope_alpha: Optional[float] = Field(
description="Automatically calculated if not present",
default=None,
examples=[1.0],
)
no_flash_attention: Optional[bool] = False
# low_mem: Optional[bool] = False
cache_mode: Optional[str] = "FP16"
prompt_template: Optional[str] = None
num_experts_per_token: Optional[int] = None
use_cfg: Optional[bool] = None
fasttensors: Optional[bool] = False
draft: Optional[DraftModelLoadRequest] = None
skip_queue: Optional[bool] = False
class ModelLoadResponse(BaseModel):
"""Represents a model load response."""
# Avoids pydantic namespace warning
model_config = ConfigDict(protected_namespaces=[])
model_type: str = "model"
module: int
modules: int
status: str
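
A minimal /v1/model/load body only needs name; everything else falls back to the defaults above or to values read from the model's own config.json. A sketch with hypothetical folder names:

request = ModelLoadRequest(
    name="example-model",  # hypothetical folder under model_dir
    max_seq_len=4096,
    draft=DraftModelLoadRequest(draft_model_name="example-draft"),
)
print(request.model_dump_json(exclude_none=True))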

endpoints/OAI/types/sampler_overrides.py (new file)
@@ -0,0 +1,26 @@
from pydantic import BaseModel, Field
from typing import Optional
class SamplerOverrideSwitchRequest(BaseModel):
"""Sampler override switch request"""
preset: Optional[str] = Field(
default=None, description="Pass a sampler override preset name"
)
overrides: Optional[dict] = Field(
default=None,
description=(
"Sampling override parent takes in individual keys and overrides. "
+ "Ignored if preset is provided."
),
examples=[
{
"top_p": {
"override": 1.5,
"force": False,
}
}
],
)
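
Either field satisfies the switch endpoint above; a request with neither is rejected with a 400. Both forms, with placeholder values:

# By preset name (resolved from a file by sampling.overrides_from_file)
by_preset = SamplerOverrideSwitchRequest(preset="example-preset")

# Or as an inline dictionary, matching the documented example shape
by_dict = SamplerOverrideSwitchRequest(
    overrides={"top_p": {"override": 1.5, "force": False}}
)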

endpoints/OAI/types/template.py (new file)
@@ -0,0 +1,15 @@
from pydantic import BaseModel, Field
from typing import List
class TemplateList(BaseModel):
"""Represents a list of templates."""
object: str = "list"
data: List[str] = Field(default_factory=list)
class TemplateSwitchRequest(BaseModel):
"""Request to switch a template."""
name: str

endpoints/OAI/types/token.py (new file)
@@ -0,0 +1,50 @@
""" Tokenization types """
from pydantic import BaseModel
from typing import List
class CommonTokenRequest(BaseModel):
"""Represents a common tokenization request."""
add_bos_token: bool = True
encode_special_tokens: bool = True
decode_special_tokens: bool = True
def get_params(self):
"""Get the parameters for tokenization."""
return {
"add_bos_token": self.add_bos_token,
"encode_special_tokens": self.encode_special_tokens,
"decode_special_tokens": self.decode_special_tokens,
}
class TokenEncodeRequest(CommonTokenRequest):
"""Represents a tokenization request."""
text: str
class TokenEncodeResponse(BaseModel):
"""Represents a tokenization response."""
tokens: List[int]
length: int
class TokenDecodeRequest(CommonTokenRequest):
""" " Represents a detokenization request."""
tokens: List[int]
class TokenDecodeResponse(BaseModel):
"""Represents a detokenization response."""
text: str
class TokenCountResponse(BaseModel):
"""Represents a token count response."""
length: int
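
get_params is what the encode and decode endpoints splat into the model container's tokenizer calls, for example:

request = TokenEncodeRequest(text="Hello world", add_bos_token=False)
print(request.get_params())
# -> {'add_bos_token': False, 'encode_special_tokens': True,
#     'decode_special_tokens': True}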

endpoints/OAI/utils/completion.py (new file)
@@ -0,0 +1,154 @@
""" Utility functions for the OpenAI server. """
from typing import Optional
from common.utils import unwrap
from endpoints.OAI.types.chat_completion import (
ChatCompletionLogprobs,
ChatCompletionLogprob,
ChatCompletionMessage,
ChatCompletionRespChoice,
ChatCompletionStreamChunk,
ChatCompletionResponse,
ChatCompletionStreamChoice,
)
from endpoints.OAI.types.completion import (
CompletionResponse,
CompletionRespChoice,
CompletionLogProbs,
)
from endpoints.OAI.types.common import UsageStats
def create_completion_response(generation: dict, model_name: Optional[str]):
"""Create a completion response from the provided text."""
logprob_response = None
token_probs = unwrap(generation.get("token_probs"), {})
if token_probs:
logprobs = unwrap(generation.get("logprobs"), [])
offset = unwrap(generation.get("offset"), [])
logprob_response = CompletionLogProbs(
text_offset=offset if isinstance(offset, list) else [offset],
token_logprobs=token_probs.values(),
tokens=token_probs.keys(),
top_logprobs=logprobs if isinstance(logprobs, list) else [logprobs],
)
choice = CompletionRespChoice(
finish_reason="Generated",
text=unwrap(generation.get("text"), ""),
logprobs=logprob_response,
)
prompt_tokens = unwrap(generation.get("prompt_tokens"), 0)
completion_tokens = unwrap(generation.get("generated_tokens"), 0)
response = CompletionResponse(
choices=[choice],
model=unwrap(model_name, ""),
usage=UsageStats(
prompt_tokens=prompt_tokens,
completion_tokens=completion_tokens,
total_tokens=prompt_tokens + completion_tokens,
),
)
return response
def create_chat_completion_response(generation: dict, model_name: Optional[str]):
"""Create a chat completion response from the provided text."""
message = ChatCompletionMessage(
role="assistant", content=unwrap(generation.get("text"), "")
)
logprob_response = None
token_probs = unwrap(generation.get("token_probs"), {})
if token_probs:
logprobs = unwrap(generation.get("logprobs"), [])
collected_token_probs = []
for index, token in enumerate(token_probs.keys()):
top_logprobs = [
ChatCompletionLogprob(token=token, logprob=logprob)
for token, logprob in logprobs[index].items()
]
collected_token_probs.append(
ChatCompletionLogprob(
token=token,
logprob=token_probs[token],
top_logprobs=top_logprobs,
)
)
logprob_response = ChatCompletionLogprobs(content=collected_token_probs)
choice = ChatCompletionRespChoice(
finish_reason="Generated", message=message, logprobs=logprob_response
)
prompt_tokens = unwrap(generation.get("prompt_tokens"), 0)
completion_tokens = unwrap(generation.get("generated_tokens"), 0)
response = ChatCompletionResponse(
choices=[choice],
model=unwrap(model_name, ""),
usage=UsageStats(
prompt_tokens=prompt_tokens,
completion_tokens=completion_tokens,
total_tokens=prompt_tokens + completion_tokens,
),
)
return response
def create_chat_completion_stream_chunk(
const_id: str,
generation: Optional[dict] = None,
model_name: Optional[str] = None,
finish_reason: Optional[str] = None,
):
"""Create a chat completion stream chunk from the provided text."""
logprob_response = None
if finish_reason:
message = {}
else:
message = ChatCompletionMessage(
role="assistant", content=unwrap(generation.get("text"), "")
)
token_probs = unwrap(generation.get("token_probs"), {})
if token_probs:
logprobs = unwrap(generation.get("logprobs"), {})
top_logprobs = [
ChatCompletionLogprob(token=token, logprob=logprob)
for token, logprob in logprobs.items()
]
generated_token = next(iter(token_probs))
token_prob_response = ChatCompletionLogprob(
token=generated_token,
logprob=token_probs[generated_token],
top_logprobs=top_logprobs,
)
logprob_response = ChatCompletionLogprobs(content=[token_prob_response])
# The finish reason can be None
choice = ChatCompletionStreamChoice(
finish_reason=finish_reason, delta=message, logprobs=logprob_response
)
chunk = ChatCompletionStreamChunk(
id=const_id, choices=[choice], model=unwrap(model_name, "")
)
return chunk
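
These helpers only read a handful of keys from the backend's generation dict, so a faked dict is enough to exercise the non-logprob path (key names taken from the unwrap calls above):

generation = {"text": "Hello!", "prompt_tokens": 5, "generated_tokens": 2}
response = create_completion_response(generation, "example-model")
print(response.usage.total_tokens)  # -> 7
print(response.choices[0].text)  # -> Hello!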

endpoints/OAI/utils/lora.py (new file)
@@ -0,0 +1,14 @@
import pathlib
from endpoints.OAI.types.lora import LoraCard, LoraList
def get_lora_list(lora_path: pathlib.Path):
"""Get the list of Lora cards from the provided path."""
lora_list = LoraList()
for path in lora_path.iterdir():
if path.is_dir():
lora_card = LoraCard(id=path.name)
lora_list.data.append(lora_card) # pylint: disable=no-member
return lora_list
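
Every immediate subdirectory becomes a LoraCard, so listing is just (assuming a local loras/ directory exists):

import pathlib

loras = get_lora_list(pathlib.Path("loras").resolve())
print([card.id for card in loras.data])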

endpoints/OAI/utils/model.py (new file)
@@ -0,0 +1,22 @@
import pathlib
from typing import Optional
from endpoints.OAI.types.model import ModelCard, ModelList
def get_model_list(model_path: pathlib.Path, draft_model_path: Optional[str] = None):
"""Get the list of models from the provided path."""
# Convert the provided draft model path to a pathlib path for
# equality comparisons
if draft_model_path:
draft_model_path = pathlib.Path(draft_model_path).resolve()
model_card_list = ModelList()
for path in model_path.iterdir():
# Don't include the draft models path
if path.is_dir() and path != draft_model_path:
model_card = ModelCard(id=path.name)
model_card_list.data.append(model_card) # pylint: disable=no-member
return model_card_list
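
Same pattern as the lora listing, with the draft model directory excluded by path equality. Directory names here are placeholders:

import pathlib

models = get_model_list(
    pathlib.Path("models").resolve(), draft_model_path="models/draft"
)
print([card.id for card in models.data])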