Refactor the sampling class (#199)
* Improve validation
* Remove the to_gen_params functions
* Update the changes for all endpoint types
* OAI: Fix calls to generation. Chat completion and completion need to have the prompt split out before pushing it to the backend.
* Sampling: Convert Top-K values of -1 to 0. Some OAI implementations use -1 as "disabled" instead of 0, so add a coalesce case (sketched below).
* Sampling: Format and space out. Make the code more readable.
* Sampling: Fix mirostat. Field items are nested in data within a Pydantic FieldInfo.
* Sampling: Format
* Sampling: Fix banned_tokens and allowed_tokens conversion. If the provided string has whitespace, trim it before splitting.
* Sampling: Add a helpful log to dry_sequence_breakers. Let the user know if the sequence errors out.
* Sampling: Apply validators in the right order. Validators are applied in order from top to bottom, which is why the after-validator was not being applied properly. Set the model to validate default params for sampler override purposes; this can be turned off if there are unclear errors.
* Endpoints: Format. Clean up and semantically fix field validators.
* Kobold: Update validators and fix parameter application. Validators on parent fields cannot see child fields, so validate using the child fields instead and alter the parent field data from there. Also fix badwordsids casting.
* Sampling: Remove validate defaults and fix mirostat. If a user sets an override to a non-default value, that's their own fault. Run the validator on the actual mirostat_mode parameter rather than the alternate mirostat parameter.
* Kobold: Rework badwordsids. Currently this only serves to ban the EOS token; all other functionality was legacy, so remove it.
* Model: Remove HuggingfaceConfig. This was only necessary for badwordsids; all other fields are handled by exl2. Keep the class as a stub in case it's needed again.
* Kobold: Bump the kcpp impersonation version. TabbyAPI supports XTC now.
* Sampling: Change alias to validation_alias. This reduces the probability of errors and makes the class consistent.
* OAI: Use constraints for validation. Instead of adding a model_validator, use the greater-than-or-equal-to constraints provided by Pydantic.
* Tree: Lint

Signed-off-by: kingbri <bdashore3@proton.me>
Co-authored-by: SecretiveShell <84923604+SecretiveShell@users.noreply.github.com>
Co-authored-by: kingbri <bdashore3@proton.me>
Parent: 6e48bb420a
Commit: 7d18d2e2ca

10 changed files with 148 additions and 255 deletions
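The Top-K coalesce mentioned in the message above is not visible in the hunks that follow. A minimal sketch of the idea, using a standalone model rather than TabbyAPI's actual sampler class (the field and validator names here are assumptions):

    from typing import Optional

    from pydantic import BaseModel, field_validator


    class SamplerSketch(BaseModel):
        # Hypothetical field: 0 means "disabled" internally
        top_k: Optional[int] = 0

        # Some OAI clients send -1 for "disabled", so coalesce it to 0
        @field_validator("top_k", mode="before")
        def coalesce_top_k(cls, v):
            return 0 if v == -1 else v


    assert SamplerSketch(top_k=-1).top_k == 0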
@@ -137,7 +137,7 @@ async def get_version():
 async def get_extra_version():
     """Impersonate Koboldcpp."""

-    return {"result": "KoboldCpp", "version": "1.71"}
+    return {"result": "KoboldCpp", "version": "1.74"}


 @kai_router.get("/config/soft_prompts_list")
@@ -1,9 +1,9 @@
+from functools import partial
+from pydantic import BaseModel, Field, field_validator
 from typing import List, Optional

-from pydantic import BaseModel, Field
 from common import model
 from common.sampling import BaseSamplerRequest, get_default_sampler_value
-from common.utils import flat_map, unwrap
+from common.utils import unwrap


 class GenerateRequest(BaseSamplerRequest):
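The import of partial above pairs with the default_factory change in the next hunk. Field(default_factory=...) expects a callable: the old line called get_default_sampler_value("dynatemp_range") once while the class body was being executed and passed its result as the factory, whereas partial(...) passes a real callable that performs the lookup each time a default is needed. A small illustration (the override table and function body are stand-ins, not the real common.sampling module):

    from functools import partial

    # Stand-in for common.sampling's override table and lookup (assumed shape)
    _overrides = {"dynatemp_range": 0.5}


    def get_default_sampler_value(key, fallback=None):
        return _overrides.get(key, fallback)


    # A real callable: the lookup runs when Pydantic needs a default,
    # not once when the class body is executed
    factory = partial(get_default_sampler_value, "dynatemp_range")

    _overrides["dynatemp_range"] = 0.25
    assert factory() == 0.25  # picks up the current override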
@@ -11,29 +11,31 @@ class GenerateRequest(BaseSamplerRequest):
     genkey: Optional[str] = None
     use_default_badwordsids: Optional[bool] = False
     dynatemp_range: Optional[float] = Field(
-        default_factory=get_default_sampler_value("dynatemp_range")
+        default_factory=partial(get_default_sampler_value, "dynatemp_range")
     )

-    def to_gen_params(self, **kwargs):
-        # Exl2 uses -1 to include all tokens in repetition penalty
-        if self.penalty_range == 0:
-            self.penalty_range = -1
+    # Validate on the parent class's fields
+    @field_validator("penalty_range", mode="before")
+    def validate_penalty_range(cls, v):
+        return -1 if v == 0 else v

-        if self.dynatemp_range:
-            self.min_temp = self.temperature - self.dynatemp_range
-            self.max_temp = self.temperature + self.dynatemp_range
+    @field_validator("dynatemp_range", mode="before")
+    def validate_temp_range(cls, v, field_info):
+        if v > 0:
+            # A default temperature is always 1
+            temperature = unwrap(field_info.data.get("temperature"), 1)

-        # Move badwordsids into banned tokens for generation
-        if self.use_default_badwordsids:
-            bad_words_ids = unwrap(
-                model.container.generation_config.bad_words_ids,
-                model.container.hf_config.get_badwordsids(),
-            )
+            field_info.data["min_temp"] = temperature - v
+            field_info.data["max_temp"] = temperature + v

-            if bad_words_ids:
-                self.banned_tokens += flat_map(bad_words_ids)
+        return v

-        return super().to_gen_params(**kwargs)
+    # Currently only serves to ban EOS token, but can change
+    @field_validator("use_default_badwordsids", mode="before")
+    def validate_badwordsids(cls, v, field_info):
+        field_info.data["ban_eos_token"] = v
+
+        return v


 class GenerateResponseResult(BaseModel):
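The hunk above leans on two Pydantic v2 behaviors called out in the commit message: field validators run in declaration order, and a validator only sees already-validated fields through info.data (accessed as field_info.data in the diff). A minimal sketch of the visibility rule, separate from the TabbyAPI classes:

    from pydantic import BaseModel, ValidationInfo, field_validator


    class OrderSketch(BaseModel):
        # Declared first, so it is validated before dynatemp_range
        temperature: float = 1.0
        dynatemp_range: float = 0.0

        @field_validator("dynatemp_range")
        def check_range(cls, v, info: ValidationInfo):
            # info.data holds only the fields validated above this one
            temperature = info.data.get("temperature", 1)
            if v > 0 and temperature - v < 0:
                raise ValueError("dynatemp_range would push min temp below 0")
            return v

Declaration order is what the "apply validators in the right order" bullet refers to: a validator can only read fields that were validated before it, which is why the range math now lives on dynatemp_range itself and reads temperature through info.data.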
@@ -53,7 +53,7 @@ async def _stream_collector(data: GenerateRequest, request: Request):
     logger.info(f"Received Kobold generation request {data.genkey}")

     generator = model.container.generate_gen(
-        data.prompt, data.genkey, abort_event, **data.to_gen_params()
+        request_id=data.genkey, abort_event=abort_event, **data.model_dump()
     )
     async for generation in generator:
         if disconnect_task.done():
@@ -33,12 +33,16 @@ class CommonCompletionRequest(BaseSamplerRequest):
     stream: Optional[bool] = False
     stream_options: Optional[ChatCompletionStreamOptions] = None
     logprobs: Optional[int] = Field(
-        default_factory=lambda: get_default_sampler_value("logprobs", 0)
+        default_factory=lambda: get_default_sampler_value("logprobs", 0),
+        ge=0,
     )
     response_format: Optional[CompletionResponseFormat] = Field(
         default_factory=CompletionResponseFormat
     )
-    n: Optional[int] = Field(default_factory=lambda: get_default_sampler_value("n", 1))
+    n: Optional[int] = Field(
+        default_factory=lambda: get_default_sampler_value("n", 1),
+        ge=1,
+    )

     # Extra OAI request stuff
     best_of: Optional[int] = Field(
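The ge constraints above replace the hand-rolled check in validate_params, which the next hunk removes: out-of-range values now fail at parse time with a structured ValidationError instead of a bare ValueError. A standalone sketch:

    from typing import Optional

    from pydantic import BaseModel, Field, ValidationError


    class NSketch(BaseModel):
        # ge=1 replaces a hand-written "if self.n < 1: raise ValueError(...)"
        n: Optional[int] = Field(default=1, ge=1)


    try:
        NSketch(n=0)
    except ValidationError as e:
        print(e)  # reports "greater than or equal to 1" for field n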
@@ -53,18 +57,3 @@ class CommonCompletionRequest(BaseSamplerRequest):
     user: Optional[str] = Field(
         description="Not parsed. Only used for OAI compliance.", default=None
     )
-
-    def validate_params(self):
-        # Temperature
-        if self.n < 1:
-            raise ValueError(f"n must be greater than or equal to 1. Got {self.n}")
-
-        return super().validate_params()
-
-    def to_gen_params(self):
-        extra_gen_params = {
-            "stream": self.stream,
-            "logprobs": self.logprobs,
-        }
-
-        return super().to_gen_params(**extra_gen_params)
@@ -3,7 +3,6 @@
 import asyncio
 import pathlib
 from asyncio import CancelledError
-from copy import deepcopy
 from typing import List, Optional
 import json

@@ -291,13 +290,8 @@ async def stream_generate_chat_completion(
     try:
         logger.info(f"Received chat completion streaming request {request.state.id}")

-        gen_params = data.to_gen_params()
-
         for n in range(0, data.n):
-            if n > 0:
-                task_gen_params = deepcopy(gen_params)
-            else:
-                task_gen_params = gen_params
+            task_gen_params = data.model_copy(deep=True)

             gen_task = asyncio.create_task(
                 _stream_collector(
@@ -306,7 +300,7 @@ async def stream_generate_chat_completion(
                     prompt,
                     request.state.id,
                     abort_event,
-                    **task_gen_params,
+                    **task_gen_params.model_dump(exclude={"prompt"}),
                 )
             )
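This is the "prompt split out" fix from the commit message: the prompt travels as a field on the request model, so it has to be excluded when the remaining fields are splatted into the backend call, or the prompt argument would be passed twice. A sketch with assumed names (fake_generate stands in for the real backend signature):

    from pydantic import BaseModel


    class RequestSketch(BaseModel):
        prompt: str = "hello"
        temperature: float = 1.0
        top_k: int = 0


    def fake_generate(prompt, request_id, **kwargs):
        # A hypothetical backend: the prompt arrives positionally, and
        # kwargs must not contain a second "prompt" key
        return prompt, request_id, kwargs


    req = RequestSketch(prompt="hi", temperature=0.7)
    # exclude={"prompt"} prevents "got multiple values for argument 'prompt'"
    print(fake_generate(req.prompt, "req-1", **req.model_dump(exclude={"prompt"})))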
@@ -381,21 +375,13 @@ async def generate_chat_completion(
     prompt: str, data: ChatCompletionRequest, request: Request, model_path: pathlib.Path
 ):
     gen_tasks: List[asyncio.Task] = []
-    gen_params = data.to_gen_params()

     try:
-        for n in range(0, data.n):
-            # Deepcopy gen params above the first index
-            # to ensure nested structures aren't shared
-            if n > 0:
-                task_gen_params = deepcopy(gen_params)
-            else:
-                task_gen_params = gen_params
-
+        for _ in range(0, data.n):
             gen_tasks.append(
                 asyncio.create_task(
                     model.container.generate(
-                        prompt, request.state.id, **task_gen_params
+                        prompt, request.state.id, **data.model_dump(exclude={"prompt"})
                     )
                 )
             )
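model_copy(deep=True), used throughout these hunks, is Pydantic v2's replacement for the removed deepcopy dance: each generation task gets an independent request object, so nested structures such as token lists are not shared between tasks. A minimal illustration:

    from typing import List

    from pydantic import BaseModel


    class CopySketch(BaseModel):
        banned_tokens: List[int] = []


    original = CopySketch(banned_tokens=[1, 2])

    shallow = original.model_copy()
    deep = original.model_copy(deep=True)

    shallow.banned_tokens.append(3)  # mutates the original's list too
    deep.banned_tokens.append(4)     # independent nested structure

    print(original.banned_tokens)  # [1, 2, 3]
    print(deep.banned_tokens)      # [1, 2, 4]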
@@ -433,9 +419,9 @@ async def generate_tool_calls(

     # Copy to make sure the parent JSON schema doesn't get modified
     # FIXME: May not be necessary depending on how the codebase evolves
-    tool_data = deepcopy(data)
+    tool_data = data.model_copy(deep=True)
     tool_data.json_schema = tool_data.tool_call_schema
-    gen_params = tool_data.to_gen_params()
+    gen_params = tool_data.model_dump()

     for idx, gen in enumerate(generations):
         if gen["stop_str"] in tool_data.tool_call_start:
@@ -7,7 +7,6 @@ Also serves as a common module for completions and chat completions.
 import asyncio
 import pathlib
 from asyncio import CancelledError
-from copy import deepcopy
 from fastapi import HTTPException, Request
 from typing import List, Union

@@ -169,13 +168,8 @@ async def stream_generate_completion(
     try:
         logger.info(f"Received streaming completion request {request.state.id}")

-        gen_params = data.to_gen_params()
-
         for n in range(0, data.n):
-            if n > 0:
-                task_gen_params = deepcopy(gen_params)
-            else:
-                task_gen_params = gen_params
+            task_gen_params = data.model_copy(deep=True)

             gen_task = asyncio.create_task(
                 _stream_collector(
@@ -184,7 +178,7 @@ async def stream_generate_completion(
                     data.prompt,
                     request.state.id,
                     abort_event,
-                    **task_gen_params,
+                    **task_gen_params.model_dump(exclude={"prompt"}),
                 )
             )
@@ -232,23 +226,19 @@ async def generate_completion(
     """Non-streaming generate for completions"""

     gen_tasks: List[asyncio.Task] = []
-    gen_params = data.to_gen_params()

     try:
         logger.info(f"Recieved completion request {request.state.id}")

-        for n in range(0, data.n):
-            # Deepcopy gen params above the first index
-            # to ensure nested structures aren't shared
-            if n > 0:
-                task_gen_params = deepcopy(gen_params)
-            else:
-                task_gen_params = gen_params
+        for _ in range(0, data.n):
+            task_gen_params = data.model_copy(deep=True)

             gen_tasks.append(
                 asyncio.create_task(
                     model.container.generate(
-                        data.prompt, request.state.id, **task_gen_params
+                        data.prompt,
+                        request.state.id,
+                        **task_gen_params.model_dump(exclude={"prompt"}),
                     )
                 )
             )
@@ -36,27 +36,28 @@ def setup_app(host: Optional[str] = None, port: Optional[int] = None):
     )

     api_servers = config.network.api_servers
+    api_servers = (
+        api_servers
+        if api_servers
+        else [
+            "oai",
+        ]
+    )

     # Map for API id to server router
     router_mapping = {"oai": OAIRouter, "kobold": KoboldRouter}

     # Include the OAI api by default
-    if api_servers:
-        for server in api_servers:
-            selected_server = router_mapping.get(server.lower())
+    for server in api_servers:
+        selected_server = router_mapping.get(server.lower())

-            if selected_server:
-                app.include_router(selected_server.setup())
+        if selected_server:
+            app.include_router(selected_server.setup())

-                logger.info(f"Starting {selected_server.api_name} API")
-                for path, url in selected_server.urls.items():
-                    formatted_url = url.format(host=host, port=port)
-                    logger.info(f"{path}: {formatted_url}")
-    else:
-        app.include_router(OAIRouter.setup())
-        for path, url in OAIRouter.urls.items():
-            formatted_url = url.format(host=host, port=port)
-            logger.info(f"{path}: {formatted_url}")
+            logger.info(f"Starting {selected_server.api_name} API")
+            for path, url in selected_server.urls.items():
+                formatted_url = url.format(host=host, port=port)
+                logger.info(f"{path}: {formatted_url}")

     # Include core API request paths
     app.include_router(CoreRouter)
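Normalizing api_servers to a default of ["oai"] up front is what lets the old if/else duplication collapse into a single loop. Reduced to its core (normalize_api_servers is a hypothetical helper for illustration; for a list-or-None value, `or` behaves the same as the conditional expression used in the diff):

    def normalize_api_servers(api_servers):
        # Equivalent to: api_servers if api_servers else ["oai"]
        return api_servers or ["oai"]

    assert normalize_api_servers(None) == ["oai"]
    assert normalize_api_servers([]) == ["oai"]
    assert normalize_api_servers(["kobold"]) == ["kobold"]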