Model: Remove override_base_seq_len
parent 7d18d2e2ca
commit 603760cecb
5 changed files with 0 additions and 33 deletions
@@ -92,7 +92,6 @@
 "# @markdown ---\n",
 "# @markdown Model parameters:\n",
 "ContextSize = 4096 # @param {type:\"integer\"}\n",
-"OverrideBaseSeqLen = 4096 # @param {type:\"integer\"}\n",
 "RopeScale = 1.0 # @param {type:\"number\"}\n",
 "RopeAlpha = 1.0 # @param {type:\"number\"}\n",
 "NumExpertsPerToken = 2 # @param {type:\"integer\"}\n",
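For context, the `# @param` comments above are Colab form-field annotations; at runtime each line is an ordinary Python assignment. After this commit the model-parameters cell reduces to roughly the sketch below (values copied from the context lines of the hunk above, nothing beyond the diff itself):

# Colab renders the "# @param" annotations as form fields; the values are
# plain Python variables the notebook can interpolate into its config.
ContextSize = 4096        # @param {type:"integer"}
RopeScale = 1.0           # @param {type:"number"}
RopeAlpha = 1.0           # @param {type:"number"}
NumExpertsPerToken = 2    # @param {type:"integer"}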
@@ -169,11 +168,6 @@
 " # Fetched from the model's base sequence length in config.json by default\n",
 " max_seq_len: {ContextSize}\n",
 "\n",
-" # Overrides base model context length (default: None)\n",
-" # WARNING: Don't set this unless you know what you're doing!\n",
-" # Only use this if the model's base sequence length in config.json is incorrect (ex. Mistral/Mixtral models)\n",
-" override_base_seq_len: {OverrideBaseSeqLen}\n",
-"\n",
 " # Automatically allocate resources to GPUs (default: True)\n",
 " gpu_split_auto: True\n",
 "\n",
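As a sanity check on the result, here is a minimal sketch of how the surviving template lines render once the placeholder is substituted. The f-string is illustrative only; the notebook's actual substitution mechanism is not shown in this diff, and ContextSize is assumed to be left at its default of 4096:

# Illustrative only: render the remaining config template with the default
# ContextSize; override_base_seq_len no longer appears in the output.
ContextSize = 4096

config_snippet = f"""
  # Fetched from the model's base sequence length in config.json by default
  max_seq_len: {ContextSize}

  # Automatically allocate resources to GPUs (default: True)
  gpu_split_auto: True
"""
print(config_snippet)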
|||
Loading…
Add table
Add a link
Reference in a new issue