36 changes: 30 additions & 6 deletions README.md
@@ -340,9 +340,8 @@ END
" :AI
" - provider: AI provider
" - prompt: optional prepended prompt
" - options: openai config (see https://platform.openai.com/docs/api-reference/completions)
" - options: openai config (see https://platform.openai.com/docs/api-reference/chat)
" - options.initial_prompt: prompt prepended to every chat request (list of lines or string)
" - options.temperature: use -1 to disable this parameter
" - options.request_timeout: request timeout in seconds
" - options.auth_type: API authentication method (bearer, api-key, none)
" - options.token_file_path: override global token configuration
@@ -365,6 +364,15 @@ let g:vim_ai_complete = {
\ "token_load_fn": "",
\ "selection_boundary": "#####",
\ "initial_prompt": s:initial_complete_prompt,
\ "frequency_penalty": "",
\ "logit_bias": "",
\ "logprobs": "",
\ "presence_penalty": "",
\ "reasoning_effort": "",
\ "seed": "",
\ "stop": "",
\ "top_logprobs": "",
\ "top_p": "",
\ },
\ "ui": {
\ "paste_mode": 1,
@@ -374,9 +382,8 @@
" :AIEdit
" - provider: AI provider
" - prompt: optional prepended prompt
" - options: openai config (see https://platform.openai.com/docs/api-reference/completions)
" - options: openai config (see https://platform.openai.com/docs/api-reference/chat)
" - options.initial_prompt: prompt prepended to every chat request (list of lines or string)
" - options.temperature: use -1 to disable this parameter
" - options.request_timeout: request timeout in seconds
" - options.auth_type: API authentication method (bearer, api-key, none)
" - options.token_file_path: override global token configuration
@@ -399,6 +406,15 @@ let g:vim_ai_edit = {
\ "token_load_fn": "",
\ "selection_boundary": "#####",
\ "initial_prompt": s:initial_complete_prompt,
\ "frequency_penalty": "",
\ "logit_bias": "",
\ "logprobs": "",
\ "presence_penalty": "",
\ "reasoning_effort": "",
\ "seed": "",
\ "stop": "",
\ "top_logprobs": "",
\ "top_p": "",
\ },
\ "ui": {
\ "paste_mode": 1,
@@ -418,7 +434,6 @@ END
" - prompt: optional prepended prompt
" - options: openai config (see https://platform.openai.com/docs/api-reference/chat)
" - options.initial_prompt: prompt prepended to every chat request (list of lines or string)
" - options.temperature: use -1 to disable this parameter
" - options.request_timeout: request timeout in seconds
" - options.auth_type: API authentication method (bearer, api-key, none)
" - options.token_file_path: override global token configuration
@@ -445,6 +460,15 @@ let g:vim_ai_chat = {
\ "token_load_fn": "",
\ "selection_boundary": "",
\ "initial_prompt": s:initial_chat_prompt,
\ "frequency_penalty": "",
\ "logit_bias": "",
\ "logprobs": "",
\ "presence_penalty": "",
\ "reasoning_effort": "",
\ "seed": "",
\ "stop": "",
\ "top_logprobs": "",
\ "top_p": "",
\ },
\ "ui": {
\ "open_chat_command": "preset_below",
@@ -510,7 +534,7 @@ let g:vim_ai_debug_log_file = "/tmp/vim_ai_debug.log"
" - find out more in vim's help `:help paste`
" options.max_tokens
" - note that prompt + max_tokens must be less than model's token limit, see #42, #46
" - setting max tokens to 0 will exclude it from the OpenAI API request parameters, it is
" - setting max tokens to "" will exclude it from the OpenAI API request parameters, it is
" unclear/undocumented what it exactly does, but it seems to resolve issues when the model
" hits token limit, which respond with `OpenAI: HTTPError 400`
" options.selection_boundary
27 changes: 27 additions & 0 deletions doc/vim-ai.txt
@@ -45,6 +45,15 @@ Options: >
\ "token_load_fn": "",
\ "selection_boundary": "#####",
\ "initial_prompt": s:initial_complete_prompt,
\ "frequency_penalty": "",
\ "logit_bias": "",
\ "logprobs": "",
\ "presence_penalty": "",
\ "reasoning_effort": "",
\ "seed": "",
\ "stop": "",
\ "top_logprobs": "",
\ "top_p": "",
\ },
\ "ui": {
\ "paste_mode": 1,
@@ -86,6 +95,15 @@ Options: >
\ "token_load_fn": "",
\ "selection_boundary": "#####",
\ "initial_prompt": s:initial_complete_prompt,
\ "frequency_penalty": "",
\ "logit_bias": "",
\ "logprobs": "",
\ "presence_penalty": "",
\ "reasoning_effort": "",
\ "seed": "",
\ "stop": "",
\ "top_logprobs": "",
\ "top_p": "",
\ },
\ "ui": {
\ "paste_mode": 1,
@@ -125,6 +143,15 @@ Options: >
\ "token_load_fn": "",
\ "selection_boundary": "",
\ "initial_prompt": s:initial_chat_prompt,
\ "frequency_penalty": "",
\ "logit_bias": "",
\ "logprobs": "",
\ "presence_penalty": "",
\ "reasoning_effort": "",
\ "seed": "",
\ "stop": "",
\ "top_logprobs": "",
\ "top_p": "",
\ },
\ "ui": {
\ "populate_options": 0,
4 changes: 4 additions & 0 deletions py/chat.py
@@ -25,6 +25,10 @@ def _populate_options(config):

if default_value == value:
continue # do not show default values

if not isinstance(value, str):
value = str(value)

vim.command("normal! ioptions." + key + "=" + value + "\n")

def run_ai_chat(context):
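
A minimal sketch of the coercion added above, not part of the diff and using hypothetical option values: numeric options such as `seed` are stringified before being inserted into the chat header, because the `normal! i...` command built above only concatenates strings.

options = {"seed": 12345, "top_p": "0.9", "stream": 1}
for key, value in options.items():
    if not isinstance(value, str):
        value = str(value)  # mirror of the check above: vim.command needs text
    print("options." + key + "=" + value)
# options.seed=12345
# options.top_p=0.9
# options.stream=1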
80 changes: 62 additions & 18 deletions py/providers/openai.py
@@ -27,7 +27,7 @@ def request(self, messages: Sequence[AIMessage]) -> Iterator[AIResponseChunk]:
options = self.options
openai_options = self._make_openai_options(options)
http_options = {
'request_timeout': options['request_timeout'],
'request_timeout': options.get('request_timeout') or 20,
'auth_type': options['auth_type'],
'token_file_path': options['token_file_path'],
'token_load_fn': options['token_load_fn'],
@@ -49,7 +49,7 @@ def _flatten_content(messages):
url = options['endpoint_url']
response = self._openai_request(url, request, http_options)

_choice_key = 'delta' if openai_options['stream'] else 'message'
_choice_key = 'delta' if openai_options.get('stream') else 'message'

def _get_delta(resp):
choices = resp.get('choices') or [{}]
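
A standalone sketch of the delta/message selection above, with hypothetical response data assuming OpenAI-style chat payloads: streaming chunks carry partial content under `delta`, while non-streaming responses carry the full `message`, which is why `stream` decides the key.

def get_content(resp, streaming):
    key = 'delta' if streaming else 'message'
    choices = resp.get('choices') or [{}]  # tolerate empty/missing choices
    return choices[0].get(key, {}).get('content', '')

print(get_content({'choices': [{'delta': {'content': 'Hel'}}]}, True))      # Hel
print(get_content({'choices': [{'message': {'content': 'Hello'}}]}, False)) # Hello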
@@ -96,31 +96,75 @@ def _parse_raw_options(self, raw_options: Mapping[str, Any]):
raise self.utils.make_known_error("`enable_auth = 0` option is no longer supported. use `auth_type = none` instead")

options = {**raw_options}
options['request_timeout'] = float(options['request_timeout'])

def _convert_option(name, converter):
if name in options and isinstance(options[name], str) and options[name] != '':
try:
options[name] = converter(options[name])
except (ValueError, TypeError, json.JSONDecodeError) as e:
raise self.utils.make_known_error(f"Invalid value for option '{name}': {options[name]}. Error: {e}")

_convert_option('request_timeout', float)

if self.command_type != 'image':
options['max_tokens'] = int(options['max_tokens'])
options['max_completion_tokens'] = int(options['max_completion_tokens'])
options['temperature'] = float(options['temperature'])
options['stream'] = bool(int(options['stream']))
_convert_option('stream', lambda x: bool(int(x)))
_convert_option('max_tokens', int)
_convert_option('max_completion_tokens', int)
_convert_option('temperature', float)
_convert_option('frequency_penalty', float)
_convert_option('presence_penalty', float)
_convert_option('top_p', float)
_convert_option('seed', int)
_convert_option('top_logprobs', int)
_convert_option('logprobs', lambda x: bool(int(x)))
_convert_option('stop', json.loads)
_convert_option('logit_bias', json.loads)
# reasoning_effort is a string, no conversion needed

return options
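
A standalone sketch of the conversion rule introduced above, not part of the diff: string values are parsed with the given converter, while empty strings pass through untouched so `_make_openai_options` can later drop them from the request.

import json

def convert_option(options, name, converter):
    # same guard as _convert_option above: only non-empty strings are converted
    if name in options and isinstance(options[name], str) and options[name] != '':
        options[name] = converter(options[name])

opts = {'seed': '12345', 'logit_bias': '{"2435": -100}', 'top_p': ''}
convert_option(opts, 'seed', int)
convert_option(opts, 'logit_bias', json.loads)
convert_option(opts, 'top_p', float)
print(opts)  # {'seed': 12345, 'logit_bias': {'2435': -100}, 'top_p': ''}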

def _make_openai_options(self, options):
max_tokens = options['max_tokens']
max_completion_tokens = options['max_completion_tokens']
result = {
'model': options['model'],
'stream': options['stream'],
}
if options['temperature'] > -1:
result['temperature'] = options['temperature']

if 'web_search_options' in options:
result['web_search_options'] = options['web_search_options']
option_keys = [
'stream',
'temperature',
'max_tokens',
'max_completion_tokens',
'web_search_options',
'frequency_penalty',
'logit_bias',
'logprobs',
'presence_penalty',
'reasoning_effort',
'seed',
'stop',
'top_logprobs',
'top_p',
]

for key in option_keys:
if key not in options:
continue

value = options[key]

if value == '':
continue

# Backward compatibility: before the empty string "" convention,
# the values below were used to exclude these params from the request
if key == 'temperature' and value == -1:
continue
if key == 'max_tokens' and value == 0:
continue
if key == 'max_completion_tokens' and value == 0:
continue

result[key] = value

if max_tokens > 0:
result['max_tokens'] = max_tokens
if max_completion_tokens > 0:
result['max_completion_tokens'] = max_completion_tokens
return result
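
A minimal sketch of the filtering behaviour above, with hypothetical option values: anything set to the empty string is omitted from the request, and the legacy sentinels (-1 for temperature, 0 for max_tokens / max_completion_tokens) are still honoured for backward compatibility.

def make_request_options(options):
    result = {'model': options['model']}
    for key in ('stream', 'temperature', 'max_tokens', 'top_p'):
        value = options.get(key, '')
        if value == '':
            continue  # "" means: exclude this param from the request
        if key == 'temperature' and value == -1:
            continue  # legacy sentinel
        if key == 'max_tokens' and value == 0:
            continue  # legacy sentinel
        result[key] = value
    return result

print(make_request_options(
    {'model': 'gpt-4o', 'stream': True, 'temperature': -1, 'max_tokens': '', 'top_p': 0.9}
))  # {'model': 'gpt-4o', 'stream': True, 'top_p': 0.9}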

def request_image(self, prompt: str) -> list[AIImageResponseChunk]:
42 changes: 35 additions & 7 deletions tests/context_test.py
@@ -12,6 +12,15 @@
"token_load_fn": "",
"selection_boundary": "",
"initial_prompt": "You are a general assistant.",
"frequency_penalty": "",
"logit_bias": "",
"logprobs": "",
"presence_penalty": "",
"reasoning_effort": "",
"seed": "",
"stop": "",
"top_logprobs": "",
"top_p": "",
},
"ui": {
"open_chat_command": "preset_below",
@@ -195,14 +204,33 @@ def test_selection_boundary():
assert 'fix grammar:\n###\nhelo word\n###' == make_prompt( '', 'fix grammar', 'helo word', '###')
assert 'fix grammar:\n###\nhelo word\n###' == make_prompt( 'fix grammar', '', 'helo word', '###')

def test_markdown_selection_boundary(mocker):
def test_markdown_selection_boundary():
# add file type to markdown boundary
mocker.patch('vim.eval').return_value = "python"
assert 'fix grammar:\n```python\nhelo word\n```' == make_prompt( '', 'fix grammar', 'helo word', '```')
with patch('vim.eval', return_value = "python") as mock_eval:
assert 'fix grammar:\n```python\nhelo word\n```' == make_prompt( '', 'fix grammar', 'helo word', '```')

# do not add filetype if not appropriate
mocker.patch('vim.eval').return_value = "aichat"
assert 'fix grammar:\n```\nhelo word\n```' == make_prompt( '', 'fix grammar', 'helo word', '```')
mocker.patch('vim.eval').return_value = ""
assert 'fix grammar:\n```\nhelo word\n```' == make_prompt( '', 'fix grammar', 'helo word', '```')
with patch('vim.eval', return_value = "aichat") as mock_eval:
assert 'fix grammar:\n```\nhelo word\n```' == make_prompt( '', 'fix grammar', 'helo word', '```')
with patch('vim.eval', return_value = "") as mock_eval:
assert 'fix grammar:\n```\nhelo word\n```' == make_prompt( '', 'fix grammar', 'helo word', '```')

def test_role_config_all_params():
context = make_ai_context({
'config_default': default_config,
'config_extension': {},
'user_instruction': '/all_params user instruction',
'user_selection': '',
'command_type': 'chat',
})
actual_options = context['config']['options']
assert actual_options['frequency_penalty'] == '0.5'
assert actual_options['logit_bias'] == '{"2435": -100}'
assert actual_options['logprobs'] == '1'
assert actual_options['presence_penalty'] == '-0.5'
assert actual_options['reasoning_effort'] == 'low'
assert actual_options['seed'] == '12345'
assert actual_options['stop'] == 'stop_sequence'
assert actual_options['top_logprobs'] == '5'
assert actual_options['top_p'] == '0.9'

11 changes: 11 additions & 0 deletions tests/resources/roles.ini
@@ -40,3 +40,14 @@ endpoint_url = https://localhost/edit
paste_mode = 0
[deprecated-test-role.ui-chat]
open_chat_command = preset_tab

[all_params.chat]
options.frequency_penalty = 0.5
options.logit_bias = {"2435": -100}
options.logprobs = 1
options.presence_penalty = -0.5
options.reasoning_effort = low
options.seed = 12345
options.stop = stop_sequence
options.top_logprobs = 5
options.top_p = 0.9
1 change: 1 addition & 0 deletions tests/roles_test.py
@@ -17,6 +17,7 @@ def test_role_chat_only():
'chat-only-role',
'deprecated-test-role-simple',
'deprecated-test-role',
'all_params',
# default roles
'right',
'below',