From 8c4d8d82434560f6386fd445d2bb4fe90bf409d8 Mon Sep 17 00:00:00 2001
From: km-tr
Date: Sun, 3 Mar 2024 23:15:04 +0900
Subject: [PATCH 1/2] fix(config.ts): improve code formatting for better readability

feat(config.ts): add additional supported models for OCO_MODEL configuration
feat(config.ts): update default OCO_MODEL value to 'gpt-3.5-turbo' for better compatibility
---
 src/commands/config.ts | 17 +++++++++++------
 1 file changed, 11 insertions(+), 6 deletions(-)

diff --git a/src/commands/config.ts b/src/commands/config.ts
index 81c27d38..21e40623 100644
--- a/src/commands/config.ts
+++ b/src/commands/config.ts
@@ -54,7 +54,11 @@ const validateConfig = (
 export const configValidators = {
   [CONFIG_KEYS.OCO_OPENAI_API_KEY](value: any, config: any = {}) {
     //need api key unless running locally with ollama
-    validateConfig('API_KEY', value || config.OCO_AI_PROVIDER == 'ollama', 'You need to provide an API key');
+    validateConfig(
+      'API_KEY',
+      value || config.OCO_AI_PROVIDER == 'ollama',
+      'You need to provide an API key'
+    );
     validateConfig(
       CONFIG_KEYS.OCO_OPENAI_API_KEY,
       value.startsWith('sk-'),
@@ -150,12 +154,13 @@ export const configValidators = {
       CONFIG_KEYS.OCO_MODEL,
       [
         'gpt-3.5-turbo',
+        'gpt-3.5-turbo-0125',
         'gpt-4',
-        'gpt-3.5-turbo-16k',
-        'gpt-3.5-turbo-0613',
-        'gpt-4-1106-preview'
+        'gpt-4-1106-preview',
+        'gpt-4-turbo-preview',
+        'gpt-4-0125-preview'
       ].includes(value),
-      `${value} is not supported yet, use 'gpt-4', 'gpt-3.5-turbo-16k' (default), 'gpt-3.5-turbo-0613', 'gpt-3.5-turbo' or 'gpt-4-1106-preview'`
+      `${value} is not supported yet, use 'gpt-4', 'gpt-3.5-turbo' (default), 'gpt-3.5-turbo-0125', 'gpt-4-1106-preview', 'gpt-4-turbo-preview' or 'gpt-4-0125-preview'`
     );
     return value;
   },
@@ -210,7 +215,7 @@ export const getConfig = (): ConfigType | null => {
     OCO_OPENAI_BASE_PATH: process.env.OCO_OPENAI_BASE_PATH,
     OCO_DESCRIPTION: process.env.OCO_DESCRIPTION === 'true' ? true : false,
     OCO_EMOJI: process.env.OCO_EMOJI === 'true' ? true : false,
-    OCO_MODEL: process.env.OCO_MODEL || 'gpt-3.5-turbo-16k',
+    OCO_MODEL: process.env.OCO_MODEL || 'gpt-3.5-turbo',
     OCO_LANGUAGE: process.env.OCO_LANGUAGE || 'en',
     OCO_MESSAGE_TEMPLATE_PLACEHOLDER:
       process.env.OCO_MESSAGE_TEMPLATE_PLACEHOLDER || '$msg',

From da671d20c18c75fcfc882559012c17035f88cb99 Mon Sep 17 00:00:00 2001
From: km-tr
Date: Sun, 3 Mar 2024 23:19:35 +0900
Subject: [PATCH 2/2] docs(README.md): update OCO_MODEL options to include new GPT-4 models and remove mention of 'gpt-3.5-turbo-16k' model

docs(README.md): update default OCO_MODEL to 'gpt-3.5-turbo' and provide instructions for switching to GPT-4 models, specifically 'gpt-4-0125-preview' and 'gpt-4-turbo-preview'
docs(README.md): update OCO_MODEL configuration in CI/CD pipeline to 'gpt-3.5-turbo'
docs(README.md): update cost information related to default and GPT-4 models in OpenCommit
---
 README.md | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/README.md b/README.md
index 2e2e3891..3d6440b4 100644
--- a/README.md
+++ b/README.md
@@ -72,16 +72,18 @@ AI_PROVIDER='ollama' opencommit
 ```
 
 ### Flags
+
 There are multiple optional flags that can be used with the `oco` command:
 
 #### Use Full GitMoji Specification
+
 This flag can only be used if the `OCO_EMOJI` configuration item is set to `true`. This flag allows users to use all emojis in the GitMoji specification, By default, the GitMoji full specification is set to `false`, which only includes 10 emojis (🐛✨📝🚀✅♻️⬆️🔧🌐💡). This is due to limit the number of tokens sent in each request.
 However, if you would like to use the full GitMoji specification, you can use the `--fgm` flag.
 
 ```
 oco --fgm
 ```
- 
+
 ## Configuration
 
 ### Local per repo configuration
@@ -95,7 +97,7 @@ OCO_TOKENS_MAX_OUTPUT=
 OCO_OPENAI_BASE_PATH=
 OCO_DESCRIPTION=
 OCO_EMOJI=
-OCO_MODEL= 
+OCO_MODEL=
 OCO_LANGUAGE=
 OCO_MESSAGE_TEMPLATE_PLACEHOLDER=
 OCO_PROMPT_MODULE=
@@ -125,7 +127,7 @@ oco config set OCO_EMOJI=false
 
 ### Switch to GPT-4 or other models
 
-By default, OpenCommit uses `gpt-3.5-turbo-16k` model.
+By default, OpenCommit uses `gpt-3.5-turbo` model.
 
 You may switch to GPT-4 which performs better, but costs ~x15 times more 🤠
 
@@ -142,7 +144,7 @@ oco config set OCO_MODEL=gpt-3.5-turbo
 or for GPT-4 Turbo (Preview) which is more capable, has knowledge of world events up to April 2023, a 128k context window and 2-3x cheaper vs GPT-4:
 
 ```sh
-oco config set OCO_MODEL=gpt-4-1106-preview
+oco config set OCO_MODEL=gpt-4-0125-preview
 ```
 
 Make sure that you spell it `gpt-4` (lowercase) and that you have API access to the 4th model. Even if you have ChatGPT+, that doesn't necessarily mean that you have API access to GPT-4.
@@ -348,7 +350,7 @@ jobs:
           OCO_OPENAI_BASE_PATH: ''
           OCO_DESCRIPTION: false
           OCO_EMOJI: false
-          OCO_MODEL: gpt-3.5-turbo-16k
+          OCO_MODEL: gpt-3.5-turbo
           OCO_LANGUAGE: en
           OCO_PROMPT_MODULE: conventional-commit
 ```
@@ -365,6 +367,6 @@ You pay for your requests to OpenAI API on your own.
 
 OpenCommit stores your key locally.
 
-OpenCommit by default uses 3.5-turbo-16k model, it should not exceed $0.10 per casual working day.
+OpenCommit by default uses 3.5-turbo model, it should not exceed $0.10 per casual working day.
 
 You may switch to gpt-4, it's better, but more expensive.
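
Below is a minimal, standalone TypeScript sketch (an editorial illustration, not code from either commit) restating the net effect of the config.ts hunks: the expanded OCO_MODEL whitelist and the new `gpt-3.5-turbo` default fallback. The `SUPPORTED_MODELS` and `resolveModel` names are hypothetical and do not appear in src/commands/config.ts.

```ts
// Hypothetical helper mirroring the OCO_MODEL handling after PATCH 1/2:
// the whitelist matches the updated validator list, and the fallback matches
// the new default used in getConfig().
const SUPPORTED_MODELS = [
  'gpt-3.5-turbo',
  'gpt-3.5-turbo-0125',
  'gpt-4',
  'gpt-4-1106-preview',
  'gpt-4-turbo-preview',
  'gpt-4-0125-preview'
];

const resolveModel = (value?: string): string => {
  // Fall back to the new default when OCO_MODEL is unset.
  const model = value || 'gpt-3.5-turbo';
  if (!SUPPORTED_MODELS.includes(model)) {
    throw new Error(
      `${model} is not supported yet, use one of: ${SUPPORTED_MODELS.join(', ')}`
    );
  }
  return model;
};

// Examples: an explicitly configured model, and the unset case falling back
// to the new default.
console.log(resolveModel('gpt-4-0125-preview')); // -> gpt-4-0125-preview
console.log(resolveModel(undefined));            // -> gpt-3.5-turbo
```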