diff --git a/apps/landing/src/components/Header.svelte b/apps/landing/src/components/Header.svelte
index c6d7004..b9f50a6 100644
--- a/apps/landing/src/components/Header.svelte
+++ b/apps/landing/src/components/Header.svelte
@@ -9,7 +9,9 @@
   import { SITE_URLS } from "@bashbuddy/consts";

-  const BANNER_STORAGE_KEY = "bashbuddy-qwen-banner-closed";
+  // const BANNER_STORAGE_KEY = "bashbuddy-qwen-banner-closed";
+  const BANNER_STORAGE_KEY =
+    "bashbuddy-banner-better-cloud-models-and-upcoming-agent-mode";

   let isMenuOpen = $state(false);
   let isBannerVisible = $state(false);
@@ -99,16 +101,17 @@
   {#if isBrowserEnv && isBannerVisible}
     <div
-      We've added Qwen 2.5 models to BashBuddy Local!
+      We're introducing better AI models on BashBuddy Cloud and upcoming agent
+      mode.
       Read more ➡️
     </div>
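Swapping the value of `BANNER_STORAGE_KEY` (rather than clearing a stored flag) is presumably what resurfaces the banner for visitors who dismissed the old Qwen one: the dismissal marker would live in `localStorage` under the old key, and the new key has no entry yet. A minimal sketch of that pattern, inferred from the `isBannerVisible` state and the `isBrowserEnv` guard above; `shouldShowBanner` and `dismissBanner` are hypothetical helper names, not code from this repo:

```ts
// Hypothetical sketch of the dismissal logic the storage key implies.
// Renaming the key resurfaces the banner, because the new key has no
// stored "dismissed" marker yet.
const BANNER_STORAGE_KEY =
  "bashbuddy-banner-better-cloud-models-and-upcoming-agent-mode";

function shouldShowBanner(): boolean {
  // Guard for SSR: localStorage only exists in the browser.
  if (typeof localStorage === "undefined") return false;
  return localStorage.getItem(BANNER_STORAGE_KEY) === null;
}

function dismissBanner(): void {
  localStorage.setItem(BANNER_STORAGE_KEY, "true");
}
```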
diff --git a/packages/api/src/router/chat.ts b/packages/api/src/router/chat.ts
index eb81dcf..c4bc528 100644
--- a/packages/api/src/router/chat.ts
+++ b/packages/api/src/router/chat.ts
@@ -76,7 +76,7 @@ class GroqLLM implements LLM {
     const generation = trace?.generation({
       name: "chat.ask",
-      model: "llama-3.1-8b-instant",
+      model: "llama-3.3-70b-versatile",
       input: messages,
       modelParameters: {
         temperature,
@@ -87,7 +87,7 @@ class GroqLLM implements LLM {
     const chatCompletion = await this.groq.chat.completions.create({
       messages,
-      model: "llama-3.1-8b-instant",
+      model: "llama-3.3-70b-versatile",
       temperature,
       max_completion_tokens: maxCompletionTokens,
       top_p: topP,
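The model ID changes in two places so that the `trace?.generation(...)` observability metadata stays in sync with the model Groq actually serves. For context, a standalone sketch of the updated request, assuming the `groq-sdk` client; the prompt and sampling values are placeholders, and only the model ID and parameter names come from the diff:

```ts
import Groq from "groq-sdk";

const groq = new Groq({ apiKey: process.env.GROQ_API_KEY });

// Placeholder prompt and sampling values; the real ones come from the
// surrounding GroqLLM class in chat.ts.
const chatCompletion = await groq.chat.completions.create({
  messages: [{ role: "user", content: "list files changed in the last hour" }],
  model: "llama-3.3-70b-versatile", // previously "llama-3.1-8b-instant"
  temperature: 0.5,
  max_completion_tokens: 1024,
  top_p: 1,
});

console.log(chatCompletion.choices[0]?.message?.content);
```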