diff --git a/components/components/SettingsDialog.tsx b/components/components/SettingsDialog.tsx
index 38fe4af..74f988b 100644
--- a/components/components/SettingsDialog.tsx
+++ b/components/components/SettingsDialog.tsx
@@ -99,6 +99,22 @@ function SettingsDialog({ settings, setSettings, openDialog, setOpenDialog }: Pr
+      <Input
+        value={settings.modelName || ''}
+        onChange={(e) =>
+          setSettings({
+            ...settings,
+            modelName: e.target.value,
+          })
+        }
+      />
+
{
settings.llm === 'openai' ? (
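
The added field is a standard controlled input. A minimal, self-contained sketch of the pattern (component and prop names here are assumed for illustration, not taken from this repo):

    // Sketch only: a text field bound to settings.modelName.
    function ModelNameField(props: {
      settings: { modelName: string | null };
      setSettings: (next: { modelName: string | null }) => void;
    }) {
      return (
        <input
          value={props.settings.modelName || ''}
          onChange={(e) =>
            props.setSettings({ ...props.settings, modelName: e.target.value })
          }
        />
      );
    }
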
diff --git a/components/contexts/SettingContext.tsx b/components/contexts/SettingContext.tsx
index 0b49ffa..2324805 100644
--- a/components/contexts/SettingContext.tsx
+++ b/components/contexts/SettingContext.tsx
@@ -26,6 +26,7 @@ const initialValue = {
init: false,
llm: 'openai',
geminiApiKey: '',
+ modelName: '',
},
initCreate: false,
setSettings: () => {},
diff --git a/components/types.ts b/components/types.ts
index 783d7c9..5f04a43 100644
--- a/components/types.ts
+++ b/components/types.ts
@@ -32,6 +32,7 @@ export interface Settings {
init: boolean;
llm: string;
geminiApiKey: string;
+ modelName: string | null;
}
export enum AppState {
diff --git a/service/events/generateCode.ts b/service/events/generateCode.ts
index 6997d1f..95c14c0 100644
--- a/service/events/generateCode.ts
+++ b/service/events/generateCode.ts
@@ -21,6 +21,7 @@ export interface IGenerateCodeParams {
llm: string;
geminiApiKey: string;
slug?: string;
+ modelName: string;
}
const encoder = new TextEncoder();
@@ -118,6 +119,7 @@ export async function streamGenerateCode(
openAiBaseURL: params.openAiBaseURL,
llm: params.llm, // 'Gemini'
geminiApiKey: params.geminiApiKey,
+ modelName: params.modelName,
},
);
} catch (e) {
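
Callers just thread the new field through alongside the existing parameters. A hedged sketch of the normalization a call site needs (import paths and the helper name are assumptions for illustration):

    import { IGenerateCodeParams } from '../service/events/generateCode';
    import type { Settings } from '../components/types';

    // Sketch only: Settings.modelName may be null, while
    // IGenerateCodeParams.modelName is a plain string, so normalize here;
    // an empty string makes the downstream code fall back to its defaults.
    function toGenerateCodeParams(
      settings: Settings,
      rest: Omit<IGenerateCodeParams, 'modelName'>,
    ): IGenerateCodeParams {
      return { ...rest, modelName: settings.modelName ?? '' };
    }
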
diff --git a/service/events/llm.ts b/service/events/llm.ts
index 95e2d3b..954a2e7 100644
--- a/service/events/llm.ts
+++ b/service/events/llm.ts
@@ -95,6 +95,10 @@ async function useGeminiResponse([messages, callback, params]: Parameters<
modelType = "gemini-pro-vision"
}
+ if (params.modelName) {
+ modelType = params.modelName
+ }
+
const model = genAI.getGenerativeModel({ model: modelType});
const result = await model.generateContentStream({
@@ -133,6 +137,7 @@ export async function streamingOpenAIResponses(
openAiBaseURL: any;
llm: string;
geminiApiKey: any;
+    modelName: string;
}
) {
@@ -153,8 +158,13 @@ export async function streamingOpenAIResponses(
'https://api.openai.com/v1',
});
+  let modelName = 'gpt-4-vision-preview';
+ if (params.modelName) {
+ modelName = params.modelName;
+ }
+
const stream = await openai.chat.completions.create({
- model: 'gpt-4-vision-preview',
+ model: modelName,
temperature: 0,
max_tokens: 4096,
messages,
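
Net effect of the llm.ts changes: an explicit modelName wins, otherwise each provider keeps its previous default. Condensed as a sketch (note the Gemini side also has a non-vision default that this excerpt does not show):

    // Equivalent selection logic, condensed from the two hunks above.
    function resolveModel(llm: string, modelName?: string | null): string {
      if (modelName) return modelName;   // explicit override from settings
      return llm === 'openai'
        ? 'gpt-4-vision-preview'         // previously hard-coded OpenAI model
        : 'gemini-pro-vision';           // Gemini vision default in this file
    }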