From b4c6766ae9ebd719466e1b4c8595783e29205116 Mon Sep 17 00:00:00 2001 From: Stanislav Khromov Date: Sun, 14 Dec 2025 21:04:21 +0100 Subject: [PATCH 1/8] add lmstudio local provider --- index.ts | 112 ++++++++++++++++++++++++++++++++++++-- lib/providers/index.ts | 1 + lib/providers/lmstudio.ts | 25 +++++++++ 3 files changed, 134 insertions(+), 4 deletions(-) create mode 100644 lib/providers/index.ts create mode 100644 lib/providers/lmstudio.ts diff --git a/index.ts b/index.ts index 99c68b0..18e7c07 100644 --- a/index.ts +++ b/index.ts @@ -26,6 +26,7 @@ import { formatCost, formatMTokCost, } from "./lib/pricing.ts"; +import { createLMStudioProvider } from "./lib/providers/lmstudio.ts"; import type { LanguageModel } from "ai"; import { intro, @@ -39,6 +40,12 @@ import { } from "@clack/prompts"; import { gateway } from "ai"; +interface LMStudioConfig { + enabled: boolean; + baseURL: string; + modelId: string; +} + async function validateAndConfirmPricing( models: string[], pricingMap: ReturnType, @@ -139,7 +146,10 @@ async function selectOptions() { const models = await multiselect({ message: "Select model(s) to benchmark", - options: [{ value: "custom", label: "Custom" }].concat( + options: [ + { value: "custom", label: "Custom" }, + { value: "lmstudio", label: "LM Studio (Local)" }, + ].concat( available_models.models.reduce>( (arr, model) => { if (model.modelType === "language") { @@ -157,6 +167,65 @@ async function selectOptions() { process.exit(0); } + // Handle LM Studio selection + let lmstudioConfig: LMStudioConfig = { + enabled: false, + baseURL: "http://localhost:1234/v1", + modelId: "", + }; + + if (models.includes("lmstudio")) { + note( + "LM Studio uses a local OpenAI-compatible server.\nMake sure you have LM Studio running with a model loaded.\nDefault port is 1234.", + "๐Ÿ–ฅ๏ธ LM Studio Configuration", + ); + + const customUrl = await confirm({ + message: "Use custom LM Studio URL? 
(default: http://localhost:1234/v1)", + initialValue: false, + }); + + if (isCancel(customUrl)) { + cancel("Operation cancelled."); + process.exit(0); + } + + if (customUrl) { + const baseURL = await text({ + message: "Enter LM Studio server URL", + placeholder: "http://localhost:1234/v1", + }); + + if (isCancel(baseURL)) { + cancel("Operation cancelled."); + process.exit(0); + } + + lmstudioConfig.baseURL = baseURL || "http://localhost:1234/v1"; + } + + const modelId = await text({ + message: "Enter the model ID loaded in LM Studio", + placeholder: "e.g., llama-3.2-1b, qwen2.5-7b-instruct", + }); + + if (isCancel(modelId)) { + cancel("Operation cancelled."); + process.exit(0); + } + + if (!modelId) { + cancel("Model ID is required for LM Studio."); + process.exit(0); + } + + lmstudioConfig = { + enabled: true, + baseURL: lmstudioConfig.baseURL, + modelId, + }; + } + if (models.includes("custom")) { const custom_model = await text({ message: "Enter custom model id", @@ -168,7 +237,14 @@ async function selectOptions() { models.push(custom_model); } - const selectedModels = models.filter((model) => model !== "custom"); + const selectedModels = models.filter( + (model) => model !== "custom" && model !== "lmstudio", + ); + + // Add LM Studio as a model identifier if enabled + if (lmstudioConfig.enabled) { + selectedModels.push(`lmstudio/${lmstudioConfig.modelId}`); + } const pricing = await validateAndConfirmPricing(selectedModels, pricingMap); @@ -233,6 +309,7 @@ async function selectOptions() { mcp, testingTool, pricing, + lmstudioConfig, }; } @@ -246,6 +323,19 @@ function parseCommandString(commandString: string): { return { command, args }; } +function getModelForId( + modelId: string, + lmstudioConfig: LMStudioConfig, +): LanguageModel { + if (modelId.startsWith("lmstudio/")) { + const lmstudioModelId = modelId.replace("lmstudio/", ""); + const provider = createLMStudioProvider(lmstudioConfig.baseURL); + return provider(lmstudioModelId); + } + + return gateway.languageModel(modelId); +} + async function runSingleTest( test: TestDefinition, model: LanguageModel, @@ -375,7 +465,8 @@ async function runSingleTest( } async function main() { - const { models, mcp, testingTool, pricing } = await selectOptions(); + const { models, mcp, testingTool, pricing, lmstudioConfig } = + await selectOptions(); const mcpServerUrl = mcp; const mcpEnabled = !!mcp; @@ -408,6 +499,9 @@ async function main() { ); } else { console.log(` ${modelId}`); + if (modelId.startsWith("lmstudio/")) { + console.log(` ๐Ÿ–ฅ๏ธ Local model via LM Studio`); + } } } @@ -427,6 +521,10 @@ async function main() { `๐Ÿงช TestComponent Tool: ${testComponentEnabled ? "Enabled" : "Disabled"}`, ); + if (lmstudioConfig.enabled) { + console.log(`๐Ÿ–ฅ๏ธ LM Studio: ${lmstudioConfig.baseURL}`); + } + console.log("\n๐Ÿ“ Discovering tests..."); const tests = discoverTests(); console.log( @@ -486,7 +584,7 @@ async function main() { ); } - const model = gateway.languageModel(modelId); + const model = getModelForId(modelId, lmstudioConfig); const testResults = []; const startTime = Date.now(); @@ -624,6 +722,12 @@ async function main() { pricing: pricingInfo, totalCost, cacheSimulation, + lmstudio: modelId.startsWith("lmstudio/") + ? 
{ + baseURL: lmstudioConfig.baseURL, + modelId: lmstudioConfig.modelId, + } + : null, }, }; diff --git a/lib/providers/index.ts b/lib/providers/index.ts new file mode 100644 index 0000000..fb54a7e --- /dev/null +++ b/lib/providers/index.ts @@ -0,0 +1 @@ +export { createLMStudioProvider, lmstudio } from "./lmstudio.ts"; diff --git a/lib/providers/lmstudio.ts b/lib/providers/lmstudio.ts new file mode 100644 index 0000000..6cce9a4 --- /dev/null +++ b/lib/providers/lmstudio.ts @@ -0,0 +1,25 @@ +import { createOpenAICompatible } from "@ai-sdk/openai-compatible"; + +/** + * Creates an LM Studio provider instance. + * + * LM Studio is a user interface for running local models. + * It contains an OpenAI compatible API server that you can use with the AI SDK. + * You can start the local server under the Local Server tab in the LM Studio UI. + * + * @param baseURL - The base URL of the LM Studio server (default: http://localhost:1234/v1) + * @returns An LM Studio provider instance + */ +export function createLMStudioProvider( + baseURL: string = "http://localhost:1234/v1", +) { + return createOpenAICompatible({ + name: "lmstudio", + baseURL, + }); +} + +/** + * Default LM Studio provider instance using the default port (1234). + */ +export const lmstudio = createLMStudioProvider(); From f35742b055f81206e4508d81fa11192f7acffe42 Mon Sep 17 00:00:00 2001 From: Stanislav Khromov Date: Sun, 14 Dec 2025 21:11:26 +0100 Subject: [PATCH 2/8] wip --- index.ts | 249 +++++++++++++++++++++++++------------- lib/providers/index.ts | 7 +- lib/providers/lmstudio.ts | 46 +++++++ 3 files changed, 217 insertions(+), 85 deletions(-) diff --git a/index.ts b/index.ts index 18e7c07..63ab913 100644 --- a/index.ts +++ b/index.ts @@ -26,7 +26,10 @@ import { formatCost, formatMTokCost, } from "./lib/pricing.ts"; -import { createLMStudioProvider } from "./lib/providers/lmstudio.ts"; +import { + createLMStudioProvider, + fetchLMStudioModels, +} from "./lib/providers/lmstudio.ts"; import type { LanguageModel } from "ai"; import { intro, @@ -37,13 +40,15 @@ import { select, confirm, note, + spinner, } from "@clack/prompts"; import { gateway } from "ai"; -interface LMStudioConfig { - enabled: boolean; - baseURL: string; - modelId: string; +type ProviderType = "gateway" | "lmstudio"; + +interface ProviderConfig { + type: ProviderType; + lmstudioBaseURL?: string; } async function validateAndConfirmPricing( @@ -137,49 +142,30 @@ async function validateAndConfirmPricing( } } -async function selectOptions() { - intro("๐Ÿš€ Svelte AI Bench"); - - const available_models = await gateway.getAvailableModels(); - - const pricingMap = buildPricingMap(available_models.models); - - const models = await multiselect({ - message: "Select model(s) to benchmark", +async function selectProvider(): Promise { + const provider = await select({ + message: "Select model provider", options: [ - { value: "custom", label: "Custom" }, - { value: "lmstudio", label: "LM Studio (Local)" }, - ].concat( - available_models.models.reduce>( - (arr, model) => { - if (model.modelType === "language") { - arr.push({ value: model.id, label: model.name }); - } - return arr; - }, - [], - ), - ), + { + value: "gateway", + label: "Vercel AI Gateway", + hint: "Cloud-hosted models via Vercel", + }, + { + value: "lmstudio", + label: "LM Studio", + hint: "Local models via LM Studio", + }, + ], + initialValue: "gateway", }); - if (isCancel(models)) { + if (isCancel(provider)) { cancel("Operation cancelled."); process.exit(0); } - // Handle LM Studio selection - let lmstudioConfig: 
LMStudioConfig = { - enabled: false, - baseURL: "http://localhost:1234/v1", - modelId: "", - }; - - if (models.includes("lmstudio")) { - note( - "LM Studio uses a local OpenAI-compatible server.\nMake sure you have LM Studio running with a model loaded.\nDefault port is 1234.", - "๐Ÿ–ฅ๏ธ LM Studio Configuration", - ); - + if (provider === "lmstudio") { const customUrl = await confirm({ message: "Use custom LM Studio URL? (default: http://localhost:1234/v1)", initialValue: false, @@ -190,40 +176,51 @@ async function selectOptions() { process.exit(0); } + let baseURL = "http://localhost:1234/v1"; + if (customUrl) { - const baseURL = await text({ + const urlInput = await text({ message: "Enter LM Studio server URL", placeholder: "http://localhost:1234/v1", }); - if (isCancel(baseURL)) { + if (isCancel(urlInput)) { cancel("Operation cancelled."); process.exit(0); } - lmstudioConfig.baseURL = baseURL || "http://localhost:1234/v1"; + baseURL = urlInput || "http://localhost:1234/v1"; } - const modelId = await text({ - message: "Enter the model ID loaded in LM Studio", - placeholder: "e.g., llama-3.2-1b, qwen2.5-7b-instruct", - }); + return { type: "lmstudio", lmstudioBaseURL: baseURL }; + } - if (isCancel(modelId)) { - cancel("Operation cancelled."); - process.exit(0); - } + return { type: "gateway" }; +} - if (!modelId) { - cancel("Model ID is required for LM Studio."); - process.exit(0); - } +async function selectModelsFromGateway( + pricingMap: ReturnType, +) { + const available_models = await gateway.getAvailableModels(); - lmstudioConfig = { - enabled: true, - baseURL: lmstudioConfig.baseURL, - modelId, - }; + const models = await multiselect({ + message: "Select model(s) to benchmark", + options: [{ value: "custom", label: "Custom" }].concat( + available_models.models.reduce>( + (arr, model) => { + if (model.modelType === "language") { + arr.push({ value: model.id, label: model.name }); + } + return arr; + }, + [], + ), + ), + }); + + if (isCancel(models)) { + cancel("Operation cancelled."); + process.exit(0); } if (models.includes("custom")) { @@ -237,16 +234,96 @@ async function selectOptions() { models.push(custom_model); } - const selectedModels = models.filter( - (model) => model !== "custom" && model !== "lmstudio", - ); + const selectedModels = models.filter((model) => model !== "custom"); - // Add LM Studio as a model identifier if enabled - if (lmstudioConfig.enabled) { - selectedModels.push(`lmstudio/${lmstudioConfig.modelId}`); + const pricing = await validateAndConfirmPricing(selectedModels, pricingMap); + + return { selectedModels, pricing }; +} + +async function selectModelsFromLMStudio(baseURL: string) { + const s = spinner(); + s.start("Connecting to LM Studio..."); + + const lmstudioModels = await fetchLMStudioModels(baseURL); + + if (lmstudioModels === null) { + s.stop("Failed to connect to LM Studio"); + note( + `Could not connect to LM Studio at ${baseURL}\n\nMake sure:\n1. LM Studio is running\n2. A model is loaded\n3. 
The local server is started (Local Server tab โ†’ Start Server)`, + "โŒ Connection Failed", + ); + cancel("Cannot proceed without LM Studio connection."); + process.exit(1); } - const pricing = await validateAndConfirmPricing(selectedModels, pricingMap); + if (lmstudioModels.length === 0) { + s.stop("No models found"); + note( + `LM Studio is running but no models are loaded.\n\nPlease load a model in LM Studio and try again.`, + "โš ๏ธ No Models Available", + ); + cancel("Cannot proceed without loaded models."); + process.exit(1); + } + + s.stop(`Found ${lmstudioModels.length} model(s)`); + + const models = await multiselect({ + message: "Select model(s) to benchmark", + options: lmstudioModels.map((model) => ({ + value: model.id, + label: model.id, + hint: model.owned_by !== "unknown" ? `by ${model.owned_by}` : undefined, + })), + }); + + if (isCancel(models)) { + cancel("Operation cancelled."); + process.exit(0); + } + + // LM Studio models are free (local), so no pricing + const pricing = { + enabled: false, + lookups: new Map>(), + }; + + // Prefix with lmstudio/ for identification + const selectedModels = models.map((m) => `lmstudio/${m}`); + + return { selectedModels, pricing }; +} + +async function selectOptions() { + intro("๐Ÿš€ Svelte AI Bench"); + + // Step 1: Select provider + const providerConfig = await selectProvider(); + + // Get pricing map for gateway (needed even if using LM Studio, for the type) + let pricingMap: ReturnType; + let selectedModels: string[]; + let pricing: { + enabled: boolean; + lookups: Map>; + }; + + // Step 2: Select models based on provider + if (providerConfig.type === "gateway") { + const available_models = await gateway.getAvailableModels(); + pricingMap = buildPricingMap(available_models.models); + const result = await selectModelsFromGateway(pricingMap); + selectedModels = result.selectedModels; + pricing = result.pricing; + } else { + pricingMap = buildPricingMap([]); + const result = await selectModelsFromLMStudio( + providerConfig.lmstudioBaseURL!, + ); + selectedModels = result.selectedModels; + pricing = result.pricing; + } const mcp_integration = await select({ message: "Which MCP integration to use?", @@ -309,7 +386,7 @@ async function selectOptions() { mcp, testingTool, pricing, - lmstudioConfig, + providerConfig, }; } @@ -325,11 +402,11 @@ function parseCommandString(commandString: string): { function getModelForId( modelId: string, - lmstudioConfig: LMStudioConfig, + providerConfig: ProviderConfig, ): LanguageModel { if (modelId.startsWith("lmstudio/")) { const lmstudioModelId = modelId.replace("lmstudio/", ""); - const provider = createLMStudioProvider(lmstudioConfig.baseURL); + const provider = createLMStudioProvider(providerConfig.lmstudioBaseURL); return provider(lmstudioModelId); } @@ -465,7 +542,7 @@ async function runSingleTest( } async function main() { - const { models, mcp, testingTool, pricing, lmstudioConfig } = + const { models, mcp, testingTool, pricing, providerConfig } = await selectOptions(); const mcpServerUrl = mcp; @@ -480,6 +557,13 @@ async function main() { console.log("โ•‘ SvelteBench 2.0 - Multi-Test โ•‘"); console.log("โ•šโ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•"); + console.log( + `\n๐Ÿ”Œ Provider: ${providerConfig.type === "gateway" ? 
"Vercel AI Gateway" : "LM Studio"}`, + ); + if (providerConfig.type === "lmstudio" && providerConfig.lmstudioBaseURL) { + console.log(` URL: ${providerConfig.lmstudioBaseURL}`); + } + console.log("\n๐Ÿ“‹ Models:"); for (const modelId of models) { const lookup = pricing.lookups.get(modelId); @@ -500,7 +584,7 @@ async function main() { } else { console.log(` ${modelId}`); if (modelId.startsWith("lmstudio/")) { - console.log(` ๐Ÿ–ฅ๏ธ Local model via LM Studio`); + console.log(` ๐Ÿ–ฅ๏ธ Local model (free)`); } } } @@ -521,10 +605,6 @@ async function main() { `๐Ÿงช TestComponent Tool: ${testComponentEnabled ? "Enabled" : "Disabled"}`, ); - if (lmstudioConfig.enabled) { - console.log(`๐Ÿ–ฅ๏ธ LM Studio: ${lmstudioConfig.baseURL}`); - } - console.log("\n๐Ÿ“ Discovering tests..."); const tests = discoverTests(); console.log( @@ -584,7 +664,7 @@ async function main() { ); } - const model = getModelForId(modelId, lmstudioConfig); + const model = getModelForId(modelId, providerConfig); const testResults = []; const startTime = Date.now(); @@ -718,16 +798,17 @@ async function main() { mcpTransportType: mcpEnabled ? mcpTransportType : null, timestamp: new Date().toISOString(), model: modelId, + provider: providerConfig.type, pricingKey: pricingLookup?.matchedKey ?? null, pricing: pricingInfo, totalCost, cacheSimulation, - lmstudio: modelId.startsWith("lmstudio/") - ? { - baseURL: lmstudioConfig.baseURL, - modelId: lmstudioConfig.modelId, - } - : null, + lmstudio: + providerConfig.type === "lmstudio" + ? { + baseURL: providerConfig.lmstudioBaseURL, + } + : null, }, }; diff --git a/lib/providers/index.ts b/lib/providers/index.ts index fb54a7e..a0ca9d6 100644 --- a/lib/providers/index.ts +++ b/lib/providers/index.ts @@ -1 +1,6 @@ -export { createLMStudioProvider, lmstudio } from "./lmstudio.ts"; +export { + createLMStudioProvider, + lmstudio, + fetchLMStudioModels, + type LMStudioModel, +} from "./lmstudio.ts"; diff --git a/lib/providers/lmstudio.ts b/lib/providers/lmstudio.ts index 6cce9a4..a07e0e5 100644 --- a/lib/providers/lmstudio.ts +++ b/lib/providers/lmstudio.ts @@ -23,3 +23,49 @@ export function createLMStudioProvider( * Default LM Studio provider instance using the default port (1234). */ export const lmstudio = createLMStudioProvider(); + +/** + * Model information returned from LM Studio's /v1/models endpoint + */ +export interface LMStudioModel { + id: string; + object: string; + owned_by: string; +} + +interface LMStudioModelsResponse { + object: string; + data: LMStudioModel[]; +} + +/** + * Fetches available models from an LM Studio server. 
+ * + * @param baseURL - The base URL of the LM Studio server (default: http://localhost:1234/v1) + * @returns Array of available model IDs, or null if the server is not reachable + */ +export async function fetchLMStudioModels( + baseURL: string = "http://localhost:1234/v1", +): Promise { + try { + const response = await fetch(`${baseURL}/models`, { + method: "GET", + headers: { + "Content-Type": "application/json", + }, + }); + + if (!response.ok) { + console.error( + `Failed to fetch LM Studio models: ${response.status} ${response.statusText}`, + ); + return null; + } + + const data = (await response.json()) as LMStudioModelsResponse; + return data.data || []; + } catch (error) { + // Server not running or not reachable + return null; + } +} From b44968c8044990dc7027e41968e2fcecb59ed583 Mon Sep 17 00:00:00 2001 From: Stanislav Khromov Date: Sun, 14 Dec 2025 21:26:28 +0100 Subject: [PATCH 3/8] wip --- index.ts | 120 +++++++--------------------------- lib/providers/index.ts | 5 ++ lib/providers/lmstudio.ts | 134 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 161 insertions(+), 98 deletions(-) diff --git a/index.ts b/index.ts index 63ab913..c516af5 100644 --- a/index.ts +++ b/index.ts @@ -27,8 +27,11 @@ import { formatMTokCost, } from "./lib/pricing.ts"; import { - createLMStudioProvider, - fetchLMStudioModels, + configureLMStudio, + selectModelsFromLMStudio, + getLMStudioModel, + isLMStudioModel, + type LMStudioConfig, } from "./lib/providers/lmstudio.ts"; import type { LanguageModel } from "ai"; import { @@ -40,7 +43,6 @@ import { select, confirm, note, - spinner, } from "@clack/prompts"; import { gateway } from "ai"; @@ -48,7 +50,7 @@ type ProviderType = "gateway" | "lmstudio"; interface ProviderConfig { type: ProviderType; - lmstudioBaseURL?: string; + lmstudio?: LMStudioConfig; } async function validateAndConfirmPricing( @@ -166,33 +168,8 @@ async function selectProvider(): Promise { } if (provider === "lmstudio") { - const customUrl = await confirm({ - message: "Use custom LM Studio URL? (default: http://localhost:1234/v1)", - initialValue: false, - }); - - if (isCancel(customUrl)) { - cancel("Operation cancelled."); - process.exit(0); - } - - let baseURL = "http://localhost:1234/v1"; - - if (customUrl) { - const urlInput = await text({ - message: "Enter LM Studio server URL", - placeholder: "http://localhost:1234/v1", - }); - - if (isCancel(urlInput)) { - cancel("Operation cancelled."); - process.exit(0); - } - - baseURL = urlInput || "http://localhost:1234/v1"; - } - - return { type: "lmstudio", lmstudioBaseURL: baseURL }; + const lmstudioConfig = await configureLMStudio(); + return { type: "lmstudio", lmstudio: lmstudioConfig }; } return { type: "gateway" }; @@ -241,60 +218,6 @@ async function selectModelsFromGateway( return { selectedModels, pricing }; } -async function selectModelsFromLMStudio(baseURL: string) { - const s = spinner(); - s.start("Connecting to LM Studio..."); - - const lmstudioModels = await fetchLMStudioModels(baseURL); - - if (lmstudioModels === null) { - s.stop("Failed to connect to LM Studio"); - note( - `Could not connect to LM Studio at ${baseURL}\n\nMake sure:\n1. LM Studio is running\n2. A model is loaded\n3. 
The local server is started (Local Server tab โ†’ Start Server)`, - "โŒ Connection Failed", - ); - cancel("Cannot proceed without LM Studio connection."); - process.exit(1); - } - - if (lmstudioModels.length === 0) { - s.stop("No models found"); - note( - `LM Studio is running but no models are loaded.\n\nPlease load a model in LM Studio and try again.`, - "โš ๏ธ No Models Available", - ); - cancel("Cannot proceed without loaded models."); - process.exit(1); - } - - s.stop(`Found ${lmstudioModels.length} model(s)`); - - const models = await multiselect({ - message: "Select model(s) to benchmark", - options: lmstudioModels.map((model) => ({ - value: model.id, - label: model.id, - hint: model.owned_by !== "unknown" ? `by ${model.owned_by}` : undefined, - })), - }); - - if (isCancel(models)) { - cancel("Operation cancelled."); - process.exit(0); - } - - // LM Studio models are free (local), so no pricing - const pricing = { - enabled: false, - lookups: new Map>(), - }; - - // Prefix with lmstudio/ for identification - const selectedModels = models.map((m) => `lmstudio/${m}`); - - return { selectedModels, pricing }; -} - async function selectOptions() { intro("๐Ÿš€ Svelte AI Bench"); @@ -318,11 +241,14 @@ async function selectOptions() { pricing = result.pricing; } else { pricingMap = buildPricingMap([]); - const result = await selectModelsFromLMStudio( - providerConfig.lmstudioBaseURL!, + selectedModels = await selectModelsFromLMStudio( + providerConfig.lmstudio!.baseURL, ); - selectedModels = result.selectedModels; - pricing = result.pricing; + // LM Studio models are free (local), so no pricing + pricing = { + enabled: false, + lookups: new Map>(), + }; } const mcp_integration = await select({ @@ -404,10 +330,8 @@ function getModelForId( modelId: string, providerConfig: ProviderConfig, ): LanguageModel { - if (modelId.startsWith("lmstudio/")) { - const lmstudioModelId = modelId.replace("lmstudio/", ""); - const provider = createLMStudioProvider(providerConfig.lmstudioBaseURL); - return provider(lmstudioModelId); + if (isLMStudioModel(modelId)) { + return getLMStudioModel(modelId, providerConfig.lmstudio?.baseURL); } return gateway.languageModel(modelId); @@ -560,8 +484,8 @@ async function main() { console.log( `\n๐Ÿ”Œ Provider: ${providerConfig.type === "gateway" ? "Vercel AI Gateway" : "LM Studio"}`, ); - if (providerConfig.type === "lmstudio" && providerConfig.lmstudioBaseURL) { - console.log(` URL: ${providerConfig.lmstudioBaseURL}`); + if (providerConfig.type === "lmstudio" && providerConfig.lmstudio) { + console.log(` URL: ${providerConfig.lmstudio.baseURL}`); } console.log("\n๐Ÿ“‹ Models:"); @@ -583,7 +507,7 @@ async function main() { ); } else { console.log(` ${modelId}`); - if (modelId.startsWith("lmstudio/")) { + if (isLMStudioModel(modelId)) { console.log(` ๐Ÿ–ฅ๏ธ Local model (free)`); } } @@ -804,9 +728,9 @@ async function main() { totalCost, cacheSimulation, lmstudio: - providerConfig.type === "lmstudio" + providerConfig.type === "lmstudio" && providerConfig.lmstudio ? 
{ - baseURL: providerConfig.lmstudioBaseURL, + baseURL: providerConfig.lmstudio.baseURL, } : null, }, diff --git a/lib/providers/index.ts b/lib/providers/index.ts index a0ca9d6..9493dac 100644 --- a/lib/providers/index.ts +++ b/lib/providers/index.ts @@ -2,5 +2,10 @@ export { createLMStudioProvider, lmstudio, fetchLMStudioModels, + configureLMStudio, + selectModelsFromLMStudio, + getLMStudioModel, + isLMStudioModel, type LMStudioModel, + type LMStudioConfig, } from "./lmstudio.ts"; diff --git a/lib/providers/lmstudio.ts b/lib/providers/lmstudio.ts index a07e0e5..2a01d94 100644 --- a/lib/providers/lmstudio.ts +++ b/lib/providers/lmstudio.ts @@ -1,4 +1,14 @@ import { createOpenAICompatible } from "@ai-sdk/openai-compatible"; +import { + multiselect, + isCancel, + cancel, + confirm, + text, + spinner, + note, +} from "@clack/prompts"; +import type { LanguageModel } from "ai"; /** * Creates an LM Studio provider instance. @@ -38,6 +48,13 @@ interface LMStudioModelsResponse { data: LMStudioModel[]; } +/** + * LM Studio configuration + */ +export interface LMStudioConfig { + baseURL: string; +} + /** * Fetches available models from an LM Studio server. * @@ -69,3 +86,120 @@ export async function fetchLMStudioModels( return null; } } + +/** + * Prompts the user to configure LM Studio connection settings. + * + * @returns LM Studio configuration with base URL + */ +export async function configureLMStudio(): Promise { + const customUrl = await confirm({ + message: "Use custom LM Studio URL? (default: http://localhost:1234/v1)", + initialValue: false, + }); + + if (isCancel(customUrl)) { + cancel("Operation cancelled."); + process.exit(0); + } + + let baseURL = "http://localhost:1234/v1"; + + if (customUrl) { + const urlInput = await text({ + message: "Enter LM Studio server URL", + placeholder: "http://localhost:1234/v1", + }); + + if (isCancel(urlInput)) { + cancel("Operation cancelled."); + process.exit(0); + } + + baseURL = urlInput || "http://localhost:1234/v1"; + } + + return { baseURL }; +} + +/** + * Connects to LM Studio and prompts the user to select models. + * + * @param baseURL - The base URL of the LM Studio server + * @returns Array of selected model IDs (prefixed with "lmstudio/") + */ +export async function selectModelsFromLMStudio( + baseURL: string, +): Promise { + const s = spinner(); + s.start("Connecting to LM Studio..."); + + const lmstudioModels = await fetchLMStudioModels(baseURL); + + if (lmstudioModels === null) { + s.stop("Failed to connect to LM Studio"); + note( + `Could not connect to LM Studio at ${baseURL}\n\nMake sure:\n1. LM Studio is running\n2. A model is loaded\n3. The local server is started (Local Server tab โ†’ Start Server)`, + "โŒ Connection Failed", + ); + cancel("Cannot proceed without LM Studio connection."); + process.exit(1); + } + + if (lmstudioModels.length === 0) { + s.stop("No models found"); + note( + `LM Studio is running but no models are loaded.\n\nPlease load a model in LM Studio and try again.`, + "โš ๏ธ No Models Available", + ); + cancel("Cannot proceed without loaded models."); + process.exit(1); + } + + s.stop(`Found ${lmstudioModels.length} model(s)`); + + const models = await multiselect({ + message: "Select model(s) to benchmark", + options: lmstudioModels.map((model) => ({ + value: model.id, + label: model.id, + hint: model.owned_by !== "unknown" ? 
`by ${model.owned_by}` : undefined, + })), + }); + + if (isCancel(models)) { + cancel("Operation cancelled."); + process.exit(0); + } + + // Prefix with lmstudio/ for identification + return models.map((m) => `lmstudio/${m}`); +} + +/** + * Gets a language model instance for an LM Studio model ID. + * + * @param modelId - The model ID (with or without "lmstudio/" prefix) + * @param baseURL - The base URL of the LM Studio server + * @returns A LanguageModel instance + */ +export function getLMStudioModel( + modelId: string, + baseURL?: string, +): LanguageModel { + const actualModelId = modelId.startsWith("lmstudio/") + ? modelId.replace("lmstudio/", "") + : modelId; + const provider = createLMStudioProvider(baseURL); + return provider(actualModelId); +} + +/** + * Checks if a model ID is an LM Studio model. + * + * @param modelId - The model ID to check + * @returns True if the model ID starts with "lmstudio/" + */ +export function isLMStudioModel(modelId: string): boolean { + return modelId.startsWith("lmstudio/"); +} From cef065dc9113a2b3cff0850bd43d0bd4d693e13f Mon Sep 17 00:00:00 2001 From: Stanislav Khromov Date: Sun, 14 Dec 2025 21:27:34 +0100 Subject: [PATCH 4/8] cleanup --- lib/providers/index.ts | 11 -------- lib/providers/lmstudio.ts | 53 +-------------------------------------- 2 files changed, 1 insertion(+), 63 deletions(-) delete mode 100644 lib/providers/index.ts diff --git a/lib/providers/index.ts b/lib/providers/index.ts deleted file mode 100644 index 9493dac..0000000 --- a/lib/providers/index.ts +++ /dev/null @@ -1,11 +0,0 @@ -export { - createLMStudioProvider, - lmstudio, - fetchLMStudioModels, - configureLMStudio, - selectModelsFromLMStudio, - getLMStudioModel, - isLMStudioModel, - type LMStudioModel, - type LMStudioConfig, -} from "./lmstudio.ts"; diff --git a/lib/providers/lmstudio.ts b/lib/providers/lmstudio.ts index 2a01d94..f763d5b 100644 --- a/lib/providers/lmstudio.ts +++ b/lib/providers/lmstudio.ts @@ -10,16 +10,6 @@ import { } from "@clack/prompts"; import type { LanguageModel } from "ai"; -/** - * Creates an LM Studio provider instance. - * - * LM Studio is a user interface for running local models. - * It contains an OpenAI compatible API server that you can use with the AI SDK. - * You can start the local server under the Local Server tab in the LM Studio UI. - * - * @param baseURL - The base URL of the LM Studio server (default: http://localhost:1234/v1) - * @returns An LM Studio provider instance - */ export function createLMStudioProvider( baseURL: string = "http://localhost:1234/v1", ) { @@ -29,14 +19,8 @@ export function createLMStudioProvider( }); } -/** - * Default LM Studio provider instance using the default port (1234). - */ export const lmstudio = createLMStudioProvider(); -/** - * Model information returned from LM Studio's /v1/models endpoint - */ export interface LMStudioModel { id: string; object: string; @@ -48,19 +32,10 @@ interface LMStudioModelsResponse { data: LMStudioModel[]; } -/** - * LM Studio configuration - */ export interface LMStudioConfig { baseURL: string; } -/** - * Fetches available models from an LM Studio server. 
- *
- * @param baseURL - The base URL of the LM Studio server (default: http://localhost:1234/v1)
- * @returns Array of available model IDs, or null if the server is not reachable
- */
 export async function fetchLMStudioModels(
   baseURL: string = "http://localhost:1234/v1",
 ): Promise<LMStudioModel[] | null> {
@@ -81,17 +56,11 @@ export async function fetchLMStudioModels(
     const data = (await response.json()) as LMStudioModelsResponse;
     return data.data || [];
-  } catch (error) {
-    // Server not running or not reachable
+  } catch {
     return null;
   }
 }
 
-/**
- * Prompts the user to configure LM Studio connection settings.
- *
- * @returns LM Studio configuration with base URL
- */
 export async function configureLMStudio(): Promise<LMStudioConfig> {
   const customUrl = await confirm({
@@ -122,12 +91,6 @@ export async function configureLMStudio(): Promise<LMStudioConfig> {
   return { baseURL };
 }
 
-/**
- * Connects to LM Studio and prompts the user to select models.
- *
- * @param baseURL - The base URL of the LM Studio server
- * @returns Array of selected model IDs (prefixed with "lmstudio/")
- */
 export async function selectModelsFromLMStudio(
   baseURL: string,
 ): Promise<string[]> {
@@ -172,17 +135,9 @@ export async function selectModelsFromLMStudio(
     process.exit(0);
   }
 
-  // Prefix with lmstudio/ for identification
   return models.map((m) => `lmstudio/${m}`);
 }
 
-/**
- * Gets a language model instance for an LM Studio model ID.
- *
- * @param modelId - The model ID (with or without "lmstudio/" prefix)
- * @param baseURL - The base URL of the LM Studio server
- * @returns A LanguageModel instance
- */
 export function getLMStudioModel(
   modelId: string,
   baseURL?: string,
 ): LanguageModel {
@@ -194,12 +149,6 @@ export function getLMStudioModel(
   return provider(actualModelId);
 }
 
-/**
- * Checks if a model ID is an LM Studio model.
- * - * @param modelId - The model ID to check - * @returns True if the model ID starts with "lmstudio/" - */ export function isLMStudioModel(modelId: string): boolean { return modelId.startsWith("lmstudio/"); } From 9173cee922b2932e72c89aefb3e8774fb9d1985e Mon Sep 17 00:00:00 2001 From: Stanislav Khromov Date: Sun, 14 Dec 2025 21:28:47 +0100 Subject: [PATCH 5/8] Update index.ts --- index.ts | 2 -- 1 file changed, 2 deletions(-) diff --git a/index.ts b/index.ts index c516af5..12fa465 100644 --- a/index.ts +++ b/index.ts @@ -221,7 +221,6 @@ async function selectModelsFromGateway( async function selectOptions() { intro("๐Ÿš€ Svelte AI Bench"); - // Step 1: Select provider const providerConfig = await selectProvider(); // Get pricing map for gateway (needed even if using LM Studio, for the type) @@ -232,7 +231,6 @@ async function selectOptions() { lookups: Map>; }; - // Step 2: Select models based on provider if (providerConfig.type === "gateway") { const available_models = await gateway.getAvailableModels(); pricingMap = buildPricingMap(available_models.models); From 712b985131661953df953481e2a0d26eec9c3668 Mon Sep 17 00:00:00 2001 From: Stanislav Khromov Date: Sun, 14 Dec 2025 21:39:29 +0100 Subject: [PATCH 6/8] refactor --- index.ts | 177 +++--------------------------------- lib/providers/ai-gateway.ts | 155 +++++++++++++++++++++++++++++++ 2 files changed, 170 insertions(+), 162 deletions(-) create mode 100644 lib/providers/ai-gateway.ts diff --git a/index.ts b/index.ts index 12fa465..b925708 100644 --- a/index.ts +++ b/index.ts @@ -19,13 +19,15 @@ import { runTestVerification, } from "./lib/output-test-runner.ts"; import { resultWriteTool, testComponentTool } from "./lib/tools/index.ts"; +import { getModelPricingDisplay, formatCost, formatMTokCost } from "./lib/pricing.ts"; import { - buildPricingMap, - lookupPricingFromMap, - getModelPricingDisplay, - formatCost, - formatMTokCost, -} from "./lib/pricing.ts"; + gateway, + getGatewayModelsAndPricing, + selectModelsFromGateway, + type PricingMap, + type PricingLookup, + type PricingResult, +} from "./lib/providers/ai-gateway.ts"; import { configureLMStudio, selectModelsFromLMStudio, @@ -34,17 +36,8 @@ import { type LMStudioConfig, } from "./lib/providers/lmstudio.ts"; import type { LanguageModel } from "ai"; -import { - intro, - multiselect, - isCancel, - cancel, - text, - select, - confirm, - note, -} from "@clack/prompts"; -import { gateway } from "ai"; +import { intro, isCancel, cancel, select, confirm, text } from "@clack/prompts"; +import { buildPricingMap } from "./lib/pricing.ts"; type ProviderType = "gateway" | "lmstudio"; @@ -53,97 +46,6 @@ interface ProviderConfig { lmstudio?: LMStudioConfig; } -async function validateAndConfirmPricing( - models: string[], - pricingMap: ReturnType, -) { - const lookups = new Map>(); - - for (const modelId of models) { - const lookup = lookupPricingFromMap(modelId, pricingMap); - lookups.set(modelId, lookup); - } - - const modelsWithPricing = models.filter((m) => lookups.get(m) !== null); - const modelsWithoutPricing = models.filter((m) => lookups.get(m) === null); - - if (modelsWithoutPricing.length === 0) { - const pricingLines = models.map((modelId) => { - const lookup = lookups.get(modelId)!; - const display = getModelPricingDisplay(lookup.pricing); - const cacheReadText = - display.cacheReadCostPerMTok !== undefined - ? `, ${formatMTokCost(display.cacheReadCostPerMTok)}/MTok cache read` - : ""; - const cacheWriteText = - display.cacheCreationCostPerMTok !== undefined - ? 
`, ${formatMTokCost(display.cacheCreationCostPerMTok)}/MTok cache write` - : ""; - return `${modelId}\n โ†’ ${formatMTokCost(display.inputCostPerMTok)}/MTok in, ${formatMTokCost(display.outputCostPerMTok)}/MTok out${cacheReadText}${cacheWriteText}`; - }); - - note(pricingLines.join("\n\n"), "๐Ÿ’ฐ Pricing Found"); - - const usePricing = await confirm({ - message: "Enable cost calculation?", - initialValue: true, - }); - - if (isCancel(usePricing)) { - cancel("Operation cancelled."); - process.exit(0); - } - - return { enabled: usePricing, lookups }; - } else { - const lines: string[] = []; - - if (modelsWithoutPricing.length > 0) { - lines.push("No pricing found for:"); - for (const modelId of modelsWithoutPricing) { - lines.push(` โœ— ${modelId}`); - } - } - - if (modelsWithPricing.length > 0) { - lines.push(""); - lines.push("Pricing available for:"); - for (const modelId of modelsWithPricing) { - const lookup = lookups.get(modelId)!; - const display = getModelPricingDisplay(lookup.pricing); - const cacheReadText = - display.cacheReadCostPerMTok !== undefined - ? `, ${formatMTokCost(display.cacheReadCostPerMTok)}/MTok cache read` - : ""; - const cacheWriteText = - display.cacheCreationCostPerMTok !== undefined - ? `, ${formatMTokCost(display.cacheCreationCostPerMTok)}/MTok cache write` - : ""; - lines.push( - ` โœ“ ${modelId} (${formatMTokCost(display.inputCostPerMTok)}/MTok in, ${formatMTokCost(display.outputCostPerMTok)}/MTok out${cacheReadText}${cacheWriteText})`, - ); - } - } - - lines.push(""); - lines.push("Cost calculation will be disabled."); - - note(lines.join("\n"), "โš ๏ธ Pricing Incomplete"); - - const proceed = await confirm({ - message: "Continue without pricing?", - initialValue: true, - }); - - if (isCancel(proceed) || !proceed) { - cancel("Operation cancelled."); - process.exit(0); - } - - return { enabled: false, lookups }; - } -} - async function selectProvider(): Promise { const provider = await select({ message: "Select model provider", @@ -175,65 +77,18 @@ async function selectProvider(): Promise { return { type: "gateway" }; } -async function selectModelsFromGateway( - pricingMap: ReturnType, -) { - const available_models = await gateway.getAvailableModels(); - - const models = await multiselect({ - message: "Select model(s) to benchmark", - options: [{ value: "custom", label: "Custom" }].concat( - available_models.models.reduce>( - (arr, model) => { - if (model.modelType === "language") { - arr.push({ value: model.id, label: model.name }); - } - return arr; - }, - [], - ), - ), - }); - - if (isCancel(models)) { - cancel("Operation cancelled."); - process.exit(0); - } - - if (models.includes("custom")) { - const custom_model = await text({ - message: "Enter custom model id", - }); - if (isCancel(custom_model)) { - cancel("Operation cancelled."); - process.exit(0); - } - models.push(custom_model); - } - - const selectedModels = models.filter((model) => model !== "custom"); - - const pricing = await validateAndConfirmPricing(selectedModels, pricingMap); - - return { selectedModels, pricing }; -} - async function selectOptions() { intro("๐Ÿš€ Svelte AI Bench"); const providerConfig = await selectProvider(); - // Get pricing map for gateway (needed even if using LM Studio, for the type) - let pricingMap: ReturnType; + let pricingMap: PricingMap; let selectedModels: string[]; - let pricing: { - enabled: boolean; - lookups: Map>; - }; + let pricing: PricingResult; if (providerConfig.type === "gateway") { - const available_models = await gateway.getAvailableModels(); - 
pricingMap = buildPricingMap(available_models.models); + const gatewayData = await getGatewayModelsAndPricing(); + pricingMap = gatewayData.pricingMap; const result = await selectModelsFromGateway(pricingMap); selectedModels = result.selectedModels; pricing = result.pricing; @@ -242,10 +97,9 @@ async function selectOptions() { selectedModels = await selectModelsFromLMStudio( providerConfig.lmstudio!.baseURL, ); - // LM Studio models are free (local), so no pricing pricing = { enabled: false, - lookups: new Map>(), + lookups: new Map(), }; } @@ -666,7 +520,6 @@ async function main() { } console.log(`Total cost: ${formatCost(totalCost.totalCost)}`); - // Simulate cache savings cacheSimulation = simulateCacheSavings( testResults, pricingLookup.pricing, diff --git a/lib/providers/ai-gateway.ts b/lib/providers/ai-gateway.ts new file mode 100644 index 0000000..8c4b633 --- /dev/null +++ b/lib/providers/ai-gateway.ts @@ -0,0 +1,155 @@ +import { gateway } from "ai"; +import { multiselect, isCancel, cancel, text, confirm, note } from "@clack/prompts"; +import { + buildPricingMap, + lookupPricingFromMap, + getModelPricingDisplay, + formatMTokCost, +} from "../pricing.ts"; + +export { gateway }; + +export type PricingMap = ReturnType; +export type PricingLookup = ReturnType; + +export interface PricingResult { + enabled: boolean; + lookups: Map; +} + +export async function getGatewayModelsAndPricing() { + const available_models = await gateway.getAvailableModels(); + const pricingMap = buildPricingMap(available_models.models); + return { models: available_models.models, pricingMap }; +} + +export async function validateAndConfirmPricing( + models: string[], + pricingMap: PricingMap, +): Promise { + const lookups = new Map(); + + for (const modelId of models) { + const lookup = lookupPricingFromMap(modelId, pricingMap); + lookups.set(modelId, lookup); + } + + const modelsWithPricing = models.filter((m) => lookups.get(m) !== null); + const modelsWithoutPricing = models.filter((m) => lookups.get(m) === null); + + if (modelsWithoutPricing.length === 0) { + const pricingLines = models.map((modelId) => { + const lookup = lookups.get(modelId)!; + const display = getModelPricingDisplay(lookup.pricing); + const cacheReadText = + display.cacheReadCostPerMTok !== undefined + ? `, ${formatMTokCost(display.cacheReadCostPerMTok)}/MTok cache read` + : ""; + const cacheWriteText = + display.cacheCreationCostPerMTok !== undefined + ? `, ${formatMTokCost(display.cacheCreationCostPerMTok)}/MTok cache write` + : ""; + return `${modelId}\n โ†’ ${formatMTokCost(display.inputCostPerMTok)}/MTok in, ${formatMTokCost(display.outputCostPerMTok)}/MTok out${cacheReadText}${cacheWriteText}`; + }); + + note(pricingLines.join("\n\n"), "๐Ÿ’ฐ Pricing Found"); + + const usePricing = await confirm({ + message: "Enable cost calculation?", + initialValue: true, + }); + + if (isCancel(usePricing)) { + cancel("Operation cancelled."); + process.exit(0); + } + + return { enabled: usePricing, lookups }; + } else { + const lines: string[] = []; + + if (modelsWithoutPricing.length > 0) { + lines.push("No pricing found for:"); + for (const modelId of modelsWithoutPricing) { + lines.push(` โœ— ${modelId}`); + } + } + + if (modelsWithPricing.length > 0) { + lines.push(""); + lines.push("Pricing available for:"); + for (const modelId of modelsWithPricing) { + const lookup = lookups.get(modelId)!; + const display = getModelPricingDisplay(lookup.pricing); + const cacheReadText = + display.cacheReadCostPerMTok !== undefined + ? 
`, ${formatMTokCost(display.cacheReadCostPerMTok)}/MTok cache read` + : ""; + const cacheWriteText = + display.cacheCreationCostPerMTok !== undefined + ? `, ${formatMTokCost(display.cacheCreationCostPerMTok)}/MTok cache write` + : ""; + lines.push( + ` โœ“ ${modelId} (${formatMTokCost(display.inputCostPerMTok)}/MTok in, ${formatMTokCost(display.outputCostPerMTok)}/MTok out${cacheReadText}${cacheWriteText})`, + ); + } + } + + lines.push(""); + lines.push("Cost calculation will be disabled."); + + note(lines.join("\n"), "โš ๏ธ Pricing Incomplete"); + + const proceed = await confirm({ + message: "Continue without pricing?", + initialValue: true, + }); + + if (isCancel(proceed) || !proceed) { + cancel("Operation cancelled."); + process.exit(0); + } + + return { enabled: false, lookups }; + } +} + +export async function selectModelsFromGateway(pricingMap: PricingMap) { + const available_models = await gateway.getAvailableModels(); + + const models = await multiselect({ + message: "Select model(s) to benchmark", + options: [{ value: "custom", label: "Custom" }].concat( + available_models.models.reduce>( + (arr, model) => { + if (model.modelType === "language") { + arr.push({ value: model.id, label: model.name }); + } + return arr; + }, + [], + ), + ), + }); + + if (isCancel(models)) { + cancel("Operation cancelled."); + process.exit(0); + } + + if (models.includes("custom")) { + const custom_model = await text({ + message: "Enter custom model id", + }); + if (isCancel(custom_model)) { + cancel("Operation cancelled."); + process.exit(0); + } + models.push(custom_model); + } + + const selectedModels = models.filter((model) => model !== "custom"); + const pricing = await validateAndConfirmPricing(selectedModels, pricingMap); + + return { selectedModels, pricing }; +} From 64d93b383104e87c4cad28961d2ad313eda53a59 Mon Sep 17 00:00:00 2001 From: Stanislav Khromov Date: Sun, 14 Dec 2025 21:49:55 +0100 Subject: [PATCH 7/8] Update report.ts --- lib/report.ts | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/report.ts b/lib/report.ts index a2ff3c2..d2a671d 100644 --- a/lib/report.ts +++ b/lib/report.ts @@ -82,16 +82,22 @@ export interface TotalCostInfo { cachedInputTokens: number; } +interface LMStudioMetadata { + baseURL: string; +} + interface Metadata { mcpEnabled: boolean; mcpServerUrl: string | null; mcpTransportType?: string | null; timestamp: string; model: string; + provider?: "gateway" | "lmstudio"; pricingKey?: string | null; pricing?: PricingInfo | null; totalCost?: TotalCostInfo | null; cacheSimulation?: ReturnType | null; + lmstudio?: LMStudioMetadata | null; } export interface SingleTestResult { From fab439fe55d4c88db52916458112fb90f36cbdf8 Mon Sep 17 00:00:00 2001 From: Stanislav Khromov Date: Sun, 14 Dec 2025 21:51:08 +0100 Subject: [PATCH 8/8] Update ai-gateway.ts --- lib/providers/ai-gateway.ts | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/lib/providers/ai-gateway.ts b/lib/providers/ai-gateway.ts index 8c4b633..4d7eac5 100644 --- a/lib/providers/ai-gateway.ts +++ b/lib/providers/ai-gateway.ts @@ -18,9 +18,9 @@ export interface PricingResult { } export async function getGatewayModelsAndPricing() { - const available_models = await gateway.getAvailableModels(); - const pricingMap = buildPricingMap(available_models.models); - return { models: available_models.models, pricingMap }; + const availableModels = await gateway.getAvailableModels(); + const pricingMap = buildPricingMap(availableModels.models); + return { models: availableModels.models, 
pricingMap };
 }
 
 export async function validateAndConfirmPricing(
@@ -115,12 +115,12 @@ export async function validateAndConfirmPricing(
 }
 
 export async function selectModelsFromGateway(pricingMap: PricingMap) {
-  const available_models = await gateway.getAvailableModels();
+  const availableModels = await gateway.getAvailableModels();
 
   const models = await multiselect({
     message: "Select model(s) to benchmark",
     options: [{ value: "custom", label: "Custom" }].concat(
-      available_models.models.reduce<Array<{ value: string; label: string }>>(
+      availableModels.models.reduce<Array<{ value: string; label: string }>>(
         (arr, model) => {
           if (model.modelType === "language") {
            arr.push({ value: model.id, label: model.name });
          }
          return arr;
        },
        [],
      ),
    ),
  });

  if (isCancel(models)) {
    cancel("Operation cancelled.");
    process.exit(0);
  }

  if (models.includes("custom")) {
-    const custom_model = await text({
+    const customModel = await text({
      message: "Enter custom model id",
    });
-    if (isCancel(custom_model)) {
+    if (isCancel(customModel)) {
      cancel("Operation cancelled.");
      process.exit(0);
    }
-    models.push(custom_model);
+    models.push(customModel);
  }

  const selectedModels = models.filter((model) => model !== "custom");
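
---

Usage sketch for the series as a whole: a minimal example of how the LM Studio
provider introduced above is meant to be consumed, assuming the final exports
of lib/providers/lmstudio.ts land as shown in the patches. The prompt string
and the first-model fallback are illustrative placeholders, not part of the
patches.

import { generateText } from "ai";
import {
  fetchLMStudioModels,
  getLMStudioModel,
  isLMStudioModel,
} from "./lib/providers/lmstudio.ts";

// Discover whatever the local server currently has loaded (null = unreachable).
const models = await fetchLMStudioModels("http://localhost:1234/v1");
if (!models || models.length === 0) {
  throw new Error("LM Studio is not running or has no model loaded");
}

// Benchmark ids carry an "lmstudio/" prefix so they can be told apart from
// gateway ids later on.
const modelId = `lmstudio/${models[0].id}`;
console.log(isLMStudioModel(modelId)); // true

// Resolve the prefixed id back to an AI SDK LanguageModel and run it.
const model = getLMStudioModel(modelId, "http://localhost:1234/v1");
const { text } = await generateText({
  model,
  prompt: "Write one sentence about Svelte.",
});
console.log(text);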