From 230b3cc8cd922364fb4fa64cc457d7c8037b996c Mon Sep 17 00:00:00 2001 From: zhangmo8 Date: Tue, 7 Apr 2026 09:14:20 +0800 Subject: [PATCH 1/2] fix(provider): sync db-backed model lists --- .../configPresenter/providerDbLoader.ts | 115 ++++- .../configPresenter/providerDbSupplements.ts | 159 +++++++ .../presenter/llmProviderPresenter/index.ts | 62 ++- .../providers/doubaoProvider.ts | 20 +- .../settings/components/ProviderApiConfig.vue | 27 +- src/renderer/src/i18n/da-DK/settings.json | 9 +- src/renderer/src/i18n/en-US/settings.json | 9 +- src/renderer/src/i18n/fa-IR/settings.json | 9 +- src/renderer/src/i18n/fr-FR/settings.json | 9 +- src/renderer/src/i18n/he-IL/settings.json | 9 +- src/renderer/src/i18n/ja-JP/settings.json | 9 +- src/renderer/src/i18n/ko-KR/settings.json | 9 +- src/renderer/src/i18n/pt-BR/settings.json | 9 +- src/renderer/src/i18n/ru-RU/settings.json | 9 +- src/renderer/src/i18n/zh-CN/settings.json | 9 +- src/renderer/src/i18n/zh-HK/settings.json | 9 +- src/renderer/src/i18n/zh-TW/settings.json | 9 +- src/shared/providerDbCatalog.ts | 9 + .../configPresenter/providerDbLoader.test.ts | 73 +++- .../backgroundModelSync.test.ts | 393 ++++++++++++++++++ .../doubaoProvider.test.ts | 226 ++++++++++ .../components/ProviderApiConfig.test.ts | 78 ++++ 22 files changed, 1232 insertions(+), 38 deletions(-) create mode 100644 src/main/presenter/configPresenter/providerDbSupplements.ts create mode 100644 src/shared/providerDbCatalog.ts create mode 100644 test/main/presenter/llmProviderPresenter/backgroundModelSync.test.ts create mode 100644 test/main/presenter/llmProviderPresenter/doubaoProvider.test.ts diff --git a/src/main/presenter/configPresenter/providerDbLoader.ts b/src/main/presenter/configPresenter/providerDbLoader.ts index 0cc742fcd..80fa80b8e 100644 --- a/src/main/presenter/configPresenter/providerDbLoader.ts +++ b/src/main/presenter/configPresenter/providerDbLoader.ts @@ -8,6 +8,7 @@ import { sanitizeAggregate } from '@shared/types/model-db' import 
{ resolveProviderId } from './providerId' +import { PROVIDER_DB_SUPPLEMENTS } from './providerDbSupplements' import { eventBus, SendTarget } from '@/eventbus' import { PROVIDER_DB_EVENTS } from '@/events' @@ -29,6 +30,108 @@ export type ProviderDbRefreshResult = { message?: string } +const isPlainObject = (value: unknown): value is Record => { + return !!value && typeof value === 'object' && !Array.isArray(value) +} + +const cloneValue = (value: T): T => { + if (typeof structuredClone === 'function') { + return structuredClone(value) + } + + return JSON.parse(JSON.stringify(value)) as T +} + +const mergeDefinedValue = (base: T, override: unknown): T => { + if (override === undefined) { + return cloneValue(base) + } + + if (!isPlainObject(override)) { + return cloneValue(override as T) + } + + const result: Record = isPlainObject(base) ? cloneValue(base) : {} + + for (const [key, value] of Object.entries(override)) { + if (value === undefined) continue + + const current = result[key] + result[key] = + isPlainObject(current) && isPlainObject(value) + ? 
mergeDefinedValue(current, value) + : cloneValue(value) + } + + return result as T +} + +const mergeProviderModels = ( + baseModels: ProviderModel[], + supplementModels: ProviderModel[] | undefined +): ProviderModel[] => { + const mergedModels = baseModels.map((model) => cloneValue(model)) + + if (!supplementModels || supplementModels.length === 0) { + return mergedModels + } + + const modelIndexById = new Map(mergedModels.map((model, index) => [model.id, index])) + + for (const supplementModel of supplementModels) { + const existingIndex = modelIndexById.get(supplementModel.id) + + if (existingIndex === undefined) { + modelIndexById.set(supplementModel.id, mergedModels.length) + mergedModels.push(cloneValue(supplementModel)) + continue + } + + const existingModel = mergedModels[existingIndex] + const mergedModel = mergeDefinedValue(existingModel, supplementModel) + + if (existingModel.name) { + mergedModel.name = existingModel.name + } + + if (existingModel.display_name) { + mergedModel.display_name = existingModel.display_name + } + + mergedModels[existingIndex] = mergedModel + } + + return mergedModels +} + +const mergeProviderAggregate = ( + base: ProviderAggregate | null, + supplement: ProviderAggregate +): ProviderAggregate => { + const mergedProviders = base ? 
cloneValue(base.providers) : {} + + for (const [providerId, supplementProvider] of Object.entries(supplement.providers)) { + const existingProvider = mergedProviders[providerId] + + if (!existingProvider) { + mergedProviders[providerId] = cloneValue(supplementProvider) + continue + } + + const { models: _existingModels, ...existingWithoutModels } = existingProvider + const { models: supplementModels, ...supplementWithoutModels } = supplementProvider + + mergedProviders[providerId] = { + ...mergeDefinedValue(existingWithoutModels, supplementWithoutModels), + models: mergeProviderModels(existingProvider.models, supplementModels) + } + } + + return { + providers: mergedProviders + } +} + export class ProviderDbLoader { private cache: ProviderAggregate | null = null private userDataDir: string @@ -51,7 +154,7 @@ export class ProviderDbLoader { // Public: initialize on app start (non-blocking refresh) async initialize(): Promise { // Load from cache or built-in - this.cache = this.loadFromCache() ?? this.loadFromBuiltIn() + this.cache = this.applySupplements(this.loadFromCache() ?? this.loadFromBuiltIn()) if (this.cache) { try { const providersCount = Object.keys(this.cache.providers || {}).length @@ -76,7 +179,7 @@ export class ProviderDbLoader { getDb(): ProviderAggregate | null { if (this.cache) return this.cache // Lazy try again if not initialized yet - this.cache = this.loadFromCache() ?? this.loadFromBuiltIn() + this.cache = this.applySupplements(this.loadFromCache() ?? 
this.loadFromBuiltIn()) return this.cache } @@ -123,6 +226,10 @@ export class ProviderDbLoader { } } + private applySupplements(db: ProviderAggregate | null): ProviderAggregate { + return mergeProviderAggregate(db, PROVIDER_DB_SUPPLEMENTS) + } + private readMeta(): MetaFile | null { try { if (!fs.existsSync(this.metaFilePath)) return null @@ -279,9 +386,9 @@ export class ProviderDbLoader { // Write cache atomically and update in-memory this.writeCacheAtomically(sanitized) this.writeMeta(meta) - this.cache = sanitized + this.cache = this.applySupplements(sanitized) try { - const providersCount = Object.keys(sanitized.providers || {}).length + const providersCount = Object.keys(this.cache.providers || {}).length eventBus.send(PROVIDER_DB_EVENTS.UPDATED, SendTarget.ALL_WINDOWS, { providersCount, lastUpdated: meta.lastUpdated diff --git a/src/main/presenter/configPresenter/providerDbSupplements.ts b/src/main/presenter/configPresenter/providerDbSupplements.ts new file mode 100644 index 000000000..f38a9cfbc --- /dev/null +++ b/src/main/presenter/configPresenter/providerDbSupplements.ts @@ -0,0 +1,159 @@ +import type { ProviderAggregate } from '@shared/types/model-db' + +const DOUBAO_THINKING_NOTE = 'doubao-thinking-parameter' + +const createDoubaoThinkingOverride = (id: string) => ({ + id, + extra_capabilities: { + reasoning: { + notes: [DOUBAO_THINKING_NOTE] + } + } +}) + +// Source notes: +// - https://www.volcengine.com/docs/6492/1544808?lang=zh +// - https://developer.volcengine.com/articles/7622677873391829033 +export const PROVIDER_DB_SUPPLEMENTS: ProviderAggregate = { + providers: { + doubao: { + id: 'doubao', + models: [ + createDoubaoThinkingOverride('doubao-seed-1-6-vision-250815'), + createDoubaoThinkingOverride('doubao-seed-1-6-250615'), + createDoubaoThinkingOverride('doubao-seed-1-6-flash-250715'), + createDoubaoThinkingOverride('doubao-seed-1-6-flash-250615'), + createDoubaoThinkingOverride('doubao-seed-1-6-thinking-250715'), + 
createDoubaoThinkingOverride('doubao-seed-1-6-thinking-250615'), + { + id: 'doubao-seed-1.8', + name: 'Doubao-Seed 1.8', + display_name: 'Doubao-Seed 1.8', + type: 'chat', + attachment: true, + reasoning: { + supported: true, + default: true + }, + tool_call: true, + temperature: true, + modalities: { + input: ['text', 'image', 'video'], + output: ['text'] + }, + limit: { + context: 256000, + output: 64000 + }, + extra_capabilities: { + reasoning: { + notes: [DOUBAO_THINKING_NOTE] + } + } + }, + { + id: 'doubao-seed-2.0-code', + name: 'Doubao-Seed 2.0 Code', + display_name: 'Doubao-Seed 2.0 Code', + type: 'chat', + attachment: true, + reasoning: { + supported: false + }, + tool_call: true, + temperature: true, + modalities: { + input: ['text', 'image'], + output: ['text'] + }, + limit: { + context: 256000, + output: 32000 + } + }, + { + id: 'doubao-seed-2.0-lite', + name: 'Doubao-Seed 2.0 Lite', + display_name: 'Doubao-Seed 2.0 Lite', + type: 'chat', + attachment: true, + reasoning: { + supported: true, + default: true + }, + tool_call: true, + temperature: true, + modalities: { + input: ['text', 'image', 'video'], + output: ['text'] + }, + limit: { + context: 256000, + output: 64000 + }, + extra_capabilities: { + reasoning: { + notes: [DOUBAO_THINKING_NOTE] + } + } + }, + { + id: 'doubao-seed-2.0-mini', + name: 'Doubao-Seed 2.0 Mini', + display_name: 'Doubao-Seed 2.0 Mini', + type: 'chat', + attachment: true, + reasoning: { + supported: true, + default: true + }, + tool_call: true, + temperature: true, + modalities: { + input: ['text', 'image', 'video'], + output: ['text'] + }, + limit: { + context: 256000, + output: 64000 + }, + extra_capabilities: { + reasoning: { + notes: [DOUBAO_THINKING_NOTE] + } + } + }, + { + id: 'doubao-seed-2.0-pro', + name: 'Doubao-Seed 2.0 Pro', + display_name: 'Doubao-Seed 2.0 Pro', + type: 'chat', + attachment: true, + reasoning: { + supported: true, + default: true + }, + tool_call: true, + temperature: true, + modalities: { + input: 
['text', 'image', 'video'], + output: ['text'] + }, + limit: { + context: 256000, + output: 64000 + }, + extra_capabilities: { + reasoning: { + notes: [DOUBAO_THINKING_NOTE] + } + } + } + ] + } + } +} + +export const PROVIDER_DB_SUPPLEMENT_NOTES = { + doubaoThinking: DOUBAO_THINKING_NOTE +} as const diff --git a/src/main/presenter/llmProviderPresenter/index.ts b/src/main/presenter/llmProviderPresenter/index.ts index 20c756658..30c035f69 100644 --- a/src/main/presenter/llmProviderPresenter/index.ts +++ b/src/main/presenter/llmProviderPresenter/index.ts @@ -18,8 +18,9 @@ import { AcpDebugRunResult } from '@shared/presenter' import { ProviderChange, ProviderBatchUpdate } from '@shared/provider-operations' +import { isProviderDbBackedProvider } from '@shared/providerDbCatalog' import { eventBus } from '@/eventbus' -import { CONFIG_EVENTS } from '@/events' +import { CONFIG_EVENTS, PROVIDER_DB_EVENTS } from '@/events' import { BaseLLMProvider } from './baseProvider' import { ProviderConfig, StreamState } from './types' import { RateLimitManager } from './managers/rateLimitManager' @@ -47,6 +48,8 @@ const createAbortError = (): Error => { export class LLMProviderPresenter implements ILlmProviderPresenter { private currentProviderId: string | null = null private readonly activeStreams: Map = new Map() + private readonly modelRefreshPromises: Map> = new Map() + private readonly configPresenter: IConfigPresenter private readonly config: ProviderConfig = { maxConcurrentStreams: 10 } @@ -63,6 +66,7 @@ export class LLMProviderPresenter implements ILlmProviderPresenter { sqlitePresenter: ISQLitePresenter, mcpRuntime?: ProviderMcpRuntimePort ) { + this.configPresenter = configPresenter this.rateLimitManager = new RateLimitManager(configPresenter) this.acpSessionPersistence = new AcpSessionPersistence(sqlitePresenter) this.providerInstanceManager = new ProviderInstanceManager({ @@ -105,6 +109,10 @@ export class LLMProviderPresenter implements ILlmProviderPresenter { 
eventBus.on(CONFIG_EVENTS.PROVIDER_BATCH_UPDATE, (batchUpdate: ProviderBatchUpdate) => { this.providerInstanceManager.handleProviderBatchUpdate(batchUpdate) }) + + eventBus.on(PROVIDER_DB_EVENTS.UPDATED, () => { + this.refreshEnabledProviderDbBackedModelsInBackground('provider-db-updated') + }) } getProviders(): LLM_PROVIDER[] { @@ -378,10 +386,58 @@ export class LLMProviderPresenter implements ILlmProviderPresenter { return provider.getKeyStatus() } - async refreshModels(providerId: string): Promise { - try { + private getEnabledProviderIdsUsingProviderDb(): string[] { + return this.providerInstanceManager + .getProviders() + .filter((provider) => provider.enable && isProviderDbBackedProvider(provider.id)) + .map((provider) => provider.id) + } + + private async syncProviderDbBeforeRefresh(providerId: string): Promise { + if (!isProviderDbBackedProvider(providerId)) { + return + } + + const result = await this.configPresenter.refreshProviderDb(true) + if (result.status === 'error') { + throw new Error(result.message || 'Provider DB refresh failed') + } + } + + private enqueueProviderModelRefresh(providerId: string): Promise { + const existingRefresh = this.modelRefreshPromises.get(providerId) + if (existingRefresh) { + return existingRefresh + } + + const refreshPromise = (async () => { const provider = this.getProviderInstance(providerId) await provider.refreshModels() + })().finally(() => { + if (this.modelRefreshPromises.get(providerId) === refreshPromise) { + this.modelRefreshPromises.delete(providerId) + } + }) + + this.modelRefreshPromises.set(providerId, refreshPromise) + return refreshPromise + } + + private refreshEnabledProviderDbBackedModelsInBackground(reason: string): void { + for (const providerId of this.getEnabledProviderIdsUsingProviderDb()) { + void this.enqueueProviderModelRefresh(providerId).catch((error) => { + console.warn( + `[LLMProviderPresenter] Failed to refresh models for provider ${providerId} during ${reason}:`, + error + ) + }) + } + 
} + + async refreshModels(providerId: string): Promise { + try { + await this.syncProviderDbBeforeRefresh(providerId) + await this.enqueueProviderModelRefresh(providerId) } catch (error) { console.error(`Failed to refresh models for provider ${providerId}:`, error) const errorMessage = error instanceof Error ? error.message : String(error) diff --git a/src/main/presenter/llmProviderPresenter/providers/doubaoProvider.ts b/src/main/presenter/llmProviderPresenter/providers/doubaoProvider.ts index 22dfdae67..c41ae0541 100644 --- a/src/main/presenter/llmProviderPresenter/providers/doubaoProvider.ts +++ b/src/main/presenter/llmProviderPresenter/providers/doubaoProvider.ts @@ -16,21 +16,10 @@ import { } from '@shared/modelConfigDefaults' import { OpenAICompatibleProvider } from './openAICompatibleProvider' import { providerDbLoader } from '../../configPresenter/providerDbLoader' -import { modelCapabilities } from '../../configPresenter/modelCapabilities' +import { PROVIDER_DB_SUPPLEMENT_NOTES } from '../../configPresenter/providerDbSupplements' import type { ProviderMcpRuntimePort } from '../runtimePorts' export class DoubaoProvider extends OpenAICompatibleProvider { - // List of models that support thinking parameter - private static readonly THINKING_MODELS: string[] = [ - 'deepseek-v3-1-250821', - 'doubao-seed-1-6-vision-250815', - 'doubao-seed-1-6-250615', - 'doubao-seed-1-6-flash-250615', - 'doubao-1-5-thinking-vision-pro-250428', - 'doubao-1-5-ui-tars-250428', - 'doubao-1-5-thinking-pro-m-250428' - ] - constructor( provider: LLM_PROVIDER, configPresenter: IConfigPresenter, @@ -41,7 +30,9 @@ export class DoubaoProvider extends OpenAICompatibleProvider { } private supportsThinking(modelId: string): boolean { - return DoubaoProvider.THINKING_MODELS.includes(modelId) + const model = providerDbLoader.getModel(this.provider.id, modelId) + const notes = model?.extra_capabilities?.reasoning?.notes + return Array.isArray(notes) && 
notes.includes(PROVIDER_DB_SUPPLEMENT_NOTES.doubaoThinking) } /** @@ -93,8 +84,7 @@ export class DoubaoProvider extends OpenAICompatibleProvider { } protected async fetchOpenAIModels(): Promise { - const resolvedId = modelCapabilities.resolveProviderId(this.provider.id) || this.provider.id - const provider = providerDbLoader.getProvider(resolvedId) + const provider = providerDbLoader.getProvider(this.provider.id) if (!provider || !Array.isArray(provider.models)) { return [] } diff --git a/src/renderer/settings/components/ProviderApiConfig.vue b/src/renderer/settings/components/ProviderApiConfig.vue index cf912291d..83a9d2f7e 100644 --- a/src/renderer/settings/components/ProviderApiConfig.vue +++ b/src/renderer/settings/components/ProviderApiConfig.vue @@ -165,8 +165,10 @@ : t('settings.provider.refreshModels') }} - +

+ {{ t('settings.provider.refreshModelsWithMetadataHint') }} +

{{ t('settings.provider.howToGet') }}: {{ t('settings.provider.getKeyTip') }} {{ @@ -193,8 +195,10 @@ import { import { Icon } from '@iconify/vue' import GitHubCopilotOAuth from './GitHubCopilotOAuth.vue' import { usePresenter } from '@/composables/usePresenter' +import { useToast } from '@/components/use-toast' import { useModelCheckStore } from '@/stores/modelCheck' import type { LLM_PROVIDER, KeyStatus } from '@shared/presenter' +import { isProviderDbBackedProvider } from '@shared/providerDbCatalog' interface ProviderWebsites { official: string @@ -207,6 +211,7 @@ interface ProviderWebsites { const { t } = useI18n() const llmProviderPresenter = usePresenter('llmproviderPresenter') const modelCheckStore = useModelCheckStore() +const { toast } = useToast() const EDITABLE_BASE_URL_PROVIDER_IDS = new Set([ 'openai', @@ -247,6 +252,7 @@ const isBaseUrlEditableByDefault = computed( const showLockedBaseUrl = computed( () => !isBaseUrlEditableByDefault.value && !baseUrlUnlocked.value ) +const shouldRefreshProviderDbFirst = computed(() => isProviderDbBackedProvider(props.provider.id)) watch( () => props.provider, @@ -323,8 +329,27 @@ const refreshModels = async () => { isRefreshing.value = true try { await llmProviderPresenter.refreshModels(props.provider.id) + toast({ + title: t('settings.provider.toast.refreshModelsSuccessTitle'), + description: t( + shouldRefreshProviderDbFirst.value + ? 'settings.provider.toast.refreshModelsSuccessDescriptionWithMetadata' + : 'settings.provider.toast.refreshModelsSuccessDescription' + ), + duration: 4000 + }) } catch (error) { console.error('Failed to refresh models:', error) + toast({ + title: t('settings.provider.toast.refreshModelsFailedTitle'), + description: t( + shouldRefreshProviderDbFirst.value + ? 
'settings.provider.toast.refreshModelsFailedDescriptionWithMetadata' + : 'settings.provider.toast.refreshModelsFailedDescription' + ), + variant: 'destructive', + duration: 4000 + }) } finally { isRefreshing.value = false } diff --git a/src/renderer/src/i18n/da-DK/settings.json b/src/renderer/src/i18n/da-DK/settings.json index 6c3686406..9c767e566 100644 --- a/src/renderer/src/i18n/da-DK/settings.json +++ b/src/renderer/src/i18n/da-DK/settings.json @@ -420,6 +420,7 @@ "openaiResponsesNotice": "OpenAI bruger som standard Responses API. Hvis et tredjeparts-endpoint kun understøtter Chat Completions, skal du bruge OpenAI Completions-udbyderen.", "modifyBaseUrl": "Rediger", "baseUrlLockedHint": "Denne udbyder er låst til den anbefalede basis-URL for at mindske fejlkonfiguration.", + "refreshModelsWithMetadataHint": "Opdatering af denne udbyder synkroniserer først upstream-metadata og genopbygger derefter den lokale modelliste.", "modelList": "Modelliste", "enableModels": "Aktivér modeller", "disableAllModels": "Deaktiver alle modeller", @@ -594,7 +595,13 @@ "backupSuccessTitle": "Backup fuldført", "backupSuccessMessage": "Backup gemt {time} ({size})", "importSuccessTitle": "Import fuldført", - "importSuccessMessage": "Importerede {count} samtaler" + "importSuccessMessage": "Importerede {count} samtaler", + "refreshModelsSuccessTitle": "Modeller opdateret", + "refreshModelsSuccessDescription": "Den nyeste modelliste er blevet synkroniseret for denne udbyder.", + "refreshModelsSuccessDescriptionWithMetadata": "Upstream-metadata og den nyeste modelliste er blevet synkroniseret for denne udbyder.", + "refreshModelsFailedTitle": "Opdatering mislykkedes", + "refreshModelsFailedDescription": "Modellerne for denne udbyder kunne ikke opdateres lige nu. Prøv igen senere.", + "refreshModelsFailedDescriptionWithMetadata": "Upstream-metadata og modeller for denne udbyder kunne ikke opdateres lige nu. Prøv igen senere." 
}, "modelscope": { "mcpSync": { diff --git a/src/renderer/src/i18n/en-US/settings.json b/src/renderer/src/i18n/en-US/settings.json index e232e5a33..49f77feaf 100644 --- a/src/renderer/src/i18n/en-US/settings.json +++ b/src/renderer/src/i18n/en-US/settings.json @@ -548,6 +548,7 @@ "openaiResponsesNotice": "OpenAI defaults to the Responses API. If a third-party endpoint only supports Chat Completions, use the OpenAI Completions provider.", "modifyBaseUrl": "Modify", "baseUrlLockedHint": "This provider is pinned to the recommended Base URL to reduce misconfiguration.", + "refreshModelsWithMetadataHint": "Refreshing this provider will sync upstream metadata first, then rebuild the local model list.", "modelList": "Model List", "enableModels": "Enable Models", "disableAllModels": "Disable All Models", @@ -690,7 +691,13 @@ "backupSuccessTitle": "Backup completed", "backupSuccessMessage": "Backup saved at {time} ({size})", "importSuccessTitle": "Import completed", - "importSuccessMessage": "Successfully imported {count} conversations" + "importSuccessMessage": "Successfully imported {count} conversations", + "refreshModelsSuccessTitle": "Models refreshed", + "refreshModelsSuccessDescription": "The latest model list has been synced for this provider.", + "refreshModelsSuccessDescriptionWithMetadata": "Upstream metadata and the latest model list have been synced for this provider.", + "refreshModelsFailedTitle": "Refresh failed", + "refreshModelsFailedDescription": "Unable to refresh models for this provider right now. Please try again later.", + "refreshModelsFailedDescriptionWithMetadata": "Unable to refresh upstream metadata and models for this provider right now. Please try again later." 
}, "modelscope": { "mcpSync": { diff --git a/src/renderer/src/i18n/fa-IR/settings.json b/src/renderer/src/i18n/fa-IR/settings.json index 3331166ed..4a37bbb5d 100644 --- a/src/renderer/src/i18n/fa-IR/settings.json +++ b/src/renderer/src/i18n/fa-IR/settings.json @@ -486,6 +486,7 @@ "openaiResponsesNotice": "OpenAI به صورت پیش‌فرض از Responses API استفاده می‌کند. اگر endpoint شخص ثالث فقط از Chat Completions پشتیبانی می‌کند، از ارائه‌دهنده OpenAI Completions استفاده کنید.", "modifyBaseUrl": "ویرایش", "baseUrlLockedHint": "این ارائه‌دهنده برای کاهش خطاهای پیکربندی روی آدرس پایه پیشنهادی قفل شده است.", + "refreshModelsWithMetadataHint": "نوسازی این ارائه‌دهنده ابتدا فرادادهٔ بالادستی را همگام می‌کند و سپس فهرست محلی مدل‌ها را بازسازی می‌کند.", "modelList": "فهرست مدل‌ها", "enableModels": "روشن کردن مدل‌ها", "disableAllModels": "خاموش کردن همه مدل‌ها", @@ -660,7 +661,13 @@ "backupSuccessTitle": "پشتیبان‌گیری کامل شد", "backupSuccessMessage": "فایل پشتیبان در {time} ذخیره شد (حجم: {size})", "importSuccessTitle": "درون‌ریزی با موفقیت انجام شد", - "importSuccessMessage": "{count} گفتگو با موفقیت درون‌ریزی شد" + "importSuccessMessage": "{count} گفتگو با موفقیت درون‌ریزی شد", + "refreshModelsSuccessTitle": "مدل‌ها نوسازی شدند", + "refreshModelsSuccessDescription": "جدیدترین فهرست مدل برای این ارائه‌دهنده همگام شد.", + "refreshModelsSuccessDescriptionWithMetadata": "فرادادهٔ بالادستی و جدیدترین فهرست مدل برای این ارائه‌دهنده همگام شد.", + "refreshModelsFailedTitle": "نوسازی ناموفق بود", + "refreshModelsFailedDescription": "در حال حاضر امکان نوسازی مدل‌های این ارائه‌دهنده وجود ندارد. لطفاً بعداً دوباره تلاش کنید.", + "refreshModelsFailedDescriptionWithMetadata": "در حال حاضر امکان همگام‌سازی فرادادهٔ بالادستی و مدل‌های این ارائه‌دهنده وجود ندارد. لطفاً بعداً دوباره تلاش کنید." 
}, "apiKeyLabel": "کلید API", "apiUrlLabel": "آدرس API", diff --git a/src/renderer/src/i18n/fr-FR/settings.json b/src/renderer/src/i18n/fr-FR/settings.json index e5ff8a06b..18bf90021 100644 --- a/src/renderer/src/i18n/fr-FR/settings.json +++ b/src/renderer/src/i18n/fr-FR/settings.json @@ -486,6 +486,7 @@ "openaiResponsesNotice": "OpenAI utilise par défaut l'API Responses. Si un endpoint tiers ne prend en charge que Chat Completions, utilisez le fournisseur OpenAI Completions.", "modifyBaseUrl": "Modifier", "baseUrlLockedHint": "Ce fournisseur est verrouillé sur l’URL de base recommandée afin de réduire les erreurs de configuration.", + "refreshModelsWithMetadataHint": "L’actualisation de ce fournisseur synchronise d’abord les métadonnées en amont, puis reconstruit la liste locale des modèles.", "modelList": "Liste des modèles", "enableModels": "Activer les modèles", "disableAllModels": "Désactiver tous les modèles", @@ -660,7 +661,13 @@ "backupSuccessTitle": "Sauvegarde terminée", "backupSuccessMessage": "Sauvegarde enregistrée à {time} ({size})", "importSuccessTitle": "Importation terminée", - "importSuccessMessage": "{count} conversations importées avec succès" + "importSuccessMessage": "{count} conversations importées avec succès", + "refreshModelsSuccessTitle": "Modèles actualisés", + "refreshModelsSuccessDescription": "La dernière liste de modèles a été synchronisée pour ce fournisseur.", + "refreshModelsSuccessDescriptionWithMetadata": "Les métadonnées en amont et la dernière liste de modèles ont été synchronisées pour ce fournisseur.", + "refreshModelsFailedTitle": "Actualisation échouée", + "refreshModelsFailedDescription": "Impossible d’actualiser les modèles de ce fournisseur pour le moment. Veuillez réessayer plus tard.", + "refreshModelsFailedDescriptionWithMetadata": "Impossible de synchroniser les métadonnées en amont et les modèles de ce fournisseur pour le moment. Veuillez réessayer plus tard." 
}, "apiKeyLabel": "Clé API", "apiUrlLabel": "URL API", diff --git a/src/renderer/src/i18n/he-IL/settings.json b/src/renderer/src/i18n/he-IL/settings.json index 111d1c59c..3ef4d334e 100644 --- a/src/renderer/src/i18n/he-IL/settings.json +++ b/src/renderer/src/i18n/he-IL/settings.json @@ -486,6 +486,7 @@ "openaiResponsesNotice": "OpenAI משתמשת כברירת מחדל ב-Responses API. אם endpoint של צד שלישי תומך רק ב-Chat Completions, השתמש בספק OpenAI Completions.", "modifyBaseUrl": "ערוך", "baseUrlLockedHint": "הספק הזה נעול לכתובת הבסיס המומלצת כדי לצמצם שגיאות תצורה.", + "refreshModelsWithMetadataHint": "רענון הספק הזה יסנכרן קודם את המטא־דאטה במעלה הזרם ולאחר מכן יבנה מחדש את רשימת המודלים המקומית.", "modelList": "רשימת מודלים", "enableModels": "הפעל מודלים", "disableAllModels": "השבת את כל המודלים", @@ -660,7 +661,13 @@ "backupSuccessTitle": "הגיבוי הושלם", "backupSuccessMessage": "הגיבוי נשמר ב-{time} ({size})", "importSuccessTitle": "הייבוא הושלם", - "importSuccessMessage": "יובאו בהצלחה {count} שיחות" + "importSuccessMessage": "יובאו בהצלחה {count} שיחות", + "refreshModelsSuccessTitle": "המודלים רועננו", + "refreshModelsSuccessDescription": "רשימת המודלים העדכנית סונכרנה עבור הספק הזה.", + "refreshModelsSuccessDescriptionWithMetadata": "המטא־דאטה במעלה הזרם ורשימת המודלים העדכנית סונכרנו עבור הספק הזה.", + "refreshModelsFailedTitle": "הרענון נכשל", + "refreshModelsFailedDescription": "לא ניתן לרענן כעת את המודלים של הספק הזה. נסה שוב מאוחר יותר.", + "refreshModelsFailedDescriptionWithMetadata": "לא ניתן לסנכרן כעת את המטא־דאטה במעלה הזרם ואת המודלים של הספק הזה. נסה שוב מאוחר יותר." 
}, "modelscope": { "mcpSync": { diff --git a/src/renderer/src/i18n/ja-JP/settings.json b/src/renderer/src/i18n/ja-JP/settings.json index 01ff2eae0..eccfce993 100644 --- a/src/renderer/src/i18n/ja-JP/settings.json +++ b/src/renderer/src/i18n/ja-JP/settings.json @@ -486,6 +486,7 @@ "openaiResponsesNotice": "OpenAI 公式サービスはデフォルトで Responses API を使用します。サードパーティのエンドポイントが Chat Completions のみをサポートしている場合は、OpenAI Completions プロバイダーを使用してください。", "modifyBaseUrl": "変更", "baseUrlLockedHint": "このプロバイダーは設定ミスを減らすため、推奨されるベースURLに固定されています。", + "refreshModelsWithMetadataHint": "このプロバイダーを更新すると、まず上流メタデータを同期し、その後ローカルのモデル一覧を再構築します。", "modelList": "モデル一覧", "enableModels": "モデルを有効にする", "disableAllModels": "すべてのモデルを無効にする", @@ -660,7 +661,13 @@ "backupSuccessTitle": "バックアップが完了しました", "backupSuccessMessage": "バックアップを保存しました(時刻:{time}、サイズ:{size})", "importSuccessTitle": "インポートが完了しました", - "importSuccessMessage": "{count} 件の会話を正常にインポートしました" + "importSuccessMessage": "{count} 件の会話を正常にインポートしました", + "refreshModelsSuccessTitle": "モデルを更新しました", + "refreshModelsSuccessDescription": "このプロバイダーの最新のモデル一覧を同期しました。", + "refreshModelsSuccessDescriptionWithMetadata": "このプロバイダーの上流メタデータと最新のモデル一覧を同期しました。", + "refreshModelsFailedTitle": "更新に失敗しました", + "refreshModelsFailedDescription": "現在、このプロバイダーのモデル一覧を更新できません。後でもう一度お試しください。", + "refreshModelsFailedDescriptionWithMetadata": "現在、このプロバイダーの上流メタデータとモデルを同期できません。後でもう一度お試しください。" }, "apiKeyLabel": "API Key", "apiUrlLabel": "API URL", diff --git a/src/renderer/src/i18n/ko-KR/settings.json b/src/renderer/src/i18n/ko-KR/settings.json index 81d19ef06..fa5457606 100644 --- a/src/renderer/src/i18n/ko-KR/settings.json +++ b/src/renderer/src/i18n/ko-KR/settings.json @@ -622,6 +622,7 @@ "openaiResponsesNotice": "OpenAI 공식 서비스는 기본적으로 Responses API를 사용합니다. 
서드파티 엔드포인트가 Chat Completions만 지원하는 경우 OpenAI Completions 공급자를 사용하세요.", "modifyBaseUrl": "수정", "baseUrlLockedHint": "이 제공자는 잘못된 설정을 줄이기 위해 권장 기본 URL로 고정되어 있습니다.", + "refreshModelsWithMetadataHint": "이 공급자를 새로 고치면 먼저 상위 메타데이터를 동기화한 뒤 로컬 모델 목록을 다시 구성합니다.", "azureApiVersion": "API 버전", "safety": { "title": "보안 설정", @@ -660,7 +661,13 @@ "backupSuccessTitle": "백업이 완료되었습니다", "backupSuccessMessage": "백업이 저장되었습니다 (시간: {time}, 크기: {size})", "importSuccessTitle": "가져오기가 완료되었습니다", - "importSuccessMessage": "{count}개의 대화를 성공적으로 가져왔습니다" + "importSuccessMessage": "{count}개의 대화를 성공적으로 가져왔습니다", + "refreshModelsSuccessTitle": "모델을 새로 고쳤습니다", + "refreshModelsSuccessDescription": "이 공급자의 최신 모델 목록이 동기화되었습니다.", + "refreshModelsSuccessDescriptionWithMetadata": "이 공급자의 상위 메타데이터와 최신 모델 목록이 동기화되었습니다.", + "refreshModelsFailedTitle": "새로 고침에 실패했습니다", + "refreshModelsFailedDescription": "지금은 이 공급자의 모델 목록을 새로 고칠 수 없습니다. 나중에 다시 시도하세요.", + "refreshModelsFailedDescriptionWithMetadata": "지금은 이 공급자의 상위 메타데이터와 모델을 동기화할 수 없습니다. 나중에 다시 시도하세요." }, "apiKeyLabel": "API Key", "apiUrlLabel": "API URL", diff --git a/src/renderer/src/i18n/pt-BR/settings.json b/src/renderer/src/i18n/pt-BR/settings.json index 9478b6a9c..080d25a35 100644 --- a/src/renderer/src/i18n/pt-BR/settings.json +++ b/src/renderer/src/i18n/pt-BR/settings.json @@ -486,6 +486,7 @@ "openaiResponsesNotice": "A OpenAI usa a API Responses por padrão. 
Se um endpoint de terceiros oferecer suporte apenas a Chat Completions, use o provedor OpenAI Completions.", "modifyBaseUrl": "Modificar", "baseUrlLockedHint": "Este provedor está fixado na URL base recomendada para reduzir erros de configuração.", + "refreshModelsWithMetadataHint": "Atualizar este provedor sincroniza primeiro os metadados upstream e depois reconstrói a lista local de modelos.", "modelList": "Lista de Modelos", "enableModels": "Habilitar Modelos", "disableAllModels": "Desabilitar Todos os Modelos", @@ -660,7 +661,13 @@ "backupSuccessTitle": "Backup concluído", "backupSuccessMessage": "Backup salvo em {time} ({size})", "importSuccessTitle": "Importação concluída", - "importSuccessMessage": "{count} conversas importadas com sucesso" + "importSuccessMessage": "{count} conversas importadas com sucesso", + "refreshModelsSuccessTitle": "Modelos atualizados", + "refreshModelsSuccessDescription": "A lista mais recente de modelos foi sincronizada para este provedor.", + "refreshModelsSuccessDescriptionWithMetadata": "Os metadados upstream e a lista mais recente de modelos foram sincronizados para este provedor.", + "refreshModelsFailedTitle": "Falha na atualização", + "refreshModelsFailedDescription": "Não foi possível atualizar os modelos deste provedor agora. Tente novamente mais tarde.", + "refreshModelsFailedDescriptionWithMetadata": "Não foi possível sincronizar os metadados upstream e os modelos deste provedor agora. Tente novamente mais tarde." }, "modelscope": { "mcpSync": { diff --git a/src/renderer/src/i18n/ru-RU/settings.json b/src/renderer/src/i18n/ru-RU/settings.json index 00fffd7ec..f913f56f0 100644 --- a/src/renderer/src/i18n/ru-RU/settings.json +++ b/src/renderer/src/i18n/ru-RU/settings.json @@ -622,6 +622,7 @@ "openaiResponsesNotice": "OpenAI по умолчанию использует Responses API. 
Если сторонний endpoint поддерживает только Chat Completions, используйте провайдера OpenAI Completions.", "modifyBaseUrl": "Изменить", "baseUrlLockedHint": "Этот провайдер закреплён за рекомендуемым базовым URL, чтобы снизить риск ошибочной настройки.", + "refreshModelsWithMetadataHint": "Обновление этого провайдера сначала синхронизирует метаданные из upstream, а затем пересоберёт локальный список моделей.", "azureApiVersion": "Версия API", "safety": { "title": "Настройки безопасности", @@ -660,7 +661,13 @@ "backupSuccessTitle": "Резервное копирование завершено", "backupSuccessMessage": "Резервная копия сохранена (время: {time}, размер: {size})", "importSuccessTitle": "Импорт завершён", - "importSuccessMessage": "Успешно импортировано {count} бесед" + "importSuccessMessage": "Успешно импортировано {count} бесед", + "refreshModelsSuccessTitle": "Модели обновлены", + "refreshModelsSuccessDescription": "Для этого провайдера синхронизирован актуальный список моделей.", + "refreshModelsSuccessDescriptionWithMetadata": "Для этого провайдера синхронизированы upstream-метаданные и актуальный список моделей.", + "refreshModelsFailedTitle": "Не удалось обновить", + "refreshModelsFailedDescription": "Сейчас не удалось обновить модели этого провайдера. Попробуйте позже.", + "refreshModelsFailedDescriptionWithMetadata": "Сейчас не удалось синхронизировать upstream-метаданные и модели этого провайдера. Попробуйте позже." 
}, "apiKeyLabel": "API ключ", "apiUrlLabel": "API URL", diff --git a/src/renderer/src/i18n/zh-CN/settings.json b/src/renderer/src/i18n/zh-CN/settings.json index 577d45cb3..4e154161e 100644 --- a/src/renderer/src/i18n/zh-CN/settings.json +++ b/src/renderer/src/i18n/zh-CN/settings.json @@ -549,6 +549,7 @@ "openaiResponsesNotice": "OpenAI 官方服务默认使用 Responses API;若第三方仅支持 Chat Completions,请使用 OpenAI Completions 服务商。", "modifyBaseUrl": "修改", "baseUrlLockedHint": "此类 provider 默认固定为推荐的 Base URL,避免误修改导致请求异常。", + "refreshModelsWithMetadataHint": "刷新该服务商时会先同步上游元数据,再重建本地模型列表。", "modelList": "模型列表", "enableModels": "启用模型", "disableAllModels": "禁用全部", @@ -576,7 +577,13 @@ "backupSuccessTitle": "备份完成", "backupSuccessMessage": "备份已保存(时间:{time},大小:{size})", "importSuccessTitle": "导入完成", - "importSuccessMessage": "成功导入 {count} 条会话" + "importSuccessMessage": "成功导入 {count} 条会话", + "refreshModelsSuccessTitle": "模型已刷新", + "refreshModelsSuccessDescription": "该服务商的最新模型列表已同步。", + "refreshModelsSuccessDescriptionWithMetadata": "该服务商的上游元数据和最新模型列表已同步。", + "refreshModelsFailedTitle": "刷新失败", + "refreshModelsFailedDescription": "暂时无法刷新该服务商的模型列表,请稍后重试。", + "refreshModelsFailedDescriptionWithMetadata": "暂时无法刷新该服务商的上游元数据和模型列表,请稍后重试。" }, "modelscope": { "name": "ModelScope", diff --git a/src/renderer/src/i18n/zh-HK/settings.json b/src/renderer/src/i18n/zh-HK/settings.json index 009313983..01d19aa77 100644 --- a/src/renderer/src/i18n/zh-HK/settings.json +++ b/src/renderer/src/i18n/zh-HK/settings.json @@ -486,6 +486,7 @@ "openaiResponsesNotice": "OpenAI 官方服務預設使用 Responses API;若第三方僅支援 Chat Completions,請使用 OpenAI Completions 服務商。", "modifyBaseUrl": "修改", "baseUrlLockedHint": "此類 provider 預設固定為建議的 Base URL,避免誤修改導致請求異常。", + "refreshModelsWithMetadataHint": "刷新該服務商時會先同步上游元資料,再重建本地模型列表。", "modelList": "模型列表", "enableModels": "模型已經啟用", "verifyLink": "驗證鏈接", @@ -660,7 +661,13 @@ "backupSuccessTitle": "備份完成", "backupSuccessMessage": "備份已儲存(時間:{time},大小:{size})", "importSuccessTitle": "匯入完成", - 
"importSuccessMessage": "成功匯入 {count} 個對話" + "importSuccessMessage": "成功匯入 {count} 個對話", + "refreshModelsSuccessTitle": "模型已刷新", + "refreshModelsSuccessDescription": "該服務商的最新模型列表已同步。", + "refreshModelsSuccessDescriptionWithMetadata": "該服務商的上游元資料和最新模型列表已同步。", + "refreshModelsFailedTitle": "刷新失敗", + "refreshModelsFailedDescription": "暫時無法刷新該服務商的模型列表,請稍後重試。", + "refreshModelsFailedDescriptionWithMetadata": "暫時無法刷新該服務商的上游元資料和模型列表,請稍後重試。" }, "apiKeyLabel": "API Key", "apiUrlLabel": "API URL", diff --git a/src/renderer/src/i18n/zh-TW/settings.json b/src/renderer/src/i18n/zh-TW/settings.json index 62fb9d8a7..16f3e5e05 100644 --- a/src/renderer/src/i18n/zh-TW/settings.json +++ b/src/renderer/src/i18n/zh-TW/settings.json @@ -486,6 +486,7 @@ "openaiResponsesNotice": "OpenAI 官方服務預設使用 Responses API;若第三方僅支援 Chat Completions,請使用 OpenAI Completions 服務商。", "modifyBaseUrl": "修改", "baseUrlLockedHint": "此類 provider 預設固定為建議的 Base URL,避免誤修改導致請求異常。", + "refreshModelsWithMetadataHint": "重新整理該服務商時會先同步上游中繼資料,再重建本機模型清單。", "modelList": "模型清單", "enableModels": "啟用模型", "disableAllModels": "全部停用", @@ -660,7 +661,13 @@ "backupSuccessTitle": "備份完成", "backupSuccessMessage": "備份已儲存(時間:{time},大小:{size})", "importSuccessTitle": "匯入完成", - "importSuccessMessage": "成功匯入 {count} 個對話" + "importSuccessMessage": "成功匯入 {count} 個對話", + "refreshModelsSuccessTitle": "模型已重新整理", + "refreshModelsSuccessDescription": "該服務商的最新模型清單已同步。", + "refreshModelsSuccessDescriptionWithMetadata": "該服務商的上游中繼資料和最新模型清單已同步。", + "refreshModelsFailedTitle": "重新整理失敗", + "refreshModelsFailedDescription": "目前無法重新整理該服務商的模型清單,請稍後再試。", + "refreshModelsFailedDescriptionWithMetadata": "目前無法同步該服務商的上游中繼資料和模型,請稍後再試。" }, "apiKeyLabel": "API Key", "apiUrlLabel": "API URL", diff --git a/src/shared/providerDbCatalog.ts b/src/shared/providerDbCatalog.ts new file mode 100644 index 000000000..b6d3eab91 --- /dev/null +++ b/src/shared/providerDbCatalog.ts @@ -0,0 +1,9 @@ +const PROVIDER_DB_BACKED_PROVIDER_IDS = new Set(['doubao', 'zhipu', 'minimax', 
'o3fan']) + +export const isProviderDbBackedProvider = (providerId: string | undefined | null): boolean => { + if (!providerId) { + return false + } + + return PROVIDER_DB_BACKED_PROVIDER_IDS.has(providerId.trim().toLowerCase()) +} diff --git a/test/main/presenter/configPresenter/providerDbLoader.test.ts b/test/main/presenter/configPresenter/providerDbLoader.test.ts index cc9eb48b0..d8c3fb208 100644 --- a/test/main/presenter/configPresenter/providerDbLoader.test.ts +++ b/test/main/presenter/configPresenter/providerDbLoader.test.ts @@ -140,9 +140,10 @@ describe('ProviderDbLoader', () => { await loader.initialize() expect(loader.getDb()?.providers).toHaveProperty('openai') + expect(loader.getDb()?.providers).toHaveProperty('doubao') expect(fetchMock).toHaveBeenCalledTimes(1) expect(state.send).toHaveBeenCalledWith('provider-db:loaded', 'ALL_WINDOWS', { - providersCount: 1 + providersCount: 2 }) }) @@ -226,14 +227,80 @@ describe('ProviderDbLoader', () => { const result = await loader.refreshIfNeeded(true) expect(result.status).toBe('updated') - expect(result.providersCount).toBe(2) + expect(result.providersCount).toBe(3) expect(loader.getDb()?.providers).toHaveProperty('anthropic') + expect(loader.getDb()?.providers).toHaveProperty('doubao') expect(state.send).toHaveBeenCalledWith('provider-db:updated', 'ALL_WINDOWS', { - providersCount: 2, + providersCount: 3, lastUpdated: expect.any(Number) }) }) + it('appends missing doubao supplement models without duplicating upstream models', async () => { + writeCachedDb({ + providers: { + doubao: { + id: 'doubao', + name: 'Doubao', + models: [ + { + id: 'doubao-seed-1-6-250615', + name: 'Doubao Seed 1.6' + } + ] + } + } + }) + + const ProviderDbLoader = await importLoader() + const loader = new ProviderDbLoader() + + const doubaoModels = loader.getProvider('doubao')?.models ?? 
[] + const modelIds = doubaoModels.map((model) => model.id) + + expect(modelIds).toContain('doubao-seed-1-6-250615') + expect(modelIds).toContain('doubao-seed-1.8') + expect(modelIds).toContain('doubao-seed-2.0-pro') + expect(modelIds.filter((id) => id === 'doubao-seed-1-6-250615')).toHaveLength(1) + }) + + it('uses upstream provider data as the merge base for overlapping doubao models', async () => { + writeCachedDb({ + providers: { + doubao: { + id: 'doubao', + name: 'Doubao', + models: [ + { + id: 'doubao-seed-2.0-pro', + name: 'Upstream Doubao Seed 2.0 Pro', + tool_call: false, + modalities: { + input: ['text'], + output: ['text'] + } + } + ] + } + } + }) + + const ProviderDbLoader = await importLoader() + const loader = new ProviderDbLoader() + const model = loader.getModel('doubao', 'doubao-seed-2.0-pro') + + expect(model).toMatchObject({ + id: 'doubao-seed-2.0-pro', + name: 'Upstream Doubao Seed 2.0 Pro', + tool_call: true, + modalities: { + input: ['text', 'image', 'video'], + output: ['text'] + } + }) + expect(model?.extra_capabilities?.reasoning?.notes).toContain('doubao-thinking-parameter') + }) + it('returns an error result and preserves the existing cache when refresh fails', async () => { const cachedAggregate = createAggregate(['openai']) writeCachedDb(cachedAggregate) diff --git a/test/main/presenter/llmProviderPresenter/backgroundModelSync.test.ts b/test/main/presenter/llmProviderPresenter/backgroundModelSync.test.ts new file mode 100644 index 000000000..334074000 --- /dev/null +++ b/test/main/presenter/llmProviderPresenter/backgroundModelSync.test.ts @@ -0,0 +1,393 @@ +import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest' +import type { + IConfigPresenter, + ISQLitePresenter, + LLM_PROVIDER +} from '../../../../src/shared/presenter' +import { LLMProviderPresenter } from '../../../../src/main/presenter/llmProviderPresenter' +import { OpenAICompatibleProvider } from 
'../../../../src/main/presenter/llmProviderPresenter/providers/openAICompatibleProvider' + +const eventState = vi.hoisted(() => ({ + handlers: new Map<string, Array<(...args: unknown[]) => void>>() +})) + +const { mockModelsList, mockGetProxyUrl } = vi.hoisted(() => ({ + mockModelsList: vi.fn().mockResolvedValue({ data: [] }), + mockGetProxyUrl: vi.fn().mockReturnValue(null) +})) + +vi.mock('electron', () => ({ + app: { + getName: vi.fn(() => 'DeepChat'), + getVersion: vi.fn(() => '0.0.0-test'), + getPath: vi.fn(() => '/mock/path'), + isReady: vi.fn(() => true), + on: vi.fn() + }, + session: {}, + ipcMain: { + on: vi.fn(), + handle: vi.fn(), + removeHandler: vi.fn() + }, + BrowserWindow: vi.fn(() => ({ + loadURL: vi.fn(), + loadFile: vi.fn(), + on: vi.fn(), + webContents: { send: vi.fn(), on: vi.fn(), isDestroyed: vi.fn(() => false) }, + isDestroyed: vi.fn(() => false), + close: vi.fn(), + show: vi.fn(), + hide: vi.fn() + })), + dialog: { + showOpenDialog: vi.fn() + }, + shell: { + openExternal: vi.fn() + } +})) + +vi.mock('openai', () => { + class MockOpenAI { + chat = { + completions: { + create: vi.fn() + } + } + models = { + list: mockModelsList + } + } + + return { + default: MockOpenAI, + AzureOpenAI: MockOpenAI + } +}) + +vi.mock('@/presenter', () => ({ + presenter: { + devicePresenter: { + cacheImage: vi.fn() + } + } +})) + +vi.mock('@/eventbus', () => ({ + eventBus: { + on: vi.fn((eventName: string, handler: (...args: unknown[]) => void) => { + const handlers = eventState.handlers.get(eventName) ?? 
[] + handlers.push(handler) + eventState.handlers.set(eventName, handlers) + }), + sendToRenderer: vi.fn(), + sendToMain: vi.fn(), + emit: vi.fn(), + send: vi.fn() + }, + SendTarget: { + ALL_WINDOWS: 'ALL_WINDOWS' + } +})) + +vi.mock('@/events', () => ({ + CONFIG_EVENTS: { + PROXY_RESOLVED: 'config:proxy-resolved', + PROVIDER_ATOMIC_UPDATE: 'config:provider-atomic-update', + PROVIDER_BATCH_UPDATE: 'config:provider-batch-update', + MODEL_LIST_CHANGED: 'config:model-list-changed' + }, + PROVIDER_DB_EVENTS: { + UPDATED: 'provider-db:updated' + }, + NOTIFICATION_EVENTS: { + SHOW_ERROR: 'notification:show-error' + } +})) + +vi.mock('../../../../src/main/presenter/proxyConfig', () => ({ + proxyConfig: { + getProxyUrl: mockGetProxyUrl + } +})) + +vi.mock('../../../../src/main/presenter/configPresenter/modelCapabilities', () => ({ + modelCapabilities: { + supportsReasoningEffort: vi.fn().mockReturnValue(false), + supportsVerbosity: vi.fn().mockReturnValue(false), + supportsReasoning: vi.fn().mockReturnValue(false), + resolveProviderId: vi.fn((providerId: string) => providerId) + } +})) + +const createProvider = (overrides?: Partial<LLM_PROVIDER>): LLM_PROVIDER => ({ + id: 'novita', + name: 'Novita', + apiType: 'openai-completions', + apiKey: 'test-key', + baseUrl: 'https://api.novita.ai/openai', + enable: true, + ...overrides +}) + +const createConfigPresenter = (provider = createProvider()) => + ({ + getProviders: vi.fn().mockReturnValue([provider]), + getProviderById: vi.fn().mockReturnValue(provider), + getProviderModels: vi.fn().mockReturnValue([]), + getCustomModels: vi.fn().mockReturnValue([]), + getModelConfig: vi.fn().mockReturnValue({ + maxTokens: 4096, + contextLength: 8192, + temperature: 0.7, + vision: false, + functionCall: false, + reasoning: false, + type: 'chat' + }), + getSetting: vi.fn().mockReturnValue(undefined), + refreshProviderDb: vi.fn().mockResolvedValue({ + status: 'updated', + lastUpdated: Date.now(), + providersCount: 1 + }), + setProviderModels: vi.fn(), + 
getModelStatus: vi.fn().mockReturnValue(true), + updateCustomModel: vi.fn(), + addCustomModel: vi.fn(), + removeCustomModel: vi.fn() + }) as unknown as IConfigPresenter + +const mockSqlitePresenter = { + getAcpSession: vi.fn().mockResolvedValue(null), + upsertAcpSession: vi.fn().mockResolvedValue(undefined), + updateAcpSessionId: vi.fn().mockResolvedValue(undefined), + updateAcpWorkdir: vi.fn().mockResolvedValue(undefined), + updateAcpSessionStatus: vi.fn().mockResolvedValue(undefined), + deleteAcpSession: vi.fn().mockResolvedValue(undefined), + deleteAcpSessions: vi.fn().mockResolvedValue(undefined) +} as unknown as ISQLitePresenter + +const emitMainEvent = async (eventName: string, ...args: unknown[]) => { + const handlers = eventState.handlers.get(eventName) ?? [] + handlers.forEach((handler) => handler(...args)) + await Promise.resolve() + await Promise.resolve() +} + +describe('LLMProviderPresenter background model sync', () => { + beforeEach(() => { + vi.clearAllMocks() + eventState.handlers.clear() + mockModelsList.mockResolvedValue({ data: [] }) + mockGetProxyUrl.mockReturnValue(null) + }) + + afterEach(() => { + vi.restoreAllMocks() + }) + + it('does not trigger an extra startup refresh for non DB-backed providers', async () => { + const refreshSpy = vi + .spyOn(OpenAICompatibleProvider.prototype, 'refreshModels') + .mockResolvedValue(undefined) + + const presenter = new LLMProviderPresenter(createConfigPresenter(), mockSqlitePresenter) + await Promise.resolve() + await Promise.resolve() + + expect(presenter.getProviders()).toHaveLength(1) + expect(refreshSpy).not.toHaveBeenCalled() + }) + + it('re-syncs enabled DB-backed provider models when provider-db updates', async () => { + const refreshSpy = vi + .spyOn(OpenAICompatibleProvider.prototype, 'refreshModels') + .mockResolvedValue(undefined) + + new LLMProviderPresenter( + createConfigPresenter( + createProvider({ + id: 'doubao', + name: 'Doubao', + apiType: 'doubao', + baseUrl: 
'https://ark.cn-beijing.volces.com/api/v3' + }) + ), + mockSqlitePresenter + ) + await Promise.resolve() + await Promise.resolve() + refreshSpy.mockClear() + + await emitMainEvent('provider-db:updated', { + providersCount: 1, + lastUpdated: Date.now() + }) + + expect(refreshSpy).toHaveBeenCalledTimes(1) + }) + + it('ignores provider-db updates for providers that do not use the provider DB catalog', async () => { + const refreshSpy = vi + .spyOn(OpenAICompatibleProvider.prototype, 'refreshModels') + .mockResolvedValue(undefined) + + new LLMProviderPresenter(createConfigPresenter(), mockSqlitePresenter) + await Promise.resolve() + await Promise.resolve() + + await emitMainEvent('provider-db:updated', { + providersCount: 1, + lastUpdated: Date.now() + }) + + expect(refreshSpy).not.toHaveBeenCalled() + }) + + it('coalesces duplicate background refreshes for the same provider', async () => { + let resolveRefresh: (() => void) | null = null + const refreshSpy = vi + .spyOn(OpenAICompatibleProvider.prototype, 'refreshModels') + .mockReturnValue( + new Promise((resolve) => { + resolveRefresh = resolve + }) + ) + + new LLMProviderPresenter( + createConfigPresenter( + createProvider({ + id: 'doubao', + name: 'Doubao', + apiType: 'doubao', + baseUrl: 'https://ark.cn-beijing.volces.com/api/v3' + }) + ), + mockSqlitePresenter + ) + await Promise.resolve() + await Promise.resolve() + + expect(refreshSpy).not.toHaveBeenCalled() + + await emitMainEvent('provider-db:updated', { + providersCount: 1, + lastUpdated: Date.now() + }) + await emitMainEvent('provider-db:updated', { + providersCount: 1, + lastUpdated: Date.now() + }) + + expect(refreshSpy).toHaveBeenCalledTimes(1) + + resolveRefresh?.() + await Promise.resolve() + await Promise.resolve() + + await emitMainEvent('provider-db:updated', { + providersCount: 1, + lastUpdated: Date.now() + }) + + expect(refreshSpy).toHaveBeenCalledTimes(2) + }) + + it('refreshes provider DB before rebuilding DB-backed provider models', async () 
=> { + const provider = createProvider({ + id: 'doubao', + name: 'Doubao', + apiType: 'doubao', + baseUrl: 'https://ark.cn-beijing.volces.com/api/v3' + }) + const configPresenter = createConfigPresenter(provider) + const refreshSpy = vi + .spyOn(OpenAICompatibleProvider.prototype, 'refreshModels') + .mockResolvedValue(undefined) + + const presenter = new LLMProviderPresenter(configPresenter, mockSqlitePresenter) + await presenter.refreshModels('doubao') + + expect(configPresenter.refreshProviderDb).toHaveBeenCalledWith(true) + expect(refreshSpy).toHaveBeenCalledTimes(1) + expect(configPresenter.refreshProviderDb.mock.invocationCallOrder[0]).toBeLessThan( + refreshSpy.mock.invocationCallOrder[0] + ) + }) + + it('surfaces provider DB refresh failures without rebuilding DB-backed provider models', async () => { + const provider = createProvider({ + id: 'doubao', + name: 'Doubao', + apiType: 'doubao', + baseUrl: 'https://ark.cn-beijing.volces.com/api/v3' + }) + const configPresenter = createConfigPresenter(provider) + configPresenter.refreshProviderDb.mockResolvedValueOnce({ + status: 'error', + lastUpdated: null, + providersCount: 1, + message: 'network down' + }) + const refreshSpy = vi + .spyOn(OpenAICompatibleProvider.prototype, 'refreshModels') + .mockResolvedValue(undefined) + + const presenter = new LLMProviderPresenter(configPresenter, mockSqlitePresenter) + + await expect(presenter.refreshModels('doubao')).rejects.toThrow( + 'Model refresh failed: network down' + ) + expect(refreshSpy).not.toHaveBeenCalled() + }) + + it('does not refresh provider DB for providers that manage models themselves', async () => { + const configPresenter = createConfigPresenter() + const refreshSpy = vi + .spyOn(OpenAICompatibleProvider.prototype, 'refreshModels') + .mockResolvedValue(undefined) + + const presenter = new LLMProviderPresenter(configPresenter, mockSqlitePresenter) + await presenter.refreshModels('novita') + + 
expect(configPresenter.refreshProviderDb).not.toHaveBeenCalled() + expect(refreshSpy).toHaveBeenCalledTimes(1) + }) + + it('logs provider-db refresh failures without blocking presenter initialization', async () => { + const refreshSpy = vi + .spyOn(OpenAICompatibleProvider.prototype, 'refreshModels') + .mockRejectedValue(new Error('refresh failed')) + const warnSpy = vi.spyOn(console, 'warn').mockImplementation(() => {}) + + const presenter = new LLMProviderPresenter( + createConfigPresenter( + createProvider({ + id: 'doubao', + name: 'Doubao', + apiType: 'doubao', + baseUrl: 'https://ark.cn-beijing.volces.com/api/v3' + }) + ), + mockSqlitePresenter + ) + await Promise.resolve() + await Promise.resolve() + + await emitMainEvent('provider-db:updated', { + providersCount: 1, + lastUpdated: Date.now() + }) + await Promise.resolve() + await Promise.resolve() + + expect(presenter.getProviders()).toHaveLength(1) + expect(refreshSpy).toHaveBeenCalledTimes(1) + expect(warnSpy).toHaveBeenCalledWith( + '[LLMProviderPresenter] Failed to refresh models for provider doubao during provider-db-updated:', + expect.any(Error) + ) + }) +}) diff --git a/test/main/presenter/llmProviderPresenter/doubaoProvider.test.ts b/test/main/presenter/llmProviderPresenter/doubaoProvider.test.ts new file mode 100644 index 000000000..50c81d799 --- /dev/null +++ b/test/main/presenter/llmProviderPresenter/doubaoProvider.test.ts @@ -0,0 +1,226 @@ +import { beforeEach, describe, expect, it, vi } from 'vitest' +import type { IConfigPresenter, LLM_PROVIDER, ModelConfig } from '../../../../src/shared/presenter' +import { DoubaoProvider } from '../../../../src/main/presenter/llmProviderPresenter/providers/doubaoProvider' + +const { mockChatCompletionsCreate, mockGetProvider, mockGetModel, mockGetProxyUrl } = vi.hoisted( + () => ({ + mockChatCompletionsCreate: vi.fn(), + mockGetProvider: vi.fn(), + mockGetModel: vi.fn(), + mockGetProxyUrl: vi.fn().mockReturnValue(null) + }) +) + +vi.mock('openai', () => { + 
class MockOpenAI { + chat = { + completions: { + create: mockChatCompletionsCreate + } + } + models = { + list: vi.fn().mockResolvedValue({ data: [] }) + } + } + + return { + default: MockOpenAI, + AzureOpenAI: MockOpenAI + } +}) + +vi.mock('@/presenter', () => ({ + presenter: { + devicePresenter: { + cacheImage: vi.fn() + } + } +})) + +vi.mock('@/eventbus', () => ({ + eventBus: { + on: vi.fn(), + sendToRenderer: vi.fn(), + sendToMain: vi.fn(), + emit: vi.fn(), + send: vi.fn() + }, + SendTarget: { + ALL_WINDOWS: 'ALL_WINDOWS' + } +})) + +vi.mock('@/events', () => ({ + CONFIG_EVENTS: { + MODEL_LIST_CHANGED: 'MODEL_LIST_CHANGED' + }, + NOTIFICATION_EVENTS: { + SHOW_ERROR: 'SHOW_ERROR' + } +})) + +vi.mock('../../../../src/main/presenter/proxyConfig', () => ({ + proxyConfig: { + getProxyUrl: mockGetProxyUrl + } +})) + +vi.mock('../../../../src/main/presenter/configPresenter/providerDbLoader', () => ({ + providerDbLoader: { + getProvider: mockGetProvider, + getModel: mockGetModel + } +})) + +vi.mock('../../../../src/main/presenter/configPresenter/providerDbSupplements', () => ({ + PROVIDER_DB_SUPPLEMENT_NOTES: { + doubaoThinking: 'doubao-thinking-parameter' + } +})) + +vi.mock('../../../../src/main/presenter/configPresenter/modelCapabilities', () => ({ + modelCapabilities: { + supportsReasoningEffort: vi.fn().mockReturnValue(false), + supportsVerbosity: vi.fn().mockReturnValue(false), + supportsReasoning: vi.fn().mockReturnValue(false) + } +})) + +const createAsyncStream = (chunks: Array<Record<string, unknown>>) => ({ + async *[Symbol.asyncIterator]() { + for (const chunk of chunks) { + yield chunk + } + } +}) + +const createProvider = (overrides?: Partial<LLM_PROVIDER>): LLM_PROVIDER => ({ + id: 'doubao', + name: 'Doubao', + apiType: 'doubao', + apiKey: 'test-key', + baseUrl: 'https://ark.cn-beijing.volces.com/api/v3', + enable: false, + ...overrides +}) + +const createConfigPresenter = () => + ({ + getProviderModels: vi.fn().mockReturnValue([]), + getCustomModels: vi.fn().mockReturnValue([]), + 
getModelConfig: vi.fn().mockReturnValue(undefined), + getSetting: vi.fn().mockReturnValue(undefined), + setProviderModels: vi.fn(), + getModelStatus: vi.fn().mockReturnValue(true) + }) as unknown as IConfigPresenter + +describe('DoubaoProvider', () => { + const modelConfig: ModelConfig = { + maxTokens: 1024, + contextLength: 8192, + vision: true, + functionCall: true, + reasoning: true, + type: 'chat' + } + + beforeEach(() => { + vi.clearAllMocks() + mockGetProxyUrl.mockReturnValue(null) + mockChatCompletionsCreate.mockResolvedValue( + createAsyncStream([ + { + choices: [ + { + delta: { + content: 'ok' + }, + finish_reason: 'stop' + } + ] + } + ]) + ) + }) + + it('maps supplemented doubao catalog entries into provider models', async () => { + mockGetProvider.mockReturnValue({ + id: 'doubao', + name: 'Doubao', + models: [ + { + id: 'doubao-seed-2.0-pro', + display_name: 'Doubao-Seed 2.0 Pro', + tool_call: true, + reasoning: { + supported: true + }, + modalities: { + input: ['text', 'image', 'video'], + output: ['text'] + }, + limit: { + context: 256000, + output: 64000 + } + } + ] + }) + + const provider = new DoubaoProvider(createProvider(), createConfigPresenter()) + const models = await (provider as any).fetchOpenAIModels() + + expect(models).toEqual([ + expect.objectContaining({ + id: 'doubao-seed-2.0-pro', + name: 'Doubao-Seed 2.0 Pro', + providerId: 'doubao', + vision: true, + functionCall: true, + reasoning: true + }) + ]) + }) + + it('adds Doubao thinking parameter for supplemented reasoning models based on metadata notes', async () => { + mockGetProvider.mockReturnValue({ + id: 'doubao', + name: 'Doubao', + models: [] + }) + mockGetModel.mockReturnValue({ + id: 'doubao-seed-2.0-pro', + extra_capabilities: { + reasoning: { + notes: ['doubao-thinking-parameter'] + } + } + }) + + const provider = new DoubaoProvider(createProvider(), createConfigPresenter()) + ;(provider as any).isInitialized = true + + const events = [] + for await (const event of 
provider.coreStream( + [{ role: 'user', content: 'hello' }], + 'doubao-seed-2.0-pro', + modelConfig, + 0.7, + 1024, + [] + )) { + events.push(event) + } + + expect(events.some((event) => event.type === 'text')).toBe(true) + expect(mockChatCompletionsCreate).toHaveBeenCalledWith( + expect.objectContaining({ + model: 'doubao-seed-2.0-pro', + thinking: { + type: 'enabled' + } + }), + undefined + ) + }) +}) diff --git a/test/renderer/components/ProviderApiConfig.test.ts b/test/renderer/components/ProviderApiConfig.test.ts index c943a5d09..c4bea6359 100644 --- a/test/renderer/components/ProviderApiConfig.test.ts +++ b/test/renderer/components/ProviderApiConfig.test.ts @@ -70,6 +70,7 @@ async function setup(options?: { }) { vi.resetModules() + const toast = vi.fn() const llmproviderPresenter = { getKeyStatus: vi.fn().mockResolvedValue(null), refreshModels: vi.fn().mockResolvedValue(undefined) @@ -106,6 +107,11 @@ async function setup(options?: { vi.doMock('@/stores/modelCheck', () => ({ useModelCheckStore: () => modelCheckStore })) + vi.doMock('@/components/use-toast', () => ({ + useToast: () => ({ + toast + }) + })) vi.doMock('@shadcn/components/ui/input', () => ({ Input: createInputStub() @@ -155,6 +161,7 @@ async function setup(options?: { return { wrapper, + toast, llmproviderPresenter, modelCheckStore } @@ -233,4 +240,75 @@ describe('ProviderApiConfig', () => { expect(wrapper.find('input#custom-demo-url').exists()).toBe(true) expect(findButtonByText(wrapper, 'Modify')).toBeUndefined() }) + + it('shows the metadata sync hint for DB-backed providers and delegates refresh to the presenter', async () => { + const { wrapper, toast, llmproviderPresenter } = await setup({ + provider: createProvider({ + id: 'doubao', + name: 'Doubao', + apiType: 'doubao', + baseUrl: 'https://ark.cn-beijing.volces.com/api/v3' + }) + }) + + expect(wrapper.text()).toContain('settings.provider.refreshModelsWithMetadataHint') + + const refreshButton = findButtonByText(wrapper, 
'settings.provider.refreshModels') + expect(refreshButton).toBeDefined() + + await refreshButton!.trigger('click') + await flushPromises() + + expect(llmproviderPresenter.refreshModels).toHaveBeenCalledWith('doubao') + expect(toast).toHaveBeenCalledWith({ + title: 'settings.provider.toast.refreshModelsSuccessTitle', + description: 'settings.provider.toast.refreshModelsSuccessDescriptionWithMetadata', + duration: 4000 + }) + }) + + it('refreshes only models for non DB-backed providers', async () => { + const { wrapper, toast, llmproviderPresenter } = await setup() + + expect(wrapper.text()).not.toContain('settings.provider.refreshModelsWithMetadataHint') + + const refreshButton = findButtonByText(wrapper, 'settings.provider.refreshModels') + expect(refreshButton).toBeDefined() + + await refreshButton!.trigger('click') + await flushPromises() + + expect(llmproviderPresenter.refreshModels).toHaveBeenCalledWith('deepseek') + expect(toast).toHaveBeenCalledWith({ + title: 'settings.provider.toast.refreshModelsSuccessTitle', + description: 'settings.provider.toast.refreshModelsSuccessDescription', + duration: 4000 + }) + }) + + it('shows a destructive toast when metadata-backed refresh fails', async () => { + const { wrapper, toast, llmproviderPresenter } = await setup({ + provider: createProvider({ + id: 'doubao', + name: 'Doubao', + apiType: 'doubao', + baseUrl: 'https://ark.cn-beijing.volces.com/api/v3' + }) + }) + llmproviderPresenter.refreshModels.mockRejectedValueOnce(new Error('network down')) + + const refreshButton = findButtonByText(wrapper, 'settings.provider.refreshModels') + expect(refreshButton).toBeDefined() + + await refreshButton!.trigger('click') + await flushPromises() + + expect(llmproviderPresenter.refreshModels).toHaveBeenCalledWith('doubao') + expect(toast).toHaveBeenCalledWith({ + title: 'settings.provider.toast.refreshModelsFailedTitle', + description: 'settings.provider.toast.refreshModelsFailedDescriptionWithMetadata', + variant: 'destructive', 
+ duration: 4000 + }) + }) }) From 500dea417fc2f8fc58475d107113826c11e4f0c9 Mon Sep 17 00:00:00 2001 From: zhangmo8 Date: Tue, 7 Apr 2026 09:55:02 +0800 Subject: [PATCH 2/2] refactor(provider): remove provider supplements and simplify model loading --- .../configPresenter/providerDbLoader.ts | 113 +------------ .../configPresenter/providerDbSupplements.ts | 159 ------------------ .../providers/doubaoProvider.ts | 5 +- .../configPresenter/providerDbLoader.test.ts | 73 +------- .../doubaoProvider.test.ts | 10 +- 5 files changed, 11 insertions(+), 349 deletions(-) delete mode 100644 src/main/presenter/configPresenter/providerDbSupplements.ts diff --git a/src/main/presenter/configPresenter/providerDbLoader.ts b/src/main/presenter/configPresenter/providerDbLoader.ts index 80fa80b8e..fd9c5906f 100644 --- a/src/main/presenter/configPresenter/providerDbLoader.ts +++ b/src/main/presenter/configPresenter/providerDbLoader.ts @@ -8,7 +8,6 @@ import { sanitizeAggregate } from '@shared/types/model-db' import { resolveProviderId } from './providerId' -import { PROVIDER_DB_SUPPLEMENTS } from './providerDbSupplements' import { eventBus, SendTarget } from '@/eventbus' import { PROVIDER_DB_EVENTS } from '@/events' @@ -30,108 +29,6 @@ export type ProviderDbRefreshResult = { message?: string } -const isPlainObject = (value: unknown): value is Record<string, unknown> => { - return !!value && typeof value === 'object' && !Array.isArray(value) -} - -const cloneValue = <T>(value: T): T => { - if (typeof structuredClone === 'function') { - return structuredClone(value) - } - - return JSON.parse(JSON.stringify(value)) as T -} - -const mergeDefinedValue = <T>(base: T, override: unknown): T => { - if (override === undefined) { - return cloneValue(base) - } - - if (!isPlainObject(override)) { - return cloneValue(override as T) - } - - const result: Record<string, unknown> = isPlainObject(base) ? 
cloneValue(base) : {} - - for (const [key, value] of Object.entries(override)) { - if (value === undefined) continue - - const current = result[key] - result[key] = - isPlainObject(current) && isPlainObject(value) - ? mergeDefinedValue(current, value) - : cloneValue(value) - } - - return result as T -} - -const mergeProviderModels = ( - baseModels: ProviderModel[], - supplementModels: ProviderModel[] | undefined -): ProviderModel[] => { - const mergedModels = baseModels.map((model) => cloneValue(model)) - - if (!supplementModels || supplementModels.length === 0) { - return mergedModels - } - - const modelIndexById = new Map(mergedModels.map((model, index) => [model.id, index])) - - for (const supplementModel of supplementModels) { - const existingIndex = modelIndexById.get(supplementModel.id) - - if (existingIndex === undefined) { - modelIndexById.set(supplementModel.id, mergedModels.length) - mergedModels.push(cloneValue(supplementModel)) - continue - } - - const existingModel = mergedModels[existingIndex] - const mergedModel = mergeDefinedValue(existingModel, supplementModel) - - if (existingModel.name) { - mergedModel.name = existingModel.name - } - - if (existingModel.display_name) { - mergedModel.display_name = existingModel.display_name - } - - mergedModels[existingIndex] = mergedModel - } - - return mergedModels -} - -const mergeProviderAggregate = ( - base: ProviderAggregate | null, - supplement: ProviderAggregate -): ProviderAggregate => { - const mergedProviders = base ? 
cloneValue(base.providers) : {} - - for (const [providerId, supplementProvider] of Object.entries(supplement.providers)) { - const existingProvider = mergedProviders[providerId] - - if (!existingProvider) { - mergedProviders[providerId] = cloneValue(supplementProvider) - continue - } - - const { models: _existingModels, ...existingWithoutModels } = existingProvider - const { models: supplementModels, ...supplementWithoutModels } = supplementProvider - - mergedProviders[providerId] = { - ...mergeDefinedValue(existingWithoutModels, supplementWithoutModels), - models: mergeProviderModels(existingProvider.models, supplementModels) - } - } - - return { - providers: mergedProviders - } -} - export class ProviderDbLoader { private cache: ProviderAggregate | null = null private userDataDir: string @@ -154,7 +51,7 @@ export class ProviderDbLoader { // Public: initialize on app start (non-blocking refresh) async initialize(): Promise<void> { // Load from cache or built-in - this.cache = this.applySupplements(this.loadFromCache() ?? this.loadFromBuiltIn()) + this.cache = this.loadFromCache() ?? this.loadFromBuiltIn() if (this.cache) { try { const providersCount = Object.keys(this.cache.providers || {}).length @@ -179,7 +76,7 @@ getDb(): ProviderAggregate | null { if (this.cache) return this.cache // Lazy try again if not initialized yet - this.cache = this.applySupplements(this.loadFromCache() ?? this.loadFromBuiltIn()) + this.cache = this.loadFromCache() ?? 
this.loadFromBuiltIn() return this.cache } @@ -226,10 +123,6 @@ export class ProviderDbLoader { } } - private applySupplements(db: ProviderAggregate | null): ProviderAggregate { - return mergeProviderAggregate(db, PROVIDER_DB_SUPPLEMENTS) - } - private readMeta(): MetaFile | null { try { if (!fs.existsSync(this.metaFilePath)) return null @@ -386,7 +279,7 @@ export class ProviderDbLoader { // Write cache atomically and update in-memory this.writeCacheAtomically(sanitized) this.writeMeta(meta) - this.cache = this.applySupplements(sanitized) + this.cache = sanitized try { const providersCount = Object.keys(this.cache.providers || {}).length eventBus.send(PROVIDER_DB_EVENTS.UPDATED, SendTarget.ALL_WINDOWS, { diff --git a/src/main/presenter/configPresenter/providerDbSupplements.ts b/src/main/presenter/configPresenter/providerDbSupplements.ts deleted file mode 100644 index f38a9cfbc..000000000 --- a/src/main/presenter/configPresenter/providerDbSupplements.ts +++ /dev/null @@ -1,159 +0,0 @@ -import type { ProviderAggregate } from '@shared/types/model-db' - -const DOUBAO_THINKING_NOTE = 'doubao-thinking-parameter' - -const createDoubaoThinkingOverride = (id: string) => ({ - id, - extra_capabilities: { - reasoning: { - notes: [DOUBAO_THINKING_NOTE] - } - } -}) - -// Source notes: -// - https://www.volcengine.com/docs/6492/1544808?lang=zh -// - https://developer.volcengine.com/articles/7622677873391829033 -export const PROVIDER_DB_SUPPLEMENTS: ProviderAggregate = { - providers: { - doubao: { - id: 'doubao', - models: [ - createDoubaoThinkingOverride('doubao-seed-1-6-vision-250815'), - createDoubaoThinkingOverride('doubao-seed-1-6-250615'), - createDoubaoThinkingOverride('doubao-seed-1-6-flash-250715'), - createDoubaoThinkingOverride('doubao-seed-1-6-flash-250615'), - createDoubaoThinkingOverride('doubao-seed-1-6-thinking-250715'), - createDoubaoThinkingOverride('doubao-seed-1-6-thinking-250615'), - { - id: 'doubao-seed-1.8', - name: 'Doubao-Seed 1.8', - display_name: 
'Doubao-Seed 1.8', - type: 'chat', - attachment: true, - reasoning: { - supported: true, - default: true - }, - tool_call: true, - temperature: true, - modalities: { - input: ['text', 'image', 'video'], - output: ['text'] - }, - limit: { - context: 256000, - output: 64000 - }, - extra_capabilities: { - reasoning: { - notes: [DOUBAO_THINKING_NOTE] - } - } - }, - { - id: 'doubao-seed-2.0-code', - name: 'Doubao-Seed 2.0 Code', - display_name: 'Doubao-Seed 2.0 Code', - type: 'chat', - attachment: true, - reasoning: { - supported: false - }, - tool_call: true, - temperature: true, - modalities: { - input: ['text', 'image'], - output: ['text'] - }, - limit: { - context: 256000, - output: 32000 - } - }, - { - id: 'doubao-seed-2.0-lite', - name: 'Doubao-Seed 2.0 Lite', - display_name: 'Doubao-Seed 2.0 Lite', - type: 'chat', - attachment: true, - reasoning: { - supported: true, - default: true - }, - tool_call: true, - temperature: true, - modalities: { - input: ['text', 'image', 'video'], - output: ['text'] - }, - limit: { - context: 256000, - output: 64000 - }, - extra_capabilities: { - reasoning: { - notes: [DOUBAO_THINKING_NOTE] - } - } - }, - { - id: 'doubao-seed-2.0-mini', - name: 'Doubao-Seed 2.0 Mini', - display_name: 'Doubao-Seed 2.0 Mini', - type: 'chat', - attachment: true, - reasoning: { - supported: true, - default: true - }, - tool_call: true, - temperature: true, - modalities: { - input: ['text', 'image', 'video'], - output: ['text'] - }, - limit: { - context: 256000, - output: 64000 - }, - extra_capabilities: { - reasoning: { - notes: [DOUBAO_THINKING_NOTE] - } - } - }, - { - id: 'doubao-seed-2.0-pro', - name: 'Doubao-Seed 2.0 Pro', - display_name: 'Doubao-Seed 2.0 Pro', - type: 'chat', - attachment: true, - reasoning: { - supported: true, - default: true - }, - tool_call: true, - temperature: true, - modalities: { - input: ['text', 'image', 'video'], - output: ['text'] - }, - limit: { - context: 256000, - output: 64000 - }, - extra_capabilities: { - 
reasoning: { - notes: [DOUBAO_THINKING_NOTE] - } - } - } - ] - } - } -} - -export const PROVIDER_DB_SUPPLEMENT_NOTES = { - doubaoThinking: DOUBAO_THINKING_NOTE -} as const diff --git a/src/main/presenter/llmProviderPresenter/providers/doubaoProvider.ts b/src/main/presenter/llmProviderPresenter/providers/doubaoProvider.ts index c41ae0541..f278913d8 100644 --- a/src/main/presenter/llmProviderPresenter/providers/doubaoProvider.ts +++ b/src/main/presenter/llmProviderPresenter/providers/doubaoProvider.ts @@ -16,9 +16,10 @@ import { } from '@shared/modelConfigDefaults' import { OpenAICompatibleProvider } from './openAICompatibleProvider' import { providerDbLoader } from '../../configPresenter/providerDbLoader' -import { PROVIDER_DB_SUPPLEMENT_NOTES } from '../../configPresenter/providerDbSupplements' import type { ProviderMcpRuntimePort } from '../runtimePorts' +const DOUBAO_THINKING_NOTE = 'doubao-thinking-parameter' + export class DoubaoProvider extends OpenAICompatibleProvider { constructor( provider: LLM_PROVIDER, @@ -32,7 +33,7 @@ export class DoubaoProvider extends OpenAICompatibleProvider { private supportsThinking(modelId: string): boolean { const model = providerDbLoader.getModel(this.provider.id, modelId) const notes = model?.extra_capabilities?.reasoning?.notes - return Array.isArray(notes) && notes.includes(PROVIDER_DB_SUPPLEMENT_NOTES.doubaoThinking) + return Array.isArray(notes) && notes.includes(DOUBAO_THINKING_NOTE) } /** diff --git a/test/main/presenter/configPresenter/providerDbLoader.test.ts b/test/main/presenter/configPresenter/providerDbLoader.test.ts index d8c3fb208..cc9eb48b0 100644 --- a/test/main/presenter/configPresenter/providerDbLoader.test.ts +++ b/test/main/presenter/configPresenter/providerDbLoader.test.ts @@ -140,10 +140,9 @@ describe('ProviderDbLoader', () => { await loader.initialize() expect(loader.getDb()?.providers).toHaveProperty('openai') - expect(loader.getDb()?.providers).toHaveProperty('doubao') 
expect(fetchMock).toHaveBeenCalledTimes(1) expect(state.send).toHaveBeenCalledWith('provider-db:loaded', 'ALL_WINDOWS', { - providersCount: 2 + providersCount: 1 }) }) @@ -227,80 +226,14 @@ describe('ProviderDbLoader', () => { const result = await loader.refreshIfNeeded(true) expect(result.status).toBe('updated') - expect(result.providersCount).toBe(3) + expect(result.providersCount).toBe(2) expect(loader.getDb()?.providers).toHaveProperty('anthropic') - expect(loader.getDb()?.providers).toHaveProperty('doubao') expect(state.send).toHaveBeenCalledWith('provider-db:updated', 'ALL_WINDOWS', { - providersCount: 3, + providersCount: 2, lastUpdated: expect.any(Number) }) }) - it('appends missing doubao supplement models without duplicating upstream models', async () => { - writeCachedDb({ - providers: { - doubao: { - id: 'doubao', - name: 'Doubao', - models: [ - { - id: 'doubao-seed-1-6-250615', - name: 'Doubao Seed 1.6' - } - ] - } - } - }) - - const ProviderDbLoader = await importLoader() - const loader = new ProviderDbLoader() - - const doubaoModels = loader.getProvider('doubao')?.models ?? 
[] - const modelIds = doubaoModels.map((model) => model.id) - - expect(modelIds).toContain('doubao-seed-1-6-250615') - expect(modelIds).toContain('doubao-seed-1.8') - expect(modelIds).toContain('doubao-seed-2.0-pro') - expect(modelIds.filter((id) => id === 'doubao-seed-1-6-250615')).toHaveLength(1) - }) - - it('uses upstream provider data as the merge base for overlapping doubao models', async () => { - writeCachedDb({ - providers: { - doubao: { - id: 'doubao', - name: 'Doubao', - models: [ - { - id: 'doubao-seed-2.0-pro', - name: 'Upstream Doubao Seed 2.0 Pro', - tool_call: false, - modalities: { - input: ['text'], - output: ['text'] - } - } - ] - } - } - }) - - const ProviderDbLoader = await importLoader() - const loader = new ProviderDbLoader() - const model = loader.getModel('doubao', 'doubao-seed-2.0-pro') - - expect(model).toMatchObject({ - id: 'doubao-seed-2.0-pro', - name: 'Upstream Doubao Seed 2.0 Pro', - tool_call: true, - modalities: { - input: ['text', 'image', 'video'], - output: ['text'] - } - }) - expect(model?.extra_capabilities?.reasoning?.notes).toContain('doubao-thinking-parameter') - }) - it('returns an error result and preserves the existing cache when refresh fails', async () => { const cachedAggregate = createAggregate(['openai']) writeCachedDb(cachedAggregate) diff --git a/test/main/presenter/llmProviderPresenter/doubaoProvider.test.ts b/test/main/presenter/llmProviderPresenter/doubaoProvider.test.ts index 50c81d799..2c8f63013 100644 --- a/test/main/presenter/llmProviderPresenter/doubaoProvider.test.ts +++ b/test/main/presenter/llmProviderPresenter/doubaoProvider.test.ts @@ -72,12 +72,6 @@ vi.mock('../../../../src/main/presenter/configPresenter/providerDbLoader', () => } })) -vi.mock('../../../../src/main/presenter/configPresenter/providerDbSupplements', () => ({ - PROVIDER_DB_SUPPLEMENT_NOTES: { - doubaoThinking: 'doubao-thinking-parameter' - } -})) - vi.mock('../../../../src/main/presenter/configPresenter/modelCapabilities', () => ({ 
modelCapabilities: { supportsReasoningEffort: vi.fn().mockReturnValue(false), @@ -143,7 +137,7 @@ describe('DoubaoProvider', () => { ) }) - it('maps supplemented doubao catalog entries into provider models', async () => { + it('maps doubao catalog entries into provider models', async () => { mockGetProvider.mockReturnValue({ id: 'doubao', name: 'Doubao', @@ -182,7 +176,7 @@ describe('DoubaoProvider', () => { ]) }) - it('adds Doubao thinking parameter for supplemented reasoning models based on metadata notes', async () => { + it('adds Doubao thinking parameter for reasoning models based on metadata notes', async () => { mockGetProvider.mockReturnValue({ id: 'doubao', name: 'Doubao',