Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion src/main/presenter/configPresenter/providerDbLoader.ts
Original file line number Diff line number Diff line change
Expand Up @@ -281,7 +281,7 @@ export class ProviderDbLoader {
this.writeMeta(meta)
this.cache = sanitized
try {
const providersCount = Object.keys(sanitized.providers || {}).length
const providersCount = Object.keys(this.cache.providers || {}).length
eventBus.send(PROVIDER_DB_EVENTS.UPDATED, SendTarget.ALL_WINDOWS, {
providersCount,
lastUpdated: meta.lastUpdated
Expand Down
62 changes: 59 additions & 3 deletions src/main/presenter/llmProviderPresenter/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -18,8 +18,9 @@ import {
AcpDebugRunResult
} from '@shared/presenter'
import { ProviderChange, ProviderBatchUpdate } from '@shared/provider-operations'
import { isProviderDbBackedProvider } from '@shared/providerDbCatalog'
import { eventBus } from '@/eventbus'
import { CONFIG_EVENTS } from '@/events'
import { CONFIG_EVENTS, PROVIDER_DB_EVENTS } from '@/events'
import { BaseLLMProvider } from './baseProvider'
import { ProviderConfig, StreamState } from './types'
import { RateLimitManager } from './managers/rateLimitManager'
Expand Down Expand Up @@ -47,6 +48,8 @@ const createAbortError = (): Error => {
export class LLMProviderPresenter implements ILlmProviderPresenter {
private currentProviderId: string | null = null
private readonly activeStreams: Map<string, StreamState> = new Map()
private readonly modelRefreshPromises: Map<string, Promise<void>> = new Map()
private readonly configPresenter: IConfigPresenter
Comment on lines +51 to +52
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Don't drop newer refresh requests behind an older in-flight refresh.

Lines 408-410 collapse every concurrent request for the same provider into the first promise and never schedule a trailing rerun. If another provider-db update lands while the first provider.refreshModels() is already reading the previous snapshot, that later refresh is discarded and the local model list can settle on stale data.

♻️ Suggested coalescing pattern
 export class LLMProviderPresenter implements ILlmProviderPresenter {
   private currentProviderId: string | null = null
   private readonly activeStreams: Map<string, StreamState> = new Map()
   private readonly modelRefreshPromises: Map<string, Promise<void>> = new Map()
+  private readonly pendingModelRefreshes: Set<string> = new Set()
   private readonly configPresenter: IConfigPresenter
 ...
   private enqueueProviderModelRefresh(providerId: string): Promise<void> {
     const existingRefresh = this.modelRefreshPromises.get(providerId)
     if (existingRefresh) {
+      this.pendingModelRefreshes.add(providerId)
       return existingRefresh
     }

-    const refreshPromise = (async () => {
-      const provider = this.getProviderInstance(providerId)
-      await provider.refreshModels()
-    })().finally(() => {
+    const runRefresh = async (): Promise<void> => {
+      do {
+        this.pendingModelRefreshes.delete(providerId)
+        const provider = this.getProviderInstance(providerId)
+        await provider.refreshModels()
+      } while (this.pendingModelRefreshes.has(providerId))
+    }
+
+    const refreshPromise = runRefresh().finally(() => {
+      this.pendingModelRefreshes.delete(providerId)
       if (this.modelRefreshPromises.get(providerId) === refreshPromise) {
         this.modelRefreshPromises.delete(providerId)
       }
     })

     this.modelRefreshPromises.set(providerId, refreshPromise)
     return refreshPromise
   }

A regression test in which a second refresh request arrives before the first promise resolves would make this behavior much easier to lock down.

Also applies to: 407-423

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@src/main/presenter/llmProviderPresenter/index.ts` around lines 51-52: the
current coalescing uses modelRefreshPromises to collapse concurrent refreshes
into the first in-flight promise and drops any newer requests; change this so
that when a provider.refreshModels() is already in-flight you record that a
trailing refresh was requested (e.g., set a pending flag or increment a counter
keyed by provider id), and when the in-flight promise resolves you check that
pending indicator and, if set, invoke provider.refreshModels() again (updating
modelRefreshPromises to the new promise and clearing the pending flag);
reference the existing modelRefreshPromises map and the calls to
provider.refreshModels() so the map is updated with each actual run and newer
refresh requests are not discarded.

private readonly config: ProviderConfig = {
maxConcurrentStreams: 10
}
Expand All @@ -63,6 +66,7 @@ export class LLMProviderPresenter implements ILlmProviderPresenter {
sqlitePresenter: ISQLitePresenter,
mcpRuntime?: ProviderMcpRuntimePort
) {
this.configPresenter = configPresenter
this.rateLimitManager = new RateLimitManager(configPresenter)
this.acpSessionPersistence = new AcpSessionPersistence(sqlitePresenter)
this.providerInstanceManager = new ProviderInstanceManager({
Expand Down Expand Up @@ -105,6 +109,10 @@ export class LLMProviderPresenter implements ILlmProviderPresenter {
eventBus.on(CONFIG_EVENTS.PROVIDER_BATCH_UPDATE, (batchUpdate: ProviderBatchUpdate) => {
this.providerInstanceManager.handleProviderBatchUpdate(batchUpdate)
})

eventBus.on(PROVIDER_DB_EVENTS.UPDATED, () => {
this.refreshEnabledProviderDbBackedModelsInBackground('provider-db-updated')
})
}

getProviders(): LLM_PROVIDER[] {
Expand Down Expand Up @@ -378,10 +386,58 @@ export class LLMProviderPresenter implements ILlmProviderPresenter {
return provider.getKeyStatus()
}

async refreshModels(providerId: string): Promise<void> {
try {
private getEnabledProviderIdsUsingProviderDb(): string[] {
return this.providerInstanceManager
.getProviders()
.filter((provider) => provider.enable && isProviderDbBackedProvider(provider.id))
.map((provider) => provider.id)
}

private async syncProviderDbBeforeRefresh(providerId: string): Promise<void> {
if (!isProviderDbBackedProvider(providerId)) {
return
}

const result = await this.configPresenter.refreshProviderDb(true)
if (result.status === 'error') {
throw new Error(result.message || 'Provider DB refresh failed')
}
}

private enqueueProviderModelRefresh(providerId: string): Promise<void> {
const existingRefresh = this.modelRefreshPromises.get(providerId)
if (existingRefresh) {
return existingRefresh
}

const refreshPromise = (async () => {
const provider = this.getProviderInstance(providerId)
await provider.refreshModels()
})().finally(() => {
if (this.modelRefreshPromises.get(providerId) === refreshPromise) {
this.modelRefreshPromises.delete(providerId)
}
})

this.modelRefreshPromises.set(providerId, refreshPromise)
return refreshPromise
}

private refreshEnabledProviderDbBackedModelsInBackground(reason: string): void {
for (const providerId of this.getEnabledProviderIdsUsingProviderDb()) {
void this.enqueueProviderModelRefresh(providerId).catch((error) => {
console.warn(
`[LLMProviderPresenter] Failed to refresh models for provider ${providerId} during ${reason}:`,
error
)
})
}
}

async refreshModels(providerId: string): Promise<void> {
try {
await this.syncProviderDbBeforeRefresh(providerId)
await this.enqueueProviderModelRefresh(providerId)
} catch (error) {
console.error(`Failed to refresh models for provider ${providerId}:`, error)
const errorMessage = error instanceof Error ? error.message : String(error)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -16,21 +16,11 @@ import {
} from '@shared/modelConfigDefaults'
import { OpenAICompatibleProvider } from './openAICompatibleProvider'
import { providerDbLoader } from '../../configPresenter/providerDbLoader'
import { modelCapabilities } from '../../configPresenter/modelCapabilities'
import type { ProviderMcpRuntimePort } from '../runtimePorts'

export class DoubaoProvider extends OpenAICompatibleProvider {
// List of models that support thinking parameter
private static readonly THINKING_MODELS: string[] = [
'deepseek-v3-1-250821',
'doubao-seed-1-6-vision-250815',
'doubao-seed-1-6-250615',
'doubao-seed-1-6-flash-250615',
'doubao-1-5-thinking-vision-pro-250428',
'doubao-1-5-ui-tars-250428',
'doubao-1-5-thinking-pro-m-250428'
]
const DOUBAO_THINKING_NOTE = 'doubao-thinking-parameter'

export class DoubaoProvider extends OpenAICompatibleProvider {
constructor(
provider: LLM_PROVIDER,
configPresenter: IConfigPresenter,
Expand All @@ -41,7 +31,9 @@ export class DoubaoProvider extends OpenAICompatibleProvider {
}

private supportsThinking(modelId: string): boolean {
return DoubaoProvider.THINKING_MODELS.includes(modelId)
const model = providerDbLoader.getModel(this.provider.id, modelId)
const notes = model?.extra_capabilities?.reasoning?.notes
return Array.isArray(notes) && notes.includes(DOUBAO_THINKING_NOTE)
}

/**
Expand Down Expand Up @@ -93,8 +85,7 @@ export class DoubaoProvider extends OpenAICompatibleProvider {
}

protected async fetchOpenAIModels(): Promise<MODEL_META[]> {
const resolvedId = modelCapabilities.resolveProviderId(this.provider.id) || this.provider.id
const provider = providerDbLoader.getProvider(resolvedId)
const provider = providerDbLoader.getProvider(this.provider.id)
if (!provider || !Array.isArray(provider.models)) {
return []
}
Expand Down
27 changes: 26 additions & 1 deletion src/renderer/settings/components/ProviderApiConfig.vue
Original file line number Diff line number Diff line change
Expand Up @@ -165,8 +165,10 @@
: t('settings.provider.refreshModels')
}}
</Button>
<!-- Key Status Display -->
</div>
<p v-if="shouldRefreshProviderDbFirst" class="text-xs leading-5 text-muted-foreground">
{{ t('settings.provider.refreshModelsWithMetadataHint') }}
</p>
<div v-if="!provider.custom" class="text-xs text-muted-foreground">
{{ t('settings.provider.howToGet') }}: {{ t('settings.provider.getKeyTip') }}
<a :href="providerWebsites?.apiKey" target="_blank" class="text-primary">{{
Expand All @@ -193,8 +195,10 @@ import {
import { Icon } from '@iconify/vue'
import GitHubCopilotOAuth from './GitHubCopilotOAuth.vue'
import { usePresenter } from '@/composables/usePresenter'
import { useToast } from '@/components/use-toast'
import { useModelCheckStore } from '@/stores/modelCheck'
import type { LLM_PROVIDER, KeyStatus } from '@shared/presenter'
import { isProviderDbBackedProvider } from '@shared/providerDbCatalog'

interface ProviderWebsites {
official: string
Expand All @@ -207,6 +211,7 @@ interface ProviderWebsites {
const { t } = useI18n()
const llmProviderPresenter = usePresenter('llmproviderPresenter')
const modelCheckStore = useModelCheckStore()
const { toast } = useToast()

const EDITABLE_BASE_URL_PROVIDER_IDS = new Set([
'openai',
Expand Down Expand Up @@ -247,6 +252,7 @@ const isBaseUrlEditableByDefault = computed(
const showLockedBaseUrl = computed(
() => !isBaseUrlEditableByDefault.value && !baseUrlUnlocked.value
)
const shouldRefreshProviderDbFirst = computed(() => isProviderDbBackedProvider(props.provider.id))

watch(
() => props.provider,
Expand Down Expand Up @@ -323,8 +329,27 @@ const refreshModels = async () => {
isRefreshing.value = true
try {
await llmProviderPresenter.refreshModels(props.provider.id)
toast({
title: t('settings.provider.toast.refreshModelsSuccessTitle'),
description: t(
shouldRefreshProviderDbFirst.value
? 'settings.provider.toast.refreshModelsSuccessDescriptionWithMetadata'
: 'settings.provider.toast.refreshModelsSuccessDescription'
),
duration: 4000
})
} catch (error) {
console.error('Failed to refresh models:', error)
toast({
title: t('settings.provider.toast.refreshModelsFailedTitle'),
description: t(
shouldRefreshProviderDbFirst.value
? 'settings.provider.toast.refreshModelsFailedDescriptionWithMetadata'
: 'settings.provider.toast.refreshModelsFailedDescription'
),
variant: 'destructive',
duration: 4000
})
} finally {
isRefreshing.value = false
}
Expand Down
9 changes: 8 additions & 1 deletion src/renderer/src/i18n/da-DK/settings.json
Original file line number Diff line number Diff line change
Expand Up @@ -420,6 +420,7 @@
"openaiResponsesNotice": "OpenAI bruger som standard Responses API. Hvis et tredjeparts-endpoint kun understøtter Chat Completions, skal du bruge OpenAI Completions-udbyderen.",
"modifyBaseUrl": "Rediger",
"baseUrlLockedHint": "Denne udbyder er låst til den anbefalede basis-URL for at mindske fejlkonfiguration.",
"refreshModelsWithMetadataHint": "Opdatering af denne udbyder synkroniserer først upstream-metadata og genopbygger derefter den lokale modelliste.",
"modelList": "Modelliste",
"enableModels": "Aktivér modeller",
"disableAllModels": "Deaktiver alle modeller",
Expand Down Expand Up @@ -594,7 +595,13 @@
"backupSuccessTitle": "Backup fuldført",
"backupSuccessMessage": "Backup gemt {time} ({size})",
"importSuccessTitle": "Import fuldført",
"importSuccessMessage": "Importerede {count} samtaler"
"importSuccessMessage": "Importerede {count} samtaler",
"refreshModelsSuccessTitle": "Modeller opdateret",
"refreshModelsSuccessDescription": "Den nyeste modelliste er blevet synkroniseret for denne udbyder.",
"refreshModelsSuccessDescriptionWithMetadata": "Upstream-metadata og den nyeste modelliste er blevet synkroniseret for denne udbyder.",
"refreshModelsFailedTitle": "Opdatering mislykkedes",
"refreshModelsFailedDescription": "Modellerne for denne udbyder kunne ikke opdateres lige nu. Prøv igen senere.",
"refreshModelsFailedDescriptionWithMetadata": "Upstream-metadata og modeller for denne udbyder kunne ikke opdateres lige nu. Prøv igen senere."
},
"modelscope": {
"mcpSync": {
Expand Down
9 changes: 8 additions & 1 deletion src/renderer/src/i18n/en-US/settings.json
Original file line number Diff line number Diff line change
Expand Up @@ -548,6 +548,7 @@
"openaiResponsesNotice": "OpenAI defaults to the Responses API. If a third-party endpoint only supports Chat Completions, use the OpenAI Completions provider.",
"modifyBaseUrl": "Modify",
"baseUrlLockedHint": "This provider is pinned to the recommended Base URL to reduce misconfiguration.",
"refreshModelsWithMetadataHint": "Refreshing this provider will sync upstream metadata first, then rebuild the local model list.",
"modelList": "Model List",
"enableModels": "Enable Models",
"disableAllModels": "Disable All Models",
Expand Down Expand Up @@ -690,7 +691,13 @@
"backupSuccessTitle": "Backup completed",
"backupSuccessMessage": "Backup saved at {time} ({size})",
"importSuccessTitle": "Import completed",
"importSuccessMessage": "Successfully imported {count} conversations"
"importSuccessMessage": "Successfully imported {count} conversations",
"refreshModelsSuccessTitle": "Models refreshed",
"refreshModelsSuccessDescription": "The latest model list has been synced for this provider.",
"refreshModelsSuccessDescriptionWithMetadata": "Upstream metadata and the latest model list have been synced for this provider.",
"refreshModelsFailedTitle": "Refresh failed",
"refreshModelsFailedDescription": "Unable to refresh models for this provider right now. Please try again later.",
"refreshModelsFailedDescriptionWithMetadata": "Unable to refresh upstream metadata and models for this provider right now. Please try again later."
},
"modelscope": {
"mcpSync": {
Expand Down
9 changes: 8 additions & 1 deletion src/renderer/src/i18n/fa-IR/settings.json
Original file line number Diff line number Diff line change
Expand Up @@ -486,6 +486,7 @@
"openaiResponsesNotice": "OpenAI به صورت پیش‌فرض از Responses API استفاده می‌کند. اگر endpoint شخص ثالث فقط از Chat Completions پشتیبانی می‌کند، از ارائه‌دهنده OpenAI Completions استفاده کنید.",
"modifyBaseUrl": "ویرایش",
"baseUrlLockedHint": "این ارائه‌دهنده برای کاهش خطاهای پیکربندی روی آدرس پایه پیشنهادی قفل شده است.",
"refreshModelsWithMetadataHint": "نوسازی این ارائه‌دهنده ابتدا فرادادهٔ بالادستی را همگام می‌کند و سپس فهرست محلی مدل‌ها را بازسازی می‌کند.",
"modelList": "فهرست مدل‌ها",
"enableModels": "روشن کردن مدل‌ها",
"disableAllModels": "خاموش کردن همه مدل‌ها",
Expand Down Expand Up @@ -660,7 +661,13 @@
"backupSuccessTitle": "پشتیبان‌گیری کامل شد",
"backupSuccessMessage": "فایل پشتیبان در {time} ذخیره شد (حجم: {size})",
"importSuccessTitle": "درون‌ریزی با موفقیت انجام شد",
"importSuccessMessage": "{count} گفتگو با موفقیت درون‌ریزی شد"
"importSuccessMessage": "{count} گفتگو با موفقیت درون‌ریزی شد",
"refreshModelsSuccessTitle": "مدل‌ها نوسازی شدند",
"refreshModelsSuccessDescription": "جدیدترین فهرست مدل برای این ارائه‌دهنده همگام شد.",
"refreshModelsSuccessDescriptionWithMetadata": "فرادادهٔ بالادستی و جدیدترین فهرست مدل برای این ارائه‌دهنده همگام شد.",
"refreshModelsFailedTitle": "نوسازی ناموفق بود",
"refreshModelsFailedDescription": "در حال حاضر امکان نوسازی مدل‌های این ارائه‌دهنده وجود ندارد. لطفاً بعداً دوباره تلاش کنید.",
"refreshModelsFailedDescriptionWithMetadata": "در حال حاضر امکان همگام‌سازی فرادادهٔ بالادستی و مدل‌های این ارائه‌دهنده وجود ندارد. لطفاً بعداً دوباره تلاش کنید."
},
"apiKeyLabel": "کلید API",
"apiUrlLabel": "آدرس API",
Expand Down
9 changes: 8 additions & 1 deletion src/renderer/src/i18n/fr-FR/settings.json
Original file line number Diff line number Diff line change
Expand Up @@ -486,6 +486,7 @@
"openaiResponsesNotice": "OpenAI utilise par défaut l'API Responses. Si un endpoint tiers ne prend en charge que Chat Completions, utilisez le fournisseur OpenAI Completions.",
"modifyBaseUrl": "Modifier",
"baseUrlLockedHint": "Ce fournisseur est verrouillé sur l’URL de base recommandée afin de réduire les erreurs de configuration.",
"refreshModelsWithMetadataHint": "L’actualisation de ce fournisseur synchronise d’abord les métadonnées en amont, puis reconstruit la liste locale des modèles.",
"modelList": "Liste des modèles",
"enableModels": "Activer les modèles",
"disableAllModels": "Désactiver tous les modèles",
Expand Down Expand Up @@ -660,7 +661,13 @@
"backupSuccessTitle": "Sauvegarde terminée",
"backupSuccessMessage": "Sauvegarde enregistrée à {time} ({size})",
"importSuccessTitle": "Importation terminée",
"importSuccessMessage": "{count} conversations importées avec succès"
"importSuccessMessage": "{count} conversations importées avec succès",
"refreshModelsSuccessTitle": "Modèles actualisés",
"refreshModelsSuccessDescription": "La dernière liste de modèles a été synchronisée pour ce fournisseur.",
"refreshModelsSuccessDescriptionWithMetadata": "Les métadonnées en amont et la dernière liste de modèles ont été synchronisées pour ce fournisseur.",
"refreshModelsFailedTitle": "Actualisation échouée",
"refreshModelsFailedDescription": "Impossible d’actualiser les modèles de ce fournisseur pour le moment. Veuillez réessayer plus tard.",
"refreshModelsFailedDescriptionWithMetadata": "Impossible de synchroniser les métadonnées en amont et les modèles de ce fournisseur pour le moment. Veuillez réessayer plus tard."
},
"apiKeyLabel": "Clé API",
"apiUrlLabel": "URL API",
Expand Down
9 changes: 8 additions & 1 deletion src/renderer/src/i18n/he-IL/settings.json
Original file line number Diff line number Diff line change
Expand Up @@ -486,6 +486,7 @@
"openaiResponsesNotice": "OpenAI משתמשת כברירת מחדל ב-Responses API. אם endpoint של צד שלישי תומך רק ב-Chat Completions, השתמש בספק OpenAI Completions.",
"modifyBaseUrl": "ערוך",
"baseUrlLockedHint": "הספק הזה נעול לכתובת הבסיס המומלצת כדי לצמצם שגיאות תצורה.",
"refreshModelsWithMetadataHint": "רענון הספק הזה יסנכרן קודם את המטא־דאטה במעלה הזרם ולאחר מכן יבנה מחדש את רשימת המודלים המקומית.",
"modelList": "רשימת מודלים",
"enableModels": "הפעל מודלים",
"disableAllModels": "השבת את כל המודלים",
Expand Down Expand Up @@ -660,7 +661,13 @@
"backupSuccessTitle": "הגיבוי הושלם",
"backupSuccessMessage": "הגיבוי נשמר ב-{time} ({size})",
"importSuccessTitle": "הייבוא הושלם",
"importSuccessMessage": "יובאו בהצלחה {count} שיחות"
"importSuccessMessage": "יובאו בהצלחה {count} שיחות",
"refreshModelsSuccessTitle": "המודלים רועננו",
"refreshModelsSuccessDescription": "רשימת המודלים העדכנית סונכרנה עבור הספק הזה.",
"refreshModelsSuccessDescriptionWithMetadata": "המטא־דאטה במעלה הזרם ורשימת המודלים העדכנית סונכרנו עבור הספק הזה.",
"refreshModelsFailedTitle": "הרענון נכשל",
"refreshModelsFailedDescription": "לא ניתן לרענן כעת את המודלים של הספק הזה. נסה שוב מאוחר יותר.",
"refreshModelsFailedDescriptionWithMetadata": "לא ניתן לסנכרן כעת את המטא־דאטה במעלה הזרם ואת המודלים של הספק הזה. נסה שוב מאוחר יותר."
},
"modelscope": {
"mcpSync": {
Expand Down
Loading
Loading