diff --git a/core/http/endpoints/openai/mcp.go b/core/http/endpoints/openai/mcp.go
index fe018bbbd..a0e51f91e 100644
--- a/core/http/endpoints/openai/mcp.go
+++ b/core/http/endpoints/openai/mcp.go
@@ -72,12 +72,25 @@ func MCPCompletionEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader,
fragment = fragment.AddMessage(message.Role, message.StringContent)
}
- port := appConfig.APIAddress[strings.LastIndex(appConfig.APIAddress, ":")+1:]
+ // Extract port from APIAddress (format: ":8080" or "127.0.0.1:8080" or "0.0.0.0:8080")
+ port := "8080" // default
+ if appConfig.APIAddress != "" {
+ lastColon := strings.LastIndex(appConfig.APIAddress, ":")
+ if lastColon >= 0 && lastColon+1 < len(appConfig.APIAddress) {
+ port = appConfig.APIAddress[lastColon+1:]
+ } else {
+ log.Warn().Str("APIAddress", appConfig.APIAddress).Msg("[MCP] Could not extract port from APIAddress, using default 8080")
+ }
+ }
+
apiKey := ""
- if appConfig.ApiKeys != nil {
+ if len(appConfig.ApiKeys) > 0 {
apiKey = appConfig.ApiKeys[0]
}
+ baseURL := "http://127.0.0.1:" + port
+ log.Debug().Str("baseURL", baseURL).Str("model", config.Name).Msg("[MCP] Creating OpenAI LLM client for internal API calls")
+
ctxWithCancellation, cancel := context.WithCancel(ctx)
defer cancel()
handleConnectionCancellation(c, cancel, ctxWithCancellation)
@@ -85,7 +98,7 @@ func MCPCompletionEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader,
// and act like completion.go.
// We can do this as cogito expects an interface and we can create one that
// we satisfy to just call internally ComputeChoices
- defaultLLM := cogito.NewOpenAILLM(config.Name, apiKey, "http://127.0.0.1:"+port)
+ defaultLLM := cogito.NewOpenAILLM(config.Name, apiKey, baseURL)
cogitoOpts := []cogito.Option{
cogito.WithStatusCallback(func(s string) {
@@ -127,7 +140,8 @@ func MCPCompletionEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader,
cogitoOpts...,
)
if err != nil && !errors.Is(err, cogito.ErrNoToolSelected) {
- return err
+ log.Error().Err(err).Msgf("[MCP] ExecuteTools failed for model %s", config.Name)
+ return fmt.Errorf("failed to execute tools: %w", err)
}
f, err = defaultLLM.Ask(ctx, f)
diff --git a/core/http/static/chat.js b/core/http/static/chat.js
index 993c956ac..5bb1934d7 100644
--- a/core/http/static/chat.js
+++ b/core/http/static/chat.js
@@ -435,9 +435,28 @@ async function promptGPT(systemPrompt, input) {
}
if (!response.ok) {
+ // Try to get error details from response body
+ let errorMessage = `Error: POST ${endpoint} ${response.status}`;
+      try {
+        // Read the body exactly once - a fetch Response body stream cannot
+        // be consumed twice, so parse the text manually instead of calling
+        // response.json() and then response.text().
+        const errorText = await response.text();
+        try {
+          const errorData = JSON.parse(errorText);
+          if (errorData && errorData.error && errorData.error.message) {
+            errorMessage = `Error (${response.status}): ${errorData.error.message}`;
+          }
+        } catch (e) {
+          // Body is not JSON - fall back to the raw text
+          if (errorText) {
+            errorMessage = `Error (${response.status}): ${errorText.substring(0, 200)}`;
+          }
+        }
+      } catch (e2) {
+        // Ignore - use default error message
+      }
+
Alpine.store("chat").add(
"assistant",
- `Error: POST ${endpoint} ${response.status}`,
+ `${errorMessage}`,
);
toggleLoader(false);
currentAbortController = null;