remove old waveai backend code (#3195)

The frontend was removed in the last release; this change cleans up the corresponding backend code.
It also removes the wsapi host (the cloud service is being removed as well).
This commit is contained in:
Mike Sawka 2026-04-15 10:22:25 -07:00 committed by GitHub
parent 263eda42c6
commit a08c2d7431
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
14 changed files with 0 additions and 1209 deletions

View file

@ -34,9 +34,6 @@ export class BlockServiceType {
// Persists a terminal state snapshot for the given block via the backend "block" service.
SaveTerminalState(blockId: string, state: string, stateType: string, ptyOffset: number, termSize: TermSize): Promise<void> {
    const callArgs = Array.from(arguments);
    return callBackendService(this?.waveEnv, "block", "SaveTerminalState", callArgs);
}
// Persists the Wave AI chat history for a block via the backend "block" service.
SaveWaveAiData(arg2: string, arg3: WaveAIPromptMessageType[]): Promise<void> {
    const callArgs = Array.from(arguments);
    return callBackendService(this?.waveEnv, "block", "SaveWaveAiData", callArgs);
}
}
export const BlockService = new BlockServiceType();

View file

@ -924,12 +924,6 @@ export class RpcApiType {
return client.wshRpcStream("streamtest", null, opts);
}
// command "streamwaveai" [responsestream]
StreamWaveAiCommand(client: WshClient, data: WaveAIStreamRequest, opts?: RpcOpts): AsyncGenerator<WaveAIPacketType, void, boolean> {
if (this.mockClient) return this.mockClient.mockWshRpcStream(client, "streamwaveai", data, opts);
return client.wshRpcStream("streamwaveai", data, opts);
}
// command "termgetscrollbacklines" [call]
TermGetScrollbackLinesCommand(client: WshClient, data: CommandTermGetScrollbackLinesData, opts?: RpcOpts): Promise<CommandTermGetScrollbackLinesRtnData> {
if (this.mockClient) return this.mockClient.mockWshRpcCall(client, "termgetscrollbacklines", data, opts);

View file

@ -2011,53 +2011,6 @@ declare global {
fullconfig: FullConfigType;
};
// wshrpc.WaveAIOptsType
// Connection/configuration options for an AI backend request.
type WaveAIOptsType = {
    model: string;
    apitype?: string; // backend selector, e.g. "openai", "anthropic", "perplexity", "google"
    apitoken: string;
    orgid?: string;
    apiversion?: string;
    baseurl?: string; // custom endpoint override; empty means the provider default
    proxyurl?: string;
    maxtokens?: number;
    maxchoices?: number;
    timeoutms?: number;
};
// wshrpc.WaveAIPacketType
// One streamed chunk of an AI response (header, text delta, usage, or error).
type WaveAIPacketType = {
    type: string;
    model?: string;
    created?: number;
    finish_reason?: string;
    usage?: WaveAIUsageType;
    index?: number;
    text?: string;
    error?: string;
};
// wshrpc.WaveAIPromptMessageType
// A single chat message in the prompt history.
type WaveAIPromptMessageType = {
    role: string; // presumably "system" | "user" | "assistant" — confirm against backend
    content: string;
    name?: string;
};
// wshrpc.WaveAIStreamRequest
// Request payload for the "streamwaveai" RPC.
type WaveAIStreamRequest = {
    clientid?: string;
    opts: WaveAIOptsType;
    prompt: WaveAIPromptMessageType[];
};
// wshrpc.WaveAIUsageType
// Token accounting reported by the backend.
type WaveAIUsageType = {
    prompt_tokens?: number;
    completion_tokens?: number;
    total_tokens?: number;
};
// filestore.WaveFile
type WaveFile = {

View file

@ -5,7 +5,6 @@ package blockservice
import (
"context"
"encoding/json"
"fmt"
"time"
@ -68,28 +67,6 @@ func (bs *BlockService) SaveTerminalState(ctx context.Context, blockId string, s
return nil
}
// SaveWaveAiData persists the AI chat history for a "waveai" block into the
// block's "aidata" file in the filestore. It rejects blocks whose view type
// is not "waveai".
func (bs *BlockService) SaveWaveAiData(ctx context.Context, blockId string, history []wshrpc.WaveAIPromptMessageType) error {
	block, err := wstore.DBMustGet[*waveobj.Block](ctx, blockId)
	if err != nil {
		return err
	}
	viewName := block.Meta.GetString(waveobj.MetaKey_View, "")
	if viewName != "waveai" {
		return fmt.Errorf("invalid view type: %s", viewName)
	}
	historyBytes, err := json.Marshal(history)
	if err != nil {
		return fmt.Errorf("unable to serialize ai history: %v", err)
	}
	// ignore MakeFile error (already exists is ok)
	filestore.WFS.MakeFile(ctx, blockId, "aidata", nil, wshrpc.FileOpts{})
	err = filestore.WFS.WriteFile(ctx, blockId, "aidata", historyBytes)
	if err != nil {
		// BUG FIX: message previously said "cannot save terminal state"
		// (copy-paste from SaveTerminalState); this saves AI history.
		return fmt.Errorf("cannot save ai history: %w", err)
	}
	return nil
}
func (*BlockService) CleanupOrphanedBlocks_Meta() tsgenmeta.MethodMeta {
return tsgenmeta.MethodMeta{
Desc: "queue a layout action to cleanup orphaned blocks in the tab",

View file

@ -1,316 +0,0 @@
// Copyright 2025, Command Line Inc.
// SPDX-License-Identifier: Apache-2.0
package waveai
import (
"bufio"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"strings"
"github.com/wavetermdev/waveterm/pkg/panichandler"
"github.com/wavetermdev/waveterm/pkg/wshrpc"
)
// AnthropicBackend implements AIBackend against the Anthropic Messages API.
type AnthropicBackend struct{}

// compile-time interface check
var _ AIBackend = AnthropicBackend{}

// Claude API request types
type anthropicMessage struct {
	Role    string `json:"role"`
	Content string `json:"content"`
}
type anthropicRequest struct {
	Model       string             `json:"model"`
	Messages    []anthropicMessage `json:"messages"`
	System      string             `json:"system,omitempty"` // concatenated system prompt (top-level field, not a message)
	MaxTokens   int                `json:"max_tokens,omitempty"`
	Stream      bool               `json:"stream"`
	Temperature float32            `json:"temperature,omitempty"`
}

// Claude API response types for SSE events
type anthropicContentBlock struct {
	Type string `json:"type"` // "text" or other content types
	Text string `json:"text,omitempty"`
}
type anthropicUsage struct {
	InputTokens  int `json:"input_tokens"`
	OutputTokens int `json:"output_tokens"`
}
type anthropicResponseMessage struct {
	ID           string                  `json:"id"`
	Type         string                  `json:"type"`
	Role         string                  `json:"role"`
	Content      []anthropicContentBlock `json:"content"`
	Model        string                  `json:"model"`
	StopReason   string                  `json:"stop_reason,omitempty"`
	StopSequence string                  `json:"stop_sequence,omitempty"`
	Usage        *anthropicUsage         `json:"usage,omitempty"`
}
type anthropicStreamEventError struct {
	Type    string `json:"type"`
	Message string `json:"message"`
}
type anthropicStreamEventDelta struct {
	Text string `json:"text"`
}
// anthropicStreamEvent is the union payload of one SSE "data:" line; only the
// fields relevant to the event type are populated.
type anthropicStreamEvent struct {
	Type         string                     `json:"type"`
	Message      *anthropicResponseMessage  `json:"message,omitempty"`
	ContentBlock *anthropicContentBlock     `json:"content_block,omitempty"`
	Delta        *anthropicStreamEventDelta `json:"delta,omitempty"`
	Error        *anthropicStreamEventError `json:"error,omitempty"`
	Usage        *anthropicUsage            `json:"usage,omitempty"`
}
// SSE event represents a parsed Server-Sent Event
type sseEvent struct {
Event string // The event type field
Data string // The data field
}
// parseSSE reads and parses SSE format from a bufio.Reader
func parseSSE(reader *bufio.Reader) (*sseEvent, error) {
var event sseEvent
for {
line, err := reader.ReadString('\n')
if err != nil {
return nil, err
}
line = strings.TrimSpace(line)
if line == "" {
// Empty line signals end of event
if event.Event != "" || event.Data != "" {
return &event, nil
}
continue
}
if strings.HasPrefix(line, "event:") {
event.Event = strings.TrimSpace(strings.TrimPrefix(line, "event:"))
} else if strings.HasPrefix(line, "data:") {
event.Data = strings.TrimSpace(strings.TrimPrefix(line, "data:"))
}
}
}
// StreamCompletion implements AIBackend for the Anthropic Messages API. It
// issues a streaming (SSE) request and forwards parsed events on the returned
// channel as WaveAIPacketType values. Errors (including panics) are delivered
// in-band via RespOrErrorUnion.Error, and the channel is always closed when
// the goroutine exits.
func (AnthropicBackend) StreamCompletion(ctx context.Context, request wshrpc.WaveAIStreamRequest) chan wshrpc.RespOrErrorUnion[wshrpc.WaveAIPacketType] {
	rtn := make(chan wshrpc.RespOrErrorUnion[wshrpc.WaveAIPacketType])
	go func() {
		defer func() {
			// convert panics to an in-band error before closing the channel
			panicErr := panichandler.PanicHandler("AnthropicBackend.StreamCompletion", recover())
			if panicErr != nil {
				rtn <- makeAIError(panicErr)
			}
			close(rtn)
		}()
		if request.Opts == nil {
			rtn <- makeAIError(errors.New("no anthropic opts found"))
			return
		}
		model := request.Opts.Model
		if model == "" {
			// BUG FIX: default was "claude-3-sonnet-20250229", which is not a
			// valid model id (2025-02-29 is not a real date); the Claude 3
			// Sonnet model id is dated 2024-02-29.
			model = "claude-3-sonnet-20240229" // default model
		}
		// Convert the generic prompt to Anthropic's format: system messages
		// are concatenated into the top-level "system" field; everything else
		// becomes a user/assistant message (unknown roles default to "user").
		var messages []anthropicMessage
		var systemPrompt string
		for _, msg := range request.Prompt {
			if msg.Role == "system" {
				if systemPrompt != "" {
					systemPrompt += "\n"
				}
				systemPrompt += msg.Content
				continue
			}
			role := "user"
			if msg.Role == "assistant" {
				role = "assistant"
			}
			messages = append(messages, anthropicMessage{
				Role:    role,
				Content: msg.Content,
			})
		}
		anthropicReq := anthropicRequest{
			Model:     model,
			Messages:  messages,
			System:    systemPrompt,
			Stream:    true,
			MaxTokens: request.Opts.MaxTokens,
		}
		reqBody, err := json.Marshal(anthropicReq)
		if err != nil {
			rtn <- makeAIError(fmt.Errorf("failed to marshal anthropic request: %v", err))
			return
		}
		// Build endpoint allowing custom base URL from presets/settings
		endpoint := "https://api.anthropic.com/v1/messages"
		if request.Opts.BaseURL != "" {
			endpoint = strings.TrimSpace(request.Opts.BaseURL)
		}
		req, err := http.NewRequestWithContext(ctx, "POST", endpoint, strings.NewReader(string(reqBody)))
		if err != nil {
			rtn <- makeAIError(fmt.Errorf("failed to create anthropic request: %v", err))
			return
		}
		req.Header.Set("Content-Type", "application/json")
		req.Header.Set("Accept", "text/event-stream")
		req.Header.Set("x-api-key", request.Opts.APIToken)
		version := "2023-06-01"
		if request.Opts.APIVersion != "" {
			version = request.Opts.APIVersion
		}
		req.Header.Set("anthropic-version", version)
		// Configure HTTP client with proxy if specified
		client := &http.Client{}
		if request.Opts.ProxyURL != "" {
			proxyURL, err := url.Parse(request.Opts.ProxyURL)
			if err != nil {
				rtn <- makeAIError(fmt.Errorf("invalid proxy URL: %v", err))
				return
			}
			transport := &http.Transport{
				Proxy: http.ProxyURL(proxyURL),
			}
			client.Transport = transport
		}
		resp, err := client.Do(req)
		if err != nil {
			rtn <- makeAIError(fmt.Errorf("failed to send anthropic request: %v", err))
			return
		}
		defer resp.Body.Close()
		if resp.StatusCode != http.StatusOK {
			bodyBytes, _ := io.ReadAll(resp.Body)
			rtn <- makeAIError(fmt.Errorf("Anthropic API error: %s - %s", resp.Status, string(bodyBytes)))
			return
		}
		reader := bufio.NewReader(resp.Body)
		for {
			// Check for context cancellation between events
			select {
			case <-ctx.Done():
				rtn <- makeAIError(fmt.Errorf("request cancelled: %v", ctx.Err()))
				return
			default:
			}
			sse, err := parseSSE(reader)
			if err == io.EOF {
				break
			}
			if err != nil {
				rtn <- makeAIError(fmt.Errorf("error reading SSE stream: %v", err))
				break
			}
			if sse.Event == "ping" {
				continue // Ignore ping events
			}
			var event anthropicStreamEvent
			if err := json.Unmarshal([]byte(sse.Data), &event); err != nil {
				rtn <- makeAIError(fmt.Errorf("error unmarshaling event data: %v", err))
				break
			}
			if event.Error != nil {
				rtn <- makeAIError(fmt.Errorf("Anthropic API error: %s - %s", event.Error.Type, event.Error.Message))
				break
			}
			// Translate each Anthropic event type into a WaveAIPacketType
			switch sse.Event {
			case "message_start":
				// header packet: carries the model name
				if event.Message != nil {
					pk := MakeWaveAIPacket()
					pk.Model = event.Message.Model
					rtn <- wshrpc.RespOrErrorUnion[wshrpc.WaveAIPacketType]{Response: *pk}
				}
			case "content_block_start":
				if event.ContentBlock != nil && event.ContentBlock.Text != "" {
					pk := MakeWaveAIPacket()
					pk.Text = event.ContentBlock.Text
					rtn <- wshrpc.RespOrErrorUnion[wshrpc.WaveAIPacketType]{Response: *pk}
				}
			case "content_block_delta":
				if event.Delta != nil && event.Delta.Text != "" {
					pk := MakeWaveAIPacket()
					pk.Text = event.Delta.Text
					rtn <- wshrpc.RespOrErrorUnion[wshrpc.WaveAIPacketType]{Response: *pk}
				}
			case "content_block_stop":
				// Note: According to the docs, this just signals the end of a content block
				// We might want to use this for tracking block boundaries, but for now
				// we don't need to send anything special to match OpenAI's format
			case "message_delta":
				// Update message metadata, usage stats
				if event.Usage != nil {
					pk := MakeWaveAIPacket()
					pk.Usage = &wshrpc.WaveAIUsageType{
						PromptTokens:     event.Usage.InputTokens,
						CompletionTokens: event.Usage.OutputTokens,
						TotalTokens:      event.Usage.InputTokens + event.Usage.OutputTokens,
					}
					rtn <- wshrpc.RespOrErrorUnion[wshrpc.WaveAIPacketType]{Response: *pk}
				}
			case "message_stop":
				// final packet: finish reason plus (optional) usage totals
				if event.Message != nil {
					pk := MakeWaveAIPacket()
					pk.FinishReason = event.Message.StopReason
					if event.Message.Usage != nil {
						pk.Usage = &wshrpc.WaveAIUsageType{
							PromptTokens:     event.Message.Usage.InputTokens,
							CompletionTokens: event.Message.Usage.OutputTokens,
							TotalTokens:      event.Message.Usage.InputTokens + event.Message.Usage.OutputTokens,
						}
					}
					rtn <- wshrpc.RespOrErrorUnion[wshrpc.WaveAIPacketType]{Response: *pk}
				}
			default:
				// NOTE(review): treating unknown event types as fatal is
				// brittle if the API adds event types — consider ignoring.
				rtn <- makeAIError(fmt.Errorf("unknown Anthropic event type: %s", sse.Event))
				return
			}
		}
	}()
	return rtn
}

View file

@ -1,127 +0,0 @@
// Copyright 2025, Command Line Inc.
// SPDX-License-Identifier: Apache-2.0
package waveai
import (
"context"
"encoding/json"
"fmt"
"io"
"log"
"time"
"github.com/gorilla/websocket"
"github.com/wavetermdev/waveterm/pkg/panichandler"
"github.com/wavetermdev/waveterm/pkg/wcloud"
"github.com/wavetermdev/waveterm/pkg/wshrpc"
)
// WaveAICloudBackend implements AIBackend by proxying requests through the
// Wave cloud websocket service.
type WaveAICloudBackend struct{}

// compile-time interface check
var _ AIBackend = WaveAICloudBackend{}

// CloudWebsocketConnectTimeout bounds the websocket dial to the cloud server.
const CloudWebsocketConnectTimeout = 1 * time.Minute
// OpenAICloudReqStr is the packet type tag for a cloud AI request.
const OpenAICloudReqStr = "openai-cloudreq"
// PacketEOFStr is the sentinel Error value marking end-of-stream from the server.
const PacketEOFStr = "EOF"

// WaveAICloudReqPacketType is the request packet sent over the cloud websocket.
type WaveAICloudReqPacketType struct {
	Type       string                           `json:"type"`
	ClientId   string                           `json:"clientid"`
	Prompt     []wshrpc.WaveAIPromptMessageType `json:"prompt"`
	MaxTokens  int                              `json:"maxtokens,omitempty"`
	MaxChoices int                              `json:"maxchoices,omitempty"`
}

// MakeWaveAICloudReqPacket returns a request packet with its type tag set.
func MakeWaveAICloudReqPacket() *WaveAICloudReqPacketType {
	return &WaveAICloudReqPacketType{
		Type: OpenAICloudReqStr,
	}
}
// StreamCompletion implements AIBackend for the Wave cloud proxy: it dials
// the wcloud websocket endpoint, sends the prompt as a single request packet,
// and relays streamed response packets until an EOF sentinel or error. Errors
// are delivered in-band on the returned channel, which is always closed.
func (WaveAICloudBackend) StreamCompletion(ctx context.Context, request wshrpc.WaveAIStreamRequest) chan wshrpc.RespOrErrorUnion[wshrpc.WaveAIPacketType] {
	rtn := make(chan wshrpc.RespOrErrorUnion[wshrpc.WaveAIPacketType])
	wsEndpoint := wcloud.GetWSEndpoint()
	go func() {
		defer func() {
			// convert panics to an in-band error, then close the channel;
			// this defer was registered first, so it runs LAST (after the
			// conn.Close defer below), keeping sends on rtn safe there
			panicErr := panichandler.PanicHandler("WaveAICloudBackend.StreamCompletion", recover())
			if panicErr != nil {
				rtn <- makeAIError(panicErr)
			}
			close(rtn)
		}()
		if wsEndpoint == "" {
			rtn <- makeAIError(fmt.Errorf("no cloud ws endpoint found"))
			return
		}
		if request.Opts == nil {
			rtn <- makeAIError(fmt.Errorf("no openai opts found"))
			return
		}
		// dial with its own timeout, independent of the request ctx
		websocketContext, dialCancelFn := context.WithTimeout(context.Background(), CloudWebsocketConnectTimeout)
		defer dialCancelFn()
		conn, _, err := websocket.DefaultDialer.DialContext(websocketContext, wsEndpoint, nil)
		if err == context.DeadlineExceeded {
			rtn <- makeAIError(fmt.Errorf("OpenAI request, timed out connecting to cloud server: %v", err))
			return
		} else if err != nil {
			rtn <- makeAIError(fmt.Errorf("OpenAI request, websocket connect error: %v", err))
			return
		}
		defer func() {
			err = conn.Close()
			if err != nil {
				rtn <- makeAIError(fmt.Errorf("unable to close openai channel: %v", err))
			}
		}()
		// drop client-side "error" pseudo-messages from the prompt history
		var sendablePromptMsgs []wshrpc.WaveAIPromptMessageType
		for _, promptMsg := range request.Prompt {
			if promptMsg.Role == "error" {
				continue
			}
			sendablePromptMsgs = append(sendablePromptMsgs, promptMsg)
		}
		reqPk := MakeWaveAICloudReqPacket()
		reqPk.ClientId = request.ClientId
		reqPk.Prompt = sendablePromptMsgs
		reqPk.MaxTokens = request.Opts.MaxTokens
		reqPk.MaxChoices = request.Opts.MaxChoices
		configMessageBuf, err := json.Marshal(reqPk)
		if err != nil {
			rtn <- makeAIError(fmt.Errorf("OpenAI request, packet marshal error: %v", err))
			return
		}
		err = conn.WriteMessage(websocket.TextMessage, configMessageBuf)
		if err != nil {
			rtn <- makeAIError(fmt.Errorf("OpenAI request, websocket write config error: %v", err))
			return
		}
		// relay loop: one JSON WaveAIPacketType per websocket message
		for {
			_, socketMessage, err := conn.ReadMessage()
			if err == io.EOF {
				// NOTE(review): gorilla/websocket's ReadMessage reports normal
				// closure as *websocket.CloseError, not io.EOF — verify this
				// branch is actually reachable
				break
			}
			if err != nil {
				log.Printf("err received: %v", err)
				rtn <- makeAIError(fmt.Errorf("OpenAI request, websocket error reading message: %v", err))
				break
			}
			var streamResp *wshrpc.WaveAIPacketType
			err = json.Unmarshal(socketMessage, &streamResp)
			if err != nil {
				rtn <- makeAIError(fmt.Errorf("OpenAI request, websocket response json decode error: %v", err))
				break
			}
			if streamResp.Error == PacketEOFStr {
				// got eof packet from socket
				break
			} else if streamResp.Error != "" {
				// use error from server directly
				rtn <- makeAIError(fmt.Errorf("%v", streamResp.Error))
				break
			}
			rtn <- wshrpc.RespOrErrorUnion[wshrpc.WaveAIPacketType]{Response: *streamResp}
		}
	}()
	return rtn
}

View file

@ -1,117 +0,0 @@
// Copyright 2025, Command Line Inc.
// SPDX-License-Identifier: Apache-2.0
package waveai
import (
"context"
"fmt"
"log"
"net/http"
"net/url"
"github.com/google/generative-ai-go/genai"
"github.com/wavetermdev/waveterm/pkg/wshrpc"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
)
type GoogleBackend struct{}
var _ AIBackend = GoogleBackend{}
// StreamCompletion implements AIBackend for Google's generative AI (genai)
// API. The prompt history (all but the last message) seeds the chat session,
// and the last message is sent as the streamed prompt. Errors are delivered
// in-band on the returned channel, which is always non-nil and always closed.
func (GoogleBackend) StreamCompletion(ctx context.Context, request wshrpc.WaveAIStreamRequest) chan wshrpc.RespOrErrorUnion[wshrpc.WaveAIPacketType] {
	// errCh returns a channel carrying a single error, matching the other
	// backends' contract: callers always get a usable channel to range over.
	errCh := func(err error) chan wshrpc.RespOrErrorUnion[wshrpc.WaveAIPacketType] {
		rtn := make(chan wshrpc.RespOrErrorUnion[wshrpc.WaveAIPacketType])
		go func() {
			defer close(rtn)
			rtn <- makeAIError(err)
		}()
		return rtn
	}
	if request.Opts == nil {
		// BUG FIX: request.Opts was previously dereferenced unconditionally;
		// the other backends guard against a nil Opts.
		return errCh(fmt.Errorf("no google opts found"))
	}
	var clientOptions []option.ClientOption
	clientOptions = append(clientOptions, option.WithAPIKey(request.Opts.APIToken))
	// Configure proxy if specified
	if request.Opts.ProxyURL != "" {
		proxyURL, err := url.Parse(request.Opts.ProxyURL)
		if err != nil {
			return errCh(fmt.Errorf("invalid proxy URL: %v", err))
		}
		transport := &http.Transport{
			Proxy: http.ProxyURL(proxyURL),
		}
		httpClient := &http.Client{
			Transport: transport,
		}
		clientOptions = append(clientOptions, option.WithHTTPClient(httpClient))
	}
	client, err := genai.NewClient(ctx, clientOptions...)
	if err != nil {
		log.Printf("failed to create client: %v", err)
		// BUG FIX: returning a nil channel made callers that range over the
		// result block forever; surface the error in-band instead.
		return errCh(fmt.Errorf("failed to create google ai client: %v", err))
	}
	model := client.GenerativeModel(request.Opts.Model)
	if model == nil {
		log.Println("model not found")
		client.Close()
		// BUG FIX: same nil-channel problem as above.
		return errCh(fmt.Errorf("google model not found: %s", request.Opts.Model))
	}
	cs := model.StartChat()
	cs.History = extractHistory(request.Prompt)
	iter := cs.SendMessageStream(ctx, extractPrompt(request.Prompt))
	rtn := make(chan wshrpc.RespOrErrorUnion[wshrpc.WaveAIPacketType])
	go func() {
		defer client.Close()
		defer close(rtn)
		for {
			// Check for context cancellation between chunks
			if err := ctx.Err(); err != nil {
				rtn <- makeAIError(fmt.Errorf("request cancelled: %v", err))
				break
			}
			resp, err := iter.Next()
			if err == iterator.Done {
				break
			}
			if err != nil {
				rtn <- makeAIError(fmt.Errorf("Google API error: %v", err))
				break
			}
			rtn <- wshrpc.RespOrErrorUnion[wshrpc.WaveAIPacketType]{Response: wshrpc.WaveAIPacketType{Text: convertCandidatesToText(resp.Candidates)}}
		}
	}()
	return rtn
}
// extractHistory converts all but the last prompt message (which is sent as
// the live prompt) into genai chat history, keeping only "user" and "model"
// roles. Returns nil for an empty history.
func extractHistory(history []wshrpc.WaveAIPromptMessageType) []*genai.Content {
	var rtn []*genai.Content
	if len(history) == 0 {
		// BUG FIX: history[:len(history)-1] panicked on an empty slice.
		return rtn
	}
	for _, h := range history[:len(history)-1] {
		if h.Role == "user" || h.Role == "model" {
			rtn = append(rtn, &genai.Content{
				Role:  h.Role,
				Parts: []genai.Part{genai.Text(h.Content)},
			})
		}
	}
	return rtn
}
// extractPrompt returns the last prompt message's content as the genai part
// to send. Returns empty text for an empty prompt instead of panicking.
func extractPrompt(prompt []wshrpc.WaveAIPromptMessageType) genai.Part {
	if len(prompt) == 0 {
		// BUG FIX: prompt[len(prompt)-1] panicked on an empty slice.
		return genai.Text("")
	}
	p := prompt[len(prompt)-1]
	return genai.Text(p.Content)
}
// convertCandidatesToText flattens all parts of all candidates into one string.
func convertCandidatesToText(candidates []*genai.Candidate) string {
	out := ""
	for _, cand := range candidates {
		for _, part := range cand.Content.Parts {
			// fmt.Sprint uses the same default (%v) formatting as Sprintf("%v", ...)
			out += fmt.Sprint(part)
		}
	}
	return out
}

View file

@ -1,179 +0,0 @@
// Copyright 2025, Command Line Inc.
// SPDX-License-Identifier: Apache-2.0
package waveai
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"regexp"
"strings"
openaiapi "github.com/sashabaranov/go-openai"
"github.com/wavetermdev/waveterm/pkg/panichandler"
"github.com/wavetermdev/waveterm/pkg/wshrpc"
)
type OpenAIBackend struct{}
var _ AIBackend = OpenAIBackend{}
const DefaultAzureAPIVersion = "2023-05-15"
// azureModelMapperRe matches the characters Azure disallows in deployment names.
var azureModelMapperRe = regexp.MustCompile(`[.:]`)

// defaultAzureMapperFn maps an OpenAI model name to an Azure deployment name
// by stripping '.' and ':' (copied from go-openai/config.go).
// PERF: the regexp is now compiled once at package init instead of on every call.
func defaultAzureMapperFn(model string) string {
	return azureModelMapperRe.ReplaceAllString(model, "")
}
// isReasoningModel reports whether model is an OpenAI reasoning model
// (o1/o3/o4 and gpt-5 families), which take MaxCompletionTokens rather than
// MaxTokens. Matching is case-insensitive prefix matching.
func isReasoningModel(model string) bool {
	m := strings.ToLower(model)
	// the "gpt-5" prefix already covers "gpt-5.1" (removed redundant check)
	return strings.HasPrefix(m, "o1") ||
		strings.HasPrefix(m, "o3") ||
		strings.HasPrefix(m, "o4") ||
		strings.HasPrefix(m, "gpt-5")
}
// setApiType validates opts.APIType (case-insensitively) and configures
// clientConfig for OpenAI, Azure, AzureAD, or CloudflareAzure; unrecognized
// values produce an error. Azure variants also get the default API version
// and model-name mapper.
func setApiType(opts *wshrpc.WaveAIOptsType, clientConfig *openaiapi.ClientConfig) error {
	applyAzure := func(apiType openaiapi.APIType) {
		clientConfig.APIType = apiType
		clientConfig.APIVersion = DefaultAzureAPIVersion
		clientConfig.AzureModelMapperFunc = defaultAzureMapperFn
	}
	switch requested := strings.ToLower(opts.APIType); requested {
	case "", APIType_OpenAI, strings.ToLower(string(openaiapi.APITypeOpenAI)):
		clientConfig.APIType = openaiapi.APITypeOpenAI
		return nil
	case strings.ToLower(string(openaiapi.APITypeAzure)):
		applyAzure(openaiapi.APITypeAzure)
		return nil
	case strings.ToLower(string(openaiapi.APITypeAzureAD)):
		applyAzure(openaiapi.APITypeAzureAD)
		return nil
	case strings.ToLower(string(openaiapi.APITypeCloudflareAzure)):
		applyAzure(openaiapi.APITypeCloudflareAzure)
		return nil
	default:
		return fmt.Errorf("invalid api type %q", opts.APIType)
	}
}
// convertPrompt maps the generic prompt messages 1:1 into go-openai chat
// messages. Returns nil for an empty prompt (preserving nil semantics for
// JSON encoding); otherwise pre-sizes the result to avoid append growth.
func convertPrompt(prompt []wshrpc.WaveAIPromptMessageType) []openaiapi.ChatCompletionMessage {
	if len(prompt) == 0 {
		return nil
	}
	rtn := make([]openaiapi.ChatCompletionMessage, 0, len(prompt))
	for _, p := range prompt {
		rtn = append(rtn, openaiapi.ChatCompletionMessage{Role: p.Role, Content: p.Content, Name: p.Name})
	}
	return rtn
}
// StreamCompletion implements AIBackend for OpenAI-compatible chat APIs
// (OpenAI, Azure, and any server honoring a custom BaseURL). Responses are
// streamed as WaveAIPacketType values; errors (including panics) are
// delivered in-band and the returned channel is always closed.
func (OpenAIBackend) StreamCompletion(ctx context.Context, request wshrpc.WaveAIStreamRequest) chan wshrpc.RespOrErrorUnion[wshrpc.WaveAIPacketType] {
	rtn := make(chan wshrpc.RespOrErrorUnion[wshrpc.WaveAIPacketType])
	go func() {
		defer func() {
			// convert panics to an in-band error before closing the channel
			panicErr := panichandler.PanicHandler("OpenAIBackend.StreamCompletion", recover())
			if panicErr != nil {
				rtn <- makeAIError(panicErr)
			}
			close(rtn)
		}()
		// validate options: model is required; a token is required unless a
		// custom BaseURL is set (e.g. a local server needing no auth)
		if request.Opts == nil {
			rtn <- makeAIError(errors.New("no openai opts found"))
			return
		}
		if request.Opts.Model == "" {
			rtn <- makeAIError(errors.New("no openai model specified"))
			return
		}
		if request.Opts.BaseURL == "" && request.Opts.APIToken == "" {
			rtn <- makeAIError(errors.New("no api token"))
			return
		}
		clientConfig := openaiapi.DefaultConfig(request.Opts.APIToken)
		if request.Opts.BaseURL != "" {
			clientConfig.BaseURL = request.Opts.BaseURL
		}
		err := setApiType(request.Opts, &clientConfig)
		if err != nil {
			rtn <- makeAIError(err)
			return
		}
		if request.Opts.OrgID != "" {
			clientConfig.OrgID = request.Opts.OrgID
		}
		if request.Opts.APIVersion != "" {
			clientConfig.APIVersion = request.Opts.APIVersion
		}
		// Configure proxy if specified
		if request.Opts.ProxyURL != "" {
			proxyURL, err := url.Parse(request.Opts.ProxyURL)
			if err != nil {
				rtn <- makeAIError(fmt.Errorf("invalid proxy URL: %v", err))
				return
			}
			transport := &http.Transport{
				Proxy: http.ProxyURL(proxyURL),
			}
			clientConfig.HTTPClient = &http.Client{
				Transport: transport,
			}
		}
		client := openaiapi.NewClientWithConfig(clientConfig)
		req := openaiapi.ChatCompletionRequest{
			Model:    request.Opts.Model,
			Messages: convertPrompt(request.Prompt),
		}
		// Set MaxCompletionTokens for reasoning models, MaxTokens for others
		if isReasoningModel(request.Opts.Model) {
			req.MaxCompletionTokens = request.Opts.MaxTokens
		} else {
			req.MaxTokens = request.Opts.MaxTokens
		}
		req.Stream = true
		if request.Opts.MaxChoices > 1 {
			req.N = request.Opts.MaxChoices
		}
		apiResp, err := client.CreateChatCompletionStream(ctx, req)
		if err != nil {
			rtn <- makeAIError(fmt.Errorf("error calling openai API: %v", err))
			return
		}
		// sentHeader tracks whether the model/created header packet went out;
		// it is sent once, from the first chunk carrying a model name
		sentHeader := false
		for {
			streamResp, err := apiResp.Recv()
			if err == io.EOF {
				break
			}
			if err != nil {
				rtn <- makeAIError(fmt.Errorf("OpenAI request, error reading message: %v", err))
				break
			}
			if streamResp.Model != "" && !sentHeader {
				pk := MakeWaveAIPacket()
				pk.Model = streamResp.Model
				pk.Created = streamResp.Created
				rtn <- wshrpc.RespOrErrorUnion[wshrpc.WaveAIPacketType]{Response: *pk}
				sentHeader = true
			}
			// one packet per choice delta (Index distinguishes multi-choice)
			for _, choice := range streamResp.Choices {
				pk := MakeWaveAIPacket()
				pk.Index = choice.Index
				pk.Text = choice.Delta.Content
				pk.FinishReason = string(choice.FinishReason)
				rtn <- wshrpc.RespOrErrorUnion[wshrpc.WaveAIPacketType]{Response: *pk}
			}
		}
	}()
	return rtn
}

View file

@ -1,193 +0,0 @@
// Copyright 2025, Command Line Inc.
// SPDX-License-Identifier: Apache-2.0
package waveai
import (
"bufio"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"strings"
"github.com/wavetermdev/waveterm/pkg/panichandler"
"github.com/wavetermdev/waveterm/pkg/wshrpc"
)
// PerplexityBackend implements AIBackend against the Perplexity chat API.
type PerplexityBackend struct{}

// compile-time interface check
var _ AIBackend = PerplexityBackend{}

// Perplexity API request types
type perplexityMessage struct {
	Role    string `json:"role"`
	Content string `json:"content"`
}
type perplexityRequest struct {
	Model    string              `json:"model"`
	Messages []perplexityMessage `json:"messages"`
	Stream   bool                `json:"stream"`
}

// Perplexity API response types (OpenAI-style streaming chunks)
type perplexityResponseDelta struct {
	Content string `json:"content"`
}
type perplexityResponseChoice struct {
	Delta        perplexityResponseDelta `json:"delta"`
	FinishReason string                  `json:"finish_reason"`
}
type perplexityResponse struct {
	ID      string                     `json:"id"`
	Choices []perplexityResponseChoice `json:"choices"`
	Model   string                     `json:"model"`
}
// StreamCompletion implements AIBackend for the Perplexity chat API, which
// streams OpenAI-style SSE chunks. Errors (including panics) are delivered
// in-band on the returned channel, which is always closed.
func (PerplexityBackend) StreamCompletion(ctx context.Context, request wshrpc.WaveAIStreamRequest) chan wshrpc.RespOrErrorUnion[wshrpc.WaveAIPacketType] {
	rtn := make(chan wshrpc.RespOrErrorUnion[wshrpc.WaveAIPacketType])
	go func() {
		defer func() {
			// convert panics to an in-band error before closing the channel
			panicErr := panichandler.PanicHandler("PerplexityBackend.StreamCompletion", recover())
			if panicErr != nil {
				rtn <- makeAIError(panicErr)
			}
			close(rtn)
		}()
		if request.Opts == nil {
			rtn <- makeAIError(errors.New("no perplexity opts found"))
			return
		}
		model := request.Opts.Model
		if model == "" {
			// fallback default model when none is configured
			model = "llama-3.1-sonar-small-128k-online"
		}
		// Convert messages format: unknown roles default to "user"
		var messages []perplexityMessage
		for _, msg := range request.Prompt {
			role := "user"
			if msg.Role == "assistant" {
				role = "assistant"
			} else if msg.Role == "system" {
				role = "system"
			}
			messages = append(messages, perplexityMessage{
				Role:    role,
				Content: msg.Content,
			})
		}
		perplexityReq := perplexityRequest{
			Model:    model,
			Messages: messages,
			Stream:   true,
		}
		reqBody, err := json.Marshal(perplexityReq)
		if err != nil {
			rtn <- makeAIError(fmt.Errorf("failed to marshal perplexity request: %v", err))
			return
		}
		req, err := http.NewRequestWithContext(ctx, "POST", "https://api.perplexity.ai/chat/completions", strings.NewReader(string(reqBody)))
		if err != nil {
			rtn <- makeAIError(fmt.Errorf("failed to create perplexity request: %v", err))
			return
		}
		req.Header.Set("Content-Type", "application/json")
		req.Header.Set("Authorization", "Bearer "+request.Opts.APIToken)
		// Configure HTTP client with proxy if specified
		client := &http.Client{}
		if request.Opts.ProxyURL != "" {
			proxyURL, err := url.Parse(request.Opts.ProxyURL)
			if err != nil {
				rtn <- makeAIError(fmt.Errorf("invalid proxy URL: %v", err))
				return
			}
			transport := &http.Transport{
				Proxy: http.ProxyURL(proxyURL),
			}
			client.Transport = transport
		}
		resp, err := client.Do(req)
		if err != nil {
			rtn <- makeAIError(fmt.Errorf("failed to send perplexity request: %v", err))
			return
		}
		defer resp.Body.Close()
		if resp.StatusCode != http.StatusOK {
			bodyBytes, _ := io.ReadAll(resp.Body)
			rtn <- makeAIError(fmt.Errorf("Perplexity API error: %s - %s", resp.Status, string(bodyBytes)))
			return
		}
		reader := bufio.NewReader(resp.Body)
		// sentHeader tracks the one-time model header packet
		sentHeader := false
		for {
			// Check for context cancellation between lines
			select {
			case <-ctx.Done():
				rtn <- makeAIError(fmt.Errorf("request cancelled: %v", ctx.Err()))
				return
			default:
			}
			line, err := reader.ReadString('\n')
			if err == io.EOF {
				break
			}
			if err != nil {
				rtn <- makeAIError(fmt.Errorf("error reading stream: %v", err))
				break
			}
			line = strings.TrimSpace(line)
			// only "data: " payload lines matter; everything else is skipped
			// NOTE(review): a "data:" line without the trailing space would be
			// dropped here — verify the API always emits "data: "
			if !strings.HasPrefix(line, "data: ") {
				continue
			}
			data := strings.TrimPrefix(line, "data: ")
			if data == "[DONE]" {
				break
			}
			var response perplexityResponse
			if err := json.Unmarshal([]byte(data), &response); err != nil {
				rtn <- makeAIError(fmt.Errorf("error unmarshaling response: %v", err))
				break
			}
			if !sentHeader {
				pk := MakeWaveAIPacket()
				pk.Model = response.Model
				rtn <- wshrpc.RespOrErrorUnion[wshrpc.WaveAIPacketType]{Response: *pk}
				sentHeader = true
			}
			for _, choice := range response.Choices {
				pk := MakeWaveAIPacket()
				pk.Text = choice.Delta.Content
				pk.FinishReason = choice.FinishReason
				rtn <- wshrpc.RespOrErrorUnion[wshrpc.WaveAIPacketType]{Response: *pk}
			}
		}
	}()
	return rtn
}

View file

@ -1,118 +0,0 @@
// Copyright 2025, Command Line Inc.
// SPDX-License-Identifier: Apache-2.0
package waveai
import (
"context"
"log"
"net/url"
"strings"
"github.com/wavetermdev/waveterm/pkg/telemetry"
"github.com/wavetermdev/waveterm/pkg/telemetry/telemetrydata"
"github.com/wavetermdev/waveterm/pkg/wshrpc"
)
// WaveAIPacketstr is the packet type tag used on all WaveAIPacketType values.
const WaveAIPacketstr = "waveai"

// Supported values for WaveAIOptsType.APIType.
const APIType_Anthropic = "anthropic"
const APIType_Perplexity = "perplexity"
const APIType_Google = "google"
const APIType_OpenAI = "openai"

// WaveAICmdInfoPacketOutputType describes one assistant response for the
// cmd-info chat UI.
type WaveAICmdInfoPacketOutputType struct {
	Model        string `json:"model,omitempty"`
	Created      int64  `json:"created,omitempty"`
	FinishReason string `json:"finish_reason,omitempty"`
	Message      string `json:"message,omitempty"`
	Error        string `json:"error,omitempty"`
}

// MakeWaveAIPacket returns an empty AI packet with its type tag set.
func MakeWaveAIPacket() *wshrpc.WaveAIPacketType {
	return &wshrpc.WaveAIPacketType{Type: WaveAIPacketstr}
}

// WaveAICmdInfoChatMessage is one entry in a cmd-info chat transcript,
// pairing a user query with its assistant response.
type WaveAICmdInfoChatMessage struct {
	MessageID           int                            `json:"messageid"`
	IsAssistantResponse bool                           `json:"isassistantresponse,omitempty"`
	AssistantResponse   *WaveAICmdInfoPacketOutputType `json:"assistantresponse,omitempty"`
	UserQuery           string                         `json:"userquery,omitempty"`
	UserEngineeredQuery string                         `json:"userengineeredquery,omitempty"`
}

// AIBackend is implemented by each AI provider; StreamCompletion returns a
// channel of response packets that is closed when the stream ends.
type AIBackend interface {
	StreamCompletion(
		ctx context.Context,
		request wshrpc.WaveAIStreamRequest,
	) chan wshrpc.RespOrErrorUnion[wshrpc.WaveAIPacketType]
}
// IsCloudAIRequest reports whether the request should be routed to the Wave
// cloud backend: either no options were given at all, or neither a custom
// base URL nor an API token is configured.
func IsCloudAIRequest(opts *wshrpc.WaveAIOptsType) bool {
	if opts == nil {
		return true
	}
	hasEndpoint := opts.BaseURL != ""
	hasToken := opts.APIToken != ""
	return !hasEndpoint && !hasToken
}
// isLocalURL reports whether baseURL points at a loopback address or an
// RFC 1918 private-network host (used to flag "local" AI endpoints in
// telemetry).
func isLocalURL(baseURL string) bool {
	if baseURL == "" {
		return false
	}
	u, err := url.Parse(baseURL)
	if err != nil {
		return false
	}
	host := strings.ToLower(u.Hostname())
	switch host {
	case "localhost", "127.0.0.1", "0.0.0.0":
		return true
	}
	if strings.HasPrefix(host, "192.168.") || strings.HasPrefix(host, "10.") {
		return true
	}
	// BUG FIX: previously any "172.x" host longer than 4 chars was treated
	// as private; the RFC 1918 range is only 172.16.0.0 - 172.31.255.255.
	if strings.HasPrefix(host, "172.") {
		octets := strings.SplitN(host, ".", 3)
		if len(octets) == 3 {
			switch octets[1] {
			case "16", "17", "18", "19", "20", "21", "22", "23",
				"24", "25", "26", "27", "28", "29", "30", "31":
				return true
			}
		}
	}
	return false
}
// makeAIError wraps err as an in-band error value for the response channel.
func makeAIError(err error) wshrpc.RespOrErrorUnion[wshrpc.WaveAIPacketType] {
	return wshrpc.RespOrErrorUnion[wshrpc.WaveAIPacketType]{Error: err}
}
// RunAICommand selects the AI backend for the request (Anthropic, Perplexity,
// Google, Wave cloud, or OpenAI-compatible), records telemetry, and starts
// the streaming completion, returning the backend's response channel.
func RunAICommand(ctx context.Context, request wshrpc.WaveAIStreamRequest) chan wshrpc.RespOrErrorUnion[wshrpc.WaveAIPacketType] {
	telemetry.GoUpdateActivityWrap(wshrpc.ActivityUpdate{NumAIReqs: 1}, "RunAICommand")
	// BUG FIX: request.Opts was dereferenced unconditionally below, even
	// though IsCloudAIRequest explicitly treats a nil Opts as a valid cloud
	// request; normalize to an empty options struct (which routes to cloud).
	if request.Opts == nil {
		request.Opts = &wshrpc.WaveAIOptsType{}
	}
	endpoint := request.Opts.BaseURL
	if endpoint == "" {
		endpoint = "default"
	}
	var backend AIBackend
	var backendType string
	switch {
	case request.Opts.APIType == APIType_Anthropic:
		backend = AnthropicBackend{}
		backendType = APIType_Anthropic
	case request.Opts.APIType == APIType_Perplexity:
		backend = PerplexityBackend{}
		backendType = APIType_Perplexity
	case request.Opts.APIType == APIType_Google:
		backend = GoogleBackend{}
		backendType = APIType_Google
	case IsCloudAIRequest(request.Opts):
		// no explicit backend and no credentials: proxy through Wave cloud
		endpoint = "waveterm cloud"
		request.Opts.APIType = APIType_OpenAI
		request.Opts.Model = "default"
		backend = WaveAICloudBackend{}
		backendType = "wave"
	default:
		backend = OpenAIBackend{}
		backendType = APIType_OpenAI
	}
	// every branch above assigns a backend, so no nil check is needed
	aiLocal := backendType != "wave" && isLocalURL(request.Opts.BaseURL)
	telemetry.GoRecordTEventWrap(&telemetrydata.TEvent{
		Event: "action:runaicmd",
		Props: telemetrydata.TEventProps{
			AiBackendType: backendType,
			AiLocal:       aiLocal,
		},
	})
	log.Printf("sending ai chat message to %s endpoint %q using model %s\n", request.Opts.APIType, endpoint, request.Opts.Model)
	return backend.StreamCompletion(ctx, request)
}

View file

@ -25,12 +25,9 @@ import (
const WCloudEndpoint = "https://api.waveterm.dev/central"
const WCloudEndpointVarName = "WCLOUD_ENDPOINT"
const WCloudWSEndpoint = "wss://wsapi.waveterm.dev/"
const WCloudWSEndpointVarName = "WCLOUD_WS_ENDPOINT"
const WCloudPingEndpoint = "https://ping.waveterm.dev/central"
const WCloudPingEndpointVarName = "WCLOUD_PING_ENDPOINT"
var WCloudWSEndpoint_VarCache string
var WCloudEndpoint_VarCache string
var WCloudPingEndpoint_VarCache string
@ -59,12 +56,6 @@ func CacheAndRemoveEnvVars() error {
return err
}
os.Unsetenv(WCloudEndpointVarName)
WCloudWSEndpoint_VarCache = os.Getenv(WCloudWSEndpointVarName)
err = checkWSEndpointVar(WCloudWSEndpoint_VarCache, "wcloud ws endpoint", WCloudWSEndpointVarName)
if err != nil {
return err
}
os.Unsetenv(WCloudWSEndpointVarName)
WCloudPingEndpoint_VarCache = os.Getenv(WCloudPingEndpointVarName)
os.Unsetenv(WCloudPingEndpointVarName)
return nil
@ -80,17 +71,6 @@ func checkEndpointVar(endpoint string, debugName string, varName string) error {
return nil
}
// checkWSEndpointVar validates a websocket endpoint override taken from the
// environment. Outside dev mode overrides are ignored and validation always
// succeeds; in dev mode the endpoint must be non-empty and use wss://.
func checkWSEndpointVar(endpoint string, debugName string, varName string) error {
	if !wavebase.IsDevMode() {
		return nil
	}
	log.Printf("checking endpoint %q\n", endpoint)
	hasWSSScheme := strings.HasPrefix(endpoint, "wss://")
	if endpoint == "" || !hasWSSScheme {
		return fmt.Errorf("invalid %s, %s not set or invalid", debugName, varName)
	}
	return nil
}
func GetEndpoint() string {
if !wavebase.IsDevMode() {
return WCloudEndpoint
@ -99,14 +79,6 @@ func GetEndpoint() string {
return endpoint
}
// GetWSEndpoint returns the wcloud websocket endpoint: the cached env-var
// override in dev mode, or the production constant otherwise.
func GetWSEndpoint() string {
	if wavebase.IsDevMode() {
		// dev builds use the value cached from WCLOUD_WS_ENDPOINT (may be empty)
		return WCloudWSEndpoint_VarCache
	}
	return WCloudWSEndpoint
}
func GetPingEndpoint() string {
if !wavebase.IsDevMode() {
return WCloudPingEndpoint

View file

@ -918,11 +918,6 @@ func StreamTestCommand(w *wshutil.WshRpc, opts *wshrpc.RpcOpts) chan wshrpc.Resp
return sendRpcRequestResponseStreamHelper[int](w, "streamtest", nil, opts)
}
// command "streamwaveai", wshserver.StreamWaveAiCommand
// StreamWaveAiCommand issues the "streamwaveai" RPC and returns a channel of
// streamed AI packets (or errors) produced by the server.
func StreamWaveAiCommand(w *wshutil.WshRpc, data wshrpc.WaveAIStreamRequest, opts *wshrpc.RpcOpts) chan wshrpc.RespOrErrorUnion[wshrpc.WaveAIPacketType] {
	respCh := sendRpcRequestResponseStreamHelper[wshrpc.WaveAIPacketType](w, "streamwaveai", data, opts)
	return respCh
}
// command "termgetscrollbacklines", wshserver.TermGetScrollbackLinesCommand
func TermGetScrollbackLinesCommand(w *wshutil.WshRpc, data wshrpc.CommandTermGetScrollbackLinesData, opts *wshrpc.RpcOpts) (*wshrpc.CommandTermGetScrollbackLinesRtnData, error) {
resp, err := sendRpcRequestCallHelper[*wshrpc.CommandTermGetScrollbackLinesRtnData](w, "termgetscrollbacklines", data, opts)

View file

@ -71,7 +71,6 @@ type WshRpcInterface interface {
GetTempDirCommand(ctx context.Context, data CommandGetTempDirData) (string, error)
WriteTempFileCommand(ctx context.Context, data CommandWriteTempFileData) (string, error)
StreamTestCommand(ctx context.Context) chan RespOrErrorUnion[int]
StreamWaveAiCommand(ctx context.Context, request WaveAIStreamRequest) chan RespOrErrorUnion[WaveAIPacketType]
StreamCpuDataCommand(ctx context.Context, request CpuDataRequest) chan RespOrErrorUnion[TimeSeriesData]
TestCommand(ctx context.Context, data string) error
TestMultiArgCommand(ctx context.Context, arg1 string, arg2 int, arg3 bool) (string, error)
@ -342,47 +341,6 @@ type CommandEventReadHistoryData struct {
MaxItems int `json:"maxitems"`
}
// WaveAIStreamRequest is the payload for the "streamwaveai" RPC: a prompt
// plus backend options, tagged with the requesting client id.
type WaveAIStreamRequest struct {
	ClientId string                    `json:"clientid,omitempty"`
	Opts     *WaveAIOptsType           `json:"opts"`
	Prompt   []WaveAIPromptMessageType `json:"prompt"`
}

// WaveAIPromptMessageType is a single chat message in the prompt
// (OpenAI-style role/content pair).
type WaveAIPromptMessageType struct {
	Role    string `json:"role"`
	Content string `json:"content"`
	Name    string `json:"name,omitempty"`
}

// WaveAIOptsType configures which AI backend to call and how.
type WaveAIOptsType struct {
	Model      string `json:"model"`
	APIType    string `json:"apitype,omitempty"`
	APIToken   string `json:"apitoken"`
	OrgID      string `json:"orgid,omitempty"`
	APIVersion string `json:"apiversion,omitempty"`
	BaseURL    string `json:"baseurl,omitempty"`
	ProxyURL   string `json:"proxyurl,omitempty"`
	MaxTokens  int    `json:"maxtokens,omitempty"`
	MaxChoices int    `json:"maxchoices,omitempty"`
	TimeoutMs  int    `json:"timeoutms,omitempty"`
}

// WaveAIPacketType is one streamed response chunk from the AI backend.
// Type discriminates the packet; Text carries incremental completion output.
type WaveAIPacketType struct {
	Type         string           `json:"type"`
	Model        string           `json:"model,omitempty"`
	Created      int64            `json:"created,omitempty"`
	FinishReason string           `json:"finish_reason,omitempty"`
	Usage        *WaveAIUsageType `json:"usage,omitempty"`
	Index        int              `json:"index,omitempty"`
	Text         string           `json:"text,omitempty"`
	Error        string           `json:"error,omitempty"`
}

// WaveAIUsageType reports token accounting for a completion
// (field names follow the OpenAI usage object).
type WaveAIUsageType struct {
	PromptTokens     int `json:"prompt_tokens,omitempty"`
	CompletionTokens int `json:"completion_tokens,omitempty"`
	TotalTokens      int `json:"total_tokens,omitempty"`
}
type CpuDataRequest struct {
Id string `json:"id"`

View file

@ -43,7 +43,6 @@ import (
"github.com/wavetermdev/waveterm/pkg/util/envutil"
"github.com/wavetermdev/waveterm/pkg/util/shellutil"
"github.com/wavetermdev/waveterm/pkg/util/utilfn"
"github.com/wavetermdev/waveterm/pkg/waveai"
"github.com/wavetermdev/waveterm/pkg/waveappstore"
"github.com/wavetermdev/waveterm/pkg/waveapputil"
"github.com/wavetermdev/waveterm/pkg/wavebase"
@ -114,10 +113,6 @@ func (ws *WshServer) StreamTestCommand(ctx context.Context) chan wshrpc.RespOrEr
return rtn
}
// StreamWaveAiCommand handles the "streamwaveai" RPC by delegating to the
// waveai backend, returning its stream of response packets unchanged.
func (ws *WshServer) StreamWaveAiCommand(ctx context.Context, request wshrpc.WaveAIStreamRequest) chan wshrpc.RespOrErrorUnion[wshrpc.WaveAIPacketType] {
	packetCh := waveai.RunAICommand(ctx, request)
	return packetCh
}
func MakePlotData(ctx context.Context, blockId string) error {
block, err := wstore.DBMustGet[*waveobj.Block](ctx, blockId)
if err != nil {