Do not allow large/recursive file transfers (for now), remove s3 and wavefile fs implementations (#2808)

Big simplification. Remove the FileShare interface that abstracted
wsh://, s3://, and wavefile:// files.
It produced a lot of complexity for very little usage. We're just going
to focus on the wsh:// implementation since that's core to our remote
workflows.

* remove s3 implementation (and connections, and picker items for
preview)
* remove capabilities for FE
* remove wavefile backend impl as well
* simplify wsh file remote backend
* remove ability to copy/move/ls recursively
* limit file transfers to 32MB

the longer-term fix here is to use the new streaming RPC primitives.
they have full end-to-end flow-control built in and will not create
pipeline stalls, block other requests, or cause OOM issues.

these other impls had to be removed (or fixed) because transferring
large files could cause stalls or crashes with the new router
infrastructure.
This commit is contained in:
Mike Sawka 2026-01-28 14:28:31 -08:00 committed by GitHub
parent 01a26d59e6
commit 93b7269304
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
46 changed files with 788 additions and 3760 deletions

View file

@ -31,7 +31,6 @@ func GenerateWshClient() error {
"github.com/wavetermdev/waveterm/pkg/waveobj",
"github.com/wavetermdev/waveterm/pkg/wps",
"github.com/wavetermdev/waveterm/pkg/vdom",
"github.com/wavetermdev/waveterm/pkg/util/iochan/iochantypes",
"github.com/wavetermdev/waveterm/pkg/aiusechat/uctypes",
})
wshDeclMap := wshrpc.GenerateWshCommandDeclMap()

View file

@ -14,7 +14,6 @@ import (
"github.com/wavetermdev/waveterm/pkg/remote/connparse"
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/fsutil"
"github.com/wavetermdev/waveterm/pkg/util/fileutil"
"github.com/wavetermdev/waveterm/pkg/util/wavefileutil"
"github.com/wavetermdev/waveterm/pkg/wshrpc"
"github.com/wavetermdev/waveterm/pkg/wshrpc/wshclient"
)
@ -96,90 +95,6 @@ func streamReadFromFile(ctx context.Context, fileData wshrpc.FileData, writer io
return fsutil.ReadFileStreamToWriter(ctx, ch, writer)
}
type fileListResult struct {
info *wshrpc.FileInfo
err error
}
func streamFileList(zoneId string, path string, recursive bool, filesOnly bool) (<-chan fileListResult, error) {
resultChan := make(chan fileListResult)
// If path doesn't end in /, do a single file lookup
if path != "" && !strings.HasSuffix(path, "/") {
go func() {
defer close(resultChan)
fileData := wshrpc.FileData{
Info: &wshrpc.FileInfo{
Path: fmt.Sprintf(wavefileutil.WaveFilePathPattern, zoneId, path)},
}
info, err := wshclient.FileInfoCommand(RpcClient, fileData, &wshrpc.RpcOpts{Timeout: 2000})
err = convertNotFoundErr(err)
if err == fs.ErrNotExist {
resultChan <- fileListResult{err: fmt.Errorf("%s: No such file or directory", path)}
return
}
if err != nil {
resultChan <- fileListResult{err: err}
return
}
resultChan <- fileListResult{info: info}
}()
return resultChan, nil
}
// Directory listing case
go func() {
defer close(resultChan)
prefix := path
prefixLen := len(prefix)
offset := 0
foundAny := false
for {
listData := wshrpc.FileListData{
Path: fmt.Sprintf(wavefileutil.WaveFilePathPattern, zoneId, prefix),
Opts: &wshrpc.FileListOpts{
All: recursive,
Offset: offset,
Limit: 100}}
files, err := wshclient.FileListCommand(RpcClient, listData, &wshrpc.RpcOpts{Timeout: 2000})
if err != nil {
resultChan <- fileListResult{err: err}
return
}
if len(files) == 0 {
if !foundAny && prefix != "" {
resultChan <- fileListResult{err: fmt.Errorf("%s: No such file or directory", path)}
}
return
}
for _, f := range files {
if filesOnly && f.IsDir {
continue
}
foundAny = true
if prefixLen > 0 {
f.Name = f.Name[prefixLen:]
}
resultChan <- fileListResult{info: f}
}
if len(files) < 100 {
return
}
offset += len(files)
}
}()
return resultChan, nil
}
func fixRelativePaths(path string) (string, error) {
conn, err := connparse.ParseURI(path)
if err != nil {

View file

@ -11,14 +11,11 @@ import (
"io"
"log"
"os"
"path"
"path/filepath"
"strings"
"text/tabwriter"
"time"
"github.com/spf13/cobra"
"github.com/wavetermdev/waveterm/pkg/util/colprint"
"github.com/wavetermdev/waveterm/pkg/util/utilfn"
"github.com/wavetermdev/waveterm/pkg/wshrpc"
"github.com/wavetermdev/waveterm/pkg/wshrpc/wshclient"
@ -26,9 +23,7 @@ import (
)
const (
MaxFileSize = 10 * 1024 * 1024 // 10MB
WaveFileScheme = "wavefile"
WaveFilePrefix = "wavefile://"
MaxFileSize = 10 * 1024 * 1024 // 10MB
TimeoutYear = int64(365) * 24 * 60 * 60 * 1000
@ -53,33 +48,16 @@ Supported URI schemes:
[path] a relative or absolute path on the current remote
//[remote]/[path] a path on a remote
/~/[path] a path relative to the home directory on your local
computer
s3:
Used to access files on S3-compatible systems.
Requires S3 credentials to be set up, either in the AWS CLI configuration
files, or in "profiles.json" in the Wave configuration directory.
If no profile is provided, the default from your AWS CLI configuration will
be used. Profiles from the AWS CLI must be prefixed with "aws:".
Format: s3://[bucket]/[path]
aws:[profile]:s3://[bucket]/[path]
[profile]:s3://[bucket]/[path]
wavefile:
Used to retrieve blockfiles from the internal Wave filesystem.
Format: wavefile://[zoneid]/[path]`
computer`
)
var fileCmd = &cobra.Command{
Use: "file",
Short: "manage files across different storage systems",
Long: `Manage files across different storage systems.
Short: "manage files across local and remote systems",
Long: `Manage files across local and remote systems.
Wave Terminal is capable of managing files from remote SSH hosts, S3-compatible
systems, and the internal Wave filesystem. Files are addressed via URIs, which
vary depending on the storage system.` + UriHelpText}
Wave Terminal is capable of managing files from remote SSH hosts and your local
computer. Files are addressed via URIs.` + UriHelpText}
var fileTimeout int64
@ -88,7 +66,6 @@ func init() {
fileCmd.PersistentFlags().Int64VarP(&fileTimeout, "timeout", "t", 15000, "timeout in milliseconds for long operations")
fileListCmd.Flags().BoolP("recursive", "r", false, "list subdirectories recursively")
fileListCmd.Flags().BoolP("long", "l", false, "use long listing format")
fileListCmd.Flags().BoolP("one", "1", false, "list one file per line")
fileListCmd.Flags().BoolP("files", "f", false, "list files only")
@ -103,7 +80,6 @@ func init() {
fileCpCmd.Flags().BoolP("merge", "m", false, "merge directories")
fileCpCmd.Flags().BoolP("force", "f", false, "force overwrite of existing files")
fileCmd.AddCommand(fileCpCmd)
fileMvCmd.Flags().BoolP("recursive", "r", false, "move directories recursively")
fileMvCmd.Flags().BoolP("force", "f", false, "force overwrite of existing files")
fileCmd.AddCommand(fileMvCmd)
}
@ -113,7 +89,7 @@ var fileListCmd = &cobra.Command{
Aliases: []string{"list"},
Short: "list files",
Long: "List files in a directory. By default, lists files in the current directory." + UriHelpText,
Example: " wsh file ls wsh://user@ec2/home/user/\n wsh file ls wavefile://client/configs/",
Example: " wsh file ls wsh://user@ec2/home/user/",
RunE: activityWrap("file", fileListRun),
PreRunE: preRunSetupRpcClient,
}
@ -122,7 +98,7 @@ var fileCatCmd = &cobra.Command{
Use: "cat [uri]",
Short: "display contents of a file",
Long: "Display the contents of a file." + UriHelpText,
Example: " wsh file cat wsh://user@ec2/home/user/config.txt\n wsh file cat wavefile://client/settings.json",
Example: " wsh file cat wsh://user@ec2/home/user/config.txt",
Args: cobra.ExactArgs(1),
RunE: activityWrap("file", fileCatRun),
PreRunE: preRunSetupRpcClient,
@ -132,7 +108,7 @@ var fileInfoCmd = &cobra.Command{
Use: "info [uri]",
Short: "show wave file information",
Long: "Show information about a file." + UriHelpText,
Example: " wsh file info wsh://user@ec2/home/user/config.txt\n wsh file info wavefile://client/settings.json",
Example: " wsh file info wsh://user@ec2/home/user/config.txt",
Args: cobra.ExactArgs(1),
RunE: activityWrap("file", fileInfoRun),
PreRunE: preRunSetupRpcClient,
@ -142,7 +118,7 @@ var fileRmCmd = &cobra.Command{
Use: "rm [uri]",
Short: "remove a file",
Long: "Remove a file." + UriHelpText,
Example: " wsh file rm wsh://user@ec2/home/user/config.txt\n wsh file rm wavefile://client/settings.json",
Example: " wsh file rm wsh://user@ec2/home/user/config.txt",
Args: cobra.ExactArgs(1),
RunE: activityWrap("file", fileRmRun),
PreRunE: preRunSetupRpcClient,
@ -152,7 +128,7 @@ var fileWriteCmd = &cobra.Command{
Use: "write [uri]",
Short: "write stdin into a file (up to 10MB)",
Long: "Write stdin into a file, buffering input (10MB total file size limit)." + UriHelpText,
Example: " echo 'hello' | wsh file write wavefile://block/greeting.txt",
Example: " echo 'hello' | wsh file write ./greeting.txt",
Args: cobra.ExactArgs(1),
RunE: activityWrap("file", fileWriteRun),
PreRunE: preRunSetupRpcClient,
@ -162,7 +138,7 @@ var fileAppendCmd = &cobra.Command{
Use: "append [uri]",
Short: "append stdin to a file",
Long: "Append stdin to a file, buffering input (10MB total file size limit)." + UriHelpText,
Example: " tail -f log.txt | wsh file append wavefile://block/app.log",
Example: " tail -f log.txt | wsh file append ./app.log",
Args: cobra.ExactArgs(1),
RunE: activityWrap("file", fileAppendRun),
PreRunE: preRunSetupRpcClient,
@ -173,7 +149,7 @@ var fileCpCmd = &cobra.Command{
Aliases: []string{"copy"},
Short: "copy files between storage systems, recursively if needed",
Long: "Copy files between different storage systems." + UriHelpText,
Example: " wsh file cp wavefile://block/config.txt ./local-config.txt\n wsh file cp ./local-config.txt wavefile://block/config.txt\n wsh file cp wsh://user@ec2/home/user/config.txt wavefile://client/config.txt",
Example: " wsh file cp wsh://user@ec2/home/user/config.txt ./local-config.txt\n wsh file cp ./local-config.txt wsh://user@ec2/home/user/config.txt",
Args: cobra.ExactArgs(2),
RunE: activityWrap("file", fileCpRun),
PreRunE: preRunSetupRpcClient,
@ -184,7 +160,7 @@ var fileMvCmd = &cobra.Command{
Aliases: []string{"move"},
Short: "move files between storage systems",
Long: "Move files between different storage systems. The source file will be deleted once the operation completes successfully." + UriHelpText,
Example: " wsh file mv wavefile://block/config.txt ./local-config.txt\n wsh file mv ./local-config.txt wavefile://block/config.txt\n wsh file mv wsh://user@ec2/home/user/config.txt wavefile://client/config.txt",
Example: " wsh file mv wsh://user@ec2/home/user/config.txt ./local-config.txt\n wsh file mv ./local-config.txt wsh://user@ec2/home/user/config.txt",
Args: cobra.ExactArgs(2),
RunE: activityWrap("file", fileMvRun),
PreRunE: preRunSetupRpcClient,
@ -195,6 +171,12 @@ func fileCatRun(cmd *cobra.Command, args []string) error {
if err != nil {
return err
}
_, err = checkFileSize(path, MaxFileSize)
if err != nil {
return err
}
fileData := wshrpc.FileData{
Info: &wshrpc.FileInfo{
Path: path}}
@ -270,31 +252,18 @@ func fileWriteRun(cmd *cobra.Command, args []string) error {
Info: &wshrpc.FileInfo{
Path: path}}
capability, err := wshclient.FileShareCapabilityCommand(RpcClient, fileData.Info.Path, &wshrpc.RpcOpts{Timeout: fileTimeout})
limitReader := io.LimitReader(WrappedStdin, MaxFileSize+1)
data, err := io.ReadAll(limitReader)
if err != nil {
return fmt.Errorf("getting fileshare capability: %w", err)
return fmt.Errorf("reading input: %w", err)
}
if capability.CanAppend {
err = streamWriteToFile(fileData, WrappedStdin)
if err != nil {
return fmt.Errorf("writing file: %w", err)
}
} else {
buf := make([]byte, MaxFileSize)
n, err := WrappedStdin.Read(buf)
if err != nil && err != io.EOF {
return fmt.Errorf("reading input: %w", err)
}
if int64(n) == MaxFileSize {
if _, err := WrappedStdin.Read(make([]byte, 1)); err != io.EOF {
return fmt.Errorf("input exceeds maximum file size of %d bytes", MaxFileSize)
}
}
fileData.Data64 = base64.StdEncoding.EncodeToString(buf[:n])
err = wshclient.FileWriteCommand(RpcClient, fileData, &wshrpc.RpcOpts{Timeout: fileTimeout})
if err != nil {
return fmt.Errorf("writing file: %w", err)
}
if len(data) > MaxFileSize {
return fmt.Errorf("input exceeds maximum file size of %d bytes", MaxFileSize)
}
fileData.Data64 = base64.StdEncoding.EncodeToString(data)
err = wshclient.FileWriteCommand(RpcClient, fileData, &wshrpc.RpcOpts{Timeout: fileTimeout})
if err != nil {
return fmt.Errorf("writing file: %w", err)
}
return nil
@ -358,34 +327,26 @@ func fileAppendRun(cmd *cobra.Command, args []string) error {
return nil
}
func getTargetPath(src, dst string) (string, error) {
var srcBase string
if strings.HasPrefix(src, WaveFilePrefix) {
srcBase = path.Base(src)
} else {
srcBase = filepath.Base(src)
}
func checkFileSize(path string, maxSize int64) (*wshrpc.FileInfo, error) {
fileData := wshrpc.FileData{
Info: &wshrpc.FileInfo{
Path: path}}
if strings.HasPrefix(dst, WaveFilePrefix) {
// For wavefile URLs
if strings.HasSuffix(dst, "/") {
return dst + srcBase, nil
}
return dst, nil
info, err := wshclient.FileInfoCommand(RpcClient, fileData, &wshrpc.RpcOpts{Timeout: fileTimeout})
err = convertNotFoundErr(err)
if err != nil {
return nil, fmt.Errorf("getting file info: %w", err)
}
// For local paths
dstInfo, err := os.Stat(dst)
if err == nil && dstInfo.IsDir() {
// If it's an existing directory, use the source filename
return filepath.Join(dst, srcBase), nil
if info.NotFound {
return nil, fmt.Errorf("%s: no such file", path)
}
if err != nil && !os.IsNotExist(err) {
// Return error if it's something other than not exists
return "", fmt.Errorf("checking destination path: %w", err)
if info.IsDir {
return nil, fmt.Errorf("%s: is a directory", path)
}
return dst, nil
if info.Size > maxSize {
return nil, fmt.Errorf("file size (%d bytes) exceeds maximum of %d bytes", info.Size, maxSize)
}
return info, nil
}
func fileCpRun(cmd *cobra.Command, args []string) error {
@ -403,6 +364,12 @@ func fileCpRun(cmd *cobra.Command, args []string) error {
if err != nil {
return fmt.Errorf("unable to parse src path: %w", err)
}
_, err = checkFileSize(srcPath, MaxFileSize)
if err != nil {
return err
}
destPath, err := fixRelativePaths(dst)
if err != nil {
return fmt.Errorf("unable to parse dest path: %w", err)
@ -418,10 +385,6 @@ func fileCpRun(cmd *cobra.Command, args []string) error {
func fileMvRun(cmd *cobra.Command, args []string) error {
src, dst := args[0], args[1]
recursive, err := cmd.Flags().GetBool("recursive")
if err != nil {
return err
}
force, err := cmd.Flags().GetBool("force")
if err != nil {
return err
@ -431,13 +394,19 @@ func fileMvRun(cmd *cobra.Command, args []string) error {
if err != nil {
return fmt.Errorf("unable to parse src path: %w", err)
}
_, err = checkFileSize(srcPath, MaxFileSize)
if err != nil {
return err
}
destPath, err := fixRelativePaths(dst)
if err != nil {
return fmt.Errorf("unable to parse dest path: %w", err)
}
log.Printf("Moving %s to %s; recursive: %v, force: %v", srcPath, destPath, recursive, force)
log.Printf("Moving %s to %s; force: %v", srcPath, destPath, force)
rpcOpts := &wshrpc.RpcOpts{Timeout: TimeoutYear}
err = wshclient.FileMoveCommand(RpcClient, wshrpc.CommandFileCopyData{SrcUri: srcPath, DestUri: destPath, Opts: &wshrpc.FileCopyOpts{Overwrite: force, Timeout: TimeoutYear, Recursive: recursive}}, rpcOpts)
err = wshclient.FileMoveCommand(RpcClient, wshrpc.CommandFileCopyData{SrcUri: srcPath, DestUri: destPath, Opts: &wshrpc.FileCopyOpts{Overwrite: force, Timeout: TimeoutYear}}, rpcOpts)
if err != nil {
return fmt.Errorf("moving file: %w", err)
}
@ -445,48 +414,66 @@ func fileMvRun(cmd *cobra.Command, args []string) error {
}
func filePrintColumns(filesChan <-chan wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData]) error {
width := 80 // default if we can't get terminal
if w, _, err := term.GetSize(int(os.Stdout.Fd())); err == nil {
width := 80
w, _, err := term.GetSize(int(os.Stdout.Fd()))
if err == nil {
width = w
}
numCols := width / 10
var allNames []string
maxLen := 0
for respUnion := range filesChan {
if respUnion.Error != nil {
return respUnion.Error
}
for _, f := range respUnion.Response.FileInfo {
allNames = append(allNames, f.Name)
if len(f.Name) > maxLen {
maxLen = len(f.Name)
}
}
}
colWidth := maxLen + 2
numCols := width / colWidth
if numCols < 1 {
numCols = 1
}
return colprint.PrintColumnsArray(
filesChan,
numCols,
100, // sample size
func(respUnion wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData]) ([]string, error) {
if respUnion.Error != nil {
return []string{}, respUnion.Error
}
strs := make([]string, len(respUnion.Response.FileInfo))
for i, f := range respUnion.Response.FileInfo {
strs[i] = f.Name
}
return strs, nil
},
os.Stdout,
)
col := 0
for _, name := range allNames {
fmt.Fprintf(os.Stdout, "%-*s", colWidth, name)
col++
if col >= numCols {
fmt.Fprintln(os.Stdout)
col = 0
}
}
if col > 0 {
fmt.Fprintln(os.Stdout)
}
return nil
}
func filePrintLong(filesChan <-chan wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData]) error {
// Sample first 100 files to determine name width
maxNameLen := 0
var samples []*wshrpc.FileInfo
var allFiles []*wshrpc.FileInfo
for respUnion := range filesChan {
if respUnion.Error != nil {
return respUnion.Error
}
resp := respUnion.Response
samples = append(samples, resp.FileInfo...)
allFiles = append(allFiles, resp.FileInfo...)
}
maxNameLen := 0
for _, fi := range allFiles {
if len(fi.Name) > maxNameLen {
maxNameLen = len(fi.Name)
}
}
// Use sampled width, but cap it at 60 chars to prevent excessive width
nameWidth := maxNameLen + 2
if nameWidth > 60 {
nameWidth = 60
@ -494,8 +481,7 @@ func filePrintLong(filesChan <-chan wshrpc.RespOrErrorUnion[wshrpc.CommandRemote
writer := tabwriter.NewWriter(os.Stdout, 0, 8, 1, '\t', 0)
// Print samples
for _, f := range samples {
for _, f := range allFiles {
name := f.Name
t := time.Unix(f.ModTime/1000, 0)
timestamp := utilfn.FormatLsTime(t)
@ -505,30 +491,12 @@ func filePrintLong(filesChan <-chan wshrpc.RespOrErrorUnion[wshrpc.CommandRemote
fmt.Fprintf(writer, "%-*s\t%8d\t%s\n", nameWidth, name, f.Size, timestamp)
}
}
// Continue with remaining files
for respUnion := range filesChan {
if respUnion.Error != nil {
return respUnion.Error
}
for _, f := range respUnion.Response.FileInfo {
name := f.Name
t := time.Unix(f.ModTime/1000, 0)
timestamp := utilfn.FormatLsTime(t)
if f.Size == 0 && strings.HasSuffix(name, "/") {
fmt.Fprintf(writer, "%-*s\t%8s\t%s\n", nameWidth, name, "-", timestamp)
} else {
fmt.Fprintf(writer, "%-*s\t%8d\t%s\n", nameWidth, name, f.Size, timestamp)
}
}
}
writer.Flush()
return nil
}
func fileListRun(cmd *cobra.Command, args []string) error {
recursive, _ := cmd.Flags().GetBool("recursive")
longForm, _ := cmd.Flags().GetBool("long")
onePerLine, _ := cmd.Flags().GetBool("one")
@ -548,7 +516,7 @@ func fileListRun(cmd *cobra.Command, args []string) error {
return err
}
filesChan := wshclient.FileListStreamCommand(RpcClient, wshrpc.FileListData{Path: path, Opts: &wshrpc.FileListOpts{All: recursive}}, &wshrpc.RpcOpts{Timeout: 2000})
filesChan := wshclient.FileListStreamCommand(RpcClient, wshrpc.FileListData{Path: path, Opts: &wshrpc.FileListOpts{All: false}}, &wshrpc.RpcOpts{Timeout: 2000})
// Drain the channel when done
defer utilfn.DrainChannelSafe(filesChan, "fileListRun")
if longForm {
@ -564,8 +532,8 @@ func fileListRun(cmd *cobra.Command, args []string) error {
for _, f := range respUnion.Response.FileInfo {
fmt.Fprintln(os.Stdout, f.Name)
}
return nil
}
return nil
}
return filePrintColumns(filesChan)

View file

@ -4,14 +4,9 @@
package cmd
import (
"encoding/base64"
"fmt"
"io/fs"
"sort"
"github.com/spf13/cobra"
"github.com/wavetermdev/waveterm/pkg/util/envutil"
"github.com/wavetermdev/waveterm/pkg/util/wavefileutil"
"github.com/wavetermdev/waveterm/pkg/wshrpc"
"github.com/wavetermdev/waveterm/pkg/wshrpc/wshclient"
)
@ -113,39 +108,23 @@ func getVarRun(cmd *cobra.Command, args []string) error {
}
func getAllVariables(zoneId string) error {
fileData := wshrpc.FileData{
Info: &wshrpc.FileInfo{
Path: fmt.Sprintf(wavefileutil.WaveFilePathPattern, zoneId, getVarFileName)}}
data, err := wshclient.FileReadCommand(RpcClient, fileData, &wshrpc.RpcOpts{Timeout: 2000})
err = convertNotFoundErr(err)
if err == fs.ErrNotExist {
return nil
}
if err != nil {
return fmt.Errorf("reading variables: %w", err)
}
envBytes, err := base64.StdEncoding.DecodeString(data.Data64)
if err != nil {
return fmt.Errorf("decoding variables: %w", err)
commandData := wshrpc.CommandVarData{
ZoneId: zoneId,
FileName: getVarFileName,
}
envMap := envutil.EnvToMap(string(envBytes))
vars, err := wshclient.GetAllVarsCommand(RpcClient, commandData, &wshrpc.RpcOpts{Timeout: 2000})
if err != nil {
return fmt.Errorf("getting variables: %w", err)
}
terminator := "\n"
if getVarNullTerminate {
terminator = "\x00"
}
// Sort keys for consistent output
keys := make([]string, 0, len(envMap))
for k := range envMap {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
WriteStdout("%s=%s%s", k, envMap[k], terminator)
for _, v := range vars {
WriteStdout("%s=%s%s", v.Key, v.Val, terminator)
}
return nil

View file

@ -4,13 +4,15 @@
package cmd
import (
"encoding/base64"
"encoding/json"
"fmt"
"io"
"os"
"github.com/spf13/cobra"
"github.com/wavetermdev/waveterm/pkg/wshrpc"
"github.com/wavetermdev/waveterm/pkg/wshrpc/wshclient"
"github.com/wavetermdev/waveterm/pkg/wshutil"
)
var jobDebugCmd = &cobra.Command{
@ -337,21 +339,36 @@ func jobDebugReconnectConnRun(cmd *cobra.Command, args []string) error {
}
func jobDebugGetOutputRun(cmd *cobra.Command, args []string) error {
fileData, err := wshclient.FileReadCommand(RpcClient, wshrpc.FileData{
Info: &wshrpc.FileInfo{
Path: fmt.Sprintf("wavefile://%s/term", jobIdFlag),
},
}, &wshrpc.RpcOpts{Timeout: 10000})
if err != nil {
return fmt.Errorf("reading job output: %w", err)
broker := RpcClient.StreamBroker
if broker == nil {
return fmt.Errorf("stream broker not available")
}
if fileData.Data64 != "" {
decoded, err := base64.StdEncoding.DecodeString(fileData.Data64)
if err != nil {
return fmt.Errorf("decoding output data: %w", err)
}
fmt.Printf("%s", string(decoded))
readerRouteId, err := wshclient.ControlGetRouteIdCommand(RpcClient, &wshrpc.RpcOpts{Route: wshutil.ControlRoute})
if err != nil {
return fmt.Errorf("getting route id: %w", err)
}
if readerRouteId == "" {
return fmt.Errorf("no route to receive data")
}
writerRouteId := "" // main server route
reader, streamMeta := broker.CreateStreamReader(readerRouteId, writerRouteId, 64*1024)
defer reader.Close()
data := wshrpc.CommandWaveFileReadStreamData{
ZoneId: jobIdFlag,
Name: "term",
StreamMeta: *streamMeta,
}
_, err = wshclient.WaveFileReadStreamCommand(RpcClient, data, nil)
if err != nil {
return fmt.Errorf("starting stream read: %w", err)
}
_, err = io.Copy(os.Stdout, reader)
if err != nil {
return fmt.Errorf("reading stream: %w", err)
}
return nil
}

View file

@ -4,13 +4,13 @@
package cmd
import (
"encoding/base64"
"fmt"
"io"
"os"
"github.com/spf13/cobra"
"github.com/wavetermdev/waveterm/pkg/util/wavefileutil"
"github.com/wavetermdev/waveterm/pkg/wshrpc"
"github.com/wavetermdev/waveterm/pkg/wshrpc/wshclient"
"github.com/wavetermdev/waveterm/pkg/wshutil"
)
var readFileCmd = &cobra.Command{
@ -19,6 +19,7 @@ var readFileCmd = &cobra.Command{
Args: cobra.ExactArgs(1),
Run: runReadFile,
PreRunE: preRunSetupRpcClient,
Hidden: true,
}
func init() {
@ -31,15 +32,41 @@ func runReadFile(cmd *cobra.Command, args []string) {
WriteStderr("[error] %v\n", err)
return
}
data, err := wshclient.FileReadCommand(RpcClient, wshrpc.FileData{Info: &wshrpc.FileInfo{Path: fmt.Sprintf(wavefileutil.WaveFilePathPattern, fullORef.OID, args[0])}}, &wshrpc.RpcOpts{Timeout: 5000})
if err != nil {
WriteStderr("[error] reading file: %v\n", err)
broker := RpcClient.StreamBroker
if broker == nil {
WriteStderr("[error] stream broker not available\n")
return
}
resp, err := base64.StdEncoding.DecodeString(data.Data64)
readerRouteId, err := wshclient.ControlGetRouteIdCommand(RpcClient, &wshrpc.RpcOpts{Route: wshutil.ControlRoute})
if err != nil {
WriteStderr("[error] decoding file: %v\n", err)
WriteStderr("[error] getting route id: %v\n", err)
return
}
if readerRouteId == "" {
WriteStderr("[error] no route to receive data\n")
return
}
writerRouteId := ""
reader, streamMeta := broker.CreateStreamReader(readerRouteId, writerRouteId, 64*1024)
defer reader.Close()
data := wshrpc.CommandWaveFileReadStreamData{
ZoneId: fullORef.OID,
Name: args[0],
StreamMeta: *streamMeta,
}
_, err = wshclient.WaveFileReadStreamCommand(RpcClient, data, nil)
if err != nil {
WriteStderr("[error] starting stream read: %v\n", err)
return
}
_, err = io.Copy(os.Stdout, reader)
if err != nil {
WriteStderr("[error] reading stream: %v\n", err)
return
}
WriteStdout("%s", string(resp))
}

View file

@ -497,12 +497,11 @@ This allows setting various options in the `config/settings.json` file. It will
## file
The `file` command provides a set of subcommands for managing files across different storage systems, such as `wavefile`, `wsh` remote servers, and S3.
The `file` command provides a set of subcommands for managing files across different storage systems, such as `wsh` remote servers.
:::note
Wave Terminal is capable of managing files from remote SSH hosts, S3-compatible
systems, and the internal Wave filesystem. Files are addressed via URIs, which
Wave Terminal is capable of managing files from remote SSH hosts. Files are addressed via URIs, which
vary depending on the storage system. If no scheme is specified, the file will be treated as a local connection.
URI format: `[profile]:[uri-scheme]://[connection]/[path]`
@ -522,30 +521,6 @@ Supported URI schemes:
`//[remote]/[path]` a path on a remote
`/~/[path]` a path relative to the home directory on your local computer
- `s3` - Used to access files on S3-compatible systems.
Requires S3 credentials to be set up, either in the AWS CLI configuration files, or in "profiles.json" in the Wave configuration directory.
If no profile is provided, the default from your AWS CLI configuration will be used. Profiles from the AWS CLI must be prefixed with "aws:".
Format:
- `s3://[bucket]/[path]`
- `aws:[profile]:s3://[bucket]/[path]`
- `[profile]:s3://[bucket]/[path]`
- `wavefile` - Used to retrieve blockfiles from the internal Wave filesystem.
Format: `wavefile://[zoneid]/[path]`
Wave file locations can be:
- `wavefile://block/...` - stored in the current block ("this" is also an alias for "block")
- `wavefile://tab/...` - stored in the current tab
- `wavefile://workspace/...` - stored in the current workspace ("ws" is also an alias for "workspace")
- `wavefile://client/...` - stored globally for the client ("global" is also an alias for "client")
- `wavefile://temp/...` - stored globally, but removed on startup/shutdown
- `wavefile://[uuid]/...` - an entity id (can be a block, tab, workspace, etc.)
:::
### cat
@ -554,11 +529,11 @@ Supported URI schemes:
wsh file cat [file-uri]
```
Display the contents of a file. For example:
Display the contents of a file (maximum file size 10MB). For example:
```sh
wsh file cat wavefile://block/config.txt
wsh file cat wavefile://client/settings.json
wsh file cat wsh://user@ec2/home/user/config.txt
wsh file cat ./local-config.txt
```
### write
@ -570,7 +545,7 @@ wsh file write [file-uri]
Write data from stdin to a file. The maximum file size is 10MB. For example:
```sh
echo "hello" | wsh file write wavefile://block/greeting.txt
echo "hello" | wsh file write ./greeting.txt
cat config.json | wsh file write //ec2-user@remote01/~/config.json
```
@ -580,11 +555,11 @@ cat config.json | wsh file write //ec2-user@remote01/~/config.json
wsh file append [file-uri]
```
Append data from stdin to a file, respecting a 10MB total file size limit. This is useful for log files or accumulating data. For example:
Append data from stdin to a file. Input is buffered locally (up to 10MB total file size limit) before being written. For example:
```sh
tail -f app.log | wsh file append wavefile://block/logs.txt
echo "new line" | wsh file append wavefile://client/notes.txt
cat additional-content.txt | wsh file append ./notes.txt
echo "new line" | wsh file append //user@remote/~/notes.txt
```
### rm
@ -597,7 +572,7 @@ Remove a file. For example:
```sh
wsh file rm wsh://user@ec2/home/user/config.txt
wsh file rm wavefile://client/settings.json
wsh file rm ./local-config.txt
```
Flags:
@ -614,7 +589,7 @@ Display information about a file including size, creation time, modification tim
```sh
wsh file info wsh://user@ec2/home/user/config.txt
wsh file info wavefile://client/settings.json
wsh file info ./local-config.txt
```
### cp
@ -623,25 +598,21 @@ wsh file info wavefile://client/settings.json
wsh file cp [flags] [source-uri] [destination-uri]
```
Copy files between different storage systems. For example:
Copy files between different storage systems (maximum file size 10MB). For example:
```sh
# Copy a wave file into your local filesystem
wsh file cp wavefile://block/config.txt ./local-config.txt
# Copy a remote file to your local filesystem
wsh file cp wsh://user@ec2/home/user/config.txt ./local-config.txt
# Copy a local file into the wave filesystem
wsh file cp ./local-config.txt wavefile://block/config.txt
# Copy a local file to a remote system
wsh file cp ./local-config.txt wsh://user@ec2/home/user/config.txt
# Copy a remote file into the wave filesystem
wsh file cp wsh://user@ec2/home/user/config.txt wavefile://client/config.txt
# Recursively copy a directory between two remote computers
wsh file cp wsh://user@ec2-1/home/user/.config wsh://user@ec2-2/home/user/.config -r
# Copy between remote systems
wsh file cp wsh://user@ec2/home/user/config.txt wsh://user@server2/home/user/backup.txt
```
Flags:
- `-r, --recursive` - copies all files in a directory recursively
- `-f, --force` - overwrites any conflicts when copying
- `-m, --merge` - does not clear existing directory entries when copying a directory, instead merging its contents with the destination's
@ -651,25 +622,21 @@ Flags:
wsh file mv [flags] [source-uri] [destination-uri]
```
Move files between different storage systems. The source file will be deleted once the operation completes successfully. For example:
Move files between different storage systems (maximum file size 10MB). The source file will be deleted once the operation completes successfully. For example:
```sh
# Move a wave file into your local filesystem
wsh file mv wavefile://block/config.txt ./local-config.txt
# Move a remote file to your local filesystem
wsh file mv wsh://user@ec2/home/user/config.txt ./local-config.txt
# Move a local file into the wave filesystem
wsh file mv ./local-config.txt wavefile://block/config.txt
# Move a local file to a remote system
wsh file mv ./local-config.txt wsh://user@ec2/home/user/config.txt
# Move a remote file into the wave filesystem
wsh file mv wsh://user@ec2/home/user/config.txt wavefile://client/config.txt
# Recursively move a directory between two remote computers
wsh file mv wsh://user@ec2-1/home/user/.config wsh://user@ec2-2/home/user/.config -r
# Move between remote systems
wsh file mv wsh://user@ec2/home/user/config.txt wsh://user@server2/home/user/backup.txt
```
Flags:
- `-r, --recursive` - moves all files in a directory recursively
- `-f, --force` - overwrites any conflicts when moving
### ls
@ -684,13 +651,12 @@ Examples:
```sh
wsh file ls wsh://user@ec2/home/user/
wsh file ls wavefile://client/configs/
wsh file ls ./local-dir/
```
Flags:
- `-l, --long` - use long listing format showing size, timestamps, and metadata
- `-r, --recursive` - list subdirectories recursively
- `-1, --one` - list one file per line
- `-f, --files` - list only files (no directories)
@ -698,7 +664,7 @@ When output is piped to another command, automatically switches to one-file-per-
```sh
# Easy to process with grep, awk, etc.
wsh file ls wavefile://client/ | grep ".json$"
wsh file ls ./ | grep ".json$"
```
---

View file

@ -38,7 +38,7 @@ ls -la | wsh ai - "what are the largest files here?"
### Persistent State
`wsh` can maintain state across terminal sessions through its variable and file storage system:
`wsh` can maintain state across terminal sessions through its variable system:
```bash
# Store a variable that persists across sessions
@ -51,12 +51,31 @@ wsh setvar -b workspace DEPLOY_ENV=staging
# Use stored variables in commands
curl -H "Authorization: $(wsh getvar API_KEY)" https://api.example.com
```
# Store a file that can be accessed from any block
echo "data" | wsh file write wavefile://global/config.txt
### Accessing Local Files from Remote
# Append logs from multiple terminals
echo "Terminal 1 log" | wsh file append wavefile://workspace/logs.txt
When working on remote machines, you can access files on your local computer using the `wsh://local/~/` path prefix with `wsh file` commands. The shorthand `/~/` can also be used as an alias for `wsh://local/~/`:
```bash
# Read a local file from a remote machine
wsh file cat wsh://local/~/config/app.json
# Run a local script on the remote machine using shell process substitution
bash <(wsh file cat wsh://local/~/scripts/deploy.sh)
python <(wsh file cat wsh://local/~/scripts/deploy.py)
# Append remote output to a local log file
echo "Remote machine log entry" | wsh file append wsh://local/~/app.log
# Copy a local file to the remote machine
wsh file cp wsh://local/~/data.csv ./remote-data.csv
# Copy remote file back to local machine
wsh file cp ./results.txt wsh://local/~/results.txt
# You can also use the shorthand /~/ instead of wsh://local/~/
wsh file cat /~/config/app.json
```
### Block Management

View file

@ -255,8 +255,7 @@ const BlockFrame_Header = ({
icon: "link-slash",
title: "wsh is not installed for this connection",
};
const showNoWshButton =
manageConnection && wshProblem && !util.isLocalConnName(connName) && !connName.startsWith("aws:");
const showNoWshButton = manageConnection && wshProblem && !util.isLocalConnName(connName);
return (
<div

View file

@ -106,31 +106,6 @@ function createFilteredLocalSuggestionItem(
return [];
}
function createS3SuggestionItems(
s3Profiles: Array<string>,
connStatusMap: Map<string, ConnStatus>,
connection: string
): Array<SuggestionConnectionItem> {
// TODO-S3 rewrite this so it fits the way the
// s3 connections work. is there a connection status?
// probably not, so color may be weird
// also, this currently only changes the connection
// an onSelect option must be added for different
// behavior
return s3Profiles.map((profileName) => {
const connStatus = connStatusMap.get(profileName);
const item: SuggestionConnectionItem = {
status: "connected",
icon: "database",
iconColor: "var(--accent-color)",
value: profileName,
label: profileName,
current: profileName == connection,
};
return item;
});
}
function getReconnectItem(
connStatus: ConnStatus,
connSelected: string,
@ -216,27 +191,6 @@ function getRemoteSuggestions(
return remoteSuggestions;
}
function getS3Suggestions(
s3Profiles: Array<string>,
connection: string,
connSelected: string,
connStatusMap: Map<string, ConnStatus>,
fullConfig: FullConfigType,
filterOutNowsh: boolean
): SuggestionConnectionScope | null {
const filtered = filterConnections(s3Profiles, connSelected, fullConfig, filterOutNowsh);
const s3Items = createS3SuggestionItems(filtered, connStatusMap, connection);
const sortedS3Items = sortConnSuggestionItems(s3Items, fullConfig);
if (sortedS3Items.length == 0) {
return null;
}
const s3Suggestions: SuggestionConnectionScope = {
headerText: "S3",
items: sortedS3Items,
};
return s3Suggestions;
}
function getDisconnectItem(
connection: string,
connStatusMap: Map<string, ConnStatus>
@ -296,11 +250,10 @@ function getNewConnectionSuggestionItem(
localName: string,
remoteConns: Array<string>,
wslConns: Array<string>,
s3Conns: Array<string>,
changeConnection: (connName: string) => Promise<void>,
changeConnModalAtom: jotai.PrimitiveAtom<boolean>
): SuggestionConnectionItem | null {
const allCons = ["", localName, ...remoteConns, ...wslConns, ...s3Conns];
const allCons = ["", localName, ...remoteConns, ...wslConns];
if (allCons.includes(connSelected)) {
// do not offer to create a new connection if one
// with the exact name already exists
@ -345,13 +298,11 @@ const ChangeConnectionBlockModal = React.memo(
const connStatus = jotai.useAtomValue(connStatusAtom);
const [connList, setConnList] = React.useState<Array<string>>([]);
const [wslList, setWslList] = React.useState<Array<string>>([]);
const [s3List, setS3List] = React.useState<Array<string>>([]);
const allConnStatus = jotai.useAtomValue(atoms.allConnStatus);
const [rowIndex, setRowIndex] = React.useState(0);
const connStatusMap = new Map<string, ConnStatus>();
const fullConfig = jotai.useAtomValue(atoms.fullConfigAtom);
let filterOutNowsh = util.useAtomValueSafe(viewModel.filterOutNowsh) ?? true;
const showS3 = util.useAtomValueSafe(viewModel.showS3) ?? false;
const hasGitBash = jotai.useAtomValue(ConnectionsModel.getInstance().hasGitBashAtom);
let maxActiveConnNum = 1;
@ -382,9 +333,6 @@ const ChangeConnectionBlockModal = React.memo(
// typeahead was opened. good candidate for verbose log level.
//console.log("unable to load wsl list from backend. using blank list: ", e)
});
RpcApi.ConnListAWSCommand(TabRpcClient, { timeout: 2000 })
.then((s3List) => setS3List(s3List ?? []))
.catch((e) => console.log("unable to load s3 list from backend:", e));
}, [changeConnModalOpen]);
const changeConnection = React.useCallback(
@ -395,16 +343,8 @@ const ChangeConnectionBlockModal = React.memo(
if (connName == blockData?.meta?.connection) {
return;
}
const isAws = connName?.startsWith("aws:");
const oldFile = blockData?.meta?.file ?? "";
let newFile: string;
if (oldFile == "") {
newFile = "";
} else if (isAws) {
newFile = "/";
} else {
newFile = "~";
}
const newFile = oldFile == "" ? "" : "~";
await RpcApi.SetMetaCommand(TabRpcClient, {
oref: WOS.makeORef("block", blockId),
meta: { connection: connName, file: newFile, "cmd:cwd": null },
@ -443,17 +383,6 @@ const ChangeConnectionBlockModal = React.memo(
fullConfig,
filterOutNowsh
);
let s3Suggestions: SuggestionConnectionScope = null;
if (showS3) {
s3Suggestions = getS3Suggestions(
s3List,
connection,
connSelected,
connStatusMap,
fullConfig,
filterOutNowsh
);
}
const connectionsEditItem = getConnectionsEditItem(changeConnModalAtom, connSelected);
const disconnectItem = getDisconnectItem(connection, connStatusMap);
const newConnectionSuggestionItem = getNewConnectionSuggestionItem(
@ -461,7 +390,6 @@ const ChangeConnectionBlockModal = React.memo(
localName,
connList,
wslList,
s3List,
changeConnection,
changeConnModalAtom
);
@ -470,7 +398,6 @@ const ChangeConnectionBlockModal = React.memo(
...(reconnectSuggestionItem ? [reconnectSuggestionItem] : []),
...(localSuggestions ? [localSuggestions] : []),
...(remoteSuggestions ? [remoteSuggestions] : []),
...(s3Suggestions ? [s3Suggestions] : []),
...(disconnectItem ? [disconnectItem] : []),
...(connectionsEditItem ? [connectionsEditItem] : []),
...(newConnectionSuggestionItem ? [newConnectionSuggestionItem] : []),

View file

@ -722,17 +722,6 @@ function getConnStatusAtom(conn: string): PrimitiveAtom<ConnStatus> {
wshenabled: false,
};
rtn = atom(connStatus);
} else if (conn.startsWith("aws:")) {
const connStatus: ConnStatus = {
connection: conn,
connected: true,
error: null,
status: "connected",
hasconnected: true,
activeconnnum: 0,
wshenabled: false,
};
rtn = atom(connStatus);
} else {
const connStatus: ConnStatus = {
connection: conn,

View file

@ -87,11 +87,6 @@ class RpcApiType {
return client.wshRpcCall("connlist", null, opts);
}
// command "connlistaws" [call]
ConnListAWSCommand(client: WshClient, opts?: RpcOpts): Promise<string[]> {
return client.wshRpcCall("connlistaws", null, opts);
}
// command "connreinstallwsh" [call]
ConnReinstallWshCommand(client: WshClient, data: ConnExtData, opts?: RpcOpts): Promise<void> {
return client.wshRpcCall("connreinstallwsh", data, opts);
@ -112,6 +107,11 @@ class RpcApiType {
return client.wshRpcCall("connupdatewsh", data, opts);
}
// command "controlgetrouteid" [call]
ControlGetRouteIdCommand(client: WshClient, opts?: RpcOpts): Promise<string> {
return client.wshRpcCall("controlgetrouteid", null, opts);
}
// command "controllerappendoutput" [call]
ControllerAppendOutputCommand(client: WshClient, data: CommandControllerAppendOutputData, opts?: RpcOpts): Promise<void> {
return client.wshRpcCall("controllerappendoutput", data, opts);
@ -227,11 +227,6 @@ class RpcApiType {
return client.wshRpcCall("fileappend", data, opts);
}
// command "fileappendijson" [call]
FileAppendIJsonCommand(client: WshClient, data: CommandAppendIJsonData, opts?: RpcOpts): Promise<void> {
return client.wshRpcCall("fileappendijson", data, opts);
}
// command "filecopy" [call]
FileCopyCommand(client: WshClient, data: CommandFileCopyData, opts?: RpcOpts): Promise<void> {
return client.wshRpcCall("filecopy", data, opts);
@ -292,16 +287,6 @@ class RpcApiType {
return client.wshRpcCall("filerestorebackup", data, opts);
}
// command "filesharecapability" [call]
FileShareCapabilityCommand(client: WshClient, data: string, opts?: RpcOpts): Promise<FileShareCapability> {
return client.wshRpcCall("filesharecapability", data, opts);
}
// command "filestreamtar" [responsestream]
FileStreamTarCommand(client: WshClient, data: CommandRemoteStreamTarData, opts?: RpcOpts): AsyncGenerator<Packet, void, boolean> {
return client.wshRpcStream("filestreamtar", data, opts);
}
// command "filewrite" [call]
FileWriteCommand(client: WshClient, data: FileData, opts?: RpcOpts): Promise<void> {
return client.wshRpcCall("filewrite", data, opts);
@ -317,6 +302,11 @@ class RpcApiType {
return client.wshRpcCall("focuswindow", data, opts);
}
// command "getallvars" [call]
GetAllVarsCommand(client: WshClient, data: CommandVarData, opts?: RpcOpts): Promise<CommandVarResponseData[]> {
return client.wshRpcCall("getallvars", data, opts);
}
// command "getbuilderoutput" [call]
GetBuilderOutputCommand(client: WshClient, data: string, opts?: RpcOpts): Promise<string[]> {
return client.wshRpcCall("getbuilderoutput", data, opts);
@ -597,11 +587,6 @@ class RpcApiType {
return client.wshRpcStream("remotestreamfile", data, opts);
}
// command "remotetarstream" [responsestream]
RemoteTarStreamCommand(client: WshClient, data: CommandRemoteStreamTarData, opts?: RpcOpts): AsyncGenerator<Packet, void, boolean> {
return client.wshRpcStream("remotetarstream", data, opts);
}
// command "remoteterminatejobmanager" [call]
RemoteTerminateJobManagerCommand(client: WshClient, data: CommandRemoteTerminateJobManagerData, opts?: RpcOpts): Promise<void> {
return client.wshRpcCall("remoteterminatejobmanager", data, opts);
@ -777,6 +762,11 @@ class RpcApiType {
return client.wshRpcCall("waveaitoolapprove", data, opts);
}
// command "wavefilereadstream" [call]
WaveFileReadStreamCommand(client: WshClient, data: CommandWaveFileReadStreamData, opts?: RpcOpts): Promise<WaveFileInfo> {
return client.wshRpcCall("wavefilereadstream", data, opts);
}
// command "waveinfo" [call]
WaveInfoCommand(client: WshClient, opts?: RpcOpts): Promise<WaveInfoData> {
return client.wshRpcCall("waveinfo", null, opts);

View file

@ -89,7 +89,6 @@ export function handleRename(
path: string,
newPath: string,
isDir: boolean,
recursive: boolean,
setErrorMsg: (msg: ErrorMsg) => void
) {
fireAndForget(async () => {
@ -101,32 +100,14 @@ export function handleRename(
await RpcApi.FileMoveCommand(TabRpcClient, {
srcuri,
desturi: await model.formatRemoteUri(newPath, globalStore.get),
opts: {
recursive,
},
});
} catch (e) {
const errorText = `${e}`;
console.warn(`Rename failed: ${errorText}`);
let errorMsg: ErrorMsg;
if (errorText.includes(recursiveError) && !recursive) {
errorMsg = {
status: "Confirm Rename Directory",
text: "Renaming a directory requires the recursive flag. Proceed?",
level: "warning",
buttons: [
{
text: "Rename Recursively",
onClick: () => handleRename(model, path, newPath, isDir, true, setErrorMsg),
},
],
};
} else {
errorMsg = {
status: "Rename Failed",
text: `${e}`,
};
}
const errorMsg: ErrorMsg = {
status: "Rename Failed",
text: `${e}`,
};
setErrorMsg(errorMsg);
}
model.refreshCallback();

View file

@ -199,7 +199,7 @@ function DirectoryTable({
const lastInstance = path.lastIndexOf(fileName);
newPath = path.substring(0, lastInstance) + newName;
console.log(`replacing ${fileName} with ${newName}: ${path}`);
handleRename(model, path, newPath, isDir, false, setErrorMsg);
handleRename(model, path, newPath, isDir, setErrorMsg);
}
setEntryManagerProps(undefined);
},
@ -777,7 +777,6 @@ function DirectoryPreview({ model }: DirectoryPreviewProps) {
const timeoutYear = 31536000000; // one year
const opts: FileCopyOpts = {
timeout: timeoutYear,
recursive: true,
};
const desturi = await model.formatRemoteUri(dirPath, globalStore.get);
const data: CommandFileCopyData = {

View file

@ -167,8 +167,6 @@ export class PreviewModel implements ViewModel {
directoryKeyDownHandler: (waveEvent: WaveKeyboardEvent) => boolean;
codeEditKeyDownHandler: (waveEvent: WaveKeyboardEvent) => boolean;
showS3 = atom(true);
constructor(blockId: string, nodeModel: BlockNodeModel, tabModel: TabModel) {
this.viewType = "preview";
this.blockId = blockId;
@ -677,12 +675,6 @@ export class PreviewModel implements ViewModel {
}
async handleOpenFile(filePath: string) {
const conn = globalStore.get(this.connectionImmediate);
if (!isBlank(conn) && conn.startsWith("aws:")) {
if (!isBlank(filePath) && filePath != "/" && filePath.startsWith("/")) {
filePath = filePath.substring(1);
}
}
const fileInfo = await globalStore.get(this.statFile);
this.updateOpenFileModalAndError(false);
if (fileInfo == null) {

View file

@ -61,9 +61,6 @@ function StreamingPreview({ model }: SpecializedViewProps) {
const remotePath = formatRemoteUri(filePath, conn);
const usp = new URLSearchParams();
usp.set("path", remotePath);
if (conn != null) {
usp.set("connection", conn);
}
const streamingUrl = `${getWebServerEndpoint()}/wave/stream-file?${usp.toString()}`;
if (fileInfo.mimetype === "application/pdf") {
return (

View file

@ -70,7 +70,7 @@ const fetchSuggestions = async (
): Promise<FetchSuggestionsResponse> => {
const conn = await globalStore.get(model.connection);
let route = makeConnRoute(conn);
if (isBlank(conn) || conn.startsWith("aws:")) {
if (isBlank(conn)) {
route = null;
}
if (reqContext?.dispose) {

View file

@ -326,9 +326,6 @@ declare global {
// If true, filters out 'nowsh' connections (when managing connections)
filterOutNowsh?: jotai.Atom<boolean>;
// if true, show s3 connections in picker
showS3?: jotai.Atom<boolean>;
// If true, removes padding inside the block content area.
noPadding?: jotai.Atom<boolean>;

View file

@ -137,7 +137,7 @@ declare global {
tabid: string;
workspaceid: string;
block: Block;
files: FileInfo[];
files: WaveFileInfo[];
};
// wshrpc.BlocksListEntry
@ -182,13 +182,6 @@ declare global {
newactivetabid?: string;
};
// wshrpc.CommandAppendIJsonData
type CommandAppendIJsonData = {
zoneid: string;
filename: string;
data: {[key: string]: any};
};
// wshrpc.CommandAuthenticateJobManagerData
type CommandAuthenticateJobManagerData = {
jobid: string;
@ -517,12 +510,6 @@ declare global {
byterange?: string;
};
// wshrpc.CommandRemoteStreamTarData
type CommandRemoteStreamTarData = {
path: string;
opts?: FileCopyOpts;
};
// wshrpc.CommandRemoteTerminateJobManagerData
type CommandRemoteTerminateJobManagerData = {
jobid: string;
@ -677,6 +664,13 @@ declare global {
approval?: string;
};
// wshrpc.CommandWaveFileReadStreamData
type CommandWaveFileReadStreamData = {
zoneid: string;
name: string;
streammeta: StreamMeta;
};
// wshrpc.CommandWebSelectorData
type CommandWebSelectorData = {
workspaceid: string;
@ -906,12 +900,6 @@ declare global {
append?: boolean;
};
// wshrpc.FileShareCapability
type FileShareCapability = {
canappend: boolean;
canmkdir: boolean;
};
// wconfig.FullConfigType
type FullConfigType = {
settings: SettingsType;
@ -1134,12 +1122,6 @@ declare global {
"waveai:maxoutputtokens"?: number;
};
// iochantypes.Packet
type Packet = {
Data: string;
Checksum: string;
};
// wshrpc.PathCommandData
type PathCommandData = {
pathtype: string;
@ -1898,6 +1880,17 @@ declare global {
meta: {[key: string]: any};
};
// wshrpc.WaveFileInfo
type WaveFileInfo = {
zoneid: string;
name: string;
opts: FileOpts;
createdts: number;
size: number;
modts: number;
meta: {[key: string]: any};
};
// wshrpc.WaveInfoData
type WaveInfoData = {
version: string;

View file

@ -56,22 +56,19 @@ export function addOpenMenuItems(menu: ContextMenuItem[], conn: string, finfo: F
}),
});
}
// TODO: improve behavior as we add more connection types
if (!conn?.startsWith("aws:")) {
menu.push({
label: "Open Terminal Here",
click: () => {
const termBlockDef: BlockDef = {
meta: {
controller: "shell",
view: "term",
"cmd:cwd": finfo.isdir ? finfo.path : finfo.dir,
connection: conn,
},
};
fireAndForget(() => createBlock(termBlockDef));
},
});
}
menu.push({
label: "Open Terminal Here",
click: () => {
const termBlockDef: BlockDef = {
meta: {
controller: "shell",
view: "term",
"cmd:cwd": finfo.isdir ? finfo.path : finfo.dir,
connection: conn,
},
};
fireAndForget(() => createBlock(termBlockDef));
},
});
return menu;
}

View file

@ -91,12 +91,5 @@ export function computeBgStyleFromMeta(meta: MetaType, defaultOpacity: number =
export function formatRemoteUri(path: string, connection: string): string {
connection = connection ?? "local";
// TODO: We need a better way to handle s3 paths
let retVal: string;
if (connection.startsWith("aws:")) {
retVal = `${connection}:s3://${path ?? ""}`;
} else {
retVal = `wsh://${connection}/${path}`;
}
return retVal;
return `wsh://${connection}/${path}`;
}

20
go.mod
View file

@ -5,10 +5,6 @@ go 1.25.6
require (
github.com/Microsoft/go-winio v0.6.2
github.com/alexflint/go-filemutex v1.3.0
github.com/aws/aws-sdk-go-v2 v1.41.0
github.com/aws/aws-sdk-go-v2/config v1.32.6
github.com/aws/aws-sdk-go-v2/service/s3 v1.95.0
github.com/aws/smithy-go v1.24.0
github.com/creack/pty v1.1.24
github.com/emirpasic/gods v1.18.1
github.com/fsnotify/fsnotify v1.9.0
@ -41,7 +37,6 @@ require (
golang.org/x/sys v0.40.0
golang.org/x/term v0.38.0
google.golang.org/api v0.259.0
gopkg.in/ini.v1 v1.67.0
)
require (
@ -51,21 +46,6 @@ require (
cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
cloud.google.com/go/compute/metadata v0.9.0 // indirect
cloud.google.com/go/longrunning v0.6.7 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.19.6 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.16 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.16 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.7 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.16 // indirect
github.com/aws/aws-sdk-go-v2/service/signin v1.0.4 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.30.8 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.12 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.41.5 // indirect
github.com/bahlo/generic-list-go v0.2.0 // indirect
github.com/buger/jsonparser v1.1.1 // indirect
github.com/ebitengine/purego v0.9.1 // indirect

40
go.sum
View file

@ -18,44 +18,6 @@ github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERo
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/alexflint/go-filemutex v1.3.0 h1:LgE+nTUWnQCyRKbpoceKZsPQbs84LivvgwUymZXdOcM=
github.com/alexflint/go-filemutex v1.3.0/go.mod h1:U0+VA/i30mGBlLCrFPGtTe9y6wGQfNAWPBTekHQ+c8A=
github.com/aws/aws-sdk-go-v2 v1.41.0 h1:tNvqh1s+v0vFYdA1xq0aOJH+Y5cRyZ5upu6roPgPKd4=
github.com/aws/aws-sdk-go-v2 v1.41.0/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 h1:489krEF9xIGkOaaX3CE/Be2uWjiXrkCH6gUX+bZA/BU=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4/go.mod h1:IOAPF6oT9KCsceNTvvYMNHy0+kMF8akOjeDvPENWxp4=
github.com/aws/aws-sdk-go-v2/config v1.32.6 h1:hFLBGUKjmLAekvi1evLi5hVvFQtSo3GYwi+Bx4lpJf8=
github.com/aws/aws-sdk-go-v2/config v1.32.6/go.mod h1:lcUL/gcd8WyjCrMnxez5OXkO3/rwcNmvfno62tnXNcI=
github.com/aws/aws-sdk-go-v2/credentials v1.19.6 h1:F9vWao2TwjV2MyiyVS+duza0NIRtAslgLUM0vTA1ZaE=
github.com/aws/aws-sdk-go-v2/credentials v1.19.6/go.mod h1:SgHzKjEVsdQr6Opor0ihgWtkWdfRAIwxYzSJ8O85VHY=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.16 h1:80+uETIWS1BqjnN9uJ0dBUaETh+P1XwFy5vwHwK5r9k=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.16/go.mod h1:wOOsYuxYuB/7FlnVtzeBYRcjSRtQpAW0hCP7tIULMwo=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16 h1:rgGwPzb82iBYSvHMHXc8h9mRoOUBZIGFgKb9qniaZZc=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16/go.mod h1:L/UxsGeKpGoIj6DxfhOWHWQ/kGKcd4I1VncE4++IyKA=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16 h1:1jtGzuV7c82xnqOVfx2F0xmJcOw5374L7N6juGW6x6U=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16/go.mod h1:M2E5OQf+XLe+SZGmmpaI2yy+J326aFf6/+54PoxSANc=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.16 h1:CjMzUs78RDDv4ROu3JnJn/Ig1r6ZD7/T2DXLLRpejic=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.16/go.mod h1:uVW4OLBqbJXSHJYA9svT9BluSvvwbzLQ2Crf6UPzR3c=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 h1:0ryTNEdJbzUCEWkVXEXoqlXV72J5keC1GvILMOuD00E=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.7 h1:DIBqIrJ7hv+e4CmIk2z3pyKT+3B6qVMgRsawHiR3qso=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.7/go.mod h1:vLm00xmBke75UmpNvOcZQ/Q30ZFjbczeLFqGx5urmGo=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16 h1:oHjJHeUy0ImIV0bsrX0X91GkV5nJAyv1l1CC9lnO0TI=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16/go.mod h1:iRSNGgOYmiYwSCXxXaKb9HfOEj40+oTKn8pTxMlYkRM=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.16 h1:NSbvS17MlI2lurYgXnCOLvCFX38sBW4eiVER7+kkgsU=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.16/go.mod h1:SwT8Tmqd4sA6G1qaGdzWCJN99bUmPGHfRwwq3G5Qb+A=
github.com/aws/aws-sdk-go-v2/service/s3 v1.95.0 h1:MIWra+MSq53CFaXXAywB2qg9YvVZifkk6vEGl/1Qor0=
github.com/aws/aws-sdk-go-v2/service/s3 v1.95.0/go.mod h1:79S2BdqCJpScXZA2y+cpZuocWsjGjJINyXnOsf5DTz8=
github.com/aws/aws-sdk-go-v2/service/signin v1.0.4 h1:HpI7aMmJ+mm1wkSHIA2t5EaFFv5EFYXePW30p1EIrbQ=
github.com/aws/aws-sdk-go-v2/service/signin v1.0.4/go.mod h1:C5RdGMYGlfM0gYq/tifqgn4EbyX99V15P2V3R+VHbQU=
github.com/aws/aws-sdk-go-v2/service/sso v1.30.8 h1:aM/Q24rIlS3bRAhTyFurowU8A0SMyGDtEOY/l/s/1Uw=
github.com/aws/aws-sdk-go-v2/service/sso v1.30.8/go.mod h1:+fWt2UHSb4kS7Pu8y+BMBvJF0EWx+4H0hzNwtDNRTrg=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.12 h1:AHDr0DaHIAo8c9t1emrzAlVDFp+iMMKnPdYy6XO4MCE=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.12/go.mod h1:GQ73XawFFiWxyWXMHWfhiomvP3tXtdNar/fi8z18sx0=
github.com/aws/aws-sdk-go-v2/service/sts v1.41.5 h1:SciGFVNZ4mHdm7gpD1dgZYnCuVdX1s+lFTg4+4DOy70=
github.com/aws/aws-sdk-go-v2/service/sts v1.41.5/go.mod h1:iW40X4QBmUxdP+fZNOpfmkdMZqsovezbAeO+Ubiv2pk=
github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk=
github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk=
github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg=
github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs=
@ -254,8 +216,6 @@ google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=

View file

@ -1,153 +0,0 @@
// Copyright 2025, Command Line Inc.
// SPDX-License-Identifier: Apache-2.0
// Description: This package is used to create a connection to AWS services.
package awsconn
import (
"context"
"errors"
"fmt"
"log"
"os"
"regexp"
"strings"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/aws/smithy-go"
"github.com/wavetermdev/waveterm/pkg/waveobj"
"gopkg.in/ini.v1"
)
const (
ProfileConfigKey = "profile:config"
ProfileCredentialsKey = "profile:credentials"
ProfilePrefix = "aws:"
TempFilePattern = "waveterm-awsconfig-%s"
)
var connectionRe = regexp.MustCompile(`^(.*):\w+:\/\/.*$`)
var tempfiles map[string]string = make(map[string]string)
// GetConfig builds an AWS SDK configuration for the profile embedded in the
// given connection string. An empty profile yields the SDK default config.
// The profile argument is a full connection URI (e.g. "aws:myprofile:s3://…");
// the leading "<profile>:" segment is extracted by connectionRe and, after
// trimming the "aws:" prefix, used as the shared-config profile name.
//
// Returns an error when the connection string does not match the expected
// shape or when the SDK fails to load the configuration.
func GetConfig(ctx context.Context, profile string) (*aws.Config, error) {
	optfns := []func(*config.LoadOptions) error{}
	// If profile is empty, use default config
	if profile != "" {
		connMatch := connectionRe.FindStringSubmatch(profile)
		if connMatch == nil {
			// fixed: format string previously contained a stray ")" after %s
			return nil, fmt.Errorf("invalid connection string: %s", profile)
		}
		profile = connMatch[1]
		// TODO: Reimplement generic profile support (previously loaded
		// per-profile config/credentials overrides from the Wave profiles
		// file into temp files and pointed the SDK at them).
		// NOTE(review): region is hard-coded; presumably it should come from
		// the profile or environment — confirm before relying on it.
		optfns = append(optfns, config.WithRegion("us-west-2"))
		trimmedProfile := strings.TrimPrefix(profile, ProfilePrefix)
		optfns = append(optfns, config.WithSharedConfigProfile(trimmedProfile))
	}
	cfg, err := config.LoadDefaultConfig(ctx, optfns...)
	if err != nil {
		return nil, fmt.Errorf("error loading config: %v", err)
	}
	return &cfg, nil
}
// getTempFileFromConfig writes the AWS config text stored under key for the
// given profile into a fresh temp file and returns the file's path. Returns
// ("", nil) when the profile has no stored value for key.
func getTempFileFromConfig(config waveobj.MetaMapType, key string, profile string) (string, error) {
	connectionconfig := config.GetMap(profile)
	if connectionconfig[key] == "" {
		return "", nil
	}
	awsConfig := connectionconfig.GetString(key, "")
	if awsConfig == "" {
		return "", nil
	}
	tempfile, err := os.CreateTemp("", fmt.Sprintf(TempFilePattern, profile))
	if err != nil {
		return "", fmt.Errorf("error creating temp file: %v", err)
	}
	// fixed: close the handle so the fd is not leaked (the original never
	// closed the temp file)
	defer tempfile.Close()
	if _, err := tempfile.WriteString(awsConfig); err != nil {
		return "", fmt.Errorf("error writing to temp file: %v", err)
	}
	return tempfile.Name(), nil
}
func ParseProfiles() map[string]struct{} {
profiles := make(map[string]struct{})
fname := config.DefaultSharedConfigFilename()
errs := []error{}
f, err := ini.Load(fname) // Load ini file
if err != nil {
errs = append(errs, err)
} else {
for _, v := range f.Sections() {
if len(v.Keys()) != 0 { // Get only the sections having Keys
parts := strings.Split(v.Name(), " ")
if len(parts) == 2 && parts[0] == "profile" { // skip default
profiles[ProfilePrefix+parts[1]] = struct{}{}
}
}
}
}
fname = config.DefaultSharedCredentialsFilename()
f, err = ini.Load(fname)
if err != nil {
errs = append(errs, err)
} else {
for _, v := range f.Sections() {
profiles[ProfilePrefix+v.Name()] = struct{}{}
}
}
if len(errs) > 0 {
log.Printf("error reading aws config/credentials file: %v", errs)
}
return profiles
}
// ListBuckets pages through all S3 buckets visible to the client and returns
// them as a single slice. On error, the AccessDenied case is normalized via
// CheckAccessDeniedErr before wrapping.
func ListBuckets(ctx context.Context, client *s3.Client) ([]types.Bucket, error) {
	var buckets []types.Bucket
	paginator := s3.NewListBucketsPaginator(client, &s3.ListBucketsInput{})
	for paginator.HasMorePages() {
		page, err := paginator.NextPage(ctx)
		if err != nil {
			CheckAccessDeniedErr(&err)
			return nil, fmt.Errorf("error listing buckets: %v", err)
		}
		buckets = append(buckets, page.Buckets...)
	}
	return buckets, nil
}
// CheckAccessDeniedErr reports whether *err is an AWS "AccessDenied" API
// error. When it is, *err is replaced in place with the unwrapped API error.
// A nil err pointer is treated as "not access denied".
func CheckAccessDeniedErr(err *error) bool {
	if err == nil {
		return false
	}
	var apiErr smithy.APIError
	if errors.As(*err, &apiErr) && apiErr.ErrorCode() == "AccessDenied" {
		*err = apiErr
		return true
	}
	return false
}

View file

@ -14,9 +14,7 @@ import (
)
const (
ConnectionTypeWsh = "wsh"
ConnectionTypeS3 = "s3"
ConnectionTypeWave = "wavefile"
ConnectionTypeWsh = "wsh"
ConnHostCurrent = "current"
ConnHostWaveSrv = "wavesrv"

View file

@ -19,7 +19,6 @@ import (
"github.com/wavetermdev/waveterm/pkg/blocklogger"
"github.com/wavetermdev/waveterm/pkg/genconn"
"github.com/wavetermdev/waveterm/pkg/remote/awsconn"
"github.com/wavetermdev/waveterm/pkg/util/iterfn"
"github.com/wavetermdev/waveterm/pkg/util/shellutil"
"github.com/wavetermdev/waveterm/pkg/wavebase"
@ -198,10 +197,5 @@ func ParseProfiles() []string {
return nil
}
awsProfiles := awsconn.ParseProfiles()
for profile := range awsProfiles {
connfile[profile] = struct{}{}
}
return iterfn.MapKeysToSorted(connfile)
}

View file

@ -1,204 +0,0 @@
package fileshare
import (
"context"
"fmt"
"log"
"github.com/wavetermdev/waveterm/pkg/remote/awsconn"
"github.com/wavetermdev/waveterm/pkg/remote/connparse"
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/fstype"
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/s3fs"
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/wavefs"
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/wshfs"
"github.com/wavetermdev/waveterm/pkg/util/iochan/iochantypes"
"github.com/wavetermdev/waveterm/pkg/wshrpc"
"github.com/wavetermdev/waveterm/pkg/wshutil"
)
const (
ErrorParsingConnection = "error creating fileshare client, could not parse connection %s"
)
// CreateFileShareClient creates a fileshare client based on the connection
// string's scheme (s3://, wavefile://, or wsh://).
// Returns the client and the parsed connection; (nil, nil) on parse failure
// or an unsupported scheme (the cause is logged).
func CreateFileShareClient(ctx context.Context, connection string) (fstype.FileShareClient, *connparse.Connection) {
	conn, err := connparse.ParseURIAndReplaceCurrentHost(ctx, connection)
	if err != nil {
		log.Printf("error parsing connection: %v", err)
		return nil, nil
	}
	switch conntype := conn.GetType(); conntype {
	case connparse.ConnectionTypeS3:
		config, err := awsconn.GetConfig(ctx, connection)
		if err != nil {
			log.Printf("error getting aws config: %v", err)
			return nil, nil
		}
		return s3fs.NewS3Client(config), conn
	case connparse.ConnectionTypeWave:
		return wavefs.NewWaveClient(), conn
	case connparse.ConnectionTypeWsh:
		return wshfs.NewWshClient(), conn
	default:
		log.Printf("unsupported connection type: %s", conntype)
		return nil, nil
	}
}
// Read resolves the URI in data.Info.Path and reads the file (or directory
// listing) it points to.
func Read(ctx context.Context, data wshrpc.FileData) (*wshrpc.FileData, error) {
	log.Printf("Read: %v", data.Info.Path)
	fsClient, fsConn := CreateFileShareClient(ctx, data.Info.Path)
	if fsClient == nil || fsConn == nil {
		return nil, fmt.Errorf(ErrorParsingConnection, data.Info.Path)
	}
	return fsClient.Read(ctx, fsConn, data)
}
// ReadStream resolves the URI in data.Info.Path and streams the file contents
// (or directory entries) back over the returned channel.
func ReadStream(ctx context.Context, data wshrpc.FileData) <-chan wshrpc.RespOrErrorUnion[wshrpc.FileData] {
	log.Printf("ReadStream: %v", data.Info.Path)
	fsClient, fsConn := CreateFileShareClient(ctx, data.Info.Path)
	if fsClient == nil || fsConn == nil {
		return wshutil.SendErrCh[wshrpc.FileData](fmt.Errorf(ErrorParsingConnection, data.Info.Path))
	}
	return fsClient.ReadStream(ctx, fsConn, data)
}
// ReadTarStream resolves the URI in data.Path and streams its contents as a
// tar archive over the returned channel.
func ReadTarStream(ctx context.Context, data wshrpc.CommandRemoteStreamTarData) <-chan wshrpc.RespOrErrorUnion[iochantypes.Packet] {
	log.Printf("ReadTarStream: %v", data.Path)
	fsClient, fsConn := CreateFileShareClient(ctx, data.Path)
	if fsClient == nil || fsConn == nil {
		return wshutil.SendErrCh[iochantypes.Packet](fmt.Errorf(ErrorParsingConnection, data.Path))
	}
	return fsClient.ReadTarStream(ctx, fsConn, data.Opts)
}
// ListEntries returns the directory entries at the given URI, or nothing if
// the path is a file.
func ListEntries(ctx context.Context, path string, opts *wshrpc.FileListOpts) ([]*wshrpc.FileInfo, error) {
	log.Printf("ListEntries: %v", path)
	fsClient, fsConn := CreateFileShareClient(ctx, path)
	if fsClient == nil || fsConn == nil {
		return nil, fmt.Errorf(ErrorParsingConnection, path)
	}
	return fsClient.ListEntries(ctx, fsConn, opts)
}
// ListEntriesStream streams the directory entries at the given URI over the
// returned channel.
func ListEntriesStream(ctx context.Context, path string, opts *wshrpc.FileListOpts) <-chan wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData] {
	log.Printf("ListEntriesStream: %v", path)
	fsClient, fsConn := CreateFileShareClient(ctx, path)
	if fsClient == nil || fsConn == nil {
		return wshutil.SendErrCh[wshrpc.CommandRemoteListEntriesRtnData](fmt.Errorf(ErrorParsingConnection, path))
	}
	return fsClient.ListEntriesStream(ctx, fsConn, opts)
}
// Stat returns the file info for the given URI.
func Stat(ctx context.Context, path string) (*wshrpc.FileInfo, error) {
	log.Printf("Stat: %v", path)
	fsClient, fsConn := CreateFileShareClient(ctx, path)
	if fsClient == nil || fsConn == nil {
		return nil, fmt.Errorf(ErrorParsingConnection, path)
	}
	return fsClient.Stat(ctx, fsConn)
}
// PutFile writes the given data to the file at the URI in data.Info.Path.
func PutFile(ctx context.Context, data wshrpc.FileData) error {
	log.Printf("PutFile: %v", data.Info.Path)
	fsClient, fsConn := CreateFileShareClient(ctx, data.Info.Path)
	if fsClient == nil || fsConn == nil {
		return fmt.Errorf(ErrorParsingConnection, data.Info.Path)
	}
	return fsClient.PutFile(ctx, fsConn, data)
}
// Mkdir creates a directory at the given URI.
func Mkdir(ctx context.Context, path string) error {
	log.Printf("Mkdir: %v", path)
	fsClient, fsConn := CreateFileShareClient(ctx, path)
	if fsClient == nil || fsConn == nil {
		return fmt.Errorf(ErrorParsingConnection, path)
	}
	return fsClient.Mkdir(ctx, fsConn)
}
// Move transfers the entry at data.SrcUri to data.DestUri. Same-host moves are
// delegated to the source client; cross-host moves are performed as a remote
// copy followed by deleting the source.
func Move(ctx context.Context, data wshrpc.CommandFileCopyData) error {
	opts := data.Opts
	if opts == nil {
		opts = &wshrpc.FileCopyOpts{}
	}
	log.Printf("Move: srcuri: %v, desturi: %v, opts: %v", data.SrcUri, data.DestUri, opts)
	srcClient, srcConn := CreateFileShareClient(ctx, data.SrcUri)
	if srcClient == nil || srcConn == nil {
		return fmt.Errorf("error creating fileshare client, could not parse source connection %s", data.SrcUri)
	}
	destClient, destConn := CreateFileShareClient(ctx, data.DestUri)
	if destClient == nil || destConn == nil {
		return fmt.Errorf("error creating fileshare client, could not parse destination connection %s", data.DestUri)
	}
	if srcConn.Host == destConn.Host {
		return srcClient.MoveInternal(ctx, srcConn, destConn, opts)
	}
	// cross-host: copy, then delete the source (recursively only if it was a dir)
	isDir, err := destClient.CopyRemote(ctx, srcConn, destConn, srcClient, opts)
	if err != nil {
		return fmt.Errorf("cannot copy %q to %q: %w", data.SrcUri, data.DestUri, err)
	}
	return srcClient.Delete(ctx, srcConn, opts.Recursive && isDir)
}
// Copy copies the entry at data.SrcUri to data.DestUri. Directory copies are
// always performed recursively.
//
// The incoming data.Opts is cloned before the Recursive flag is forced on, so
// the caller's FileCopyOpts struct is never mutated (the previous version set
// opts.Recursive = true through the caller's pointer as a visible side effect).
func Copy(ctx context.Context, data wshrpc.CommandFileCopyData) error {
	opts := &wshrpc.FileCopyOpts{}
	if data.Opts != nil {
		// copy by value so we don't mutate the caller's options
		optsCopy := *data.Opts
		opts = &optsCopy
	}
	opts.Recursive = true
	log.Printf("Copy: srcuri: %v, desturi: %v, opts: %v", data.SrcUri, data.DestUri, opts)
	srcClient, srcConn := CreateFileShareClient(ctx, data.SrcUri)
	if srcConn == nil || srcClient == nil {
		return fmt.Errorf("error creating fileshare client, could not parse source connection %s", data.SrcUri)
	}
	destClient, destConn := CreateFileShareClient(ctx, data.DestUri)
	if destConn == nil || destClient == nil {
		return fmt.Errorf("error creating fileshare client, could not parse destination connection %s", data.DestUri)
	}
	if srcConn.Host != destConn.Host {
		_, err := destClient.CopyRemote(ctx, srcConn, destConn, srcClient, opts)
		return err
	}
	_, err := srcClient.CopyInternal(ctx, srcConn, destConn, opts)
	return err
}
// Delete removes the entry at data.Path; data.Recursive must be set to delete
// a non-empty directory.
func Delete(ctx context.Context, data wshrpc.CommandDeleteFileData) error {
	log.Printf("Delete: %v", data)
	fsClient, fsConn := CreateFileShareClient(ctx, data.Path)
	if fsClient == nil || fsConn == nil {
		return fmt.Errorf(ErrorParsingConnection, data.Path)
	}
	return fsClient.Delete(ctx, fsConn, data.Recursive)
}
// Join appends the given path parts to the URI and returns the file info for
// the resulting path.
func Join(ctx context.Context, path string, parts ...string) (*wshrpc.FileInfo, error) {
	log.Printf("Join: %v", path)
	fsClient, fsConn := CreateFileShareClient(ctx, path)
	if fsClient == nil || fsConn == nil {
		return nil, fmt.Errorf(ErrorParsingConnection, path)
	}
	return fsClient.Join(ctx, fsConn, parts...)
}
// Append appends the given data to the file at the URI in data.Info.Path.
func Append(ctx context.Context, data wshrpc.FileData) error {
	log.Printf("Append: %v", data.Info.Path)
	fsClient, fsConn := CreateFileShareClient(ctx, data.Info.Path)
	if fsClient == nil || fsConn == nil {
		return fmt.Errorf(ErrorParsingConnection, data.Info.Path)
	}
	return fsClient.AppendFile(ctx, fsConn, data)
}
// GetCapability reports the capabilities of the fileshare backend that serves
// the given URI.
func GetCapability(ctx context.Context, path string) (wshrpc.FileShareCapability, error) {
	log.Printf("GetCapability: %v", path)
	fsClient, fsConn := CreateFileShareClient(ctx, path)
	if fsClient == nil || fsConn == nil {
		return wshrpc.FileShareCapability{}, fmt.Errorf(ErrorParsingConnection, path)
	}
	return fsClient.GetCapability(), nil
}

View file

@ -1,58 +0,0 @@
// Copyright 2025, Command Line Inc.
// SPDX-License-Identifier: Apache-2.0
package fstype
import (
"context"
"os"
"time"
"github.com/wavetermdev/waveterm/pkg/remote/connparse"
"github.com/wavetermdev/waveterm/pkg/util/iochan/iochantypes"
"github.com/wavetermdev/waveterm/pkg/wshrpc"
)
const (
DefaultTimeout = 30 * time.Second
FileMode os.FileMode = 0644
DirMode os.FileMode = 0755 | os.ModeDir
RecursiveRequiredError = "recursive flag must be set for directory operations"
MergeRequiredError = "directory already exists at %q, set overwrite flag to delete the existing contents or set merge flag to merge the contents"
OverwriteRequiredError = "file already exists at %q, set overwrite flag to delete the existing file"
)
// FileShareClient abstracts a remote or virtual filesystem backend (wsh, s3,
// wavefile). All paths arrive pre-parsed as *connparse.Connection values.
type FileShareClient interface {
	// Stat returns the file info at the given parsed connection path
	Stat(ctx context.Context, conn *connparse.Connection) (*wshrpc.FileInfo, error)
	// Read returns the file info at the given path, if it's a directory, then the list of entries
	Read(ctx context.Context, conn *connparse.Connection, data wshrpc.FileData) (*wshrpc.FileData, error)
	// ReadStream returns a stream of file data at the given path. If it's a directory, then the list of entries
	ReadStream(ctx context.Context, conn *connparse.Connection, data wshrpc.FileData) <-chan wshrpc.RespOrErrorUnion[wshrpc.FileData]
	// ReadTarStream returns a stream of tar data at the given path
	ReadTarStream(ctx context.Context, conn *connparse.Connection, opts *wshrpc.FileCopyOpts) <-chan wshrpc.RespOrErrorUnion[iochantypes.Packet]
	// ListEntries returns the list of entries at the given path, or nothing if the path is a file
	ListEntries(ctx context.Context, conn *connparse.Connection, opts *wshrpc.FileListOpts) ([]*wshrpc.FileInfo, error)
	// ListEntriesStream returns a stream of entries at the given path
	ListEntriesStream(ctx context.Context, conn *connparse.Connection, opts *wshrpc.FileListOpts) <-chan wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData]
	// PutFile writes the given data to the file at the given path
	PutFile(ctx context.Context, conn *connparse.Connection, data wshrpc.FileData) error
	// AppendFile appends the given data to the file at the given path
	AppendFile(ctx context.Context, conn *connparse.Connection, data wshrpc.FileData) error
	// Mkdir creates a directory at the given path
	Mkdir(ctx context.Context, conn *connparse.Connection) error
	// MoveInternal moves the file within the same connection
	MoveInternal(ctx context.Context, srcConn, destConn *connparse.Connection, opts *wshrpc.FileCopyOpts) error
	// CopyInternal copies the file within the same connection. Returns whether the copy source was a directory
	CopyInternal(ctx context.Context, srcConn, destConn *connparse.Connection, opts *wshrpc.FileCopyOpts) (bool, error)
	// CopyRemote copies the file between different connections. Returns whether the copy source was a directory
	CopyRemote(ctx context.Context, srcConn, destConn *connparse.Connection, srcClient FileShareClient, opts *wshrpc.FileCopyOpts) (bool, error)
	// Delete deletes the entry at the given path
	Delete(ctx context.Context, conn *connparse.Connection, recursive bool) error
	// Join joins the given parts to the connection path
	Join(ctx context.Context, conn *connparse.Connection, parts ...string) (*wshrpc.FileInfo, error)
	// GetConnectionType returns the type of connection for the fileshare
	GetConnectionType() string
	// GetCapability returns the capability of the fileshare
	GetCapability() wshrpc.FileShareCapability
}

View file

@ -1,22 +1,15 @@
package fsutil
import (
"archive/tar"
"bytes"
"context"
"encoding/base64"
"errors"
"fmt"
"io"
"io/fs"
"log"
"strings"
"github.com/wavetermdev/waveterm/pkg/remote/connparse"
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/fspath"
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/fstype"
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/pathtree"
"github.com/wavetermdev/waveterm/pkg/util/tarcopy"
"github.com/wavetermdev/waveterm/pkg/util/utilfn"
"github.com/wavetermdev/waveterm/pkg/wshrpc"
)
@ -43,153 +36,6 @@ func GetParentPathString(hostAndPath string) string {
return hostAndPath[:lastSlash+1]
}
// PrefixCopyInternal implements a same-connection copy for prefix-style
// (directoryless) filesystems. listEntriesPrefix enumerates the keys under a
// prefix, and copyFunc copies a single key to its destination path. Returns
// whether the copy source was a directory (i.e. a prefix with children).
func PrefixCopyInternal(ctx context.Context, srcConn, destConn *connparse.Connection, c fstype.FileShareClient, opts *wshrpc.FileCopyOpts, listEntriesPrefix func(ctx context.Context, host string, path string) ([]string, error), copyFunc func(ctx context.Context, host string, path string) error) (bool, error) {
	log.Printf("PrefixCopyInternal: %v -> %v", srcConn.GetFullURI(), destConn.GetFullURI())
	srcHasSlash := strings.HasSuffix(srcConn.Path, fspath.Separator)
	srcPath, destPath, srcInfo, err := DetermineCopyDestPath(ctx, srcConn, destConn, c, c, opts)
	if err != nil {
		return false, err
	}
	recursive := opts != nil && opts.Recursive
	if !srcInfo.IsDir {
		// single object: one copy call, no tree walk needed
		return false, copyFunc(ctx, srcPath, destPath)
	}
	if !recursive {
		// errors.New instead of fmt.Errorf: the message is a non-constant,
		// verb-free string, which trips go vet's printf check
		return false, errors.New(fstype.RecursiveRequiredError)
	}
	if !srcHasSlash {
		srcPath += fspath.Separator
	}
	destPath += fspath.Separator
	log.Printf("Copying directory: %v -> %v", srcPath, destPath)
	entries, err := listEntriesPrefix(ctx, srcConn.Host, srcPath)
	if err != nil {
		return false, fmt.Errorf("error listing source directory: %w", err)
	}
	tree := pathtree.NewTree(srcPath, fspath.Separator)
	for _, entry := range entries {
		tree.Add(entry)
	}
	/* tree.Walk will return the full path in the source bucket for each item.
	prefixToRemove specifies how much of that path we want in the destination subtree.
	If the source path has a trailing slash, we don't want to include the source directory itself in the destination subtree.*/
	prefixToRemove := srcPath
	if !srcHasSlash {
		prefixToRemove = fspath.Dir(srcPath) + fspath.Separator
	}
	return true, tree.Walk(func(path string, numChildren int) error {
		// since this is a prefix filesystem, we only care about leafs
		if numChildren > 0 {
			return nil
		}
		destFilePath := destPath + strings.TrimPrefix(path, prefixToRemove)
		return copyFunc(ctx, path, destFilePath)
	})
}
// PrefixCopyRemote copies an entry from srcClient's connection to a
// prefix-style destination by streaming a tar archive from the source and
// writing each regular-file entry with destPutFile. Returns whether the copy
// source was a directory.
func PrefixCopyRemote(ctx context.Context, srcConn, destConn *connparse.Connection, srcClient, destClient fstype.FileShareClient, destPutFile func(host string, path string, size int64, reader io.Reader) error, opts *wshrpc.FileCopyOpts) (bool, error) {
	// prefix to be used if the destination is a directory. The destPath returned in the following call only applies if the destination is not a directory.
	destPathPrefix, err := CleanPathPrefix(destConn.Path)
	if err != nil {
		return false, fmt.Errorf("error cleaning destination path: %w", err)
	}
	destPathPrefix += fspath.Separator
	_, destPath, srcInfo, err := DetermineCopyDestPath(ctx, srcConn, destConn, srcClient, destClient, opts)
	if err != nil {
		return false, err
	}
	log.Printf("Copying: %v -> %v", srcConn.GetFullURI(), destConn.GetFullURI())
	// cancellable child context so a write failure can abort the source stream
	readCtx, cancel := context.WithCancelCause(ctx)
	defer cancel(nil)
	ioch := srcClient.ReadTarStream(readCtx, srcConn, opts)
	err = tarcopy.TarCopyDest(readCtx, cancel, ioch, func(next *tar.Header, reader *tar.Reader, singleFile bool) error {
		// directories are implicit on prefix filesystems; only write files
		if next.Typeflag == tar.TypeDir {
			return nil
		}
		if singleFile && srcInfo.IsDir {
			return fmt.Errorf("protocol error: source is a directory, but only a single file is being copied")
		}
		fileName, err := CleanPathPrefix(fspath.Join(destPathPrefix, next.Name))
		if singleFile {
			fileName = destPath
		}
		// NOTE(review): err is checked after fileName may have been replaced
		// with destPath, so a single-file copy can still fail on a cleaning
		// error for a name it never uses — confirm whether that is intended.
		if err != nil {
			return fmt.Errorf("error cleaning path: %w", err)
		}
		log.Printf("CopyRemote: writing file: %s; size: %d\n", fileName, next.Size)
		return destPutFile(destConn.Host, fileName, next.Size, reader)
	})
	if err != nil {
		cancel(err)
		return false, err
	}
	return srcInfo.IsDir, nil
}
// DetermineCopyDestPath stats the source and destination of a copy and
// resolves the concrete destination path, enforcing the overwrite/merge/
// recursive options. It mutates destConn.Path to the resolved path and
// returns the source path, the resolved destination path, and the source's
// FileInfo.
func DetermineCopyDestPath(ctx context.Context, srcConn, destConn *connparse.Connection, srcClient, destClient fstype.FileShareClient, opts *wshrpc.FileCopyOpts) (srcPath, destPath string, srcInfo *wshrpc.FileInfo, err error) {
	merge := opts != nil && opts.Merge
	overwrite := opts != nil && opts.Overwrite
	recursive := opts != nil && opts.Recursive
	// overwrite and merge are mutually exclusive conflict resolutions
	if overwrite && merge {
		return "", "", nil, fmt.Errorf("cannot specify both overwrite and merge")
	}
	srcHasSlash := strings.HasSuffix(srcConn.Path, fspath.Separator)
	srcPath = srcConn.Path
	destHasSlash := strings.HasSuffix(destConn.Path, fspath.Separator)
	destPath, err = CleanPathPrefix(destConn.Path)
	if err != nil {
		return "", "", nil, fmt.Errorf("error cleaning destination path: %w", err)
	}
	srcInfo, err = srcClient.Stat(ctx, srcConn)
	if err != nil {
		return "", "", nil, fmt.Errorf("error getting source file info: %w", err)
	} else if srcInfo.NotFound {
		// NOTE(review): err is nil in this branch, so %w wraps nothing; the
		// message would be more useful carrying the source URI — confirm.
		return "", "", nil, fmt.Errorf("source file not found: %w", err)
	}
	destInfo, err := destClient.Stat(ctx, destConn)
	destExists := err == nil && !destInfo.NotFound
	if err != nil && !errors.Is(err, fs.ErrNotExist) {
		return "", "", nil, fmt.Errorf("error getting destination file info: %w", err)
	}
	originalDestPath := destPath
	// Copying "a/b" (no trailing slash) into a directory should land at
	// "<dest>/b", so append the source base name when the destination is (or
	// will become) a directory.
	if !srcHasSlash {
		if (destExists && destInfo.IsDir) || (!destExists && !destHasSlash && srcInfo.IsDir) {
			destPath = fspath.Join(destPath, fspath.Base(srcConn.Path))
		}
	}
	destConn.Path = destPath
	if originalDestPath != destPath {
		// the resolved destination differs from what we statted above; re-stat it
		destInfo, err = destClient.Stat(ctx, destConn)
		destExists = err == nil && !destInfo.NotFound
		if err != nil && !errors.Is(err, fs.ErrNotExist) {
			return "", "", nil, fmt.Errorf("error getting destination file info: %w", err)
		}
	}
	if destExists {
		if overwrite {
			// overwrite: remove the conflicting destination before the copy
			log.Printf("Deleting existing file: %s\n", destConn.GetFullURI())
			err = destClient.Delete(ctx, destConn, destInfo.IsDir && recursive)
			if err != nil {
				return "", "", nil, fmt.Errorf("error deleting conflicting destination file: %w", err)
			}
		} else if destInfo.IsDir && srcInfo.IsDir {
			// dir-over-dir requires an explicit merge flag
			if !merge {
				return "", "", nil, fmt.Errorf(fstype.MergeRequiredError, destConn.GetFullURI())
			}
		} else {
			return "", "", nil, fmt.Errorf(fstype.OverwriteRequiredError, destConn.GetFullURI())
		}
	}
	return srcPath, destPath, srcInfo, nil
}
// CleanPathPrefix corrects paths for prefix filesystems (i.e. ones that don't have directories)
func CleanPathPrefix(path string) (string, error) {
if path == "" {

View file

@ -1,127 +0,0 @@
package pathtree
import (
"log"
"strings"
)
// WalkFunc is invoked once per node during a traversal with the node's full
// path and its number of direct children (0 for a leaf).
type WalkFunc func(path string, numChildren int) error

// Tree indexes a set of delimiter-separated paths under a common root,
// recording the parent/child structure so the hierarchy can be walked.
type Tree struct {
	Root      *Node
	RootPath  string
	nodes     map[string]*Node
	delimiter string
}

// Node is a single entry in the tree; leaves have an empty Children map.
type Node struct {
	Children map[string]*Node
}

// Walk visits this node and then every descendant depth-first, calling
// walkFunc with each node's full path and child count.
func (n *Node) Walk(curPath string, walkFunc WalkFunc, delimiter string) error {
	if err := walkFunc(curPath, len(n.Children)); err != nil {
		return err
	}
	for childName, childNode := range n.Children {
		if err := childNode.Walk(curPath+delimiter+childName, walkFunc, delimiter); err != nil {
			return err
		}
	}
	return nil
}

// NewTree creates an empty Tree rooted at path; a trailing delimiter is
// appended to a non-empty root path if missing.
func NewTree(path string, delimiter string) *Tree {
	if len(delimiter) > 1 {
		log.Printf("pathtree.NewTree: Warning: multi-character delimiter '%s' may cause unexpected behavior", delimiter)
	}
	rootPath := path
	if rootPath != "" && !strings.HasSuffix(rootPath, delimiter) {
		rootPath += delimiter
	}
	return &Tree{
		Root:      &Node{Children: make(map[string]*Node)},
		RootPath:  rootPath,
		nodes:     make(map[string]*Node),
		delimiter: delimiter,
	}
}

// Add inserts a path into the tree. Paths outside the root prefix, duplicate
// paths, and paths with empty/"."/".." components are silently ignored.
func (t *Tree) Add(path string) {
	if path == "" {
		return
	}
	relativePath := path
	if t.RootPath != "" {
		relativePath = strings.TrimPrefix(path, t.RootPath)
		// an unchanged path means it is not under the root; ignore it
		if relativePath == path {
			return
		}
	}
	// already present; nothing to do
	if t.nodes[relativePath] != nil {
		return
	}
	components := strings.Split(relativePath, t.delimiter)
	for _, component := range components {
		if component == "" || component == "." || component == ".." {
			log.Printf("pathtree.Add: invalid path component: %s", component)
			return
		}
	}
	// fast path: hang the leaf off an already-known parent
	if t.tryAddToExistingParent(components) {
		return
	}
	t.addNewPath(components)
}

// tryAddToExistingParent attaches the final component directly under an
// already-indexed parent node, returning true on success.
func (t *Tree) tryAddToExistingParent(components []string) bool {
	if len(components) <= 1 {
		return false
	}
	parentKey := strings.Join(components[:len(components)-1], t.delimiter)
	parentNode := t.nodes[parentKey]
	if parentNode == nil {
		return false
	}
	leafName := components[len(components)-1]
	leafNode := &Node{Children: make(map[string]*Node)}
	parentNode.Children[leafName] = leafNode
	t.nodes[strings.Join(components, t.delimiter)] = leafNode
	return true
}

// addNewPath walks the components from the root, creating and indexing any
// missing intermediate nodes along the way.
func (t *Tree) addNewPath(components []string) {
	current := t.Root
	for i, component := range components {
		child, ok := current.Children[component]
		if !ok {
			child = &Node{Children: make(map[string]*Node)}
			current.Children[component] = child
			t.nodes[strings.Join(components[:i+1], t.delimiter)] = child
		}
		current = child
	}
}

// Walk traverses every node in the tree depth-first, invoking walkFunc with
// each node's full (root-prefixed) path and child count.
func (t *Tree) Walk(walkFunc WalkFunc) error {
	for childName, childNode := range t.Root.Children {
		if err := childNode.Walk(t.RootPath+childName, walkFunc, t.delimiter); err != nil {
			return err
		}
	}
	return nil
}

View file

@ -1,112 +0,0 @@
package pathtree_test
import (
"errors"
"log"
"testing"
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/pathtree"
)
// TestAdd verifies tree construction: the child count at each level of the
// fixture, that re-adding an existing path is a no-op, and that paths outside
// the root prefix are ignored.
func TestAdd(t *testing.T) {
	t.Parallel()
	tree := initializeTree()
	// Check that the tree has the expected structure
	if len(tree.Root.Children) != 3 {
		t.Errorf("expected 3 children, got %d", len(tree.Root.Children))
	}
	if len(tree.Root.Children["a"].Children) != 3 {
		t.Errorf("expected 3 children, got %d", len(tree.Root.Children["a"].Children))
	}
	if len(tree.Root.Children["b"].Children) != 1 {
		t.Errorf("expected 1 child, got %d", len(tree.Root.Children["b"].Children))
	}
	if len(tree.Root.Children["b"].Children["g"].Children) != 1 {
		t.Errorf("expected 1 child, got %d", len(tree.Root.Children["b"].Children["g"].Children))
	}
	if len(tree.Root.Children["b"].Children["g"].Children["h"].Children) != 0 {
		t.Errorf("expected 0 children, got %d", len(tree.Root.Children["b"].Children["g"].Children["h"].Children))
	}
	if len(tree.Root.Children["c"].Children) != 0 {
		t.Errorf("expected 0 children, got %d", len(tree.Root.Children["c"].Children))
	}
	// Check that adding the same path again does not change the tree
	tree.Add("root/a/d")
	if len(tree.Root.Children["a"].Children) != 3 {
		t.Errorf("expected 3 children, got %d", len(tree.Root.Children["a"].Children))
	}
	// Check that adding a path that is not a child of the root path does not change the tree
	tree.Add("etc/passwd")
	if len(tree.Root.Children) != 3 {
		t.Errorf("expected 3 children, got %d", len(tree.Root.Children))
	}
}
// TestWalk verifies that Walk visits every node exactly once with the correct
// child count, and that an error returned by the callback aborts the walk and
// is propagated unchanged to the caller.
func TestWalk(t *testing.T) {
	t.Parallel()
	tree := initializeTree()
	// Check that the tree traverses all nodes and identifies leaf nodes correctly
	pathMap := make(map[string]int)
	err := tree.Walk(func(path string, numChildren int) error {
		pathMap[path] = numChildren
		return nil
	})
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	// expected full paths (root-prefixed) and their direct-child counts
	expectedPathMap := map[string]int{
		"root/a":     3,
		"root/a/d":   0,
		"root/a/e":   0,
		"root/a/f":   0,
		"root/b":     1,
		"root/b/g":   1,
		"root/b/g/h": 0,
		"root/c":     0,
	}
	log.Printf("pathMap: %v", pathMap)
	for path, numChildren := range expectedPathMap {
		if pathMap[path] != numChildren {
			t.Errorf("expected %d children for path %s, got %d", numChildren, path, pathMap[path])
		}
	}
	expectedError := errors.New("test error")
	// Check that the walk function returns an error if it is returned by the walk function
	err = tree.Walk(func(path string, numChildren int) error {
		return expectedError
	})
	// == comparison is valid here: the sentinel is returned unwrapped
	if err != expectedError {
		t.Errorf("expected error %v, got %v", expectedError, err)
	}
}
// initializeTree builds the shared fixture: three top-level entries under
// "root/", with nested children beneath "a" and "b".
func initializeTree() *pathtree.Tree {
	tree := pathtree.NewTree("root/", "/")
	fixturePaths := []string{
		"root/a",
		"root/b",
		"root/c",
		"root/a/d",
		"root/a/e",
		"root/a/f",
		"root/b/g",
		"root/b/g/h",
	}
	for _, p := range fixturePaths {
		tree.Add(p)
	}
	log.Printf("tree: %v", tree)
	return tree
}

View file

@ -1,812 +0,0 @@
// Copyright 2025, Command Line Inc.
// SPDX-License-Identifier: Apache-2.0
package s3fs
import (
"bytes"
"context"
"encoding/base64"
"errors"
"fmt"
"io"
"log"
"strings"
"sync"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/aws/smithy-go"
"github.com/wavetermdev/waveterm/pkg/remote/awsconn"
"github.com/wavetermdev/waveterm/pkg/remote/connparse"
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/fspath"
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/fstype"
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/fsutil"
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/pathtree"
"github.com/wavetermdev/waveterm/pkg/util/fileutil"
"github.com/wavetermdev/waveterm/pkg/util/iochan/iochantypes"
"github.com/wavetermdev/waveterm/pkg/util/tarcopy"
"github.com/wavetermdev/waveterm/pkg/util/utilfn"
"github.com/wavetermdev/waveterm/pkg/wshrpc"
"github.com/wavetermdev/waveterm/pkg/wshutil"
)
// S3Client implements the FileShareClient interface on top of the AWS S3 SDK,
// treating buckets as top-level directories and key prefixes as subdirectories.
type S3Client struct {
	client *s3.Client
}

// compile-time check that S3Client satisfies fstype.FileShareClient
var _ fstype.FileShareClient = S3Client{}

// NewS3Client creates an S3Client backed by the given AWS configuration.
func NewS3Client(config *aws.Config) *S3Client {
	return &S3Client{
		client: s3.NewFromConfig(*config),
	}
}
// Read fetches the object (or directory listing) at the connection path by
// collecting the ReadStream responses into a single FileData value.
func (c S3Client) Read(ctx context.Context, conn *connparse.Connection, data wshrpc.FileData) (*wshrpc.FileData, error) {
	return fsutil.ReadStreamToFileData(ctx, c.ReadStream(ctx, conn, data))
}
// ReadStream streams the object at the connection path. The first response
// carries the FileInfo; for directories the entries follow, and for files the
// content follows as base64-encoded chunks. data.At, when set, selects a byte
// range to read.
//
// Bug fix: the read loop previously did `if n == 0 { break }` inside the
// `select` statement — in Go, that break only exits the select, not the for
// loop, so a zero-byte read before EOF detection spun forever. The loop is
// restructured so termination conditions are checked outside the select.
func (c S3Client) ReadStream(ctx context.Context, conn *connparse.Connection, data wshrpc.FileData) <-chan wshrpc.RespOrErrorUnion[wshrpc.FileData] {
	bucket := conn.Host
	objectKey := conn.Path
	rtn := make(chan wshrpc.RespOrErrorUnion[wshrpc.FileData], 16)
	go func() {
		defer close(rtn)
		finfo, err := c.Stat(ctx, conn)
		if err != nil {
			rtn <- wshutil.RespErr[wshrpc.FileData](err)
			return
		}
		rtn <- wshrpc.RespOrErrorUnion[wshrpc.FileData]{Response: wshrpc.FileData{Info: finfo}}
		if finfo.NotFound {
			// synthesize a ".." entry so a missing path still renders as a navigable directory
			rtn <- wshrpc.RespOrErrorUnion[wshrpc.FileData]{Response: wshrpc.FileData{Entries: []*wshrpc.FileInfo{
				{
					Path:     finfo.Dir,
					Dir:      fspath.Dir(finfo.Dir),
					Name:     "..",
					IsDir:    true,
					Size:     0,
					ModTime:  time.Now().Unix(),
					MimeType: "directory",
				},
			}}}
			return
		}
		if finfo.IsDir {
			// directory: forward the entry listing
			listEntriesCh := c.ListEntriesStream(ctx, conn, nil)
			defer func() {
				utilfn.DrainChannelSafe(listEntriesCh, "s3fs.ReadStream")
			}()
			for respUnion := range listEntriesCh {
				if respUnion.Error != nil {
					rtn <- wshutil.RespErr[wshrpc.FileData](respUnion.Error)
					return
				}
				resp := respUnion.Response
				if len(resp.FileInfo) > 0 {
					rtn <- wshrpc.RespOrErrorUnion[wshrpc.FileData]{Response: wshrpc.FileData{Entries: resp.FileInfo}}
				}
			}
		} else {
			var result *s3.GetObjectOutput
			var err error
			if data.At != nil {
				// ranged read: translate offset/size into an HTTP Range header
				log.Printf("reading %v with offset %d and size %d", conn.GetFullURI(), data.At.Offset, data.At.Size)
				result, err = c.client.GetObject(ctx, &s3.GetObjectInput{
					Bucket: aws.String(bucket),
					Key:    aws.String(objectKey),
					Range:  aws.String(fmt.Sprintf("bytes=%d-%d", data.At.Offset, data.At.Offset+int64(data.At.Size)-1)),
				})
			} else {
				result, err = c.client.GetObject(ctx, &s3.GetObjectInput{
					Bucket: aws.String(bucket),
					Key:    aws.String(objectKey),
				})
			}
			if err != nil {
				log.Printf("error getting object %v:%v: %v", bucket, objectKey, err)
				var noKey *types.NoSuchKey
				if errors.As(err, &noKey) {
					err = noKey
				}
				rtn <- wshutil.RespErr[wshrpc.FileData](err)
				return
			}
			size := int64(0)
			if result.ContentLength != nil {
				size = *result.ContentLength
			}
			finfo := &wshrpc.FileInfo{
				Name:    objectKey,
				IsDir:   false,
				Size:    size,
				ModTime: result.LastModified.UnixMilli(),
				Path:    conn.GetFullURI(),
				Dir:     fsutil.GetParentPath(conn),
			}
			fileutil.AddMimeTypeToFileInfo(finfo.Path, finfo)
			rtn <- wshrpc.RespOrErrorUnion[wshrpc.FileData]{Response: wshrpc.FileData{Info: finfo}}
			if size == 0 {
				log.Printf("no data to read")
				return
			}
			defer utilfn.GracefulClose(result.Body, "s3fs", conn.GetFullURI())
			bytesRemaining := size
			for {
				// non-blocking cancellation check; termination is decided below
				select {
				case <-ctx.Done():
					rtn <- wshutil.RespErr[wshrpc.FileData](context.Cause(ctx))
					return
				default:
				}
				buf := make([]byte, min(bytesRemaining, wshrpc.FileChunkSize))
				n, err := result.Body.Read(buf)
				if err != nil && !errors.Is(err, io.EOF) {
					rtn <- wshutil.RespErr[wshrpc.FileData](err)
					return
				}
				if n > 0 {
					bytesRemaining -= int64(n)
					rtn <- wshrpc.RespOrErrorUnion[wshrpc.FileData]{Response: wshrpc.FileData{Data64: base64.StdEncoding.EncodeToString(buf[:n])}}
				}
				// stop when the advertised length is consumed, on EOF, or on a
				// zero-byte read (previously this last case busy-looped)
				if bytesRemaining == 0 || errors.Is(err, io.EOF) || n == 0 {
					return
				}
			}
		}
	}()
	return rtn
}
// ReadTarStream streams the object (or object-prefix "directory") at the
// connection path as a tar archive. Directory reads require opts.Recursive.
//
// Bug fixes vs. the previous version:
//   - opts may be nil: only the Recursive read was nil-guarded, but
//     opts.Timeout was dereferenced unconditionally (potential nil panic).
//   - errs was appended from concurrent fetch goroutines without holding the
//     mutex (data race); appends now take treeMapMutex.
func (c S3Client) ReadTarStream(ctx context.Context, conn *connparse.Connection, opts *wshrpc.FileCopyOpts) <-chan wshrpc.RespOrErrorUnion[iochantypes.Packet] {
	recursive := opts != nil && opts.Recursive
	bucket := conn.Host
	if bucket == "" || bucket == "/" {
		return wshutil.SendErrCh[iochantypes.Packet](fmt.Errorf("bucket must be specified"))
	}
	// whether the operation is on the whole bucket
	wholeBucket := conn.Path == "" || conn.Path == fspath.Separator
	// get the object if it's a single file operation
	var singleFileResult *s3.GetObjectOutput
	// this ensures we don't leak the object if we error out before copying it
	closeSingleFileResult := true
	defer func() {
		// in case we error out before the object gets copied, make sure to close it
		if singleFileResult != nil && closeSingleFileResult {
			utilfn.GracefulClose(singleFileResult.Body, "s3fs", conn.Path)
		}
	}()
	var err error
	if !wholeBucket {
		singleFileResult, err = c.client.GetObject(ctx, &s3.GetObjectInput{
			Bucket: aws.String(bucket),
			Key:    aws.String(conn.Path), // does not care if the path has a prefixed slash
		})
		if err != nil {
			// if the object doesn't exist, we can assume the prefix is a directory and continue
			var noKey *types.NoSuchKey
			var notFound *types.NotFound
			if !errors.As(err, &noKey) && !errors.As(err, &notFound) {
				return wshutil.SendErrCh[iochantypes.Packet](err)
			}
		}
	}
	// whether the operation is on a single file
	singleFile := singleFileResult != nil
	if !singleFile && !recursive {
		// errors.New: the message is a non-constant, verb-free string (go vet printf)
		return wshutil.SendErrCh[iochantypes.Packet](errors.New(fstype.RecursiveRequiredError))
	}
	// whether to include the directory itself in the tar
	includeDir := (wholeBucket && conn.Path == "") || (singleFileResult == nil && conn.Path != "" && !strings.HasSuffix(conn.Path, fspath.Separator))
	timeout := fstype.DefaultTimeout
	// nil-guard opts before reading Timeout (opts is optional)
	if opts != nil && opts.Timeout > 0 {
		timeout = time.Duration(opts.Timeout) * time.Millisecond
	}
	readerCtx, cancel := context.WithTimeout(context.Background(), timeout)
	// the prefix that should be removed from the tar paths
	tarPathPrefix := conn.Path
	if wholeBucket {
		// we treat the bucket name as the root directory. If we're not including the directory itself, we need to remove the bucket name from the tar paths
		if includeDir {
			tarPathPrefix = ""
		} else {
			tarPathPrefix = bucket
		}
	} else if singleFile || includeDir {
		// if we're including the directory itself, we need to remove the last part of the path
		tarPathPrefix = fsutil.GetParentPathString(tarPathPrefix)
	}
	rtn, writeHeader, fileWriter, tarClose := tarcopy.TarCopySrc(readerCtx, tarPathPrefix)
	go func() {
		defer func() {
			tarClose()
			cancel()
		}()
		// below we get the objects concurrently so we need to store the results in a map
		objMap := make(map[string]*s3.GetObjectOutput)
		// close the objects when we're done
		defer func() {
			for key, obj := range objMap {
				utilfn.GracefulClose(obj.Body, "s3fs", key)
			}
		}()
		// tree to keep track of the paths we've added and insert fake directories for subpaths
		tree := pathtree.NewTree(tarPathPrefix, "/")
		if singleFile {
			objMap[conn.Path] = singleFileResult
			tree.Add(conn.Path)
		} else {
			// list the objects in the bucket and add them to a tree that we can then walk to write the tar entries
			var input *s3.ListObjectsV2Input
			if wholeBucket {
				// get all the objects in the bucket
				input = &s3.ListObjectsV2Input{
					Bucket: aws.String(bucket),
				}
			} else {
				objectPrefix := conn.Path
				if !strings.HasSuffix(objectPrefix, fspath.Separator) {
					objectPrefix = objectPrefix + fspath.Separator
				}
				input = &s3.ListObjectsV2Input{
					Bucket: aws.String(bucket),
					Prefix: aws.String(objectPrefix),
				}
			}
			errs := make([]error, 0)
			// mutex to protect the tree, objMap, and errs since we're fetching objects concurrently
			treeMapMutex := sync.Mutex{}
			// wait group to await the finished fetches
			wg := sync.WaitGroup{}
			getObjectAndFileInfo := func(obj *types.Object) {
				defer wg.Done()
				result, err := c.client.GetObject(ctx, &s3.GetObjectInput{
					Bucket: aws.String(bucket),
					Key:    obj.Key,
				})
				if err != nil {
					// errs is shared across fetch goroutines; take the mutex
					// before appending (previously a data race)
					treeMapMutex.Lock()
					errs = append(errs, err)
					treeMapMutex.Unlock()
					return
				}
				path := *obj.Key
				if wholeBucket {
					path = fspath.Join(bucket, path)
				}
				treeMapMutex.Lock()
				defer treeMapMutex.Unlock()
				objMap[path] = result
				tree.Add(path)
			}
			if err := c.listFilesPrefix(ctx, input, func(obj *types.Object) (bool, error) {
				wg.Add(1)
				go getObjectAndFileInfo(obj)
				return true, nil
			}); err != nil {
				rtn <- wshutil.RespErr[iochantypes.Packet](err)
				return
			}
			wg.Wait()
			if len(errs) > 0 {
				rtn <- wshutil.RespErr[iochantypes.Packet](errors.Join(errs...))
				return
			}
		}
		// Walk the tree and write the tar entries
		if err := tree.Walk(func(path string, numChildren int) error {
			mapEntry, isFile := objMap[path]
			// default vals assume entry is dir, since mapEntry might not exist
			modTime := int64(time.Now().Unix())
			mode := fstype.DirMode
			size := int64(numChildren)
			if isFile {
				mode = fstype.FileMode
				size = *mapEntry.ContentLength
				if mapEntry.LastModified != nil {
					modTime = mapEntry.LastModified.UnixMilli()
				}
			}
			finfo := &wshrpc.FileInfo{
				Name:    path,
				IsDir:   !isFile,
				Size:    size,
				ModTime: modTime,
				Mode:    mode,
			}
			if err := writeHeader(fileutil.ToFsFileInfo(finfo), path, singleFile); err != nil {
				return err
			}
			if isFile {
				if n, err := io.Copy(fileWriter, mapEntry.Body); err != nil {
					return err
				} else if n != size {
					return fmt.Errorf("error copying %v; expected to read %d bytes, but read %d", path, size, n)
				}
			}
			return nil
		}); err != nil {
			log.Printf("error walking tree: %v", err)
			rtn <- wshutil.RespErr[iochantypes.Packet](err)
			return
		}
	}()
	// we've handed singleFileResult off to the tar writer, so we don't want to close it
	closeSingleFileResult = false
	return rtn
}
// ListEntries collects the streamed directory listing for the connection path
// into a single slice, returning the first error encountered.
func (c S3Client) ListEntries(ctx context.Context, conn *connparse.Connection, opts *wshrpc.FileListOpts) ([]*wshrpc.FileInfo, error) {
	var entries []*wshrpc.FileInfo
	for respUnion := range c.ListEntriesStream(ctx, conn, opts) {
		if respUnion.Error != nil {
			return nil, respUnion.Error
		}
		entries = append(entries, respUnion.Response.FileInfo...)
	}
	return entries, nil
}
// ListEntriesStream lists the entries directly under conn's path, streaming
// results in batches on the returned channel. Two cases:
//   - conn.Host empty or "/": every bucket is reported as a top-level directory.
//   - otherwise: objects under the key prefix are listed, with deeper keys
//     collapsed into synthetic first-level directory entries.
// At most min(opts.Limit, wshrpc.MaxDirSize) entries are produced.
func (c S3Client) ListEntriesStream(ctx context.Context, conn *connparse.Connection, opts *wshrpc.FileListOpts) <-chan wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData] {
	bucket := conn.Host
	objectKeyPrefix := conn.Path
	// ensure the prefix ends with a separator so only keys inside it match
	if objectKeyPrefix != "" && !strings.HasSuffix(objectKeyPrefix, fspath.Separator) {
		objectKeyPrefix = objectKeyPrefix + "/"
	}
	numToFetch := wshrpc.MaxDirSize
	if opts != nil && opts.Limit > 0 {
		numToFetch = min(opts.Limit, wshrpc.MaxDirSize)
	}
	numFetched := 0
	if bucket == "" || bucket == fspath.Separator {
		// root listing: enumerate buckets and present each as a directory
		buckets, err := awsconn.ListBuckets(ctx, c.client)
		if err != nil {
			return wshutil.SendErrCh[wshrpc.CommandRemoteListEntriesRtnData](err)
		}
		var entries []*wshrpc.FileInfo
		for _, bucket := range buckets {
			if numFetched >= numToFetch {
				break
			}
			if bucket.Name != nil {
				entries = append(entries, &wshrpc.FileInfo{
					Path:     *bucket.Name,
					Name:     *bucket.Name,
					Dir:      fspath.Separator,
					ModTime:  bucket.CreationDate.UnixMilli(),
					IsDir:    true,
					MimeType: "directory",
				})
				numFetched++
			}
		}
		// single buffered slot, so this send cannot block before the return
		rtn := make(chan wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData], 1)
		defer close(rtn)
		rtn <- wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData]{Response: wshrpc.CommandRemoteListEntriesRtnData{FileInfo: entries}}
		return rtn
	} else {
		rtn := make(chan wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData], 16)
		// keep track of "directories" that have been used to avoid duplicates between pages
		prevUsedDirKeys := make(map[string]any)
		go func() {
			defer close(rtn)
			entryMap := make(map[string]*wshrpc.FileInfo)
			if err := c.listFilesPrefix(ctx, &s3.ListObjectsV2Input{
				Bucket: aws.String(bucket),
				Prefix: aws.String(objectKeyPrefix),
			}, func(obj *types.Object) (bool, error) {
				// returning false stops the prefix listing once the cap is hit
				if numFetched >= numToFetch {
					return false, nil
				}
				lastModTime := int64(0)
				if obj.LastModified != nil {
					lastModTime = obj.LastModified.UnixMilli()
				}
				// get the first level directory name or file name
				name, isDir := fspath.FirstLevelDir(strings.TrimPrefix(*obj.Key, objectKeyPrefix))
				path := fspath.Join(conn.GetPathWithHost(), name)
				if isDir {
					if entryMap[name] == nil {
						if _, ok := prevUsedDirKeys[name]; !ok {
							entryMap[name] = &wshrpc.FileInfo{
								Path:    path,
								Name:    name,
								IsDir:   true,
								Dir:     objectKeyPrefix,
								ModTime: lastModTime,
								Size:    0,
							}
							fileutil.AddMimeTypeToFileInfo(path, entryMap[name])
							prevUsedDirKeys[name] = struct{}{}
							numFetched++
						}
					} else if entryMap[name].ModTime < lastModTime {
						// a directory's mtime is the max mtime of objects beneath it
						entryMap[name].ModTime = lastModTime
					}
					return true, nil
				}
				size := int64(0)
				if obj.Size != nil {
					size = *obj.Size
				}
				entryMap[name] = &wshrpc.FileInfo{
					Name:    name,
					IsDir:   false,
					Dir:     objectKeyPrefix,
					Path:    path,
					ModTime: lastModTime,
					Size:    size,
				}
				fileutil.AddMimeTypeToFileInfo(path, entryMap[name])
				numFetched++
				return true, nil
			}); err != nil {
				rtn <- wshutil.RespErr[wshrpc.CommandRemoteListEntriesRtnData](err)
				return
			}
			// flush the collected entries in DirChunkSize batches
			entries := make([]*wshrpc.FileInfo, 0, wshrpc.DirChunkSize)
			for _, entry := range entryMap {
				entries = append(entries, entry)
				if len(entries) == wshrpc.DirChunkSize {
					rtn <- wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData]{Response: wshrpc.CommandRemoteListEntriesRtnData{FileInfo: entries}}
					entries = make([]*wshrpc.FileInfo, 0, wshrpc.DirChunkSize)
				}
			}
			if len(entries) > 0 {
				rtn <- wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData]{Response: wshrpc.CommandRemoteListEntriesRtnData{FileInfo: entries}}
			}
		}()
		return rtn
	}
}
// Stat resolves conn to a FileInfo without fetching object content.
// Resolution order:
//  1. empty/root host -> synthetic root directory spanning all buckets
//  2. empty/root path -> HeadBucket to test bucket existence
//  3. GetObjectAttributes for a concrete object
//  4. on NotFound, a one-object prefix listing to detect a synthetic "directory"
func (c S3Client) Stat(ctx context.Context, conn *connparse.Connection) (*wshrpc.FileInfo, error) {
	bucketName := conn.Host
	objectKey := conn.Path
	if bucketName == "" || bucketName == fspath.Separator {
		// root, refers to list all buckets
		return &wshrpc.FileInfo{
			Name:     fspath.Separator,
			IsDir:    true,
			Size:     0,
			ModTime:  0,
			Path:     fspath.Separator,
			Dir:      fspath.Separator,
			MimeType: "directory",
		}, nil
	}
	if objectKey == "" || objectKey == fspath.Separator {
		_, err := c.client.HeadBucket(ctx, &s3.HeadBucketInput{
			Bucket: aws.String(bucketName),
		})
		exists := true
		if err != nil {
			// only an explicit NotFound marks the bucket missing; any other
			// API error (e.g. access denied) still reports it as existing
			var apiError smithy.APIError
			if errors.As(err, &apiError) {
				switch apiError.(type) {
				case *types.NotFound:
					exists = false
				default:
				}
			}
		}
		if exists {
			return &wshrpc.FileInfo{
				Name:     bucketName,
				Path:     bucketName,
				Dir:      fspath.Separator,
				IsDir:    true,
				Size:     0,
				ModTime:  0,
				MimeType: "directory",
			}, nil
		} else {
			return &wshrpc.FileInfo{
				Name:     bucketName,
				Path:     bucketName,
				Dir:      fspath.Separator,
				NotFound: true,
				IsDir:    true,
			}, nil
		}
	}
	result, err := c.client.GetObjectAttributes(ctx, &s3.GetObjectAttributesInput{
		Bucket: aws.String(bucketName),
		Key:    aws.String(objectKey),
		ObjectAttributes: []types.ObjectAttributes{
			types.ObjectAttributesObjectSize,
		},
	})
	if err != nil {
		var noKey *types.NoSuchKey
		var notFound *types.NotFound
		if errors.As(err, &noKey) || errors.As(err, &notFound) {
			// try to list a single object to see if the prefix exists
			if !strings.HasSuffix(objectKey, fspath.Separator) {
				objectKey += fspath.Separator
			}
			entries, err := c.client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{
				Bucket:  aws.String(bucketName),
				Prefix:  aws.String(objectKey),
				MaxKeys: aws.Int32(1),
			})
			if err == nil {
				if entries.Contents != nil {
					// at least one object lives under this prefix: report a directory
					return &wshrpc.FileInfo{
						Name:     objectKey,
						Path:     conn.GetPathWithHost(),
						Dir:      fsutil.GetParentPath(conn),
						IsDir:    true,
						Size:     0,
						Mode:     fstype.DirMode,
						MimeType: "directory",
					}, nil
				}
			} else if !errors.As(err, &noKey) && !errors.As(err, &notFound) {
				return nil, err
			}
			return &wshrpc.FileInfo{
				Name:     objectKey,
				Path:     conn.GetPathWithHost(),
				Dir:      fsutil.GetParentPath(conn),
				IsDir:    true,
				NotFound: true,
			}, nil
		}
		return nil, err
	}
	size := int64(0)
	if result.ObjectSize != nil {
		size = *result.ObjectSize
	}
	lastModified := int64(0)
	if result.LastModified != nil {
		lastModified = result.LastModified.UnixMilli()
	}
	rtn := &wshrpc.FileInfo{
		Name:    objectKey,
		Path:    conn.GetPathWithHost(),
		Dir:     fsutil.GetParentPath(conn),
		IsDir:   false,
		Size:    size,
		ModTime: lastModified,
	}
	fileutil.AddMimeTypeToFileInfo(rtn.Path, rtn)
	return rtn, nil
}
// PutFile uploads a whole object from the base64-encoded data.Data64 payload.
// Partial writes (data.At) are unsupported, and both a bucket and an object
// key are required. An empty payload is stored as a single newline so the
// resulting object is non-empty.
func (c S3Client) PutFile(ctx context.Context, conn *connparse.Connection, data wshrpc.FileData) error {
	if data.At != nil {
		return errors.Join(errors.ErrUnsupported, fmt.Errorf("file data offset and size not supported"))
	}
	bucket := conn.Host
	objectKey := conn.Path
	if bucket == "" || bucket == "/" || objectKey == "" || objectKey == "/" {
		return errors.Join(errors.ErrUnsupported, fmt.Errorf("bucket and object key must be specified"))
	}
	// decode into a pre-sized buffer; DecodedLen is an upper bound, so the
	// actual decoded length is tracked separately
	contentMaxLength := base64.StdEncoding.DecodedLen(len(data.Data64))
	var decodedBody []byte
	var contentLength int
	var err error
	if contentMaxLength > 0 {
		decodedBody = make([]byte, contentMaxLength)
		contentLength, err = base64.StdEncoding.Decode(decodedBody, []byte(data.Data64))
		if err != nil {
			return err
		}
	} else {
		// empty payload: write a single newline placeholder
		decodedBody = []byte("\n")
		contentLength = 1
	}
	bodyReaderSeeker := bytes.NewReader(decodedBody[:contentLength])
	_, err = c.client.PutObject(ctx, &s3.PutObjectInput{
		Bucket:        aws.String(bucket),
		Key:           aws.String(objectKey),
		Body:          bodyReaderSeeker,
		ContentLength: aws.Int64(int64(contentLength)),
	})
	if err != nil {
		log.Printf("PutFile: error putting object %v:%v: %v", bucket, objectKey, err)
	}
	return err
}
// AppendFile is not supported for S3: objects are immutable blobs and cannot
// be appended to in place.
func (c S3Client) AppendFile(ctx context.Context, conn *connparse.Connection, data wshrpc.FileData) error {
	notSupported := fmt.Errorf("append file not supported")
	return errors.Join(errors.ErrUnsupported, notSupported)
}
// Mkdir is not supported for S3: directories are purely synthetic key
// prefixes and have no standalone representation to create.
func (c S3Client) Mkdir(ctx context.Context, conn *connparse.Connection) error {
	notSupported := fmt.Errorf("mkdir not supported")
	return errors.Join(errors.ErrUnsupported, notSupported)
}
// MoveInternal implements an S3-to-S3 move as copy-then-delete, since S3 has
// no native rename. The source is deleted recursively only when the caller
// requested a recursive operation and the copied entry was a directory
// (prefix).
func (c S3Client) MoveInternal(ctx context.Context, srcConn, destConn *connparse.Connection, opts *wshrpc.FileCopyOpts) error {
	wasDir, copyErr := c.CopyInternal(ctx, srcConn, destConn, opts)
	if copyErr != nil {
		return copyErr
	}
	deleteRecursively := wasDir && opts != nil && opts.Recursive
	return c.Delete(ctx, srcConn, deleteRecursively)
}
// CopyRemote copies from an arbitrary source FileShareClient into S3. Pure
// S3-to-S3 copies are delegated to CopyInternal (server-side copy); any other
// source is streamed through a tar pipe by PrefixCopyRemote, with each entry
// uploaded via PutObject. Returns whether the copied source was a directory.
func (c S3Client) CopyRemote(ctx context.Context, srcConn, destConn *connparse.Connection, srcClient fstype.FileShareClient, opts *wshrpc.FileCopyOpts) (bool, error) {
	if srcConn.Scheme == connparse.ConnectionTypeS3 && destConn.Scheme == connparse.ConnectionTypeS3 {
		return c.CopyInternal(ctx, srcConn, destConn, opts)
	}
	destBucket := destConn.Host
	if destBucket == "" || destBucket == fspath.Separator {
		return false, fmt.Errorf("destination bucket must be specified")
	}
	// callback uploads one tar entry; reader is sized to exactly `size` bytes
	return fsutil.PrefixCopyRemote(ctx, srcConn, destConn, srcClient, c, func(bucket, path string, size int64, reader io.Reader) error {
		_, err := c.client.PutObject(ctx, &s3.PutObjectInput{
			Bucket:        aws.String(bucket),
			Key:           aws.String(path),
			Body:          reader,
			ContentLength: aws.Int64(size),
		})
		return err
	}, opts)
}
// CopyInternal performs an S3-to-S3 copy using server-side CopyObject per key.
// Source keys are enumerated via prefix listing, so no object data transits
// this process. Returns whether the source resolved to a directory (prefix).
func (c S3Client) CopyInternal(ctx context.Context, srcConn, destConn *connparse.Connection, opts *wshrpc.FileCopyOpts) (bool, error) {
	srcBucket := srcConn.Host
	destBucket := destConn.Host
	if srcBucket == "" || srcBucket == fspath.Separator || destBucket == "" || destBucket == fspath.Separator {
		return false, fmt.Errorf("source and destination bucket must be specified")
	}
	// first callback lists all keys under a prefix; second copies one key
	return fsutil.PrefixCopyInternal(ctx, srcConn, destConn, c, opts, func(ctx context.Context, bucket, prefix string) ([]string, error) {
		var entries []string
		err := c.listFilesPrefix(ctx, &s3.ListObjectsV2Input{
			Bucket: aws.String(bucket),
			Prefix: aws.String(prefix),
		}, func(obj *types.Object) (bool, error) {
			entries = append(entries, *obj.Key)
			return true, nil
		})
		return entries, err
	}, func(ctx context.Context, srcPath, destPath string) error {
		// CopySource is "<bucket>/<key>" per the S3 CopyObject API
		_, err := c.client.CopyObject(ctx, &s3.CopyObjectInput{
			Bucket:     aws.String(destBucket),
			Key:        aws.String(destPath),
			CopySource: aws.String(fspath.Join(srcBucket, srcPath)),
		})
		if err != nil {
			return fmt.Errorf("error copying %v:%v to %v:%v: %w", srcBucket, srcPath, destBucket, destPath, err)
		}
		return nil
	})
}
// Delete removes an object, or — with recursive set — every object under the
// key treated as a prefix. Both bucket and key are required. After deletion,
// Stat verifies the target is gone; a surviving directory entry means the
// recursive flag was required.
func (c S3Client) Delete(ctx context.Context, conn *connparse.Connection, recursive bool) error {
	bucket := conn.Host
	objectKey := conn.Path
	if bucket == "" || bucket == fspath.Separator {
		return errors.Join(errors.ErrUnsupported, fmt.Errorf("bucket must be specified"))
	}
	if objectKey == "" || objectKey == fspath.Separator {
		return errors.Join(errors.ErrUnsupported, fmt.Errorf("object key must be specified"))
	}
	var err error
	if recursive {
		log.Printf("Deleting objects with prefix %v:%v", bucket, objectKey)
		// normalize to a directory-style prefix before listing
		if !strings.HasSuffix(objectKey, fspath.Separator) {
			objectKey = objectKey + fspath.Separator
		}
		objects := make([]types.ObjectIdentifier, 0)
		err = c.listFilesPrefix(ctx, &s3.ListObjectsV2Input{
			Bucket: aws.String(bucket),
			Prefix: aws.String(objectKey),
		}, func(obj *types.Object) (bool, error) {
			objects = append(objects, types.ObjectIdentifier{Key: obj.Key})
			return true, nil
		})
		if err != nil {
			return err
		}
		if len(objects) == 0 {
			return nil
		}
		// batch delete all keys collected under the prefix
		_, err = c.client.DeleteObjects(ctx, &s3.DeleteObjectsInput{
			Bucket: aws.String(bucket),
			Delete: &types.Delete{
				Objects: objects,
			},
		})
	} else {
		log.Printf("Deleting object %v:%v", bucket, objectKey)
		_, err = c.client.DeleteObject(ctx, &s3.DeleteObjectInput{
			Bucket: aws.String(bucket),
			Key:    aws.String(objectKey),
		})
	}
	if err != nil {
		return err
	}
	// verify the object was deleted
	finfo, err := c.Stat(ctx, conn)
	if err != nil {
		return err
	}
	if !finfo.NotFound {
		if finfo.IsDir {
			// non-recursive delete of a prefix leaves its children intact
			return fmt.Errorf(fstype.RecursiveRequiredError)
		}
		return fmt.Errorf("object was not successfully deleted %v:%v", bucket, objectKey)
	}
	return nil
}
// listFilesPrefix pages through every object matching input's bucket/prefix
// and invokes fileCallback for each. The callback returns (continue, err):
// returning false stops iteration early without error; a non-nil error aborts
// the listing. A NoSuchBucket error (when not masked by an access-denied
// condition) is unwrapped so callers can match it directly.
func (c S3Client) listFilesPrefix(ctx context.Context, input *s3.ListObjectsV2Input, fileCallback func(*types.Object) (bool, error)) error {
	objectPaginator := s3.NewListObjectsV2Paginator(c.client, input)
	for objectPaginator.HasMorePages() {
		output, err := objectPaginator.NextPage(ctx)
		if err != nil {
			var noBucket *types.NoSuchBucket
			if !awsconn.CheckAccessDeniedErr(&err) && errors.As(err, &noBucket) {
				err = noBucket
			}
			return err
		}
		for _, obj := range output.Contents {
			// copy the loop variable: &obj must not alias the shared iteration
			// variable, since some callbacks (e.g. ListEntriesStream) hand the
			// pointer to a goroutine (pre-Go 1.22 loop-variable semantics)
			obj := obj
			if cont, err := fileCallback(&obj); err != nil {
				return err
			} else if !cont {
				return nil
			}
		}
	}
	return nil
}
// Join appends path parts onto conn.Path (mutating conn) and stats the
// resulting location. An empty or root path contributes nothing, so the join
// never produces a leading separator component.
func (c S3Client) Join(ctx context.Context, conn *connparse.Connection, parts ...string) (*wshrpc.FileInfo, error) {
	segments := parts
	if conn.Path != "" && conn.Path != fspath.Separator {
		segments = append([]string{conn.Path}, parts...)
	}
	conn.Path = fspath.Join(segments...)
	return c.Stat(ctx, conn)
}
// GetConnectionType identifies this client as the s3:// scheme handler.
func (c S3Client) GetConnectionType() string {
	return connparse.ConnectionTypeS3
}
// GetCapability reports that S3 supports neither append nor mkdir: objects
// are immutable and directories are purely synthetic key prefixes.
func (c S3Client) GetCapability() wshrpc.FileShareCapability {
	return wshrpc.FileShareCapability{
		CanAppend: false,
		CanMkdir:  false,
	}
}

View file

@ -1,638 +0,0 @@
// Copyright 2025, Command Line Inc.
// SPDX-License-Identifier: Apache-2.0
package wavefs
import (
"context"
"encoding/base64"
"errors"
"fmt"
"io"
"io/fs"
"log"
"os"
"path/filepath"
"strings"
"time"
"github.com/wavetermdev/waveterm/pkg/filestore"
"github.com/wavetermdev/waveterm/pkg/remote/connparse"
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/fspath"
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/fstype"
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/fsutil"
"github.com/wavetermdev/waveterm/pkg/util/fileutil"
"github.com/wavetermdev/waveterm/pkg/util/iochan/iochantypes"
"github.com/wavetermdev/waveterm/pkg/util/tarcopy"
"github.com/wavetermdev/waveterm/pkg/util/wavefileutil"
"github.com/wavetermdev/waveterm/pkg/waveobj"
"github.com/wavetermdev/waveterm/pkg/wps"
"github.com/wavetermdev/waveterm/pkg/wshrpc"
"github.com/wavetermdev/waveterm/pkg/wshutil"
)
const (
	// DirMode is the synthetic mode reported for wavefile "directories",
	// which are prefix groupings over flat file names, not real directories.
	DirMode os.FileMode = 0755 | os.ModeDir
)

// WaveClient implements FileShareClient over the in-app wavefile (blockfile)
// store, addressed as wavefile://<zoneid>/<name>.
type WaveClient struct{}

// compile-time interface conformance check
var _ fstype.FileShareClient = WaveClient{}

// NewWaveClient returns a stateless wavefile FileShareClient.
func NewWaveClient() *WaveClient {
	return &WaveClient{}
}
// ReadStream performs a single Read and re-chunks the result onto a channel:
// FileChunkSize base64 slices (with At offsets) for files, DirChunkSize
// batches of entries for directories. Context cancellation aborts mid-stream.
func (c WaveClient) ReadStream(ctx context.Context, conn *connparse.Connection, data wshrpc.FileData) <-chan wshrpc.RespOrErrorUnion[wshrpc.FileData] {
	ch := make(chan wshrpc.RespOrErrorUnion[wshrpc.FileData], 16)
	go func() {
		defer close(ch)
		rtnData, err := c.Read(ctx, conn, data)
		if err != nil {
			ch <- wshutil.RespErr[wshrpc.FileData](err)
			return
		}
		dataLen := len(rtnData.Data64)
		if !rtnData.Info.IsDir {
			// file: slice the base64 payload into FileChunkSize windows
			for i := 0; i < dataLen; i += wshrpc.FileChunkSize {
				if ctx.Err() != nil {
					ch <- wshutil.RespErr[wshrpc.FileData](context.Cause(ctx))
					return
				}
				dataEnd := min(i+wshrpc.FileChunkSize, dataLen)
				ch <- wshrpc.RespOrErrorUnion[wshrpc.FileData]{Response: wshrpc.FileData{Data64: rtnData.Data64[i:dataEnd], Info: rtnData.Info, At: &wshrpc.FileDataAt{Offset: int64(i), Size: dataEnd - i}}}
			}
		} else {
			// directory: batch the entry list
			for i := 0; i < len(rtnData.Entries); i += wshrpc.DirChunkSize {
				if ctx.Err() != nil {
					ch <- wshutil.RespErr[wshrpc.FileData](context.Cause(ctx))
					return
				}
				ch <- wshrpc.RespOrErrorUnion[wshrpc.FileData]{Response: wshrpc.FileData{Entries: rtnData.Entries[i:min(i+wshrpc.DirChunkSize, len(rtnData.Entries))], Info: rtnData.Info}}
			}
		}
	}()
	return ch
}
// Read returns a wavefile's content base64-encoded, honoring an optional
// offset/size window via data.At. If the name does not resolve to a file,
// it falls back to a prefix listing: a non-empty listing is returned as
// directory entries, an empty one as a NotFound directory info.
func (c WaveClient) Read(ctx context.Context, conn *connparse.Connection, data wshrpc.FileData) (*wshrpc.FileData, error) {
	zoneId := conn.Host
	if zoneId == "" {
		return nil, fmt.Errorf("zoneid not found in connection")
	}
	fileName, err := cleanPath(conn.Path)
	if err != nil {
		return nil, fmt.Errorf("error cleaning path: %w", err)
	}
	if data.At != nil {
		// windowed read; missing files are a hard NOTFOUND here (no dir fallback)
		_, dataBuf, err := filestore.WFS.ReadAt(ctx, zoneId, fileName, data.At.Offset, int64(data.At.Size))
		if err == nil {
			return &wshrpc.FileData{Info: data.Info, Data64: base64.StdEncoding.EncodeToString(dataBuf)}, nil
		} else if errors.Is(err, fs.ErrNotExist) {
			return nil, fmt.Errorf("NOTFOUND: %w", err)
		} else {
			return nil, fmt.Errorf("error reading blockfile: %w", err)
		}
	} else {
		// whole-file read; ErrNotExist falls through to the directory fallback below
		_, dataBuf, err := filestore.WFS.ReadFile(ctx, zoneId, fileName)
		if err == nil {
			return &wshrpc.FileData{Info: data.Info, Data64: base64.StdEncoding.EncodeToString(dataBuf)}, nil
		} else if !errors.Is(err, fs.ErrNotExist) {
			return nil, fmt.Errorf("error reading blockfile: %w", err)
		}
	}
	// not a file: treat the path as a prefix and list it
	list, err := c.ListEntries(ctx, conn, nil)
	if err != nil {
		return nil, fmt.Errorf("error listing blockfiles: %w", err)
	}
	if len(list) == 0 {
		return &wshrpc.FileData{
			Info: &wshrpc.FileInfo{
				Name:     fspath.Base(fileName),
				Path:     fileName,
				Dir:      fspath.Dir(fileName),
				NotFound: true,
				IsDir:    true,
			}}, nil
	}
	return &wshrpc.FileData{Info: data.Info, Entries: list}, nil
}
// ReadTarStream packages a wavefile (or all files under a prefix) into a tar
// stream. A trailing slash on the source path keeps the path itself as the
// tar prefix; otherwise the parent directory is used. The tar is produced by
// a goroutine bounded by opts.Timeout (or the default).
func (c WaveClient) ReadTarStream(ctx context.Context, conn *connparse.Connection, opts *wshrpc.FileCopyOpts) <-chan wshrpc.RespOrErrorUnion[iochantypes.Packet] {
	log.Printf("ReadTarStream: conn: %v, opts: %v\n", conn, opts)
	path := conn.Path
	srcHasSlash := strings.HasSuffix(path, "/")
	cleanedPath, err := cleanPath(path)
	if err != nil {
		return wshutil.SendErrCh[iochantypes.Packet](fmt.Errorf("error cleaning path: %w", err))
	}
	finfo, err := c.Stat(ctx, conn)
	// note: err==nil is checked first, so finfo is non-nil whenever it is dereferenced
	exists := err == nil && !finfo.NotFound
	if err != nil {
		return wshutil.SendErrCh[iochantypes.Packet](fmt.Errorf("error getting file info: %w", err))
	}
	if !exists {
		return wshutil.SendErrCh[iochantypes.Packet](fmt.Errorf("file not found: %s", conn.GetFullURI()))
	}
	singleFile := finfo != nil && !finfo.IsDir
	// pathPrefix is stripped from entry names inside the tar
	var pathPrefix string
	if !singleFile && srcHasSlash {
		pathPrefix = cleanedPath
	} else {
		pathPrefix = filepath.Dir(cleanedPath)
	}
	schemeAndHost := conn.GetSchemeAndHost() + "/"
	var entries []*wshrpc.FileInfo
	if singleFile {
		entries = []*wshrpc.FileInfo{finfo}
	} else {
		entries, err = c.ListEntries(ctx, conn, nil)
		if err != nil {
			return wshutil.SendErrCh[iochantypes.Packet](fmt.Errorf("error listing blockfiles: %w", err))
		}
	}
	timeout := fstype.DefaultTimeout
	if opts.Timeout > 0 {
		timeout = time.Duration(opts.Timeout) * time.Millisecond
	}
	// detached context: the tar producer outlives the caller's ctx, bounded by timeout
	readerCtx, cancel := context.WithTimeout(context.Background(), timeout)
	rtn, writeHeader, fileWriter, tarClose := tarcopy.TarCopySrc(readerCtx, pathPrefix)
	go func() {
		defer func() {
			tarClose()
			cancel()
		}()
		for _, file := range entries {
			if readerCtx.Err() != nil {
				rtn <- wshutil.RespErr[iochantypes.Packet](context.Cause(readerCtx))
				return
			}
			// wavefiles carry no mode; present all entries as 0644
			file.Mode = 0644
			if err = writeHeader(fileutil.ToFsFileInfo(file), file.Path, singleFile); err != nil {
				rtn <- wshutil.RespErr[iochantypes.Packet](fmt.Errorf("error writing tar header: %w", err))
				return
			}
			if file.IsDir {
				continue
			}
			log.Printf("ReadTarStream: reading file: %s\n", file.Path)
			internalPath := strings.TrimPrefix(file.Path, schemeAndHost)
			_, dataBuf, err := filestore.WFS.ReadFile(ctx, conn.Host, internalPath)
			if err != nil {
				rtn <- wshutil.RespErr[iochantypes.Packet](fmt.Errorf("error reading blockfile: %w", err))
				return
			}
			if _, err = fileWriter.Write(dataBuf); err != nil {
				rtn <- wshutil.RespErr[iochantypes.Packet](fmt.Errorf("error writing tar data: %w", err))
				return
			}
		}
	}()
	return rtn
}
// ListEntriesStream wraps ListEntries, asynchronously emitting the listing in
// DirChunkSize batches on the returned channel. A listing error is sent as a
// single error union before the channel closes.
func (c WaveClient) ListEntriesStream(ctx context.Context, conn *connparse.Connection, opts *wshrpc.FileListOpts) <-chan wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData] {
	respCh := make(chan wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData], 16)
	go func() {
		defer close(respCh)
		entries, err := c.ListEntries(ctx, conn, opts)
		if err != nil {
			respCh <- wshutil.RespErr[wshrpc.CommandRemoteListEntriesRtnData](err)
			return
		}
		for start := 0; start < len(entries); start += wshrpc.DirChunkSize {
			end := min(start+wshrpc.DirChunkSize, len(entries))
			respCh <- wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData]{Response: wshrpc.CommandRemoteListEntriesRtnData{FileInfo: entries[start:end]}}
		}
	}()
	return respCh
}
// ListEntries lists wavefiles under conn.Path (treated as a prefix). Unless
// opts.All is set, names with deeper separators are collapsed into synthetic
// first-level directory entries whose mtime is the max of their contents.
// Offset/Limit are applied to the assembled list after collapsing.
func (c WaveClient) ListEntries(ctx context.Context, conn *connparse.Connection, opts *wshrpc.FileListOpts) ([]*wshrpc.FileInfo, error) {
	log.Printf("ListEntries: conn: %v, opts: %v\n", conn, opts)
	zoneId := conn.Host
	if zoneId == "" {
		return nil, fmt.Errorf("zoneid not found in connection")
	}
	if opts == nil {
		opts = &wshrpc.FileListOpts{}
	}
	prefix, err := cleanPath(conn.Path)
	if err != nil {
		return nil, fmt.Errorf("error cleaning path: %w", err)
	}
	// match only names strictly inside the prefix "directory"
	prefix += fspath.Separator
	var fileList []*wshrpc.FileInfo
	dirMap := make(map[string]*wshrpc.FileInfo)
	if err := listFilesPrefix(ctx, zoneId, prefix, func(wf *filestore.WaveFile) error {
		if !opts.All {
			name, isDir := fspath.FirstLevelDir(strings.TrimPrefix(wf.Name, prefix))
			if isDir {
				path := fspath.Join(conn.GetPathWithHost(), name)
				if _, ok := dirMap[path]; ok {
					// existing synthetic dir: keep the newest mtime seen
					if dirMap[path].ModTime < wf.ModTs {
						dirMap[path].ModTime = wf.ModTs
					}
					return nil
				}
				dirMap[path] = &wshrpc.FileInfo{
					Path:          path,
					Name:          name,
					Dir:           fspath.Dir(path),
					Size:          0,
					IsDir:         true,
					SupportsMkdir: false,
					Mode:          DirMode,
				}
				fileList = append(fileList, dirMap[path])
				return nil
			}
		}
		fileList = append(fileList, wavefileutil.WaveFileToFileInfo(wf))
		return nil
	}); err != nil {
		return nil, fmt.Errorf("error listing entries: %w", err)
	}
	// pagination: apply offset first, then limit
	if opts.Offset > 0 {
		if opts.Offset >= len(fileList) {
			fileList = nil
		} else {
			fileList = fileList[opts.Offset:]
		}
	}
	if opts.Limit > 0 {
		if opts.Limit < len(fileList) {
			fileList = fileList[:opts.Limit]
		}
	}
	return fileList, nil
}
// Stat resolves conn to a FileInfo. If no wavefile exists at the exact name,
// it falls back to a prefix listing: a non-empty listing yields a synthetic
// directory, an empty one a NotFound result.
func (c WaveClient) Stat(ctx context.Context, conn *connparse.Connection) (*wshrpc.FileInfo, error) {
	zoneId := conn.Host
	if zoneId == "" {
		return nil, fmt.Errorf("zoneid not found in connection")
	}
	fileName, err := fsutil.CleanPathPrefix(conn.Path)
	if err != nil {
		return nil, fmt.Errorf("error cleaning path: %w", err)
	}
	fileInfo, err := filestore.WFS.Stat(ctx, zoneId, fileName)
	if err != nil {
		if errors.Is(err, fs.ErrNotExist) {
			// attempt to list the directory
			entries, err := c.ListEntries(ctx, conn, nil)
			if err != nil {
				return nil, fmt.Errorf("error listing entries: %w", err)
			}
			if len(entries) > 0 {
				return &wshrpc.FileInfo{
					Path:  conn.GetPathWithHost(),
					Name:  fileName,
					Dir:   fsutil.GetParentPathString(fileName),
					Size:  0,
					IsDir: true,
					Mode:  DirMode,
				}, nil
			} else {
				return &wshrpc.FileInfo{
					Path:     conn.GetPathWithHost(),
					Name:     fileName,
					Dir:      fsutil.GetParentPathString(fileName),
					NotFound: true}, nil
			}
		}
		return nil, fmt.Errorf("error getting file info: %w", err)
	}
	return wavefileutil.WaveFileToFileInfo(fileInfo), nil
}
// PutFile writes base64-decoded data to a wavefile, creating the file first
// (with Opts/Meta from data.Info) if it does not exist. With data.At set, the
// write happens at that offset; otherwise the whole file is replaced. An
// invalidate event is published so watchers refresh.
func (c WaveClient) PutFile(ctx context.Context, conn *connparse.Connection, data wshrpc.FileData) error {
	dataBuf, err := base64.StdEncoding.DecodeString(data.Data64)
	if err != nil {
		return fmt.Errorf("error decoding data64: %w", err)
	}
	zoneId := conn.Host
	if zoneId == "" {
		return fmt.Errorf("zoneid not found in connection")
	}
	fileName, err := cleanPath(conn.Path)
	if err != nil {
		return fmt.Errorf("error cleaning path: %w", err)
	}
	// create-on-missing: Stat failure with ErrNotExist triggers MakeFile
	if _, err := filestore.WFS.Stat(ctx, zoneId, fileName); err != nil {
		if !errors.Is(err, fs.ErrNotExist) {
			return fmt.Errorf("error getting blockfile info: %w", err)
		}
		var opts wshrpc.FileOpts
		var meta wshrpc.FileMeta
		if data.Info != nil {
			if data.Info.Opts != nil {
				opts = *data.Info.Opts
			}
			if data.Info.Meta != nil {
				meta = *data.Info.Meta
			}
		}
		if err := filestore.WFS.MakeFile(ctx, zoneId, fileName, meta, opts); err != nil {
			return fmt.Errorf("error making blockfile: %w", err)
		}
	}
	if data.At != nil && data.At.Offset >= 0 {
		if err := filestore.WFS.WriteAt(ctx, zoneId, fileName, data.At.Offset, dataBuf); errors.Is(err, fs.ErrNotExist) {
			return fmt.Errorf("NOTFOUND: %w", err)
		} else if err != nil {
			return fmt.Errorf("error writing to blockfile: %w", err)
		}
	} else {
		if err := filestore.WFS.WriteFile(ctx, zoneId, fileName, dataBuf); errors.Is(err, fs.ErrNotExist) {
			return fmt.Errorf("NOTFOUND: %w", err)
		} else if err != nil {
			return fmt.Errorf("error writing to blockfile: %w", err)
		}
	}
	// notify subscribers that the file content changed
	wps.Broker.Publish(wps.WaveEvent{
		Event:  wps.Event_BlockFile,
		Scopes: []string{waveobj.MakeORef(waveobj.OType_Block, zoneId).String()},
		Data: &wps.WSFileEventData{
			ZoneId:   zoneId,
			FileName: fileName,
			FileOp:   wps.FileOp_Invalidate,
		},
	})
	return nil
}
// AppendFile appends base64-decoded data to a wavefile, creating the file
// first (with Opts/Meta from data.Info) if it does not exist, then publishes
// an invalidate event so watchers refresh.
func (c WaveClient) AppendFile(ctx context.Context, conn *connparse.Connection, data wshrpc.FileData) error {
	dataBuf, err := base64.StdEncoding.DecodeString(data.Data64)
	if err != nil {
		return fmt.Errorf("error decoding data64: %w", err)
	}
	zoneId := conn.Host
	if zoneId == "" {
		return fmt.Errorf("zoneid not found in connection")
	}
	fileName, err := cleanPath(conn.Path)
	if err != nil {
		return fmt.Errorf("error cleaning path: %w", err)
	}
	// create-on-missing, mirroring PutFile
	_, err = filestore.WFS.Stat(ctx, zoneId, fileName)
	if err != nil {
		if !errors.Is(err, fs.ErrNotExist) {
			return fmt.Errorf("error getting blockfile info: %w", err)
		}
		var opts wshrpc.FileOpts
		var meta wshrpc.FileMeta
		if data.Info != nil {
			if data.Info.Opts != nil {
				opts = *data.Info.Opts
			}
			if data.Info.Meta != nil {
				meta = *data.Info.Meta
			}
		}
		if err := filestore.WFS.MakeFile(ctx, zoneId, fileName, meta, opts); err != nil {
			return fmt.Errorf("error making blockfile: %w", err)
		}
	}
	err = filestore.WFS.AppendData(ctx, zoneId, fileName, dataBuf)
	if errors.Is(err, fs.ErrNotExist) {
		return fmt.Errorf("NOTFOUND: %w", err)
	}
	if err != nil {
		return fmt.Errorf("error writing to blockfile: %w", err)
	}
	// notify subscribers that the file content changed
	wps.Broker.Publish(wps.WaveEvent{
		Event:  wps.Event_BlockFile,
		Scopes: []string{waveobj.MakeORef(waveobj.OType_Block, zoneId).String()},
		Data: &wps.WSFileEventData{
			ZoneId:   zoneId,
			FileName: fileName,
			FileOp:   wps.FileOp_Invalidate,
		},
	})
	return nil
}
// Mkdir is unsupported: the wavefile store has no real directories, only
// prefix-based grouping over flat file names.
func (c WaveClient) Mkdir(ctx context.Context, conn *connparse.Connection) error {
	return errors.ErrUnsupported
}
// MoveInternal moves a wavefile (or prefix) between zones on the same host by
// copying and then deleting the source. Cross-host moves are rejected. The
// source delete is recursive only when the caller asked for a recursive
// operation and the copy resolved to a directory.
func (c WaveClient) MoveInternal(ctx context.Context, srcConn, destConn *connparse.Connection, opts *wshrpc.FileCopyOpts) error {
	if srcConn.Host != destConn.Host {
		return fmt.Errorf("move internal, src and dest hosts do not match")
	}
	copiedDir, copyErr := c.CopyInternal(ctx, srcConn, destConn, opts)
	if copyErr != nil {
		return fmt.Errorf("error copying blockfile: %w", copyErr)
	}
	deleteRecursively := copiedDir && opts != nil && opts.Recursive
	if delErr := c.Delete(ctx, srcConn, deleteRecursively); delErr != nil {
		return fmt.Errorf("error deleting blockfile: %w", delErr)
	}
	return nil
}
// CopyInternal copies wavefiles between zones/prefixes. PrefixCopyInternal
// drives the operation with two callbacks: the first lists all file names
// under a prefix; the second copies one file's bytes and publishes an
// invalidate event for the destination. Returns whether the source resolved
// to a directory (prefix).
func (c WaveClient) CopyInternal(ctx context.Context, srcConn, destConn *connparse.Connection, opts *wshrpc.FileCopyOpts) (bool, error) {
	return fsutil.PrefixCopyInternal(ctx, srcConn, destConn, c, opts, func(ctx context.Context, zoneId, prefix string) ([]string, error) {
		entryList := make([]string, 0)
		if err := listFilesPrefix(ctx, zoneId, prefix, func(wf *filestore.WaveFile) error {
			entryList = append(entryList, wf.Name)
			return nil
		}); err != nil {
			return nil, err
		}
		return entryList, nil
	}, func(ctx context.Context, srcPath, destPath string) error {
		// paths arrive as "<host>/<filename>"; strip the host to get the wavefile name
		srcHost := srcConn.Host
		srcFileName := strings.TrimPrefix(srcPath, srcHost+fspath.Separator)
		destHost := destConn.Host
		destFileName := strings.TrimPrefix(destPath, destHost+fspath.Separator)
		_, dataBuf, err := filestore.WFS.ReadFile(ctx, srcHost, srcFileName)
		if err != nil {
			return fmt.Errorf("error reading source blockfile: %w", err)
		}
		if err := filestore.WFS.WriteFile(ctx, destHost, destFileName, dataBuf); err != nil {
			return fmt.Errorf("error writing to destination blockfile: %w", err)
		}
		// notify subscribers of the destination zone
		wps.Broker.Publish(wps.WaveEvent{
			Event:  wps.Event_BlockFile,
			Scopes: []string{waveobj.MakeORef(waveobj.OType_Block, destHost).String()},
			Data: &wps.WSFileEventData{
				ZoneId:   destHost,
				FileName: destFileName,
				FileOp:   wps.FileOp_Invalidate,
			},
		})
		return nil
	})
}
// CopyRemote copies from an arbitrary source FileShareClient into the
// wavefile store identified by destConn. Wave-to-wave copies are delegated to
// CopyInternal. Other sources are streamed through a tar pipe; each entry is
// materialized as a wavefile (created if missing), written whole, and an
// invalidate event is published so watchers refresh. Returns whether the
// copied source was a directory.
func (c WaveClient) CopyRemote(ctx context.Context, srcConn, destConn *connparse.Connection, srcClient fstype.FileShareClient, opts *wshrpc.FileCopyOpts) (bool, error) {
	if srcConn.Scheme == connparse.ConnectionTypeWave && destConn.Scheme == connparse.ConnectionTypeWave {
		return c.CopyInternal(ctx, srcConn, destConn, opts)
	}
	zoneId := destConn.Host
	if zoneId == "" {
		return false, fmt.Errorf("zoneid not found in connection")
	}
	return fsutil.PrefixCopyRemote(ctx, srcConn, destConn, srcClient, c, func(zoneId, path string, size int64, reader io.Reader) error {
		dataBuf := make([]byte, size)
		// io.ReadFull (rather than a single reader.Read) guarantees the whole
		// entry is consumed: a lone Read may legally return a short count,
		// which would silently leave the tail of dataBuf zeroed for entries
		// larger than one read chunk.
		if _, err := io.ReadFull(reader, dataBuf); err != nil {
			if !errors.Is(err, io.EOF) && !errors.Is(err, io.ErrUnexpectedEOF) {
				return fmt.Errorf("error reading tar data: %w", err)
			}
		}
		// create the destination wavefile if it does not exist yet
		if _, err := filestore.WFS.Stat(ctx, zoneId, path); err != nil {
			if !errors.Is(err, fs.ErrNotExist) {
				return fmt.Errorf("error getting blockfile info: %w", err)
			} else {
				if err := filestore.WFS.MakeFile(ctx, zoneId, path, wshrpc.FileMeta{}, wshrpc.FileOpts{}); err != nil {
					return fmt.Errorf("error making blockfile: %w", err)
				}
			}
		}
		if err := filestore.WFS.WriteFile(ctx, zoneId, path, dataBuf); err != nil {
			return fmt.Errorf("error writing to blockfile: %w", err)
		}
		// notify subscribers that the file content changed
		wps.Broker.Publish(wps.WaveEvent{
			Event:  wps.Event_BlockFile,
			Scopes: []string{waveobj.MakeORef(waveobj.OType_Block, zoneId).String()},
			Data: &wps.WSFileEventData{
				ZoneId:   zoneId,
				FileName: path,
				FileOp:   wps.FileOp_Invalidate,
			},
		})
		return nil
	}, opts)
}
// Delete removes a wavefile, or — with recursive set — every wavefile under
// conn.Path treated as a prefix. Deleting a non-existent path is a no-op.
// A delete event is published per removed file; individual failures are
// collected and reported together.
func (c WaveClient) Delete(ctx context.Context, conn *connparse.Connection, recursive bool) error {
	zoneId := conn.Host
	if zoneId == "" {
		return fmt.Errorf("zoneid not found in connection")
	}
	prefix := conn.Path
	finfo, err := c.Stat(ctx, conn)
	// err==nil is checked first, so finfo is only dereferenced when valid
	exists := err == nil && !finfo.NotFound
	if err != nil && !errors.Is(err, fs.ErrNotExist) {
		return fmt.Errorf("error getting file info: %w", err)
	}
	if !exists {
		return nil
	}
	pathsToDelete := make([]string, 0)
	if finfo.IsDir {
		if !recursive {
			return fmt.Errorf("%v is not empty, use recursive flag to delete", prefix)
		}
		if !strings.HasSuffix(prefix, fspath.Separator) {
			prefix += fspath.Separator
		}
		if err := listFilesPrefix(ctx, zoneId, prefix, func(wf *filestore.WaveFile) error {
			pathsToDelete = append(pathsToDelete, wf.Name)
			return nil
		}); err != nil {
			return fmt.Errorf("error listing blockfiles: %w", err)
		}
	} else {
		pathsToDelete = append(pathsToDelete, prefix)
	}
	if len(pathsToDelete) > 0 {
		// best-effort: keep deleting on error, report all failures at the end
		errs := make([]error, 0)
		for _, entry := range pathsToDelete {
			if err := filestore.WFS.DeleteFile(ctx, zoneId, entry); err != nil {
				errs = append(errs, fmt.Errorf("error deleting blockfile %s/%s: %w", zoneId, entry, err))
				continue
			}
			wps.Broker.Publish(wps.WaveEvent{
				Event:  wps.Event_BlockFile,
				Scopes: []string{waveobj.MakeORef(waveobj.OType_Block, zoneId).String()},
				Data: &wps.WSFileEventData{
					ZoneId:   zoneId,
					FileName: entry,
					FileOp:   wps.FileOp_Delete,
				},
			})
		}
		if len(errs) > 0 {
			return fmt.Errorf("error deleting blockfiles: %v", errs)
		}
	}
	return nil
}
// listFilesPrefix lists all wavefiles in zoneId and invokes entryCallback for
// each whose name starts with prefix (an empty prefix matches everything).
// A callback error aborts iteration and is returned to the caller — the
// previous implementation silently discarded it.
func listFilesPrefix(ctx context.Context, zoneId, prefix string, entryCallback func(*filestore.WaveFile) error) error {
	if zoneId == "" {
		return fmt.Errorf("zoneid not found in connection")
	}
	fileListOrig, err := filestore.WFS.ListFiles(ctx, zoneId)
	if err != nil {
		return fmt.Errorf("error listing blockfiles: %w", err)
	}
	for _, wf := range fileListOrig {
		if prefix == "" || strings.HasPrefix(wf.Name, prefix) {
			if err := entryCallback(wf); err != nil {
				return err
			}
		}
	}
	return nil
}
// Join appends parts onto conn.Path, normalizes the result with cleanPath,
// mutates conn in place, and stats the joined location.
func (c WaveClient) Join(ctx context.Context, conn *connparse.Connection, parts ...string) (*wshrpc.FileInfo, error) {
	segments := append([]string{conn.Path}, parts...)
	cleaned, err := cleanPath(fspath.Join(segments...))
	if err != nil {
		return nil, fmt.Errorf("error cleaning path: %w", err)
	}
	conn.Path = cleaned
	return c.Stat(ctx, conn)
}
// GetCapability reports that wavefiles support append but not mkdir (the
// store has no real directories).
func (c WaveClient) GetCapability() wshrpc.FileShareCapability {
	return wshrpc.FileShareCapability{
		CanAppend: true,
		CanMkdir:  false,
	}
}
// cleanPath normalizes a wavefile path: the empty string and the bare root
// map to ""; one leading separator is stripped; paths starting with "~" or
// "." are rejected (the "." test also covers ".."); and interior "." / ".."
// segments are resolved lexically, with ".." never climbing above the root.
func cleanPath(path string) (string, error) {
	if path == "" || path == fspath.Separator {
		return "", nil
	}
	if strings.HasPrefix(path, fspath.Separator) {
		path = path[1:]
	}
	// HasPrefix(path, ".") subsumes the former separate ".." check, which was
	// dead code; the error message still documents both forms for callers.
	if strings.HasPrefix(path, "~") || strings.HasPrefix(path, ".") {
		return "", fmt.Errorf("wavefile path cannot start with ~, ., or ..")
	}
	var newParts []string
	for _, part := range strings.Split(path, fspath.Separator) {
		switch part {
		case "..":
			// pop one level, but never above the root
			if len(newParts) > 0 {
				newParts = newParts[:len(newParts)-1]
			}
		case ".":
			// current-directory segment: drop it
		default:
			newParts = append(newParts, part)
		}
	}
	return fspath.Join(newParts...), nil
}
// GetConnectionType identifies this client as the wavefile:// scheme handler.
func (c WaveClient) GetConnectionType() string {
	return connparse.ConnectionTypeWave
}

View file

@ -5,34 +5,60 @@ package wshfs
import (
"context"
"encoding/base64"
"fmt"
"log"
"os"
"time"
"github.com/wavetermdev/waveterm/pkg/remote/connparse"
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/fstype"
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/fsutil"
"github.com/wavetermdev/waveterm/pkg/util/iochan/iochantypes"
"github.com/wavetermdev/waveterm/pkg/wshrpc"
"github.com/wavetermdev/waveterm/pkg/wshrpc/wshclient"
"github.com/wavetermdev/waveterm/pkg/wshutil"
)
const (
	// RemoteFileTransferSizeLimit caps single-shot remote file transfers at
	// 32 MiB; larger transfers are rejected to avoid router pipeline stalls.
	RemoteFileTransferSizeLimit = 32 * 1024 * 1024

	// DefaultTimeout bounds remote file operations when the caller gives none.
	DefaultTimeout = 30 * time.Second

	// FileMode / DirMode are the synthetic permission bits reported for
	// remote entries.
	FileMode = os.FileMode(0644)
	DirMode  = os.FileMode(0755) | os.ModeDir

	// Error strings for copy/delete preconditions. MergeRequiredError and
	// OverwriteRequiredError are format strings taking the destination path.
	RecursiveRequiredError = "recursive flag must be set for directory operations"
	MergeRequiredError     = "directory already exists at %q, set overwrite flag to delete the existing contents or set merge flag to merge the contents"
	OverwriteRequiredError = "file already exists at %q, set overwrite flag to delete the existing file"
)
// This needs to be set by whoever initializes the client, either main-server or wshcmd-connserver
var RpcClient *wshutil.WshRpc
type WshClient struct{}
var _ fstype.FileShareClient = WshClient{}
func NewWshClient() *WshClient {
return &WshClient{}
// parseConnection parses a wsh file URI into a Connection.
// NOTE(review): "ReplaceCurrentHost" suggests a current-host placeholder is
// rewritten during parsing — confirm against the connparse package.
func parseConnection(ctx context.Context, path string) (*connparse.Connection, error) {
	conn, err := connparse.ParseURIAndReplaceCurrentHost(ctx, path)
	if err != nil {
		return nil, fmt.Errorf("error parsing connection %s: %w", path, err)
	}
	return conn, nil
}
func (c WshClient) Read(ctx context.Context, conn *connparse.Connection, data wshrpc.FileData) (*wshrpc.FileData, error) {
rtnCh := c.ReadStream(ctx, conn, data)
// Read fetches the full contents of the file named by data.Info.Path,
// collecting the underlying read stream into a single FileData result.
func Read(ctx context.Context, data wshrpc.FileData) (*wshrpc.FileData, error) {
	log.Printf("Read: %v", data.Info.Path)
	conn, connErr := parseConnection(ctx, data.Info.Path)
	if connErr != nil {
		return nil, connErr
	}
	return fsutil.ReadStreamToFileData(ctx, readStream(conn, data))
}
func (c WshClient) ReadStream(ctx context.Context, conn *connparse.Connection, data wshrpc.FileData) <-chan wshrpc.RespOrErrorUnion[wshrpc.FileData] {
// ReadStream streams the file named by data.Info.Path as a sequence of
// FileData chunks. A URI parse failure is delivered as an error element on
// the returned channel rather than as a separate error value.
func ReadStream(ctx context.Context, data wshrpc.FileData) <-chan wshrpc.RespOrErrorUnion[wshrpc.FileData] {
	log.Printf("ReadStream: %v", data.Info.Path)
	conn, err := parseConnection(ctx, data.Info.Path)
	if err != nil {
		return wshutil.SendErrCh[wshrpc.FileData](err)
	}
	return readStream(conn, data)
}
func readStream(conn *connparse.Connection, data wshrpc.FileData) <-chan wshrpc.RespOrErrorUnion[wshrpc.FileData] {
byteRange := ""
if data.At != nil && data.At.Size > 0 {
byteRange = fmt.Sprintf("%d-%d", data.At.Offset, data.At.Offset+int64(data.At.Size))
@ -41,17 +67,14 @@ func (c WshClient) ReadStream(ctx context.Context, conn *connparse.Connection, d
return wshclient.RemoteStreamFileCommand(RpcClient, streamFileData, &wshrpc.RpcOpts{Route: wshutil.MakeConnectionRouteId(conn.Host)})
}
func (c WshClient) ReadTarStream(ctx context.Context, conn *connparse.Connection, opts *wshrpc.FileCopyOpts) <-chan wshrpc.RespOrErrorUnion[iochantypes.Packet] {
timeout := opts.Timeout
if timeout == 0 {
timeout = fstype.DefaultTimeout.Milliseconds()
func ListEntries(ctx context.Context, path string, opts *wshrpc.FileListOpts) ([]*wshrpc.FileInfo, error) {
log.Printf("ListEntries: %v", path)
conn, err := parseConnection(ctx, path)
if err != nil {
return nil, err
}
return wshclient.RemoteTarStreamCommand(RpcClient, wshrpc.CommandRemoteStreamTarData{Path: conn.Path, Opts: opts}, &wshrpc.RpcOpts{Route: wshutil.MakeConnectionRouteId(conn.Host), Timeout: timeout})
}
func (c WshClient) ListEntries(ctx context.Context, conn *connparse.Connection, opts *wshrpc.FileListOpts) ([]*wshrpc.FileInfo, error) {
var entries []*wshrpc.FileInfo
rtnCh := c.ListEntriesStream(ctx, conn, opts)
rtnCh := listEntriesStream(conn, opts)
for respUnion := range rtnCh {
if respUnion.Error != nil {
return nil, respUnion.Error
@ -62,15 +85,42 @@ func (c WshClient) ListEntries(ctx context.Context, conn *connparse.Connection,
return entries, nil
}
func (c WshClient) ListEntriesStream(ctx context.Context, conn *connparse.Connection, opts *wshrpc.FileListOpts) <-chan wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData] {
// ListEntriesStream streams directory entries for path over a channel.
// A URI parse failure is delivered as an error element on the channel.
func ListEntriesStream(ctx context.Context, path string, opts *wshrpc.FileListOpts) <-chan wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData] {
	log.Printf("ListEntriesStream: %v", path)
	conn, err := parseConnection(ctx, path)
	if err != nil {
		return wshutil.SendErrCh[wshrpc.CommandRemoteListEntriesRtnData](err)
	}
	return listEntriesStream(conn, opts)
}
// listEntriesStream issues the remote list-entries RPC, routed to the
// connection's host.
func listEntriesStream(conn *connparse.Connection, opts *wshrpc.FileListOpts) <-chan wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData] {
	return wshclient.RemoteListEntriesCommand(RpcClient, wshrpc.CommandRemoteListEntriesData{Path: conn.Path, Opts: opts}, &wshrpc.RpcOpts{Route: wshutil.MakeConnectionRouteId(conn.Host)})
}
func (c WshClient) Stat(ctx context.Context, conn *connparse.Connection) (*wshrpc.FileInfo, error) {
// Stat returns file metadata for the given wsh:// URI.
func Stat(ctx context.Context, path string) (*wshrpc.FileInfo, error) {
	log.Printf("Stat: %v", path)
	conn, connErr := parseConnection(ctx, path)
	if connErr != nil {
		return nil, connErr
	}
	return stat(conn)
}
// stat fetches file metadata via the remote file-info RPC on conn's host.
func stat(conn *connparse.Connection) (*wshrpc.FileInfo, error) {
	return wshclient.RemoteFileInfoCommand(RpcClient, conn.Path, &wshrpc.RpcOpts{Route: wshutil.MakeConnectionRouteId(conn.Host)})
}
func (c WshClient) PutFile(ctx context.Context, conn *connparse.Connection, data wshrpc.FileData) error {
func PutFile(ctx context.Context, data wshrpc.FileData) error {
log.Printf("PutFile: %v", data.Info.Path)
conn, err := parseConnection(ctx, data.Info.Path)
if err != nil {
return err
}
dataSize := base64.StdEncoding.DecodedLen(len(data.Data64))
if dataSize > RemoteFileTransferSizeLimit {
return fmt.Errorf("file data size %d exceeds transfer limit of %d bytes", dataSize, RemoteFileTransferSizeLimit)
}
info := data.Info
if info == nil {
info = &wshrpc.FileInfo{Opts: &wshrpc.FileOpts{}}
@ -83,7 +133,16 @@ func (c WshClient) PutFile(ctx context.Context, conn *connparse.Connection, data
return wshclient.RemoteWriteFileCommand(RpcClient, data, &wshrpc.RpcOpts{Route: wshutil.MakeConnectionRouteId(conn.Host)})
}
func (c WshClient) AppendFile(ctx context.Context, conn *connparse.Connection, data wshrpc.FileData) error {
func Append(ctx context.Context, data wshrpc.FileData) error {
log.Printf("Append: %v", data.Info.Path)
conn, err := parseConnection(ctx, data.Info.Path)
if err != nil {
return err
}
dataSize := base64.StdEncoding.DecodedLen(len(data.Data64))
if dataSize > RemoteFileTransferSizeLimit {
return fmt.Errorf("file data size %d exceeds transfer limit of %d bytes", dataSize, RemoteFileTransferSizeLimit)
}
info := data.Info
if info == nil {
info = &wshrpc.FileInfo{Path: conn.Path, Opts: &wshrpc.FileOpts{}}
@ -96,11 +155,80 @@ func (c WshClient) AppendFile(ctx context.Context, conn *connparse.Connection, d
return wshclient.RemoteWriteFileCommand(RpcClient, data, &wshrpc.RpcOpts{Route: wshutil.MakeConnectionRouteId(conn.Host)})
}
func (c WshClient) Mkdir(ctx context.Context, conn *connparse.Connection) error {
// Mkdir creates a directory at the given wsh:// URI on the remote host.
func Mkdir(ctx context.Context, path string) error {
	log.Printf("Mkdir: %v", path)
	conn, connErr := parseConnection(ctx, path)
	if connErr != nil {
		return connErr
	}
	rpcOpts := &wshrpc.RpcOpts{Route: wshutil.MakeConnectionRouteId(conn.Host)}
	return wshclient.RemoteMkdirCommand(RpcClient, conn.Path, rpcOpts)
}
func (c WshClient) MoveInternal(ctx context.Context, srcConn, destConn *connparse.Connection, opts *wshrpc.FileCopyOpts) error {
// Move transfers the file at SrcUri to DestUri. When both URIs resolve to the
// same host the move is delegated to that host; otherwise the file is copied
// across hosts and the source is deleted only after the copy succeeds.
func Move(ctx context.Context, data wshrpc.CommandFileCopyData) error {
	opts := data.Opts
	if opts == nil {
		opts = &wshrpc.FileCopyOpts{}
	}
	log.Printf("Move: srcuri: %v, desturi: %v, opts: %v", data.SrcUri, data.DestUri, opts)
	srcConn, err := parseConnection(ctx, data.SrcUri)
	if err != nil {
		return fmt.Errorf("error parsing source connection: %w", err)
	}
	destConn, err := parseConnection(ctx, data.DestUri)
	if err != nil {
		return fmt.Errorf("error parsing destination connection: %w", err)
	}
	if srcConn.Host != destConn.Host {
		// cross-host move: copy first, then delete the source
		isDir, err := copyInternal(srcConn, destConn, opts)
		if err != nil {
			return fmt.Errorf("cannot copy %q to %q: %w", data.SrcUri, data.DestUri, err)
		}
		// delete recursively only when the source was a directory and the
		// caller asked for a recursive operation
		return delete_(srcConn, opts.Recursive && isDir)
	}
	return moveInternal(srcConn, destConn, opts)
}
// Copy copies the file at SrcUri to DestUri, delegating the transfer to the
// destination host via copyInternal.
func Copy(ctx context.Context, data wshrpc.CommandFileCopyData) error {
	copyOpts := data.Opts
	if copyOpts == nil {
		copyOpts = &wshrpc.FileCopyOpts{}
	}
	log.Printf("Copy: srcuri: %v, desturi: %v, opts: %v", data.SrcUri, data.DestUri, copyOpts)
	src, err := parseConnection(ctx, data.SrcUri)
	if err != nil {
		return fmt.Errorf("error parsing source connection: %w", err)
	}
	dest, err := parseConnection(ctx, data.DestUri)
	if err != nil {
		return fmt.Errorf("error parsing destination connection: %w", err)
	}
	_, err = copyInternal(src, dest, copyOpts)
	return err
}
// Delete removes the file or directory named by data.Path on its host;
// data.Recursive must be set for non-empty directories.
func Delete(ctx context.Context, data wshrpc.CommandDeleteFileData) error {
	log.Printf("Delete: %v", data)
	conn, connErr := parseConnection(ctx, data.Path)
	if connErr != nil {
		return connErr
	}
	return delete_(conn, data.Recursive)
}
// delete_ issues the remote delete RPC for conn.Path (trailing underscore
// avoids colliding with the builtin delete).
func delete_(conn *connparse.Connection, recursive bool) error {
	return wshclient.RemoteFileDeleteCommand(RpcClient, wshrpc.CommandDeleteFileData{Path: conn.Path, Recursive: recursive}, &wshrpc.RpcOpts{Route: wshutil.MakeConnectionRouteId(conn.Host)})
}
// Join joins path with the given parts on the remote host and returns the
// resulting file's info.
func Join(ctx context.Context, path string, parts ...string) (*wshrpc.FileInfo, error) {
	log.Printf("Join: %v", path)
	conn, connErr := parseConnection(ctx, path)
	if connErr != nil {
		return nil, connErr
	}
	joined := append([]string{conn.Path}, parts...)
	return wshclient.RemoteFileJoinCommand(RpcClient, joined, &wshrpc.RpcOpts{Route: wshutil.MakeConnectionRouteId(conn.Host)})
}
func moveInternal(srcConn, destConn *connparse.Connection, opts *wshrpc.FileCopyOpts) error {
if srcConn.Host != destConn.Host {
return fmt.Errorf("move internal, src and dest hosts do not match")
}
@ -109,38 +237,18 @@ func (c WshClient) MoveInternal(ctx context.Context, srcConn, destConn *connpars
}
timeout := opts.Timeout
if timeout == 0 {
timeout = fstype.DefaultTimeout.Milliseconds()
timeout = DefaultTimeout.Milliseconds()
}
return wshclient.RemoteFileMoveCommand(RpcClient, wshrpc.CommandFileCopyData{SrcUri: srcConn.GetFullURI(), DestUri: destConn.GetFullURI(), Opts: opts}, &wshrpc.RpcOpts{Route: wshutil.MakeConnectionRouteId(destConn.Host), Timeout: timeout})
}
func (c WshClient) CopyRemote(ctx context.Context, srcConn, destConn *connparse.Connection, _ fstype.FileShareClient, opts *wshrpc.FileCopyOpts) (bool, error) {
return c.CopyInternal(ctx, srcConn, destConn, opts)
}
func (c WshClient) CopyInternal(ctx context.Context, srcConn, destConn *connparse.Connection, opts *wshrpc.FileCopyOpts) (bool, error) {
func copyInternal(srcConn, destConn *connparse.Connection, opts *wshrpc.FileCopyOpts) (bool, error) {
if opts == nil {
opts = &wshrpc.FileCopyOpts{}
}
timeout := opts.Timeout
if timeout == 0 {
timeout = fstype.DefaultTimeout.Milliseconds()
timeout = DefaultTimeout.Milliseconds()
}
return wshclient.RemoteFileCopyCommand(RpcClient, wshrpc.CommandFileCopyData{SrcUri: srcConn.GetFullURI(), DestUri: destConn.GetFullURI(), Opts: opts}, &wshrpc.RpcOpts{Route: wshutil.MakeConnectionRouteId(destConn.Host), Timeout: timeout})
}
func (c WshClient) Delete(ctx context.Context, conn *connparse.Connection, recursive bool) error {
return wshclient.RemoteFileDeleteCommand(RpcClient, wshrpc.CommandDeleteFileData{Path: conn.Path, Recursive: recursive}, &wshrpc.RpcOpts{Route: wshutil.MakeConnectionRouteId(conn.Host)})
}
func (c WshClient) Join(ctx context.Context, conn *connparse.Connection, parts ...string) (*wshrpc.FileInfo, error) {
return wshclient.RemoteFileJoinCommand(RpcClient, append([]string{conn.Path}, parts...), &wshrpc.RpcOpts{Route: wshutil.MakeConnectionRouteId(conn.Host)})
}
func (c WshClient) GetConnectionType() string {
return connparse.ConnectionTypeWsh
}
func (c WshClient) GetCapability() wshrpc.FileShareCapability {
return wshrpc.FileShareCapability{CanAppend: true, CanMkdir: true}
}

View file

@ -14,8 +14,6 @@ import (
"sync"
"time"
"github.com/wavetermdev/waveterm/pkg/wshrpc"
"github.com/wavetermdev/waveterm/pkg/wshrpc/wshclient"
"golang.org/x/sync/singleflight"
)
@ -132,64 +130,6 @@ type DirEntryResult struct {
Err error
}
// listS3Directory lists an S3 "directory" for the suggestions widget.
// Results are cached per widgetId+dir key; concurrent callers for the same
// key are collapsed with singleflight so the listing RPC runs at most once.
// The returned channel is closed after all entries are delivered or when ctx
// is canceled.
// NOTE(review): assumes conn has the form "aws:..." — confirm with callers.
func listS3Directory(ctx context.Context, widgetId string, conn string, dir string, maxFiles int) (<-chan DirEntryResult, error) {
	if !strings.HasPrefix(conn, "aws:") {
		return nil, fmt.Errorf("invalid S3 connection: %s", conn)
	}
	key := widgetId + "|" + dir
	if cached, ok := getCache(key); ok {
		// cache hit: replay the cached results on a fresh channel
		ch := make(chan DirEntryResult, ListDirChanSize)
		go func() {
			defer close(ch)
			for _, r := range cached {
				select {
				case ch <- r:
				case <-ctx.Done():
					return
				}
			}
		}()
		return ch, nil
	}
	// Ensure only one operation populates the cache for this key.
	value, err, _ := group.Do(key, func() (interface{}, error) {
		path := conn + ":s3://" + dir
		entries, err := wshclient.FileListCommand(wshclient.GetBareRpcClient(), wshrpc.FileListData{Path: path, Opts: &wshrpc.FileListOpts{Limit: maxFiles}}, nil)
		if err != nil {
			return nil, err
		}
		// wrap the RPC results in fs.DirEntry-compatible mocks
		var results []DirEntryResult
		for _, entry := range entries {
			mockEntry := &MockDirEntry{
				NameStr:  entry.Name,
				IsDirVal: entry.IsDir,
				FileMode: entry.Mode,
			}
			results = append(results, DirEntryResult{Entry: mockEntry})
		}
		return results, nil
	})
	if err != nil {
		return nil, err
	}
	results := value.([]DirEntryResult)
	setCache(key, results)
	// stream the freshly fetched results to the caller
	ch := make(chan DirEntryResult, ListDirChanSize)
	go func() {
		defer close(ch)
		for _, r := range results {
			select {
			case ch <- r:
			case <-ctx.Done():
				return
			}
		}
	}()
	return ch, nil
}
func listDirectory(ctx context.Context, widgetId string, dir string, maxFiles int) (<-chan DirEntryResult, error) {
key := widgetId + "|" + dir
if cached, ok := getCache(key); ok {

View file

@ -373,17 +373,9 @@ func fetchFileSuggestions(ctx context.Context, data wshrpc.FetchSuggestionsData)
listingCtx, cancelFn := context.WithCancel(ctx)
defer cancelFn()
var entriesCh <-chan DirEntryResult
if strings.HasPrefix(data.FileConnection, "aws:") {
entriesCh, err = listS3Directory(listingCtx, data.WidgetId, data.FileConnection, baseDir, 1000)
if err != nil {
return nil, fmt.Errorf("error listing S3 directory: %w", err)
}
} else {
entriesCh, err = listDirectory(listingCtx, data.WidgetId, baseDir, 1000)
if err != nil {
return nil, fmt.Errorf("error listing directory: %w", err)
}
entriesCh, err := listDirectory(listingCtx, data.WidgetId, baseDir, 1000)
if err != nil {
return nil, fmt.Errorf("error listing directory: %w", err)
}
const maxEntries = MaxSuggestions // top-k entries

View file

@ -1,151 +0,0 @@
// Copyright 2025, Command Line Inc.
// SPDX-License-Identifier: Apache-2.0
package colprint
import (
"fmt"
"io"
)
// formatFn converts a value of type T to its single-line string representation.
type formatFn[T any] func(T) (string, error)

// formatFnArray converts a value of type T to multiple display strings, one
// per printed column entry.
type formatFnArray[T any] func(T) ([]string, error)
// PrintColumnsArray renders multi-line formatted values in aligned columns.
// It buffers up to sampleSize values from the channel to measure the widest
// rendered line, derives the column width from that sample (max length + 2
// padding), then prints the buffered values followed by the remainder of the
// stream, wrapping after numCols columns. format is invoked again for each
// buffered value when printing, so it should be deterministic.
func PrintColumnsArray[T any](values <-chan T, numCols int, sampleSize int, format formatFnArray[T], w io.Writer) error {
	widest := 0
	var buffered []T
	for v := range values {
		buffered = append(buffered, v)
		lines, err := format(v)
		if err != nil {
			return err
		}
		for _, line := range lines {
			if len(line) > widest {
				widest = len(line)
			}
		}
		if len(buffered) >= sampleSize {
			break
		}
	}
	width := widest + 2
	if width < 1 {
		width = 1
	}
	col := 0
	emit := func(v T) error {
		lines, err := format(v)
		if err != nil {
			return err
		}
		for _, line := range lines {
			if err := printColHelper(line, width, &col, numCols, w); err != nil {
				return err
			}
		}
		return nil
	}
	// print the sampled values first, then drain the rest of the channel
	for _, v := range buffered {
		if err := emit(v); err != nil {
			return err
		}
	}
	for v := range values {
		if err := emit(v); err != nil {
			return err
		}
	}
	if col > 0 {
		if _, err := fmt.Fprint(w, "\n"); err != nil {
			return err
		}
	}
	return nil
}
// PrintColumns renders single-string formatted values in aligned columns,
// letting long values span multiple columns. The first sampleSize values are
// buffered to determine the column width (widest sample + 2 padding); the
// buffered values are printed first, then the rest of the stream, wrapping
// after numCols columns. format runs a second time per buffered value when
// printing, so it should be deterministic.
func PrintColumns[T any](values <-chan T, numCols int, sampleSize int, format formatFn[T], w io.Writer) error {
	widest := 0
	var buffered []T
	for v := range values {
		buffered = append(buffered, v)
		s, err := format(v)
		if err != nil {
			return err
		}
		if len(s) > widest {
			widest = len(s)
		}
		if len(buffered) >= sampleSize {
			break
		}
	}
	width := widest + 2
	if width < 1 {
		width = 1
	}
	col := 0
	emit := func(v T) error {
		s, err := format(v)
		if err != nil {
			return err
		}
		return printColHelper(s, width, &col, numCols, w)
	}
	// print the sampled values first, then drain the rest of the channel
	for _, v := range buffered {
		if err := emit(v); err != nil {
			return err
		}
	}
	for v := range values {
		if err := emit(v); err != nil {
			return err
		}
	}
	if col > 0 {
		if _, err := fmt.Fprint(w, "\n"); err != nil {
			return err
		}
	}
	return nil
}
func printColHelper(str string, colWidth int, col *int, numCols int, w io.Writer) error {
nameColSpan := (len(str) + 1) / colWidth
if (len(str)+1)%colWidth != 0 {
nameColSpan++
}
if *col+nameColSpan > numCols {
if _, err := fmt.Fprint(w, "\n"); err != nil {
return err
}
*col = 0
}
if _, err := fmt.Fprintf(w, "%-*s", nameColSpan*colWidth, str); err != nil {
return err
}
*col += nameColSpan
return nil
}

View file

@ -1,142 +0,0 @@
// Copyright 2025, Command Line Inc.
// SPDX-License-Identifier: Apache-2.0
// Package tarcopy provides functions for copying files over a channel via a tar stream.
package tarcopy
import (
"archive/tar"
"context"
"errors"
"fmt"
"io"
"io/fs"
"log"
"path/filepath"
"strings"
"github.com/wavetermdev/waveterm/pkg/util/iochan"
"github.com/wavetermdev/waveterm/pkg/util/iochan/iochantypes"
"github.com/wavetermdev/waveterm/pkg/util/utilfn"
"github.com/wavetermdev/waveterm/pkg/wshrpc"
)
const (
tarCopySrcName = "TarCopySrc"
tarCopyDestName = "TarCopyDest"
pipeReaderName = "pipe reader"
pipeWriterName = "pipe writer"
tarWriterName = "tar writer"
// custom flag to indicate that the source is a single file
SingleFile = "singlefile"
)
// TarCopySrc creates a tar stream writer and returns a channel to send the tar stream to.
// writeHeader is a function that writes the tar header for the file. If only a single file is being written, the singleFile flag should be set to true.
// writer is the tar writer to write the file data to.
// close is a function that closes the tar writer and internal pipe writer.
func TarCopySrc(ctx context.Context, pathPrefix string) (outputChan chan wshrpc.RespOrErrorUnion[iochantypes.Packet], writeHeader func(fi fs.FileInfo, file string, singleFile bool) error, writer io.Writer, close func()) {
	// data flow: tarWriter -> pipeWriter -> pipeReader -> outputChan packets
	pipeReader, pipeWriter := io.Pipe()
	tarWriter := tar.NewWriter(pipeWriter)
	rtnChan := iochan.ReaderChan(ctx, pipeReader, wshrpc.FileChunkSize, func() {
		log.Printf("Closing pipe reader\n")
		utilfn.GracefulClose(pipeReader, tarCopySrcName, pipeReaderName)
	})
	// guards against writing a second entry after the stream was marked single-file
	singleFileFlagSet := false
	return rtnChan, func(fi fs.FileInfo, path string, singleFile bool) error {
		// generate tar header
		header, err := tar.FileInfoHeader(fi, path)
		if err != nil {
			return err
		}
		if singleFile {
			if singleFileFlagSet {
				return errors.New("attempting to write multiple files to a single file tar stream")
			}
			// mark the stream single-file via a PAX record the receiver checks
			header.PAXRecords = map[string]string{SingleFile: "true"}
			singleFileFlagSet = true
		}
		path, err = fixPath(path, pathPrefix)
		if err != nil {
			return err
		}
		// skip if path is empty, which means the file is the root directory
		if path == "" {
			return nil
		}
		header.Name = path
		// write header
		if err := tarWriter.WriteHeader(header); err != nil {
			return err
		}
		return nil
	}, tarWriter, func() {
		log.Printf("Closing tar writer\n")
		utilfn.GracefulClose(tarWriter, tarCopySrcName, tarWriterName)
		utilfn.GracefulClose(pipeWriter, tarCopySrcName, pipeWriterName)
	}
}
// fixPath strips prefix from path, cleans the remainder, removes a single
// leading slash or backslash, and rejects any result that still contains
// ".." (directory traversal).
func fixPath(path, prefix string) (string, error) {
	trimmed := filepath.Clean(strings.TrimPrefix(path, prefix))
	trimmed = strings.TrimPrefix(trimmed, "/")
	trimmed = strings.TrimPrefix(trimmed, "\\")
	if strings.Contains(trimmed, "..") {
		return "", fmt.Errorf("invalid tar path containing directory traversal: %s", trimmed)
	}
	return trimmed, nil
}
// TarCopyDest reads a tar stream from a channel and writes the files to the destination.
// readNext is a function that is called for each file in the tar stream to read the file data. If only a single file is being written from the tar src, the singleFile flag will be set in this callback. It should return an error if the file cannot be read.
// The function returns an error if the tar stream cannot be read.
func TarCopyDest(ctx context.Context, cancel context.CancelCauseFunc, ch <-chan wshrpc.RespOrErrorUnion[iochantypes.Packet], readNext func(next *tar.Header, reader *tar.Reader, singleFile bool) error) error {
	// data flow: ch packets -> pipeWriter -> pipeReader -> tarReader
	pipeReader, pipeWriter := io.Pipe()
	iochan.WriterChan(ctx, pipeWriter, ch, func() {
		utilfn.GracefulClose(pipeWriter, tarCopyDestName, pipeWriterName)
	}, cancel)
	tarReader := tar.NewReader(pipeReader)
	defer func() {
		if !utilfn.GracefulClose(pipeReader, tarCopyDestName, pipeReaderName) {
			// If the pipe reader cannot be closed, cancel the context. This should kill the writer goroutine.
			cancel(nil)
		}
	}()
	for {
		select {
		case <-ctx.Done():
			if ctx.Err() != nil {
				return context.Cause(ctx)
			}
			return nil
		default:
			next, err := tarReader.Next()
			if err != nil {
				// Do one more check for context error before returning
				if ctx.Err() != nil {
					return context.Cause(ctx)
				}
				if errors.Is(err, io.EOF) {
					// clean end of the tar stream
					return nil
				} else {
					return err
				}
			}
			// Check for directory traversal
			if strings.Contains(next.Name, "..") {
				return fmt.Errorf("invalid tar path containing directory traversal: %s", next.Name)
			}
			// singleFile is true when the sender set the SingleFile PAX record
			err = readNext(next, tarReader, next.PAXRecords != nil && next.PAXRecords[SingleFile] == "true")
			if err != nil {
				return err
			}
		}
	}
}

View file

@ -1,37 +0,0 @@
package wavefileutil
import (
"fmt"
"github.com/wavetermdev/waveterm/pkg/filestore"
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/fsutil"
"github.com/wavetermdev/waveterm/pkg/util/fileutil"
"github.com/wavetermdev/waveterm/pkg/wshrpc"
)
const (
WaveFilePathPattern = "wavefile://%s/%s"
)
// WaveFileToFileInfo converts a filestore.WaveFile into the FileInfo shape
// used by the wshrpc file APIs, deriving the wavefile:// path from the zone
// id and file name and attaching a mime type.
func WaveFileToFileInfo(wf *filestore.WaveFile) *wshrpc.FileInfo {
	fullPath := fmt.Sprintf(WaveFilePathPattern, wf.ZoneId, wf.Name)
	info := &wshrpc.FileInfo{
		Path:          fullPath,
		Dir:           fsutil.GetParentPathString(fullPath),
		Name:          wf.Name,
		Opts:          &wf.Opts,
		Size:          wf.Size,
		Meta:          &wf.Meta,
		SupportsMkdir: false,
	}
	fileutil.AddMimeTypeToFileInfo(fullPath, info)
	return info
}
// WaveFileListToFileInfoList converts a slice of WaveFiles to FileInfos.
// Returns nil for an empty input (matching the previous behavior, which
// matters for JSON encoding); otherwise the result slice is pre-sized to
// avoid repeated growth.
func WaveFileListToFileInfoList(wfList []*filestore.WaveFile) []*wshrpc.FileInfo {
	if len(wfList) == 0 {
		return nil
	}
	fileInfoList := make([]*wshrpc.FileInfo, 0, len(wfList))
	for _, wf := range wfList {
		fileInfoList = append(fileInfoList, WaveFileToFileInfo(wf))
	}
	return fileInfoList
}

View file

@ -25,7 +25,7 @@ import (
"github.com/wavetermdev/waveterm/pkg/authkey"
"github.com/wavetermdev/waveterm/pkg/filestore"
"github.com/wavetermdev/waveterm/pkg/panichandler"
"github.com/wavetermdev/waveterm/pkg/remote/fileshare"
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/wshfs"
"github.com/wavetermdev/waveterm/pkg/schema"
"github.com/wavetermdev/waveterm/pkg/service"
"github.com/wavetermdev/waveterm/pkg/util/utilfn"
@ -331,25 +331,22 @@ func handleStreamLocalFile(w http.ResponseWriter, r *http.Request) {
}
func handleStreamFile(w http.ResponseWriter, r *http.Request) {
conn := r.URL.Query().Get("connection")
if conn == "" {
conn = wshrpc.LocalConnName
}
path := r.URL.Query().Get("path")
if path == "" {
http.Error(w, "path is required", http.StatusBadRequest)
return
}
no404 := r.URL.Query().Get("no404")
// path should already be formatted as a wsh:// URI (e.g. wsh://local/path or wsh://connection/path)
data := wshrpc.FileData{
Info: &wshrpc.FileInfo{
Path: path,
},
}
rtnCh := fileshare.ReadStream(r.Context(), data)
rtnCh := wshfs.ReadStream(r.Context(), data)
err := handleRemoteStreamFileFromCh(w, r, path, rtnCh, nil, no404 != "")
if err != nil {
log.Printf("error streaming file %q %q: %v\n", conn, path, err)
log.Printf("error streaming file %q: %v\n", path, err)
http.Error(w, fmt.Sprintf("error streaming file: %v", err), http.StatusInternalServerError)
}
}

View file

@ -13,7 +13,6 @@ import (
"github.com/wavetermdev/waveterm/pkg/waveobj"
"github.com/wavetermdev/waveterm/pkg/wps"
"github.com/wavetermdev/waveterm/pkg/vdom"
"github.com/wavetermdev/waveterm/pkg/util/iochan/iochantypes"
"github.com/wavetermdev/waveterm/pkg/aiusechat/uctypes"
)
@ -113,12 +112,6 @@ func ConnListCommand(w *wshutil.WshRpc, opts *wshrpc.RpcOpts) ([]string, error)
return resp, err
}
// command "connlistaws", wshserver.ConnListAWSCommand
// ConnListAWSCommand issues the "connlistaws" RPC with no payload and returns
// its []string response. Appears to be generated (see GenerateWshClient);
// prefer regenerating over hand edits.
func ConnListAWSCommand(w *wshutil.WshRpc, opts *wshrpc.RpcOpts) ([]string, error) {
	resp, err := sendRpcRequestCallHelper[[]string](w, "connlistaws", nil, opts)
	return resp, err
}
// command "connreinstallwsh", wshserver.ConnReinstallWshCommand
func ConnReinstallWshCommand(w *wshutil.WshRpc, data wshrpc.ConnExtData, opts *wshrpc.RpcOpts) error {
_, err := sendRpcRequestCallHelper[any](w, "connreinstallwsh", data, opts)
@ -143,6 +136,12 @@ func ConnUpdateWshCommand(w *wshutil.WshRpc, data wshrpc.RemoteInfo, opts *wshrp
return resp, err
}
// command "controlgetrouteid", wshserver.ControlGetRouteIdCommand
// ControlGetRouteIdCommand issues the "controlgetrouteid" RPC with no payload
// and returns the string response.
func ControlGetRouteIdCommand(w *wshutil.WshRpc, opts *wshrpc.RpcOpts) (string, error) {
	resp, err := sendRpcRequestCallHelper[string](w, "controlgetrouteid", nil, opts)
	return resp, err
}
// command "controllerappendoutput", wshserver.ControllerAppendOutputCommand
func ControllerAppendOutputCommand(w *wshutil.WshRpc, data wshrpc.CommandControllerAppendOutputData, opts *wshrpc.RpcOpts) error {
_, err := sendRpcRequestCallHelper[any](w, "controllerappendoutput", data, opts)
@ -281,12 +280,6 @@ func FileAppendCommand(w *wshutil.WshRpc, data wshrpc.FileData, opts *wshrpc.Rpc
return err
}
// command "fileappendijson", wshserver.FileAppendIJsonCommand
// FileAppendIJsonCommand issues the "fileappendijson" RPC with the given
// append data; only the error result is returned.
func FileAppendIJsonCommand(w *wshutil.WshRpc, data wshrpc.CommandAppendIJsonData, opts *wshrpc.RpcOpts) error {
	_, err := sendRpcRequestCallHelper[any](w, "fileappendijson", data, opts)
	return err
}
// command "filecopy", wshserver.FileCopyCommand
func FileCopyCommand(w *wshutil.WshRpc, data wshrpc.CommandFileCopyData, opts *wshrpc.RpcOpts) error {
_, err := sendRpcRequestCallHelper[any](w, "filecopy", data, opts)
@ -357,17 +350,6 @@ func FileRestoreBackupCommand(w *wshutil.WshRpc, data wshrpc.CommandFileRestoreB
return err
}
// command "filesharecapability", wshserver.FileShareCapabilityCommand
// FileShareCapabilityCommand issues the "filesharecapability" RPC for the
// given path/connection string and returns the backend's capability flags.
func FileShareCapabilityCommand(w *wshutil.WshRpc, data string, opts *wshrpc.RpcOpts) (wshrpc.FileShareCapability, error) {
	resp, err := sendRpcRequestCallHelper[wshrpc.FileShareCapability](w, "filesharecapability", data, opts)
	return resp, err
}
// command "filestreamtar", wshserver.FileStreamTarCommand
// FileStreamTarCommand issues the streaming "filestreamtar" RPC and returns a
// channel of tar-stream packets (each element carries data or an error).
func FileStreamTarCommand(w *wshutil.WshRpc, data wshrpc.CommandRemoteStreamTarData, opts *wshrpc.RpcOpts) chan wshrpc.RespOrErrorUnion[iochantypes.Packet] {
	return sendRpcRequestResponseStreamHelper[iochantypes.Packet](w, "filestreamtar", data, opts)
}
// command "filewrite", wshserver.FileWriteCommand
func FileWriteCommand(w *wshutil.WshRpc, data wshrpc.FileData, opts *wshrpc.RpcOpts) error {
_, err := sendRpcRequestCallHelper[any](w, "filewrite", data, opts)
@ -386,6 +368,12 @@ func FocusWindowCommand(w *wshutil.WshRpc, data string, opts *wshrpc.RpcOpts) er
return err
}
// command "getallvars", wshserver.GetAllVarsCommand
// GetAllVarsCommand issues the "getallvars" RPC and returns the list of
// variable responses.
func GetAllVarsCommand(w *wshutil.WshRpc, data wshrpc.CommandVarData, opts *wshrpc.RpcOpts) ([]wshrpc.CommandVarResponseData, error) {
	resp, err := sendRpcRequestCallHelper[[]wshrpc.CommandVarResponseData](w, "getallvars", data, opts)
	return resp, err
}
// command "getbuilderoutput", wshserver.GetBuilderOutputCommand
func GetBuilderOutputCommand(w *wshutil.WshRpc, data string, opts *wshrpc.RpcOpts) ([]string, error) {
resp, err := sendRpcRequestCallHelper[[]string](w, "getbuilderoutput", data, opts)
@ -719,11 +707,6 @@ func RemoteStreamFileCommand(w *wshutil.WshRpc, data wshrpc.CommandRemoteStreamF
return sendRpcRequestResponseStreamHelper[wshrpc.FileData](w, "remotestreamfile", data, opts)
}
// command "remotetarstream", wshserver.RemoteTarStreamCommand
// RemoteTarStreamCommand issues the streaming "remotetarstream" RPC and
// returns a channel of tar-stream packets (each element carries data or an
// error).
func RemoteTarStreamCommand(w *wshutil.WshRpc, data wshrpc.CommandRemoteStreamTarData, opts *wshrpc.RpcOpts) chan wshrpc.RespOrErrorUnion[iochantypes.Packet] {
	return sendRpcRequestResponseStreamHelper[iochantypes.Packet](w, "remotetarstream", data, opts)
}
// command "remoteterminatejobmanager", wshserver.RemoteTerminateJobManagerCommand
func RemoteTerminateJobManagerCommand(w *wshutil.WshRpc, data wshrpc.CommandRemoteTerminateJobManagerData, opts *wshrpc.RpcOpts) error {
_, err := sendRpcRequestCallHelper[any](w, "remoteterminatejobmanager", data, opts)
@ -929,6 +912,12 @@ func WaveAIToolApproveCommand(w *wshutil.WshRpc, data wshrpc.CommandWaveAIToolAp
return err
}
// command "wavefilereadstream", wshserver.WaveFileReadStreamCommand
// WaveFileReadStreamCommand issues the "wavefilereadstream" RPC and returns
// the resulting WaveFileInfo.
func WaveFileReadStreamCommand(w *wshutil.WshRpc, data wshrpc.CommandWaveFileReadStreamData, opts *wshrpc.RpcOpts) (*wshrpc.WaveFileInfo, error) {
	resp, err := sendRpcRequestCallHelper[*wshrpc.WaveFileInfo](w, "wavefilereadstream", data, opts)
	return resp, err
}
// command "waveinfo", wshserver.WaveInfoCommand
func WaveInfoCommand(w *wshutil.WshRpc, opts *wshrpc.RpcOpts) (*wshrpc.WaveInfoData, error) {
resp, err := sendRpcRequestCallHelper[*wshrpc.WaveInfoData](w, "waveinfo", nil, opts)

View file

@ -4,7 +4,6 @@
package wshremote
import (
"archive/tar"
"context"
"encoding/base64"
"errors"
@ -18,11 +17,9 @@ import (
"time"
"github.com/wavetermdev/waveterm/pkg/remote/connparse"
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/fstype"
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/fsutil"
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/wshfs"
"github.com/wavetermdev/waveterm/pkg/util/fileutil"
"github.com/wavetermdev/waveterm/pkg/util/iochan/iochantypes"
"github.com/wavetermdev/waveterm/pkg/util/tarcopy"
"github.com/wavetermdev/waveterm/pkg/util/utilfn"
"github.com/wavetermdev/waveterm/pkg/wavebase"
"github.com/wavetermdev/waveterm/pkg/wshrpc"
@ -30,6 +27,10 @@ import (
"github.com/wavetermdev/waveterm/pkg/wshutil"
)
const RemoteFileTransferSizeLimit = 32 * 1024 * 1024
var DisableRecursiveFileOpts = true
type ByteRangeType struct {
All bool
Start int64
@ -153,6 +154,9 @@ func (impl *ServerImpl) remoteStreamFileInternal(ctx context.Context, data wshrp
if finfo.IsDir {
return impl.remoteStreamFileDir(ctx, path, byteRange, dataCallback)
} else {
if finfo.Size > RemoteFileTransferSizeLimit {
return fmt.Errorf("file %q size %d exceeds transfer limit of %d bytes", path, finfo.Size, RemoteFileTransferSizeLimit)
}
return impl.remoteStreamFileRegular(ctx, path, byteRange, dataCallback)
}
}
@ -186,288 +190,155 @@ func (impl *ServerImpl) RemoteStreamFileCommand(ctx context.Context, data wshrpc
return ch
}
func (impl *ServerImpl) RemoteTarStreamCommand(ctx context.Context, data wshrpc.CommandRemoteStreamTarData) <-chan wshrpc.RespOrErrorUnion[iochantypes.Packet] {
path := data.Path
opts := data.Opts
if opts == nil {
opts = &wshrpc.FileCopyOpts{}
}
log.Printf("RemoteTarStreamCommand: path=%s\n", path)
srcHasSlash := strings.HasSuffix(path, "/")
path, err := wavebase.ExpandHomeDir(path)
if err != nil {
return wshutil.SendErrCh[iochantypes.Packet](fmt.Errorf("cannot expand path %q: %w", path, err))
}
cleanedPath := filepath.Clean(wavebase.ExpandHomeDirSafe(path))
finfo, err := os.Stat(cleanedPath)
if err != nil {
return wshutil.SendErrCh[iochantypes.Packet](fmt.Errorf("cannot stat file %q: %w", path, err))
// prepareDestForCopy resolves the final destination path and handles overwrite logic.
// destPath is the raw destination path (may be a directory or file path).
// srcBaseName is the basename of the source file (used when dest is a directory or ends with slash).
// destHasSlash indicates if the original URI ended with a slash (forcing directory interpretation).
// Returns the resolved path ready for writing.
func prepareDestForCopy(destPath string, srcBaseName string, destHasSlash bool, overwrite bool) (string, error) {
destInfo, err := os.Stat(destPath)
if err != nil && !errors.Is(err, fs.ErrNotExist) {
return "", fmt.Errorf("cannot stat destination %q: %w", destPath, err)
}
var pathPrefix string
singleFile := !finfo.IsDir()
if !singleFile && srcHasSlash {
pathPrefix = cleanedPath
destExists := destInfo != nil
destIsDir := destExists && destInfo.IsDir()
var finalPath string
if destHasSlash || destIsDir {
finalPath = filepath.Join(destPath, srcBaseName)
} else {
pathPrefix = filepath.Dir(cleanedPath)
finalPath = destPath
}
timeout := fstype.DefaultTimeout
if opts.Timeout > 0 {
timeout = time.Duration(opts.Timeout) * time.Millisecond
finalInfo, err := os.Stat(finalPath)
if err != nil && !errors.Is(err, fs.ErrNotExist) {
return "", fmt.Errorf("cannot stat file %q: %w", finalPath, err)
}
readerCtx, cancel := context.WithTimeout(ctx, timeout)
rtn, writeHeader, fileWriter, tarClose := tarcopy.TarCopySrc(readerCtx, pathPrefix)
go func() {
defer func() {
tarClose()
cancel()
}()
walkFunc := func(path string, info fs.FileInfo, err error) error {
if readerCtx.Err() != nil {
return readerCtx.Err()
}
if err != nil {
return err
}
if err = writeHeader(info, path, singleFile); err != nil {
return err
}
// if not a dir, write file content
if !info.IsDir() {
data, err := os.Open(path)
if err != nil {
return err
}
defer utilfn.GracefulClose(data, "RemoteTarStreamCommand", path)
if _, err := io.Copy(fileWriter, data); err != nil {
return err
}
}
return nil
if finalInfo != nil {
if !overwrite {
return "", fmt.Errorf(wshfs.OverwriteRequiredError, finalPath)
}
log.Printf("RemoteTarStreamCommand: starting\n")
err = nil
if singleFile {
err = walkFunc(cleanedPath, finfo, nil)
} else {
err = filepath.Walk(cleanedPath, walkFunc)
if err := os.Remove(finalPath); err != nil {
return "", fmt.Errorf("cannot remove file %q: %w", finalPath, err)
}
if err != nil {
rtn <- wshutil.RespErr[iochantypes.Packet](err)
}
log.Printf("RemoteTarStreamCommand: done\n")
}()
log.Printf("RemoteTarStreamCommand: returning channel\n")
return rtn
}
return finalPath, nil
}
// remoteCopyFileInternal copies FROM local (this host) TO local (this host).
// Only supports copying regular files, not directories, and enforces
// RemoteFileTransferSizeLimit on the source size.
//
// srcUri/destUri are used only for error messages; srcPathCleaned and
// destPathCleaned are the already-expanded local paths. destHasSlash and
// overwrite are forwarded to prepareDestForCopy to resolve the final
// destination path and handle any existing file there.
func remoteCopyFileInternal(srcUri, destUri string, srcPathCleaned, destPathCleaned string, destHasSlash bool, overwrite bool) error {
	srcFileStat, err := os.Stat(srcPathCleaned)
	if err != nil {
		return fmt.Errorf("cannot stat file %q: %w", srcPathCleaned, err)
	}
	if srcFileStat.IsDir() {
		return fmt.Errorf("copying directories is not supported")
	}
	if srcFileStat.Size() > RemoteFileTransferSizeLimit {
		return fmt.Errorf("file %q size %d exceeds transfer limit of %d bytes", srcPathCleaned, srcFileStat.Size(), RemoteFileTransferSizeLimit)
	}
	destFilePath, err := prepareDestForCopy(destPathCleaned, filepath.Base(srcPathCleaned), destHasSlash, overwrite)
	if err != nil {
		return err
	}
	srcFile, err := os.Open(srcPathCleaned)
	if err != nil {
		return fmt.Errorf("cannot open file %q: %w", srcPathCleaned, err)
	}
	defer srcFile.Close()
	// source file mode is preserved on the new destination file
	destFile, err := os.OpenFile(destFilePath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, srcFileStat.Mode())
	if err != nil {
		return fmt.Errorf("cannot create file %q: %w", destFilePath, err)
	}
	_, copyErr := io.Copy(destFile, srcFile)
	// close explicitly (not via defer) and check the error -- a failed close
	// can mean buffered data never made it to disk, which would otherwise be
	// silently dropped and the copy reported as successful
	closeErr := destFile.Close()
	if copyErr != nil {
		return fmt.Errorf("cannot copy %q to %q: %w", srcUri, destUri, copyErr)
	}
	if closeErr != nil {
		return fmt.Errorf("cannot copy %q to %q: %w", srcUri, destUri, closeErr)
	}
	return nil
}
// RemoteFileCopyCommand copies a file FROM somewhere TO here
func (impl *ServerImpl) RemoteFileCopyCommand(ctx context.Context, data wshrpc.CommandFileCopyData) (bool, error) {
log.Printf("RemoteFileCopyCommand: src=%s, dest=%s\n", data.SrcUri, data.DestUri)
opts := data.Opts
if opts == nil {
opts = &wshrpc.FileCopyOpts{}
}
destUri := data.DestUri
srcUri := data.SrcUri
merge := opts.Merge
overwrite := opts.Overwrite
if overwrite && merge {
if opts.Overwrite && opts.Merge {
return false, fmt.Errorf("cannot specify both overwrite and merge")
}
destConn, err := connparse.ParseURIAndReplaceCurrentHost(ctx, destUri)
if opts.Recursive {
return false, fmt.Errorf("directory copying is not supported")
}
srcConn, err := connparse.ParseURIAndReplaceCurrentHost(ctx, data.SrcUri)
if err != nil {
return false, fmt.Errorf("cannot parse destination URI %q: %w", destUri, err)
return false, fmt.Errorf("cannot parse source URI %q: %w", data.SrcUri, err)
}
destConn, err := connparse.ParseURIAndReplaceCurrentHost(ctx, data.DestUri)
if err != nil {
return false, fmt.Errorf("cannot parse destination URI %q: %w", data.DestUri, err)
}
destPathCleaned := filepath.Clean(wavebase.ExpandHomeDirSafe(destConn.Path))
destinfo, err := os.Stat(destPathCleaned)
if err != nil {
if !errors.Is(err, fs.ErrNotExist) {
return false, fmt.Errorf("cannot stat destination %q: %w", destPathCleaned, err)
}
}
destHasSlash := strings.HasSuffix(data.DestUri, "/")
destExists := destinfo != nil
destIsDir := destExists && destinfo.IsDir()
destHasSlash := strings.HasSuffix(destUri, "/")
if destExists && !destIsDir {
if !overwrite {
return false, fmt.Errorf(fstype.OverwriteRequiredError, destPathCleaned)
} else {
err := os.Remove(destPathCleaned)
if err != nil {
return false, fmt.Errorf("cannot remove file %q: %w", destPathCleaned, err)
}
}
}
srcConn, err := connparse.ParseURIAndReplaceCurrentHost(ctx, srcUri)
if err != nil {
return false, fmt.Errorf("cannot parse source URI %q: %w", srcUri, err)
}
copyFileFunc := func(path string, finfo fs.FileInfo, srcFile io.Reader) (int64, error) {
nextinfo, err := os.Stat(path)
if err != nil && !errors.Is(err, fs.ErrNotExist) {
return 0, fmt.Errorf("cannot stat file %q: %w", path, err)
}
if nextinfo != nil {
if nextinfo.IsDir() {
if !finfo.IsDir() {
// try to create file in directory
path = filepath.Join(path, filepath.Base(finfo.Name()))
newdestinfo, err := os.Stat(path)
if err != nil && !errors.Is(err, fs.ErrNotExist) {
return 0, fmt.Errorf("cannot stat file %q: %w", path, err)
}
if newdestinfo != nil && !overwrite {
return 0, fmt.Errorf(fstype.OverwriteRequiredError, path)
}
} else if overwrite {
err := os.RemoveAll(path)
if err != nil {
return 0, fmt.Errorf("cannot remove directory %q: %w", path, err)
}
} else if !merge {
return 0, fmt.Errorf(fstype.MergeRequiredError, path)
}
} else {
if !overwrite {
return 0, fmt.Errorf(fstype.OverwriteRequiredError, path)
} else if finfo.IsDir() {
err := os.RemoveAll(path)
if err != nil {
return 0, fmt.Errorf("cannot remove directory %q: %w", path, err)
}
}
}
}
if finfo.IsDir() {
err := os.MkdirAll(path, finfo.Mode())
if err != nil {
return 0, fmt.Errorf("cannot create directory %q: %w", path, err)
}
return 0, nil
} else {
err := os.MkdirAll(filepath.Dir(path), 0755)
if err != nil {
return 0, fmt.Errorf("cannot create parent directory %q: %w", filepath.Dir(path), err)
}
}
file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, finfo.Mode())
if err != nil {
return 0, fmt.Errorf("cannot create new file %q: %w", path, err)
}
defer utilfn.GracefulClose(file, "RemoteFileCopyCommand", path)
_, err = io.Copy(file, srcFile)
if err != nil {
return 0, fmt.Errorf("cannot write file %q: %w", path, err)
}
return finfo.Size(), nil
}
srcIsDir := false
if srcConn.Host == destConn.Host {
srcPathCleaned := filepath.Clean(wavebase.ExpandHomeDirSafe(srcConn.Path))
srcFileStat, err := os.Stat(srcPathCleaned)
if err != nil {
return false, fmt.Errorf("cannot stat file %q: %w", srcPathCleaned, err)
}
if srcFileStat.IsDir() {
srcIsDir = true
var srcPathPrefix string
if destIsDir {
srcPathPrefix = filepath.Dir(srcPathCleaned)
} else {
srcPathPrefix = srcPathCleaned
}
err = filepath.Walk(srcPathCleaned, func(path string, info fs.FileInfo, err error) error {
if err != nil {
return err
}
srcFilePath := path
destFilePath := filepath.Join(destPathCleaned, strings.TrimPrefix(path, srcPathPrefix))
var file *os.File
if !info.IsDir() {
file, err = os.Open(srcFilePath)
if err != nil {
return fmt.Errorf("cannot open file %q: %w", srcFilePath, err)
}
defer utilfn.GracefulClose(file, "RemoteFileCopyCommand", srcFilePath)
}
_, err = copyFileFunc(destFilePath, info, file)
return err
})
if err != nil {
return false, fmt.Errorf("cannot copy %q to %q: %w", srcUri, destUri, err)
}
} else {
file, err := os.Open(srcPathCleaned)
if err != nil {
return false, fmt.Errorf("cannot open file %q: %w", srcPathCleaned, err)
}
defer utilfn.GracefulClose(file, "RemoteFileCopyCommand", srcPathCleaned)
var destFilePath string
if destHasSlash {
destFilePath = filepath.Join(destPathCleaned, filepath.Base(srcPathCleaned))
} else {
destFilePath = destPathCleaned
}
_, err = copyFileFunc(destFilePath, srcFileStat, file)
if err != nil {
return false, fmt.Errorf("cannot copy %q to %q: %w", srcUri, destUri, err)
}
}
} else {
timeout := fstype.DefaultTimeout
if opts.Timeout > 0 {
timeout = time.Duration(opts.Timeout) * time.Millisecond
}
readCtx, cancel := context.WithCancelCause(ctx)
readCtx, timeoutCancel := context.WithTimeoutCause(readCtx, timeout, fmt.Errorf("timeout copying file %q to %q", srcUri, destUri))
defer timeoutCancel()
copyStart := time.Now()
ioch := wshclient.FileStreamTarCommand(wshfs.RpcClient, wshrpc.CommandRemoteStreamTarData{Path: srcUri, Opts: opts}, &wshrpc.RpcOpts{Timeout: opts.Timeout})
numFiles := 0
numSkipped := 0
totalBytes := int64(0)
err := tarcopy.TarCopyDest(readCtx, cancel, ioch, func(next *tar.Header, reader *tar.Reader, singleFile bool) error {
numFiles++
nextpath := filepath.Join(destPathCleaned, next.Name)
srcIsDir = !singleFile
if singleFile && !destHasSlash {
// custom flag to indicate that the source is a single file, not a directory the contents of a directory
nextpath = destPathCleaned
}
finfo := next.FileInfo()
n, err := copyFileFunc(nextpath, finfo, reader)
if err != nil {
return fmt.Errorf("cannot copy file %q: %w", next.Name, err)
}
totalBytes += n
return nil
})
if err != nil {
return false, fmt.Errorf("cannot copy %q to %q: %w", srcUri, destUri, err)
}
totalTime := time.Since(copyStart).Seconds()
totalMegaBytes := float64(totalBytes) / 1024 / 1024
rate := float64(0)
if totalTime > 0 {
rate = totalMegaBytes / totalTime
}
log.Printf("RemoteFileCopyCommand: done; %d files copied in %.3fs, total of %.4f MB, %.2f MB/s, %d files skipped\n", numFiles, totalTime, totalMegaBytes, rate, numSkipped)
err := remoteCopyFileInternal(data.SrcUri, data.DestUri, srcPathCleaned, destPathCleaned, destHasSlash, opts.Overwrite)
return false, err
}
return srcIsDir, nil
// FROM external TO here - only supports single file copying
timeout := wshfs.DefaultTimeout
if opts.Timeout > 0 {
timeout = time.Duration(opts.Timeout) * time.Millisecond
}
readCtx, timeoutCancel := context.WithTimeoutCause(ctx, timeout, fmt.Errorf("timeout copying file %q to %q", data.SrcUri, data.DestUri))
defer timeoutCancel()
copyStart := time.Now()
srcFileInfo, err := wshclient.RemoteFileInfoCommand(wshfs.RpcClient, srcConn.Path, &wshrpc.RpcOpts{Timeout: opts.Timeout, Route: wshutil.MakeConnectionRouteId(srcConn.Host)})
if err != nil {
return false, fmt.Errorf("cannot get info for source file %q: %w", data.SrcUri, err)
}
if srcFileInfo.IsDir {
return false, fmt.Errorf("copying directories is not supported")
}
if srcFileInfo.Size > RemoteFileTransferSizeLimit {
return false, fmt.Errorf("file %q size %d exceeds transfer limit of %d bytes", data.SrcUri, srcFileInfo.Size, RemoteFileTransferSizeLimit)
}
destFilePath, err := prepareDestForCopy(destPathCleaned, filepath.Base(srcConn.Path), destHasSlash, opts.Overwrite)
if err != nil {
return false, err
}
destFile, err := os.OpenFile(destFilePath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, srcFileInfo.Mode)
if err != nil {
return false, fmt.Errorf("cannot create destination file %q: %w", destFilePath, err)
}
defer destFile.Close()
streamChan := wshclient.RemoteStreamFileCommand(wshfs.RpcClient, wshrpc.CommandRemoteStreamFileData{Path: srcConn.Path}, &wshrpc.RpcOpts{Timeout: opts.Timeout, Route: wshutil.MakeConnectionRouteId(srcConn.Host)})
if err = fsutil.ReadFileStreamToWriter(readCtx, streamChan, destFile); err != nil {
return false, fmt.Errorf("error copying file %q to %q: %w", data.SrcUri, data.DestUri, err)
}
totalTime := time.Since(copyStart).Seconds()
totalMegaBytes := float64(srcFileInfo.Size) / 1024 / 1024
rate := float64(0)
if totalTime > 0 {
rate = totalMegaBytes / totalTime
}
log.Printf("RemoteFileCopyCommand: done; 1 file copied in %.3fs, total of %.4f MB, %.2f MB/s\n", totalTime, totalMegaBytes, rate)
return false, nil
}
func (impl *ServerImpl) RemoteListEntriesCommand(ctx context.Context, data wshrpc.CommandRemoteListEntriesData) chan wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData] {
@ -485,6 +356,10 @@ func (impl *ServerImpl) RemoteListEntriesCommand(ctx context.Context, data wshrp
data.Opts.Limit = wshrpc.MaxDirSize
}
if data.Opts.All {
if DisableRecursiveFileOpts {
ch <- wshutil.RespErr[wshrpc.CommandRemoteListEntriesRtnData](fmt.Errorf("recursive directory listings are not supported"))
return
}
fs.WalkDir(os.DirFS(path), ".", func(path string, d fs.DirEntry, err error) error {
defer func() {
seen++
@ -656,52 +531,35 @@ func (impl *ServerImpl) RemoteFileTouchCommand(ctx context.Context, path string)
}
func (impl *ServerImpl) RemoteFileMoveCommand(ctx context.Context, data wshrpc.CommandFileCopyData) error {
opts := data.Opts
destUri := data.DestUri
srcUri := data.SrcUri
overwrite := opts != nil && opts.Overwrite
recursive := opts != nil && opts.Recursive
destConn, err := connparse.ParseURIAndReplaceCurrentHost(ctx, destUri)
if err != nil {
return fmt.Errorf("cannot parse destination URI %q: %w", srcUri, err)
}
destPathCleaned := filepath.Clean(wavebase.ExpandHomeDirSafe(destConn.Path))
destinfo, err := os.Stat(destPathCleaned)
_, err = os.Stat(destPathCleaned)
if err == nil {
if !destinfo.IsDir() {
if !overwrite {
return fmt.Errorf("destination %q already exists, use overwrite option", destUri)
} else {
err := os.Remove(destPathCleaned)
if err != nil {
return fmt.Errorf("cannot remove file %q: %w", destUri, err)
}
}
}
return fmt.Errorf("destination %q already exists", destUri)
} else if !errors.Is(err, fs.ErrNotExist) {
return fmt.Errorf("cannot stat destination %q: %w", destUri, err)
}
srcConn, err := connparse.ParseURIAndReplaceCurrentHost(ctx, srcUri)
if err != nil {
return fmt.Errorf("cannot parse source URI %q: %w", srcUri, err)
}
if srcConn.Host == destConn.Host {
srcPathCleaned := filepath.Clean(wavebase.ExpandHomeDirSafe(srcConn.Path))
finfo, err := os.Stat(srcPathCleaned)
if err != nil {
return fmt.Errorf("cannot stat file %q: %w", srcPathCleaned, err)
}
if finfo.IsDir() && !recursive {
return fmt.Errorf(fstype.RecursiveRequiredError)
}
err = os.Rename(srcPathCleaned, destPathCleaned)
if err != nil {
return fmt.Errorf("cannot move file %q to %q: %w", srcPathCleaned, destPathCleaned, err)
}
} else {
if srcConn.Host != destConn.Host {
return fmt.Errorf("cannot move file %q to %q: different hosts", srcUri, destUri)
}
srcPathCleaned := filepath.Clean(wavebase.ExpandHomeDirSafe(srcConn.Path))
err = os.Rename(srcPathCleaned, destPathCleaned)
if err != nil {
return fmt.Errorf("cannot move file %q to %q: %w", srcPathCleaned, destPathCleaned, err)
}
return nil
}
@ -719,6 +577,7 @@ func (impl *ServerImpl) RemoteMkdirCommand(ctx context.Context, path string) err
}
return nil
}
func (*ServerImpl) RemoteWriteFileCommand(ctx context.Context, data wshrpc.FileData) error {
var truncate, append bool
var atOffset int64
@ -755,6 +614,9 @@ func (*ServerImpl) RemoteWriteFileCommand(ctx context.Context, data wshrpc.FileD
}
fileSize := int64(0)
if finfo != nil {
if finfo.IsDir() {
return fmt.Errorf("cannot use write file to overwrite a directory %q", path)
}
fileSize = finfo.Size()
}
if atOffset > fileSize {
@ -791,20 +653,21 @@ func (*ServerImpl) RemoteFileDeleteCommand(ctx context.Context, data wshrpc.Comm
}
cleanedPath := filepath.Clean(expandedPath)
if data.Recursive {
err = os.RemoveAll(cleanedPath)
if err != nil {
return fmt.Errorf("cannot delete %q: %w", data.Path, err)
}
return nil
}
err = os.Remove(cleanedPath)
if err != nil {
finfo, _ := os.Stat(cleanedPath)
if finfo != nil && finfo.IsDir() {
if !data.Recursive {
return fmt.Errorf(fstype.RecursiveRequiredError)
}
err = os.RemoveAll(cleanedPath)
if err != nil {
return fmt.Errorf("cannot delete directory %q: %w", data.Path, err)
}
} else {
return fmt.Errorf("cannot delete file %q: %w", data.Path, err)
finfo, statErr := os.Stat(cleanedPath)
if statErr == nil && finfo.IsDir() {
return fmt.Errorf(wshfs.RecursiveRequiredError)
}
return fmt.Errorf("cannot delete file %q: %w", data.Path, err)
}
return nil
}

View file

@ -36,8 +36,9 @@ type WshRpcInterface interface {
AuthenticateJobManagerCommand(ctx context.Context, data CommandAuthenticateJobManagerData) error
AuthenticateJobManagerVerifyCommand(ctx context.Context, data CommandAuthenticateJobManagerData) error // (special) validates job auth token without binding, root router only
DisposeCommand(ctx context.Context, data CommandDisposeData) error
RouteAnnounceCommand(ctx context.Context) error // (special) announces a new route to the main router
RouteUnannounceCommand(ctx context.Context) error // (special) unannounces a route to the main router
RouteAnnounceCommand(ctx context.Context) error // (special) announces a new route to the main router
RouteUnannounceCommand(ctx context.Context) error // (special) unannounces a route to the main router
ControlGetRouteIdCommand(ctx context.Context) (string, error) // (special) gets the route for the link that we're on
SetPeerInfoCommand(ctx context.Context, peerInfo string) error
GetJwtPublicKeyCommand(ctx context.Context) (string, error) // (special) gets the public JWT signing key
@ -79,6 +80,7 @@ type WshRpcInterface interface {
ActivityCommand(ctx context.Context, data ActivityUpdate) error
RecordTEventCommand(ctx context.Context, data telemetrydata.TEvent) error
GetVarCommand(ctx context.Context, data CommandVarData) (*CommandVarResponseData, error)
GetAllVarsCommand(ctx context.Context, data CommandVarData) ([]CommandVarResponseData, error)
SetVarCommand(ctx context.Context, data CommandVarData) error
PathCommand(ctx context.Context, data PathCommandData) (string, error)
SendTelemetryCommand(ctx context.Context) error
@ -94,7 +96,6 @@ type WshRpcInterface interface {
ConnConnectCommand(ctx context.Context, connRequest ConnRequest) error
ConnDisconnectCommand(ctx context.Context, connName string) error
ConnListCommand(ctx context.Context) ([]string, error)
ConnListAWSCommand(ctx context.Context) ([]string, error)
WslListCommand(ctx context.Context) ([]string, error)
WslDefaultDistroCommand(ctx context.Context) (string, error)
DismissWshFailCommand(ctx context.Context, connName string) error
@ -158,6 +159,7 @@ type WshRpcInterface interface {
// file
WshRpcFileInterface
WaveFileReadStreamCommand(ctx context.Context, data CommandWaveFileReadStreamData) (*WaveFileInfo, error)
// builder
WshRpcBuilderInterface
@ -459,11 +461,11 @@ type CommandWebSelectorData struct {
}
type BlockInfoData struct {
BlockId string `json:"blockid"`
TabId string `json:"tabid"`
WorkspaceId string `json:"workspaceid"`
Block *waveobj.Block `json:"block"`
Files []*FileInfo `json:"files"`
BlockId string `json:"blockid"`
TabId string `json:"tabid"`
WorkspaceId string `json:"workspaceid"`
Block *waveobj.Block `json:"block"`
Files []*WaveFileInfo `json:"files"`
}
type WaveNotificationOptions struct {
@ -821,3 +823,20 @@ type CommandJobControllerAttachJobData struct {
JobId string `json:"jobid"`
BlockId string `json:"blockid"`
}
// CommandWaveFileReadStreamData identifies the wavefile to stream -- by zone
// id and file name -- plus the stream metadata used to deliver its contents
// back to the caller.
type CommandWaveFileReadStreamData struct {
	ZoneId     string     `json:"zoneid"`
	Name       string     `json:"name"`
	StreamMeta StreamMeta `json:"streammeta"`
}
// WaveFileInfo is the RPC-facing metadata for a wavefile.
// see blockstore.go (WaveFile)
type WaveFileInfo struct {
	ZoneId    string   `json:"zoneid"` // id of the zone (block) that owns the file
	Name      string   `json:"name"`
	Opts      FileOpts `json:"opts"`
	CreatedTs int64    `json:"createdts"` // creation timestamp (epoch-based; exact units per blockstore.go -- TODO confirm)
	Size      int64    `json:"size"`      // file size in bytes
	ModTs     int64    `json:"modts"` // last-modified timestamp (same units as CreatedTs)
	Meta      FileMeta `json:"meta"`
}

View file

@ -9,7 +9,6 @@ import (
"os"
"github.com/wavetermdev/waveterm/pkg/ijson"
"github.com/wavetermdev/waveterm/pkg/util/iochan/iochantypes"
)
type WshRpcFileInterface interface {
@ -17,23 +16,19 @@ type WshRpcFileInterface interface {
FileCreateCommand(ctx context.Context, data FileData) error
FileDeleteCommand(ctx context.Context, data CommandDeleteFileData) error
FileAppendCommand(ctx context.Context, data FileData) error
FileAppendIJsonCommand(ctx context.Context, data CommandAppendIJsonData) error
FileWriteCommand(ctx context.Context, data FileData) error
FileReadCommand(ctx context.Context, data FileData) (*FileData, error)
FileReadStreamCommand(ctx context.Context, data FileData) <-chan RespOrErrorUnion[FileData]
FileStreamTarCommand(ctx context.Context, data CommandRemoteStreamTarData) <-chan RespOrErrorUnion[iochantypes.Packet]
FileMoveCommand(ctx context.Context, data CommandFileCopyData) error
FileCopyCommand(ctx context.Context, data CommandFileCopyData) error
FileInfoCommand(ctx context.Context, data FileData) (*FileInfo, error)
FileListCommand(ctx context.Context, data FileListData) ([]*FileInfo, error)
FileJoinCommand(ctx context.Context, paths []string) (*FileInfo, error)
FileListStreamCommand(ctx context.Context, data FileListData) <-chan RespOrErrorUnion[CommandRemoteListEntriesRtnData]
FileShareCapabilityCommand(ctx context.Context, path string) (FileShareCapability, error)
}
type WshRpcRemoteFileInterface interface {
RemoteStreamFileCommand(ctx context.Context, data CommandRemoteStreamFileData) chan RespOrErrorUnion[FileData]
RemoteTarStreamCommand(ctx context.Context, data CommandRemoteStreamTarData) <-chan RespOrErrorUnion[iochantypes.Packet]
RemoteFileCopyCommand(ctx context.Context, data CommandFileCopyData) (bool, error)
RemoteListEntriesCommand(ctx context.Context, data CommandRemoteListEntriesData) chan RespOrErrorUnion[CommandRemoteListEntriesRtnData]
RemoteFileInfoCommand(ctx context.Context, path string) (*FileInfo, error)
@ -121,11 +116,6 @@ type CommandFileCopyData struct {
Opts *FileCopyOpts `json:"opts,omitempty"`
}
type CommandRemoteStreamTarData struct {
Path string `json:"path"`
Opts *FileCopyOpts `json:"opts,omitempty"`
}
type FileCopyOpts struct {
Overwrite bool `json:"overwrite,omitempty"`
Recursive bool `json:"recursive,omitempty"` // only used for move, always true for copy
@ -146,8 +136,3 @@ type CommandRemoteListEntriesData struct {
type CommandRemoteListEntriesRtnData struct {
FileInfo []*FileInfo `json:"fileinfo,omitempty"`
}
type FileShareCapability struct {
CanAppend bool `json:"canappend"`
CanMkdir bool `json:"canmkdir"`
}

View file

@ -16,6 +16,7 @@ import (
"os"
"path/filepath"
"regexp"
"sort"
"strings"
"time"
@ -32,19 +33,15 @@ import (
"github.com/wavetermdev/waveterm/pkg/jobcontroller"
"github.com/wavetermdev/waveterm/pkg/panichandler"
"github.com/wavetermdev/waveterm/pkg/remote"
"github.com/wavetermdev/waveterm/pkg/remote/awsconn"
"github.com/wavetermdev/waveterm/pkg/remote/conncontroller"
"github.com/wavetermdev/waveterm/pkg/remote/fileshare"
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/wshfs"
"github.com/wavetermdev/waveterm/pkg/secretstore"
"github.com/wavetermdev/waveterm/pkg/suggestion"
"github.com/wavetermdev/waveterm/pkg/telemetry"
"github.com/wavetermdev/waveterm/pkg/telemetry/telemetrydata"
"github.com/wavetermdev/waveterm/pkg/util/envutil"
"github.com/wavetermdev/waveterm/pkg/util/iochan/iochantypes"
"github.com/wavetermdev/waveterm/pkg/util/iterfn"
"github.com/wavetermdev/waveterm/pkg/util/shellutil"
"github.com/wavetermdev/waveterm/pkg/util/utilfn"
"github.com/wavetermdev/waveterm/pkg/util/wavefileutil"
"github.com/wavetermdev/waveterm/pkg/waveai"
"github.com/wavetermdev/waveterm/pkg/waveappstore"
"github.com/wavetermdev/waveterm/pkg/waveapputil"
@ -325,7 +322,7 @@ func (ws *WshServer) ControllerAppendOutputCommand(ctx context.Context, data wsh
func (ws *WshServer) FileCreateCommand(ctx context.Context, data wshrpc.FileData) error {
data.Data64 = ""
err := fileshare.PutFile(ctx, data)
err := wshfs.PutFile(ctx, data)
if err != nil {
return fmt.Errorf("error creating file: %w", err)
}
@ -333,76 +330,47 @@ func (ws *WshServer) FileCreateCommand(ctx context.Context, data wshrpc.FileData
}
func (ws *WshServer) FileMkdirCommand(ctx context.Context, data wshrpc.FileData) error {
return fileshare.Mkdir(ctx, data.Info.Path)
return wshfs.Mkdir(ctx, data.Info.Path)
}
func (ws *WshServer) FileDeleteCommand(ctx context.Context, data wshrpc.CommandDeleteFileData) error {
return fileshare.Delete(ctx, data)
return wshfs.Delete(ctx, data)
}
func (ws *WshServer) FileInfoCommand(ctx context.Context, data wshrpc.FileData) (*wshrpc.FileInfo, error) {
return fileshare.Stat(ctx, data.Info.Path)
return wshfs.Stat(ctx, data.Info.Path)
}
func (ws *WshServer) FileListCommand(ctx context.Context, data wshrpc.FileListData) ([]*wshrpc.FileInfo, error) {
return fileshare.ListEntries(ctx, data.Path, data.Opts)
return wshfs.ListEntries(ctx, data.Path, data.Opts)
}
func (ws *WshServer) FileListStreamCommand(ctx context.Context, data wshrpc.FileListData) <-chan wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData] {
return fileshare.ListEntriesStream(ctx, data.Path, data.Opts)
return wshfs.ListEntriesStream(ctx, data.Path, data.Opts)
}
func (ws *WshServer) FileWriteCommand(ctx context.Context, data wshrpc.FileData) error {
return fileshare.PutFile(ctx, data)
return wshfs.PutFile(ctx, data)
}
func (ws *WshServer) FileReadCommand(ctx context.Context, data wshrpc.FileData) (*wshrpc.FileData, error) {
return fileshare.Read(ctx, data)
return wshfs.Read(ctx, data)
}
func (ws *WshServer) FileReadStreamCommand(ctx context.Context, data wshrpc.FileData) <-chan wshrpc.RespOrErrorUnion[wshrpc.FileData] {
return fileshare.ReadStream(ctx, data)
return wshfs.ReadStream(ctx, data)
}
func (ws *WshServer) FileCopyCommand(ctx context.Context, data wshrpc.CommandFileCopyData) error {
return fileshare.Copy(ctx, data)
return wshfs.Copy(ctx, data)
}
func (ws *WshServer) FileMoveCommand(ctx context.Context, data wshrpc.CommandFileCopyData) error {
return fileshare.Move(ctx, data)
}
func (ws *WshServer) FileStreamTarCommand(ctx context.Context, data wshrpc.CommandRemoteStreamTarData) <-chan wshrpc.RespOrErrorUnion[iochantypes.Packet] {
return fileshare.ReadTarStream(ctx, data)
return wshfs.Move(ctx, data)
}
func (ws *WshServer) FileAppendCommand(ctx context.Context, data wshrpc.FileData) error {
return fileshare.Append(ctx, data)
}
func (ws *WshServer) FileAppendIJsonCommand(ctx context.Context, data wshrpc.CommandAppendIJsonData) error {
tryCreate := true
if data.FileName == wavebase.BlockFile_VDom && tryCreate {
err := filestore.WFS.MakeFile(ctx, data.ZoneId, data.FileName, nil, wshrpc.FileOpts{MaxSize: blockcontroller.DefaultHtmlMaxFileSize, IJson: true})
if err != nil && err != fs.ErrExist {
return fmt.Errorf("error creating blockfile[vdom]: %w", err)
}
}
err := filestore.WFS.AppendIJson(ctx, data.ZoneId, data.FileName, data.Data)
if err != nil {
return fmt.Errorf("error appending to blockfile(ijson): %w", err)
}
wps.Broker.Publish(wps.WaveEvent{
Event: wps.Event_BlockFile,
Scopes: []string{waveobj.MakeORef(waveobj.OType_Block, data.ZoneId).String()},
Data: &wps.WSFileEventData{
ZoneId: data.ZoneId,
FileName: data.FileName,
FileOp: wps.FileOp_Append,
Data64: base64.StdEncoding.EncodeToString([]byte("{}")),
},
})
return nil
return wshfs.Append(ctx, data)
}
func (ws *WshServer) FileJoinCommand(ctx context.Context, paths []string) (*wshrpc.FileInfo, error) {
@ -410,13 +378,9 @@ func (ws *WshServer) FileJoinCommand(ctx context.Context, paths []string) (*wshr
if len(paths) == 0 {
return nil, fmt.Errorf("no paths provided")
}
return fileshare.Stat(ctx, paths[0])
return wshfs.Stat(ctx, paths[0])
}
return fileshare.Join(ctx, paths[0], paths[1:]...)
}
func (ws *WshServer) FileShareCapabilityCommand(ctx context.Context, path string) (wshrpc.FileShareCapability, error) {
return fileshare.GetCapability(ctx, path)
return wshfs.Join(ctx, paths[0], paths[1:]...)
}
func (ws *WshServer) FileRestoreBackupCommand(ctx context.Context, data wshrpc.CommandFileRestoreBackupData) error {
@ -606,15 +570,6 @@ func termCtxWithLogBlockId(ctx context.Context, logBlockId string) context.Conte
}
func (ws *WshServer) ConnEnsureCommand(ctx context.Context, data wshrpc.ConnExtData) error {
// TODO: if we add proper wsh connections via aws, we'll need to handle that here
if strings.HasPrefix(data.ConnName, "aws:") {
profiles := awsconn.ParseProfiles()
for profile := range profiles {
if strings.HasPrefix(data.ConnName, profile) {
return nil
}
}
}
ctx = genconn.ContextWithConnData(ctx, data.LogBlockId)
ctx = termCtxWithLogBlockId(ctx, data.LogBlockId)
if strings.HasPrefix(data.ConnName, "wsl://") {
@ -625,10 +580,6 @@ func (ws *WshServer) ConnEnsureCommand(ctx context.Context, data wshrpc.ConnExtD
}
func (ws *WshServer) ConnDisconnectCommand(ctx context.Context, connName string) error {
// TODO: if we add proper wsh connections via aws, we'll need to handle that here
if strings.HasPrefix(connName, "aws:") {
return nil
}
if conncontroller.IsLocalConnName(connName) {
return nil
}
@ -652,10 +603,6 @@ func (ws *WshServer) ConnDisconnectCommand(ctx context.Context, connName string)
}
func (ws *WshServer) ConnConnectCommand(ctx context.Context, connRequest wshrpc.ConnRequest) error {
// TODO: if we add proper wsh connections via aws, we'll need to handle that here
if strings.HasPrefix(connRequest.Host, "aws:") {
return nil
}
if conncontroller.IsLocalConnName(connRequest.Host) {
return nil
}
@ -682,10 +629,6 @@ func (ws *WshServer) ConnConnectCommand(ctx context.Context, connRequest wshrpc.
}
func (ws *WshServer) ConnReinstallWshCommand(ctx context.Context, data wshrpc.ConnExtData) error {
// TODO: if we add proper wsh connections via aws, we'll need to handle that here
if strings.HasPrefix(data.ConnName, "aws:") {
return nil
}
if conncontroller.IsLocalConnName(data.ConnName) {
return nil
}
@ -758,11 +701,6 @@ func (ws *WshServer) ConnListCommand(ctx context.Context) ([]string, error) {
return conncontroller.GetConnectionsList()
}
func (ws *WshServer) ConnListAWSCommand(ctx context.Context) ([]string, error) {
profilesMap := awsconn.ParseProfiles()
return iterfn.MapKeysToSorted(profilesMap), nil
}
func (ws *WshServer) WslListCommand(ctx context.Context) ([]string, error) {
distros, err := wsl.RegisteredDistros(ctx)
if err != nil {
@ -822,6 +760,18 @@ func (ws *WshServer) FindGitBashCommand(ctx context.Context, rescan bool) (strin
return shellutil.FindGitBash(&fullConfig, rescan), nil
}
// waveFileToWaveFileInfo converts the filestore representation of a wavefile
// into the wshrpc wire type, copying every metadata field one-for-one.
func waveFileToWaveFileInfo(wf *filestore.WaveFile) *wshrpc.WaveFileInfo {
	var info wshrpc.WaveFileInfo
	info.ZoneId = wf.ZoneId
	info.Name = wf.Name
	info.Opts = wf.Opts
	info.CreatedTs = wf.CreatedTs
	info.Size = wf.Size
	info.ModTs = wf.ModTs
	info.Meta = wf.Meta
	return &info
}
func (ws *WshServer) BlockInfoCommand(ctx context.Context, blockId string) (*wshrpc.BlockInfoData, error) {
blockData, err := wstore.DBMustGet[*waveobj.Block](ctx, blockId)
if err != nil {
@ -839,7 +789,10 @@ func (ws *WshServer) BlockInfoCommand(ctx context.Context, blockId string) (*wsh
if err != nil {
return nil, fmt.Errorf("error listing blockfiles: %w", err)
}
fileInfoList := wavefileutil.WaveFileListToFileInfoList(fileList)
var fileInfoList []*wshrpc.WaveFileInfo
for _, wf := range fileList {
fileInfoList = append(fileInfoList, waveFileToWaveFileInfo(wf))
}
return &wshrpc.BlockInfoData{
BlockId: blockId,
TabId: tabId,
@ -1014,6 +967,59 @@ func (ws *WshServer) WriteAppFileCommand(ctx context.Context, data wshrpc.Comman
return waveappstore.WriteAppFile(data.AppId, data.FileName, contents)
}
// WaveFileReadStreamCommand streams the contents of a wavefile to the caller
// over a broker stream. It returns the file's metadata immediately; the file
// data itself is written to the stream writer from a background goroutine.
// Files larger than maxStreamFileSize are rejected up front.
func (ws *WshServer) WaveFileReadStreamCommand(ctx context.Context, data wshrpc.CommandWaveFileReadStreamData) (*wshrpc.WaveFileInfo, error) {
	// hard cap so a single stream cannot pin large amounts of memory
	const maxStreamFileSize = 5 * 1024 * 1024
	waveFile, err := filestore.WFS.Stat(ctx, data.ZoneId, data.Name)
	if err != nil {
		return nil, fmt.Errorf("error statting wavefile: %w", err)
	}
	dataLength := waveFile.DataLength()
	if dataLength > maxStreamFileSize {
		return nil, fmt.Errorf("file size %d exceeds maximum streaming size of %d bytes", dataLength, maxStreamFileSize)
	}
	wshRpc := wshutil.GetWshRpcFromContext(ctx)
	if wshRpc == nil || wshRpc.StreamBroker == nil {
		return nil, fmt.Errorf("no stream broker available")
	}
	writer, err := wshRpc.StreamBroker.CreateStreamWriter(&data.StreamMeta)
	if err != nil {
		return nil, fmt.Errorf("error creating stream writer: %w", err)
	}
	_, fileData, err := filestore.WFS.ReadFile(ctx, data.ZoneId, data.Name)
	if err != nil {
		// the stream was already opened; close it so the reader sees teardown
		writer.Close()
		return nil, fmt.Errorf("error reading wavefile: %w", err)
	}
	go func() {
		defer func() {
			panichandler.PanicHandler("WaveFileReadStreamCommand", recover())
		}()
		defer writer.Close()
		_, err := writer.Write(fileData)
		if err != nil {
			log.Printf("error writing to stream for wavefile %s:%s: %v\n", data.ZoneId, data.Name, err)
		}
	}()
	// use the shared converter instead of duplicating the field mapping here
	return waveFileToWaveFileInfo(waveFile), nil
}
func (ws *WshServer) WriteAppGoFileCommand(ctx context.Context, data wshrpc.CommandWriteAppGoFileData) (*wshrpc.CommandWriteAppGoFileRtnData, error) {
if data.AppId == "" {
return nil, fmt.Errorf("must provide an appId to WriteAppGoFileCommand")
@ -1292,6 +1298,31 @@ func (ws *WshServer) GetVarCommand(ctx context.Context, data wshrpc.CommandVarDa
return &wshrpc.CommandVarResponseData{Key: data.Key, Exists: ok, Val: value}, nil
}
// GetAllVarsCommand reads the env-style blockfile for the given zone and
// returns every key/value pair it contains, sorted by key. A missing
// blockfile is not an error -- it simply means no vars exist yet.
func (ws *WshServer) GetAllVarsCommand(ctx context.Context, data wshrpc.CommandVarData) ([]wshrpc.CommandVarResponseData, error) {
	_, fileData, err := filestore.WFS.ReadFile(ctx, data.ZoneId, data.FileName)
	if err == fs.ErrNotExist {
		return []wshrpc.CommandVarResponseData{}, nil
	}
	if err != nil {
		return nil, fmt.Errorf("error reading blockfile: %w", err)
	}
	envMap := envutil.EnvToMap(string(fileData))
	sortedKeys := make([]string, 0, len(envMap))
	for key := range envMap {
		sortedKeys = append(sortedKeys, key)
	}
	sort.Strings(sortedKeys)
	rtn := make([]wshrpc.CommandVarResponseData, 0, len(sortedKeys))
	for _, key := range sortedKeys {
		rtn = append(rtn, wshrpc.CommandVarResponseData{Key: key, Val: envMap[key], Exists: true})
	}
	return rtn, nil
}
func (ws *WshServer) SetVarCommand(ctx context.Context, data wshrpc.CommandVarData) error {
_, fileData, err := filestore.WFS.ReadFile(ctx, data.ZoneId, data.FileName)
if err == fs.ErrNotExist {

View file

@ -54,6 +54,22 @@ func (impl *WshRouterControlImpl) RouteUnannounceCommand(ctx context.Context) er
return impl.Router.unbindRoute(linkId, source)
}
// ControlGetRouteIdCommand reports the source route id of the link the
// request arrived on. It returns an empty string (with no error) whenever the
// response handler, ingress link, or link metadata is unavailable.
func (impl *WshRouterControlImpl) ControlGetRouteIdCommand(ctx context.Context) (string, error) {
	handler := GetRpcResponseHandlerFromContext(ctx)
	if handler == nil {
		return "", nil
	}
	linkId := handler.GetIngressLinkId()
	if linkId == baseds.NoLinkId {
		return "", nil
	}
	if lm := impl.Router.getLinkMeta(linkId); lm != nil {
		return lm.sourceRouteId, nil
	}
	return "", nil
}
func (impl *WshRouterControlImpl) SetPeerInfoCommand(ctx context.Context, peerInfo string) error {
source := GetRpcSourceFromContext(ctx)
linkId := impl.Router.GetLinkIdForRoute(source)