wsh file overhaul without cross-remote copy and S3 (#1790)

Evan Simkowitz 2025-01-22 14:50:09 -08:00 committed by GitHub
parent a5fa098d8b
commit 11fec5695c
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
61 changed files with 3428 additions and 1212 deletions

View File

@ -26,6 +26,7 @@ func GenerateWshClient() error {
gogen.GenerateBoilerplate(&buf, "wshclient", []string{
"github.com/wavetermdev/waveterm/pkg/wshutil",
"github.com/wavetermdev/waveterm/pkg/wshrpc",
"github.com/wavetermdev/waveterm/pkg/wconfig",
"github.com/wavetermdev/waveterm/pkg/waveobj",
"github.com/wavetermdev/waveterm/pkg/wps",
"github.com/wavetermdev/waveterm/pkg/vdom",

View File

@ -21,6 +21,7 @@ import (
"github.com/wavetermdev/waveterm/pkg/filestore"
"github.com/wavetermdev/waveterm/pkg/panichandler"
"github.com/wavetermdev/waveterm/pkg/remote/conncontroller"
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/wshfs"
"github.com/wavetermdev/waveterm/pkg/service"
"github.com/wavetermdev/waveterm/pkg/telemetry"
"github.com/wavetermdev/waveterm/pkg/util/shellutil"
@ -175,6 +176,7 @@ func shutdownActivityUpdate() {
func createMainWshClient() {
rpc := wshserver.GetMainRpcClient()
wshfs.RpcClient = rpc
wshutil.DefaultRouter.RegisterRoute(wshutil.DefaultRoute, rpc, true)
wps.Broker.SetClient(wshutil.DefaultRouter)
localConnWsh := wshutil.MakeWshRpc(nil, nil, wshrpc.RpcContext{Conn: wshrpc.LocalConnName}, &wshremote.ServerImpl{})

View File

@ -18,6 +18,7 @@ import (
"github.com/spf13/cobra"
"github.com/wavetermdev/waveterm/pkg/panichandler"
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/wshfs"
"github.com/wavetermdev/waveterm/pkg/util/packetparser"
"github.com/wavetermdev/waveterm/pkg/wavebase"
"github.com/wavetermdev/waveterm/pkg/wshrpc"
@ -180,6 +181,7 @@ func serverRunRouter(jwtToken string) error {
if err != nil {
return fmt.Errorf("error setting up connserver rpc client: %v", err)
}
wshfs.RpcClient = client
go runListener(unixListener, router)
// run the sysinfo loop
wshremote.RunSysInfoLoop(client, client.GetRpcContext().Conn)
@ -224,6 +226,7 @@ func serverRunNormal(jwtToken string) error {
if err != nil {
return err
}
wshfs.RpcClient = RpcClient
WriteStdout("running wsh connserver (%s)\n", RpcContext.Conn)
go wshremote.RunSysInfoLoop(RpcClient, RpcContext.Conn)
select {} // run forever
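The wshfs.RpcClient assignments added in this commit (in wavesrv's createMainWshClient and in both connserver modes above) share one purpose: the new fileshare layer appears to route its file RPCs through that package-level client, so it must be wired up before the process starts serving requests. A sketch of the ordering only, where buildRpcClient is a hypothetical stand-in for whatever client the hosting process already constructs:

// Sketch only, not from the diff.
client := buildRpcClient()           // hypothetical; each process builds its own RPC client
wshfs.RpcClient = client             // wsh file operations are served through this client
go runListener(unixListener, router) // only then begin handling incoming RPC traffic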

View File

@ -10,6 +10,9 @@ import (
"io/fs"
"strings"
"github.com/wavetermdev/waveterm/pkg/remote/connparse"
"github.com/wavetermdev/waveterm/pkg/util/fileutil"
"github.com/wavetermdev/waveterm/pkg/util/wavefileutil"
"github.com/wavetermdev/waveterm/pkg/wshrpc"
"github.com/wavetermdev/waveterm/pkg/wshrpc/wshclient"
)
@ -24,15 +27,11 @@ func convertNotFoundErr(err error) error {
return err
}
func ensureWaveFile(origName string, fileData wshrpc.CommandFileData) (*wshrpc.WaveFileInfo, error) {
func ensureFile(origName string, fileData wshrpc.FileData) (*wshrpc.FileInfo, error) {
info, err := wshclient.FileInfoCommand(RpcClient, fileData, &wshrpc.RpcOpts{Timeout: DefaultFileTimeout})
err = convertNotFoundErr(err)
if err == fs.ErrNotExist {
createData := wshrpc.CommandFileCreateData{
ZoneId: fileData.ZoneId,
FileName: fileData.FileName,
}
err = wshclient.FileCreateCommand(RpcClient, createData, &wshrpc.RpcOpts{Timeout: DefaultFileTimeout})
err = wshclient.FileCreateCommand(RpcClient, fileData, &wshrpc.RpcOpts{Timeout: DefaultFileTimeout})
if err != nil {
return nil, fmt.Errorf("creating file: %w", err)
}
@ -48,7 +47,7 @@ func ensureWaveFile(origName string, fileData wshrpc.CommandFileData) (*wshrpc.W
return info, nil
}
func streamWriteToWaveFile(fileData wshrpc.CommandFileData, reader io.Reader) error {
func streamWriteToFile(fileData wshrpc.FileData, reader io.Reader) error {
// First truncate the file with an empty write
emptyWrite := fileData
emptyWrite.Data64 = ""
@ -81,7 +80,7 @@ func streamWriteToWaveFile(fileData wshrpc.CommandFileData, reader io.Reader) er
appendData := fileData
appendData.Data64 = base64.StdEncoding.EncodeToString(chunk)
err = wshclient.FileAppendCommand(RpcClient, appendData, &wshrpc.RpcOpts{Timeout: fileTimeout})
err = wshclient.FileAppendCommand(RpcClient, appendData, &wshrpc.RpcOpts{Timeout: int64(fileTimeout)})
if err != nil {
return fmt.Errorf("appending chunk to file: %w", err)
}
@ -90,7 +89,7 @@ func streamWriteToWaveFile(fileData wshrpc.CommandFileData, reader io.Reader) er
return nil
}
func streamReadFromWaveFile(fileData wshrpc.CommandFileData, size int64, writer io.Writer) error {
func streamReadFromFile(fileData wshrpc.FileData, size int64, writer io.Writer) error {
const chunkSize = 32 * 1024 // 32KB chunks
for offset := int64(0); offset < size; offset += chunkSize {
// Calculate the length of this chunk
@ -100,19 +99,19 @@ func streamReadFromWaveFile(fileData wshrpc.CommandFileData, size int64, writer
}
// Set up the ReadAt request
fileData.At = &wshrpc.CommandFileDataAt{
fileData.At = &wshrpc.FileDataAt{
Offset: offset,
Size: int64(length),
Size: length,
}
// Read the chunk
content64, err := wshclient.FileReadCommand(RpcClient, fileData, &wshrpc.RpcOpts{Timeout: fileTimeout})
data, err := wshclient.FileReadCommand(RpcClient, fileData, &wshrpc.RpcOpts{Timeout: int64(fileTimeout)})
if err != nil {
return fmt.Errorf("reading chunk at offset %d: %w", offset, err)
}
// Decode and write the chunk
chunk, err := base64.StdEncoding.DecodeString(content64)
chunk, err := base64.StdEncoding.DecodeString(data.Data64)
if err != nil {
return fmt.Errorf("decoding chunk at offset %d: %w", offset, err)
}
@ -127,7 +126,7 @@ func streamReadFromWaveFile(fileData wshrpc.CommandFileData, size int64, writer
}
type fileListResult struct {
info *wshrpc.WaveFileInfo
info *wshrpc.FileInfo
err error
}
@ -139,9 +138,9 @@ func streamFileList(zoneId string, path string, recursive bool, filesOnly bool)
go func() {
defer close(resultChan)
fileData := wshrpc.CommandFileData{
ZoneId: zoneId,
FileName: path,
fileData := wshrpc.FileData{
Info: &wshrpc.FileInfo{
Path: fmt.Sprintf(wavefileutil.WaveFilePathPattern, zoneId, path)},
}
info, err := wshclient.FileInfoCommand(RpcClient, fileData, &wshrpc.RpcOpts{Timeout: 2000})
@ -169,13 +168,12 @@ func streamFileList(zoneId string, path string, recursive bool, filesOnly bool)
foundAny := false
for {
listData := wshrpc.CommandFileListData{
ZoneId: zoneId,
Prefix: prefix,
All: recursive,
Offset: offset,
Limit: 100,
}
listData := wshrpc.FileListData{
Path: fmt.Sprintf(wavefileutil.WaveFilePathPattern, zoneId, prefix),
Opts: &wshrpc.FileListOpts{
All: recursive,
Offset: offset,
Limit: 100}}
files, err := wshclient.FileListCommand(RpcClient, listData, &wshrpc.RpcOpts{Timeout: 2000})
if err != nil {
@ -210,3 +208,24 @@ func streamFileList(zoneId string, path string, recursive bool, filesOnly bool)
return resultChan, nil
}
func fixRelativePaths(path string) (string, error) {
conn, err := connparse.ParseURI(path)
if err != nil {
return "", err
}
if conn.Scheme == connparse.ConnectionTypeWsh {
if conn.Host == connparse.ConnHostCurrent {
conn.Host = RpcContext.Conn
fixedPath, err := fileutil.FixPath(conn.Path)
if err != nil {
return "", err
}
conn.Path = fixedPath
}
if conn.Host == "" {
conn.Host = wshrpc.LocalConnName
}
}
return conn.GetFullURI(), nil
}
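fixRelativePaths is what lets the CLI accept the shorthands documented in the UriHelpText added by this commit. A minimal, self-contained sketch (not part of the diff) of where each shorthand is expected to resolve, assuming the current remote is user@ec2; the exact expanded path depends on connparse.ParseURI and fileutil.FixPath:

package main

import "fmt"

func main() {
	// Sketch only: expected host resolution for the wsh:// shorthands.
	examples := []struct{ arg, resolvesTo string }{
		{"notes.txt", "current remote (host filled in from RpcContext.Conn, e.g. user@ec2)"},
		{"//build01/var/log/syslog", "the remote named in the URI authority (build01 is illustrative)"},
		{"/~/notes.txt", "local machine (an empty host defaults to wshrpc.LocalConnName)"},
	}
	for _, ex := range examples {
		fmt.Printf("%-26s -> %s\n", ex.arg, ex.resolvesTo)
	}
}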

View File

@ -10,17 +10,17 @@ import (
"fmt"
"io"
"io/fs"
"net/url"
"log"
"os"
"path"
"path/filepath"
"strings"
"text/tabwriter"
"time"
"github.com/spf13/cobra"
"github.com/wavetermdev/waveterm/pkg/util/colprint"
"github.com/wavetermdev/waveterm/pkg/util/utilfn"
"github.com/wavetermdev/waveterm/pkg/waveobj"
"github.com/wavetermdev/waveterm/pkg/wshrpc"
"github.com/wavetermdev/waveterm/pkg/wshrpc/wshclient"
"golang.org/x/term"
@ -32,13 +32,42 @@ const (
WaveFilePrefix = "wavefile://"
DefaultFileTimeout = 5000
TimeoutYear = int64(365) * 24 * 60 * 60 * 1000
UriHelpText = `
URI format: [profile]:[uri-scheme]://[connection]/[path]
Supported URI schemes:
wsh:
Used to access files on remote hosts over SSH via the WSH helper. Allows for file streaming to Wave and other remotes.
Profiles are optional for WSH URIs, provided that you have configured the remote host in your "connections.json" or "~/.ssh/config" file.
If a profile is provided, it must be defined in "profiles.json" in the Wave configuration directory.
Format: wsh://[remote]/[path]
Shorthands can be used for the current remote and your local computer:
[path] a relative or absolute path on the current remote
//[remote]/[path] a path on a remote
/~/[path] a path relative to your home directory on your local computer
s3:
Used to access files on S3-compatible systems.
Requires S3 credentials to be set up, either in the AWS CLI configuration files, or in "profiles.json" in the Wave configuration directory.
If no profile is provided, the default from your AWS CLI configuration will be used. Profiles from the AWS CLI must be prefixed with "aws:".
Format: s3://[bucket]/[path]
aws:[profile]:s3://[bucket]/[path]
[profile]:s3://[bucket]/[path]
wavefile:
Used to retrieve blockfiles from the internal Wave filesystem.
Format: wavefile://[zoneid]/[path]`
)
var fileCmd = &cobra.Command{
Use: "file",
Short: "manage Wave Terminal files",
Long: "Commands to manage Wave Terminal files stored in blocks",
}
Short: "manage files across different storage systems",
Long: "Manage files across different storage systems.\n\nWave Terminal is capable of managing files from remote SSH hosts, S3-compatible systems, and the internal Wave filesystem.\nFiles are addressed via URIs, which vary depending on the storage system." + UriHelpText}
var fileTimeout int
@ -58,86 +87,59 @@ func init() {
fileCmd.AddCommand(fileRmCmd)
fileCmd.AddCommand(fileInfoCmd)
fileCmd.AddCommand(fileAppendCmd)
fileCpCmd.Flags().BoolP("merge", "m", false, "merge directories")
fileCpCmd.Flags().BoolP("recursive", "r", false, "copy directories recursively")
fileCpCmd.Flags().BoolP("force", "f", false, "force overwrite of existing files")
fileCmd.AddCommand(fileCpCmd)
}
type waveFileRef struct {
zoneId string
fileName string
}
func parseWaveFileURL(fileURL string) (*waveFileRef, error) {
if !strings.HasPrefix(fileURL, WaveFilePrefix) {
return nil, fmt.Errorf("invalid file reference %q: must use wavefile:// URL format", fileURL)
}
u, err := url.Parse(fileURL)
if err != nil {
return nil, fmt.Errorf("invalid wavefile URL: %w", err)
}
if u.Scheme != WaveFileScheme {
return nil, fmt.Errorf("invalid URL scheme %q: must be wavefile://", u.Scheme)
}
// Path must start with /
if !strings.HasPrefix(u.Path, "/") {
return nil, fmt.Errorf("invalid wavefile URL: path must start with /")
}
// Must have a host (zone)
if u.Host == "" {
return nil, fmt.Errorf("invalid wavefile URL: must specify zone (e.g., wavefile://block/file.txt)")
}
return &waveFileRef{
zoneId: u.Host,
fileName: strings.TrimPrefix(u.Path, "/"),
}, nil
}
func resolveWaveFile(ref *waveFileRef) (*waveobj.ORef, error) {
return resolveSimpleId(ref.zoneId)
fileMvCmd.Flags().BoolP("recursive", "r", false, "move directories recursively")
fileMvCmd.Flags().BoolP("force", "f", false, "force overwrite of existing files")
fileCmd.AddCommand(fileMvCmd)
}
var fileListCmd = &cobra.Command{
Use: "ls [wavefile://zone[/path]]",
Short: "list wave files",
Example: " wsh file ls wavefile://block/\n wsh file ls wavefile://client/configs/",
Use: "ls [uri]",
Aliases: []string{"list"},
Short: "list files",
Long: "List files in a directory. By default, lists files in the current directory." + UriHelpText,
Example: " wsh file ls wsh://user@ec2/home/user/\n wsh file ls wavefile://client/configs/",
RunE: activityWrap("file", fileListRun),
PreRunE: preRunSetupRpcClient,
}
var fileCatCmd = &cobra.Command{
Use: "cat wavefile://zone/file",
Short: "display contents of a wave file",
Example: " wsh file cat wavefile://block/config.txt\n wsh file cat wavefile://client/settings.json",
Use: "cat [uri]",
Short: "display contents of a file",
Long: "Display the contents of a file." + UriHelpText,
Example: " wsh file cat wsh://user@ec2/home/user/config.txt\n wsh file cat wavefile://client/settings.json",
Args: cobra.ExactArgs(1),
RunE: activityWrap("file", fileCatRun),
PreRunE: preRunSetupRpcClient,
}
var fileInfoCmd = &cobra.Command{
Use: "info wavefile://zone/file",
Use: "info [uri]",
Short: "show wave file information",
Example: " wsh file info wavefile://block/config.txt",
Long: "Show information about a file." + UriHelpText,
Example: " wsh file info wsh://user@ec2/home/user/config.txt\n wsh file info wavefile://client/settings.json",
Args: cobra.ExactArgs(1),
RunE: activityWrap("file", fileInfoRun),
PreRunE: preRunSetupRpcClient,
}
var fileRmCmd = &cobra.Command{
Use: "rm wavefile://zone/file",
Short: "remove a wave file",
Example: " wsh file rm wavefile://block/config.txt",
Use: "rm [uri]",
Short: "remove a file",
Long: "Remove a file." + UriHelpText,
Example: " wsh file rm wsh://user@ec2/home/user/config.txt\n wsh file rm wavefile://client/settings.json",
Args: cobra.ExactArgs(1),
RunE: activityWrap("file", fileRmRun),
PreRunE: preRunSetupRpcClient,
}
var fileWriteCmd = &cobra.Command{
Use: "write wavefile://zone/file",
Short: "write stdin into a wave file (up to 10MB)",
Use: "write [uri]",
Short: "write stdin into a file (up to 10MB)",
Long: "Write stdin into a file, buffering input and respecting 10MB total file size limit." + UriHelpText,
Example: " echo 'hello' | wsh file write wavefile://block/greeting.txt",
Args: cobra.ExactArgs(1),
RunE: activityWrap("file", fileWriteRun),
@ -145,9 +147,9 @@ var fileWriteCmd = &cobra.Command{
}
var fileAppendCmd = &cobra.Command{
Use: "append wavefile://zone/file",
Short: "append stdin to a wave file",
Long: "append stdin to a wave file, buffering input and respecting 10MB total file size limit",
Use: "append [uri]",
Short: "append stdin to a file",
Long: "Append stdin to a file, buffering input and respecting 10MB total file size limit." + UriHelpText,
Example: " tail -f log.txt | wsh file append wavefile://block/app.log",
Args: cobra.ExactArgs(1),
RunE: activityWrap("file", fileAppendRun),
@ -155,43 +157,47 @@ var fileAppendCmd = &cobra.Command{
}
var fileCpCmd = &cobra.Command{
Use: "cp source destination",
Short: "copy between wave files and local files",
Long: `Copy files between wave storage and local filesystem.
Exactly one of source or destination must be a wavefile:// URL.`,
Example: " wsh file cp wavefile://block/config.txt ./local-config.txt\n wsh file cp ./local-config.txt wavefile://block/config.txt",
Use: "cp [source-uri] [destination-uri]" + UriHelpText,
Aliases: []string{"copy"},
Short: "copy files between storage systems",
Long: "Copy files between different storage systems." + UriHelpText,
Example: " wsh file cp wavefile://block/config.txt ./local-config.txt\n wsh file cp ./local-config.txt wavefile://block/config.txt\n wsh file cp wsh://user@ec2/home/user/config.txt wavefile://client/config.txt",
Args: cobra.ExactArgs(2),
RunE: activityWrap("file", fileCpRun),
PreRunE: preRunSetupRpcClient,
}
var fileMvCmd = &cobra.Command{
Use: "mv [source-uri] [destination-uri]" + UriHelpText,
Aliases: []string{"move"},
Short: "move files between storage systems",
Long: "Move files between different storage systems. The source file will be deleted once the operation completes successfully." + UriHelpText,
Example: " wsh file mv wavefile://block/config.txt ./local-config.txt\n wsh file mv ./local-config.txt wavefile://block/config.txt\n wsh file mv wsh://user@ec2/home/user/config.txt wavefile://client/config.txt",
Args: cobra.ExactArgs(2),
RunE: activityWrap("file", fileMvRun),
PreRunE: preRunSetupRpcClient,
}
func fileCatRun(cmd *cobra.Command, args []string) error {
ref, err := parseWaveFileURL(args[0])
path, err := fixRelativePaths(args[0])
if err != nil {
return err
}
fullORef, err := resolveWaveFile(ref)
if err != nil {
return err
}
fileData := wshrpc.CommandFileData{
ZoneId: fullORef.OID,
FileName: ref.fileName,
}
fileData := wshrpc.FileData{
Info: &wshrpc.FileInfo{
Path: path}}
// Get file info first to check existence and get size
info, err := wshclient.FileInfoCommand(RpcClient, fileData, &wshrpc.RpcOpts{Timeout: 2000})
err = convertNotFoundErr(err)
if err == fs.ErrNotExist {
return fmt.Errorf("%s: no such file", args[0])
return fmt.Errorf("%s: no such file", path)
}
if err != nil {
return fmt.Errorf("getting file info: %w", err)
}
err = streamReadFromWaveFile(fileData, info.Size, os.Stdout)
err = streamReadFromFile(fileData, info.Size, os.Stdout)
if err != nil {
return fmt.Errorf("reading file: %w", err)
}
@ -200,63 +206,53 @@ func fileCatRun(cmd *cobra.Command, args []string) error {
}
func fileInfoRun(cmd *cobra.Command, args []string) error {
ref, err := parseWaveFileURL(args[0])
path, err := fixRelativePaths(args[0])
if err != nil {
return err
}
fullORef, err := resolveWaveFile(ref)
if err != nil {
return err
}
fileData := wshrpc.CommandFileData{
ZoneId: fullORef.OID,
FileName: ref.fileName,
}
fileData := wshrpc.FileData{
Info: &wshrpc.FileInfo{
Path: path}}
info, err := wshclient.FileInfoCommand(RpcClient, fileData, &wshrpc.RpcOpts{Timeout: DefaultFileTimeout})
err = convertNotFoundErr(err)
if err == fs.ErrNotExist {
return fmt.Errorf("%s: no such file", args[0])
return fmt.Errorf("%s: no such file", path)
}
if err != nil {
return fmt.Errorf("getting file info: %w", err)
}
WriteStdout("filename: %s\n", info.Name)
WriteStdout("size: %d\n", info.Size)
WriteStdout("ctime: %s\n", time.Unix(info.CreatedTs/1000, 0).Format(time.DateTime))
WriteStdout("mtime: %s\n", time.Unix(info.ModTs/1000, 0).Format(time.DateTime))
if len(info.Meta) > 0 {
WriteStdout("name:\t%s\n", info.Name)
if info.Mode != 0 {
WriteStdout("mode:\t%s\n", info.Mode.String())
}
WriteStdout("mtime:\t%s\n", time.Unix(info.ModTime/1000, 0).Format(time.DateTime))
if !info.IsDir {
WriteStdout("size:\t%d\n", info.Size)
}
if info.Meta != nil && len(*info.Meta) > 0 {
WriteStdout("metadata:\n")
for k, v := range info.Meta {
WriteStdout(" %s: %v\n", k, v)
for k, v := range *info.Meta {
WriteStdout("\t\t\t%s: %v\n", k, v)
}
}
return nil
}
func fileRmRun(cmd *cobra.Command, args []string) error {
ref, err := parseWaveFileURL(args[0])
path, err := fixRelativePaths(args[0])
if err != nil {
return err
}
fullORef, err := resolveWaveFile(ref)
if err != nil {
return err
}
fileData := wshrpc.CommandFileData{
ZoneId: fullORef.OID,
FileName: ref.fileName,
}
fileData := wshrpc.FileData{
Info: &wshrpc.FileInfo{
Path: path}}
_, err = wshclient.FileInfoCommand(RpcClient, fileData, &wshrpc.RpcOpts{Timeout: DefaultFileTimeout})
err = convertNotFoundErr(err)
if err == fs.ErrNotExist {
return fmt.Errorf("%s: no such file", args[0])
return fmt.Errorf("%s: no such file", path)
}
if err != nil {
return fmt.Errorf("getting file info: %w", err)
@ -271,27 +267,20 @@ func fileRmRun(cmd *cobra.Command, args []string) error {
}
func fileWriteRun(cmd *cobra.Command, args []string) error {
ref, err := parseWaveFileURL(args[0])
path, err := fixRelativePaths(args[0])
if err != nil {
return err
}
fileData := wshrpc.FileData{
Info: &wshrpc.FileInfo{
Path: path}}
_, err = ensureFile(path, fileData)
if err != nil {
return err
}
fullORef, err := resolveWaveFile(ref)
if err != nil {
return err
}
fileData := wshrpc.CommandFileData{
ZoneId: fullORef.OID,
FileName: ref.fileName,
}
_, err = ensureWaveFile(args[0], fileData)
if err != nil {
return err
}
err = streamWriteToWaveFile(fileData, WrappedStdin)
err = streamWriteToFile(fileData, WrappedStdin)
if err != nil {
return fmt.Errorf("writing file: %w", err)
}
@ -300,22 +289,15 @@ func fileWriteRun(cmd *cobra.Command, args []string) error {
}
func fileAppendRun(cmd *cobra.Command, args []string) error {
ref, err := parseWaveFileURL(args[0])
path, err := fixRelativePaths(args[0])
if err != nil {
return err
}
fileData := wshrpc.FileData{
Info: &wshrpc.FileInfo{
Path: path}}
fullORef, err := resolveWaveFile(ref)
if err != nil {
return err
}
fileData := wshrpc.CommandFileData{
ZoneId: fullORef.OID,
FileName: ref.fileName,
}
info, err := ensureWaveFile(args[0], fileData)
info, err := ensureFile(path, fileData)
if err != nil {
return err
}
@ -344,7 +326,7 @@ func fileAppendRun(cmd *cobra.Command, args []string) error {
if buf.Len() >= 8192 { // 8KB batch size
fileData.Data64 = base64.StdEncoding.EncodeToString(buf.Bytes())
err = wshclient.FileAppendCommand(RpcClient, fileData, &wshrpc.RpcOpts{Timeout: fileTimeout})
err = wshclient.FileAppendCommand(RpcClient, fileData, &wshrpc.RpcOpts{Timeout: int64(fileTimeout)})
if err != nil {
return fmt.Errorf("appending to file: %w", err)
}
@ -355,7 +337,7 @@ func fileAppendRun(cmd *cobra.Command, args []string) error {
if buf.Len() > 0 {
fileData.Data64 = base64.StdEncoding.EncodeToString(buf.Bytes())
err = wshclient.FileAppendCommand(RpcClient, fileData, &wshrpc.RpcOpts{Timeout: fileTimeout})
err = wshclient.FileAppendCommand(RpcClient, fileData, &wshrpc.RpcOpts{Timeout: int64(fileTimeout)})
if err != nil {
return fmt.Errorf("appending to file: %w", err)
}
@ -395,114 +377,66 @@ func getTargetPath(src, dst string) (string, error) {
}
func fileCpRun(cmd *cobra.Command, args []string) error {
src, origDst := args[0], args[1]
dst, err := getTargetPath(src, origDst)
src, dst := args[0], args[1]
recursive, err := cmd.Flags().GetBool("recursive")
if err != nil {
return err
}
srcIsWave := strings.HasPrefix(src, WaveFilePrefix)
dstIsWave := strings.HasPrefix(dst, WaveFilePrefix)
if srcIsWave == dstIsWave {
return fmt.Errorf("exactly one file must be a wavefile:// URL")
merge, err := cmd.Flags().GetBool("merge")
if err != nil {
return err
}
if srcIsWave {
return copyFromWaveToLocal(src, dst)
} else {
return copyFromLocalToWave(src, dst)
}
}
func copyFromWaveToLocal(src, dst string) error {
ref, err := parseWaveFileURL(src)
force, err := cmd.Flags().GetBool("force")
if err != nil {
return err
}
fullORef, err := resolveWaveFile(ref)
srcPath, err := fixRelativePaths(src)
if err != nil {
return err
}
fileData := wshrpc.CommandFileData{
ZoneId: fullORef.OID,
FileName: ref.fileName,
}
// Get file info first to check existence and get size
info, err := wshclient.FileInfoCommand(RpcClient, fileData, &wshrpc.RpcOpts{Timeout: 2000})
err = convertNotFoundErr(err)
if err == fs.ErrNotExist {
return fmt.Errorf("%s: no such file", src)
return fmt.Errorf("unable to parse src path: %w", err)
}
destPath, err := fixRelativePaths(dst)
if err != nil {
return fmt.Errorf("getting file info: %w", err)
return fmt.Errorf("unable to parse dest path: %w", err)
}
// Create the destination file
f, err := os.Create(dst)
log.Printf("Copying %s to %s; recursive: %v, merge: %v, force: %v", srcPath, destPath, recursive, merge, force)
rpcOpts := &wshrpc.RpcOpts{Timeout: TimeoutYear}
err = wshclient.FileCopyCommand(RpcClient, wshrpc.CommandFileCopyData{SrcUri: srcPath, DestUri: destPath, Opts: &wshrpc.FileCopyOpts{Recursive: recursive, Merge: merge, Overwrite: force, Timeout: TimeoutYear}}, rpcOpts)
if err != nil {
return fmt.Errorf("creating local file: %w", err)
return fmt.Errorf("copying file: %w", err)
}
defer f.Close()
err = streamReadFromWaveFile(fileData, info.Size, f)
if err != nil {
return fmt.Errorf("reading wave file: %w", err)
}
return nil
}
func copyFromLocalToWave(src, dst string) error {
ref, err := parseWaveFileURL(dst)
func fileMvRun(cmd *cobra.Command, args []string) error {
src, dst := args[0], args[1]
recursive, err := cmd.Flags().GetBool("recursive")
if err != nil {
return err
}
force, err := cmd.Flags().GetBool("force")
if err != nil {
return err
}
fullORef, err := resolveWaveFile(ref)
srcPath, err := fixRelativePaths(src)
if err != nil {
return err
}
// stat local file
stat, err := os.Stat(src)
if err == fs.ErrNotExist {
return fmt.Errorf("%s: no such file", src)
return fmt.Errorf("unable to parse src path: %w", err)
}
destPath, err := fixRelativePaths(dst)
if err != nil {
return fmt.Errorf("stat local file: %w", err)
return fmt.Errorf("unable to parse dest path: %w", err)
}
if stat.IsDir() {
return fmt.Errorf("%s: is a directory", src)
}
fileData := wshrpc.CommandFileData{
ZoneId: fullORef.OID,
FileName: ref.fileName,
}
_, err = ensureWaveFile(dst, fileData)
log.Printf("Moving %s to %s; recursive: %v, force: %v", srcPath, destPath, recursive, force)
rpcOpts := &wshrpc.RpcOpts{Timeout: TimeoutYear}
err = wshclient.FileMoveCommand(RpcClient, wshrpc.CommandFileCopyData{SrcUri: srcPath, DestUri: destPath, Opts: &wshrpc.FileCopyOpts{Recursive: recursive, Overwrite: force, Timeout: TimeoutYear}}, rpcOpts)
if err != nil {
return err
return fmt.Errorf("moving file: %w", err)
}
file, err := os.Open(src)
if err != nil {
return fmt.Errorf("opening local file: %w", err)
}
defer file.Close()
err = streamWriteToWaveFile(fileData, file)
if err != nil {
return fmt.Errorf("writing wave file: %w", err)
}
return nil
}
func filePrintColumns(filesChan <-chan fileListResult) error {
func filePrintColumns(filesChan <-chan wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData]) error {
width := 80 // default if we can't get terminal
if w, _, err := term.GetSize(int(os.Stdout.Fd())); err == nil {
width = w
@ -513,37 +447,35 @@ func filePrintColumns(filesChan <-chan fileListResult) error {
numCols = 1
}
return colprint.PrintColumns(
return colprint.PrintColumnsArray(
filesChan,
numCols,
100, // sample size
func(f fileListResult) (string, error) {
if f.err != nil {
return "", f.err
func(respUnion wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData]) ([]string, error) {
if respUnion.Error != nil {
return []string{}, respUnion.Error
}
return f.info.Name, nil
strs := make([]string, len(respUnion.Response.FileInfo))
for i, f := range respUnion.Response.FileInfo {
strs[i] = f.Name
}
return strs, nil
},
os.Stdout,
)
}
func filePrintLong(filesChan <-chan fileListResult) error {
func filePrintLong(filesChan <-chan wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData]) error {
// Sample first 100 files to determine name width
maxNameLen := 0
var samples []*wshrpc.WaveFileInfo
var samples []*wshrpc.FileInfo
for f := range filesChan {
if f.err != nil {
return f.err
}
samples = append(samples, f.info)
if len(f.info.Name) > maxNameLen {
maxNameLen = len(f.info.Name)
}
if len(samples) >= 100 {
break
for respUnion := range filesChan {
if respUnion.Error != nil {
return respUnion.Error
}
resp := respUnion.Response
samples = append(samples, resp.FileInfo...)
}
// Use sampled width, but cap it at 60 chars to prevent excessive width
@ -552,31 +484,37 @@ func filePrintLong(filesChan <-chan fileListResult) error {
nameWidth = 60
}
writer := tabwriter.NewWriter(os.Stdout, 0, 8, 1, '\t', 0)
// Print samples
for _, f := range samples {
name := f.Name
t := time.Unix(f.ModTs/1000, 0)
t := time.Unix(f.ModTime/1000, 0)
timestamp := utilfn.FormatLsTime(t)
if f.Size == 0 && strings.HasSuffix(name, "/") {
fmt.Fprintf(os.Stdout, "%-*s %8s %s\n", nameWidth, name, "-", timestamp)
fmt.Fprintf(writer, "%-*s\t%8s\t%s\n", nameWidth, name, "-", timestamp)
} else {
fmt.Fprintf(os.Stdout, "%-*s %8d %s\n", nameWidth, name, f.Size, timestamp)
fmt.Fprintf(writer, "%-*s\t%8d\t%s\n", nameWidth, name, f.Size, timestamp)
}
}
// Continue with remaining files
for f := range filesChan {
if f.err != nil {
return f.err
for respUnion := range filesChan {
if respUnion.Error != nil {
return respUnion.Error
}
name := f.info.Name
timestamp := time.Unix(f.info.ModTs/1000, 0).Format("Jan 02 15:04")
if f.info.Size == 0 && strings.HasSuffix(name, "/") {
fmt.Fprintf(os.Stdout, "%-*s %8s %s\n", nameWidth, name, "-", timestamp)
} else {
fmt.Fprintf(os.Stdout, "%-*s %8d %s\n", nameWidth, name, f.info.Size, timestamp)
for _, f := range respUnion.Response.FileInfo {
name := f.Name
t := time.Unix(f.ModTime/1000, 0)
timestamp := utilfn.FormatLsTime(t)
if f.Size == 0 && strings.HasSuffix(name, "/") {
fmt.Fprintf(writer, "%-*s\t%8s\t%s\n", nameWidth, name, "-", timestamp)
} else {
fmt.Fprintf(writer, "%-*s\t%8d\t%s\n", nameWidth, name, f.Size, timestamp)
}
}
}
writer.Flush()
return nil
}
@ -585,7 +523,6 @@ func fileListRun(cmd *cobra.Command, args []string) error {
recursive, _ := cmd.Flags().GetBool("recursive")
longForm, _ := cmd.Flags().GetBool("long")
onePerLine, _ := cmd.Flags().GetBool("one")
filesOnly, _ := cmd.Flags().GetBool("files")
// Check if we're in a pipe
stat, _ := os.Stdout.Stat()
@ -594,38 +531,36 @@ func fileListRun(cmd *cobra.Command, args []string) error {
onePerLine = true
}
// Default to listing everything if no path specified
if len(args) == 0 {
args = append(args, "wavefile://client/")
args = []string{"."}
}
ref, err := parseWaveFileURL(args[0])
if err != nil {
return err
}
fullORef, err := resolveWaveFile(ref)
if err != nil {
return err
}
filesChan, err := streamFileList(fullORef.OID, ref.fileName, recursive, filesOnly)
path, err := fixRelativePaths(args[0])
if err != nil {
return err
}
filesChan := wshclient.FileListStreamCommand(RpcClient, wshrpc.FileListData{Path: path, Opts: &wshrpc.FileListOpts{All: recursive}}, &wshrpc.RpcOpts{Timeout: 2000})
// Drain the channel when done
defer func() {
for range filesChan {
}
}()
if longForm {
return filePrintLong(filesChan)
}
if onePerLine {
for f := range filesChan {
if f.err != nil {
return f.err
for respUnion := range filesChan {
if respUnion.Error != nil {
log.Printf("error: %v", respUnion.Error)
return respUnion.Error
}
fmt.Fprintln(os.Stdout, f.info.Name)
for _, f := range respUnion.Response.FileInfo {
fmt.Fprintln(os.Stdout, f.Name)
}
return nil
}
return nil
}
return filePrintColumns(filesChan)
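The ls path above drives the new streaming list RPC; for reference, a minimal sketch (not part of the diff, URI illustrative) of consuming FileListStreamCommand directly from the same package:

func listRemoteDir(uri string) error {
	// Sketch only: each streamed response carries a batch of FileInfo entries;
	// errors arrive on the same channel via the RespOrErrorUnion wrapper.
	filesChan := wshclient.FileListStreamCommand(RpcClient,
		wshrpc.FileListData{Path: uri, Opts: &wshrpc.FileListOpts{All: false}},
		&wshrpc.RpcOpts{Timeout: 2000})
	for respUnion := range filesChan {
		if respUnion.Error != nil {
			return respUnion.Error
		}
		for _, f := range respUnion.Response.FileInfo {
			fmt.Println(f.Name)
		}
	}
	return nil
}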

View File

@ -11,6 +11,7 @@ import (
"github.com/spf13/cobra"
"github.com/wavetermdev/waveterm/pkg/util/envutil"
"github.com/wavetermdev/waveterm/pkg/util/wavefileutil"
"github.com/wavetermdev/waveterm/pkg/wshrpc"
"github.com/wavetermdev/waveterm/pkg/wshrpc/wshclient"
)
@ -112,12 +113,11 @@ func getVarRun(cmd *cobra.Command, args []string) error {
}
func getAllVariables(zoneId string) error {
fileData := wshrpc.CommandFileData{
ZoneId: zoneId,
FileName: getVarFileName,
}
fileData := wshrpc.FileData{
Info: &wshrpc.FileInfo{
Path: fmt.Sprintf(wavefileutil.WaveFilePathPattern, zoneId, getVarFileName)}}
envStr64, err := wshclient.FileReadCommand(RpcClient, fileData, &wshrpc.RpcOpts{Timeout: 2000})
data, err := wshclient.FileReadCommand(RpcClient, fileData, &wshrpc.RpcOpts{Timeout: 2000})
err = convertNotFoundErr(err)
if err == fs.ErrNotExist {
return nil
@ -125,7 +125,7 @@ func getAllVariables(zoneId string) error {
if err != nil {
return fmt.Errorf("reading variables: %w", err)
}
envBytes, err := base64.StdEncoding.DecodeString(envStr64)
envBytes, err := base64.StdEncoding.DecodeString(data.Data64)
if err != nil {
return fmt.Errorf("decoding variables: %w", err)
}

View File

@ -5,8 +5,10 @@ package cmd
import (
"encoding/base64"
"fmt"
"github.com/spf13/cobra"
"github.com/wavetermdev/waveterm/pkg/util/wavefileutil"
"github.com/wavetermdev/waveterm/pkg/wshrpc"
"github.com/wavetermdev/waveterm/pkg/wshrpc/wshclient"
)
@ -29,12 +31,12 @@ func runReadFile(cmd *cobra.Command, args []string) {
WriteStderr("[error] %v\n", err)
return
}
resp64, err := wshclient.FileReadCommand(RpcClient, wshrpc.CommandFileData{ZoneId: fullORef.OID, FileName: args[0]}, &wshrpc.RpcOpts{Timeout: 5000})
data, err := wshclient.FileReadCommand(RpcClient, wshrpc.FileData{Info: &wshrpc.FileInfo{Path: fmt.Sprintf(wavefileutil.WaveFilePathPattern, fullORef.OID, args[0])}}, &wshrpc.RpcOpts{Timeout: 5000})
if err != nil {
WriteStderr("[error] reading file: %v\n", err)
return
}
resp, err := base64.StdEncoding.DecodeString(resp64)
resp, err := base64.StdEncoding.DecodeString(data.Data64)
if err != nil {
WriteStderr("[error] decoding file: %v\n", err)
return

View File

@ -12,7 +12,7 @@ import (
"strings"
"github.com/spf13/cobra"
"github.com/wavetermdev/waveterm/pkg/util/utilfn"
"github.com/wavetermdev/waveterm/pkg/util/fileutil"
"github.com/wavetermdev/waveterm/pkg/wavebase"
"github.com/wavetermdev/waveterm/pkg/wshrpc"
"github.com/wavetermdev/waveterm/pkg/wshrpc/wshclient"
@ -131,7 +131,7 @@ func setBgRun(cmd *cobra.Command, args []string) (rtnErr error) {
return fmt.Errorf("path is a directory, not an image file")
}
mimeType := utilfn.DetectMimeType(absPath, fileInfo, true)
mimeType := fileutil.DetectMimeType(absPath, fileInfo, true)
switch mimeType {
case "image/jpeg", "image/png", "image/gif", "image/webp", "image/svg+xml":
// Valid image type

View File

@ -8,6 +8,7 @@ import (
"github.com/spf13/cobra"
"github.com/wavetermdev/waveterm/pkg/waveobj"
"github.com/wavetermdev/waveterm/pkg/wconfig"
"github.com/wavetermdev/waveterm/pkg/wshrpc"
"github.com/wavetermdev/waveterm/pkg/wshrpc/wshclient"
)
@ -41,7 +42,7 @@ func sshRun(cmd *cobra.Command, args []string) (rtnErr error) {
connOpts := wshrpc.ConnRequest{
Host: sshArg,
LogBlockId: blockId,
Keywords: wshrpc.ConnKeywords{
Keywords: wconfig.ConnKeywords{
SshIdentityFile: identityFiles,
},
}

View File

@ -1,7 +1,7 @@
// Copyright 2025, Command Line Inc.
// SPDX-License-Identifier: Apache-2.0
import { FileService } from "@/app/store/services";
import { RpcApi } from "@/app/store/wshclientapi";
import { adaptFromElectronKeyEvent } from "@/util/keyutil";
import { Rectangle, shell, WebContentsView } from "electron";
import { getWaveWindowById } from "emain/emain-window";
@ -9,6 +9,7 @@ import path from "path";
import { configureAuthKeyRequestInjection } from "./authkey";
import { setWasActive } from "./emain-activity";
import { handleCtrlShiftFocus, handleCtrlShiftState, shFrameNavHandler, shNavHandler } from "./emain-util";
import { ElectronWshClient } from "./emain-wsh";
import { getElectronAppBasePath, isDevVite } from "./platform";
function computeBgColor(fullConfig: FullConfigType): string {
@ -200,7 +201,7 @@ export async function getOrCreateWebViewForTab(waveWindowId: string, tabId: stri
if (tabView) {
return [tabView, true];
}
const fullConfig = await FileService.GetFullConfig();
const fullConfig = await RpcApi.GetFullConfigCommand(ElectronWshClient);
tabView = getSpareTab(fullConfig);
tabView.waveWindowId = waveWindowId;
tabView.lastUsedTs = Date.now();

View File

@ -1,7 +1,8 @@
// Copyright 2025, Command Line Inc.
// SPDX-License-Identifier: Apache-2.0
import { ClientService, FileService, ObjectService, WindowService, WorkspaceService } from "@/app/store/services";
import { ClientService, ObjectService, WindowService, WorkspaceService } from "@/app/store/services";
import { RpcApi } from "@/app/store/wshclientapi";
import { fireAndForget } from "@/util/util";
import { BaseWindow, BaseWindowConstructorOptions, dialog, globalShortcut, ipcMain, screen } from "electron";
import path from "path";
@ -15,6 +16,7 @@ import {
} from "./emain-activity";
import { getOrCreateWebViewForTab, getWaveTabViewByWebContentsId, WaveTabView } from "./emain-tabview";
import { delay, ensureBoundsAreVisible, waveKeyToElectronKey } from "./emain-util";
import { ElectronWshClient } from "./emain-wsh";
import { log } from "./log";
import { getElectronAppBasePath, unamePlatform } from "./platform";
import { updater } from "./updater";
@ -255,7 +257,7 @@ export class WaveBrowserWindow extends BaseWindow {
e.preventDefault();
fireAndForget(async () => {
const numWindows = waveWindowMap.size;
const fullConfig = await FileService.GetFullConfig();
const fullConfig = await RpcApi.GetFullConfigCommand(ElectronWshClient);
if (numWindows > 1 || !fullConfig.settings["window:savelastwindow"]) {
console.log("numWindows > 1 or user does not want last window saved", numWindows);
if (fullConfig.settings["window:confirmclose"]) {
@ -621,7 +623,7 @@ export async function createWindowForWorkspace(workspaceId: string) {
if (!newWin) {
console.log("error creating new window", this.waveWindowId);
}
const newBwin = await createBrowserWindow(newWin, await FileService.GetFullConfig(), {
const newBwin = await createBrowserWindow(newWin, await RpcApi.GetFullConfigCommand(ElectronWshClient), {
unamePlatform,
});
newBwin.show();
@ -743,7 +745,7 @@ ipcMain.on("delete-workspace", (event, workspaceId) => {
export async function createNewWaveWindow() {
log("createNewWaveWindow");
const clientData = await ClientService.GetClientData();
const fullConfig = await FileService.GetFullConfig();
const fullConfig = await RpcApi.GetFullConfigCommand(ElectronWshClient);
let recreatedWindow = false;
const allWindows = getAllWaveWindows();
if (allWindows.length === 0 && clientData?.windowids?.length >= 1) {
@ -780,7 +782,7 @@ export async function relaunchBrowserWindows() {
setGlobalIsRelaunching(false);
const clientData = await ClientService.GetClientData();
const fullConfig = await FileService.GetFullConfig();
const fullConfig = await RpcApi.GetFullConfigCommand(ElectronWshClient);
const wins: WaveBrowserWindow[] = [];
for (const windowId of clientData.windowids.slice().reverse()) {
const windowData: WaveWindow = await WindowService.GetWindow(windowId);

View File

@ -1,7 +1,8 @@
// Copyright 2025, Command Line Inc.
// SPDX-License-Identifier: Apache-2.0
import { FileService, WindowService } from "@/app/store/services";
import { WindowService } from "@/app/store/services";
import { RpcApi } from "@/app/store/wshclientapi";
import { Notification } from "electron";
import { getResolvedUpdateChannel } from "emain/updater";
import { RpcResponseHelper, WshClient } from "../frontend/app/store/wshclient";
@ -44,7 +45,7 @@ export class ElectronWshClientType extends WshClient {
async handle_focuswindow(rh: RpcResponseHelper, windowId: string) {
console.log(`focuswindow ${windowId}`);
const fullConfig = await FileService.GetFullConfig();
const fullConfig = await RpcApi.GetFullConfigCommand(ElectronWshClient);
let ww = getWaveWindowById(windowId);
if (ww == null) {
const window = await WindowService.GetWindow(windowId);

View File

@ -14,7 +14,7 @@ import * as services from "../frontend/app/store/services";
import { initElectronWshrpc, shutdownWshrpc } from "../frontend/app/store/wshrpcutil";
import { getWebServerEndpoint } from "../frontend/util/endpoints";
import * as keyutil from "../frontend/util/keyutil";
import { fireAndForget } from "../frontend/util/util";
import { fireAndForget, sleep } from "../frontend/util/util";
import { AuthKey, configureAuthKeyRequestInjection } from "./authkey";
import { initDocsite } from "./docsite";
import {
@ -95,7 +95,7 @@ function handleWSEvent(evtMsg: WSEventType) {
if (windowData == null) {
return;
}
const fullConfig = await services.FileService.GetFullConfig();
const fullConfig = await RpcApi.GetFullConfigCommand(ElectronWshClient);
const newWin = await createBrowserWindow(windowData, fullConfig, { unamePlatform });
newWin.show();
} else if (evtMsg.eventtype == "electron:closewindow") {
@ -316,7 +316,7 @@ if (unamePlatform !== "darwin") {
electron.ipcMain.on("update-window-controls-overlay", async (event, rect: Dimensions) => {
// Bail out if the user requests the native titlebar
const fullConfig = await services.FileService.GetFullConfig();
const fullConfig = await RpcApi.GetFullConfigCommand(ElectronWshClient);
if (fullConfig.settings["window:nativetitlebar"]) return;
const zoomFactor = event.sender.getZoomFactor();
@ -595,18 +595,21 @@ async function appMain() {
console.log("wavesrv ready signal received", ready, Date.now() - startTs, "ms");
await electronApp.whenReady();
configureAuthKeyRequestInjection(electron.session.defaultSession);
const fullConfig = await services.FileService.GetFullConfig();
checkIfRunningUnderARM64Translation(fullConfig);
ensureHotSpareTab(fullConfig);
await relaunchBrowserWindows();
await initDocsite();
setTimeout(runActiveTimer, 5000); // start active timer, wait 5s just to be safe
await sleep(10); // wait a bit for wavesrv to be ready
try {
initElectronWshClient();
initElectronWshrpc(ElectronWshClient, { authKey: AuthKey });
} catch (e) {
console.log("error initializing wshrpc", e);
}
const fullConfig = await RpcApi.GetFullConfigCommand(ElectronWshClient);
checkIfRunningUnderARM64Translation(fullConfig);
ensureHotSpareTab(fullConfig);
await relaunchBrowserWindows();
await initDocsite();
setTimeout(runActiveTimer, 5000); // start active timer, wait 5s just to be safe
makeAppMenu();
await configureAutoUpdater();
setGlobalIsStarting(false);

View File

@ -6,7 +6,6 @@ import { autoUpdater } from "electron-updater";
import { readFileSync } from "fs";
import path from "path";
import YAML from "yaml";
import { FileService } from "../frontend/app/store/services";
import { RpcApi } from "../frontend/app/store/wshclientapi";
import { isDev } from "../frontend/util/isdev";
import { fireAndForget } from "../frontend/util/util";
@ -240,7 +239,7 @@ export async function configureAutoUpdater() {
try {
console.log("Configuring updater");
const settings = (await FileService.GetFullConfig()).settings;
const settings = (await RpcApi.GetFullConfigCommand(ElectronWshClient)).settings;
updater = new Updater(settings);
await updater.start();
} catch (e) {

View File

@ -47,46 +47,6 @@ class ClientServiceType {
export const ClientService = new ClientServiceType();
// fileservice.FileService (file)
class FileServiceType {
// delete file
DeleteFile(connection: string, path: string): Promise<void> {
return WOS.callBackendService("file", "DeleteFile", Array.from(arguments))
}
GetFullConfig(): Promise<FullConfigType> {
return WOS.callBackendService("file", "GetFullConfig", Array.from(arguments))
}
GetWaveFile(arg1: string, arg2: string): Promise<any> {
return WOS.callBackendService("file", "GetWaveFile", Array.from(arguments))
}
Mkdir(arg1: string, arg2: string): Promise<void> {
return WOS.callBackendService("file", "Mkdir", Array.from(arguments))
}
// read file
ReadFile(connection: string, path: string): Promise<FullFile> {
return WOS.callBackendService("file", "ReadFile", Array.from(arguments))
}
Rename(arg1: string, arg2: string, arg3: string): Promise<void> {
return WOS.callBackendService("file", "Rename", Array.from(arguments))
}
// save file
SaveFile(connection: string, path: string, data64: string): Promise<void> {
return WOS.callBackendService("file", "SaveFile", Array.from(arguments))
}
// get file info
StatFile(connection: string, path: string): Promise<FileInfo> {
return WOS.callBackendService("file", "StatFile", Array.from(arguments))
}
TouchFile(arg1: string, arg2: string): Promise<void> {
return WOS.callBackendService("file", "TouchFile", Array.from(arguments))
}
}
export const FileService = new FileServiceType();
// objectservice.ObjectService (object)
class ObjectServiceType {
// @returns blockId (and object updates)

View File

@ -148,7 +148,7 @@ class RpcApiType {
}
// command "fileappend" [call]
FileAppendCommand(client: WshClient, data: CommandFileData, opts?: RpcOpts): Promise<void> {
FileAppendCommand(client: WshClient, data: FileData, opts?: RpcOpts): Promise<void> {
return client.wshRpcCall("fileappend", data, opts);
}
@ -157,33 +157,58 @@ class RpcApiType {
return client.wshRpcCall("fileappendijson", data, opts);
}
// command "filecopy" [call]
FileCopyCommand(client: WshClient, data: CommandFileCopyData, opts?: RpcOpts): Promise<void> {
return client.wshRpcCall("filecopy", data, opts);
}
// command "filecreate" [call]
FileCreateCommand(client: WshClient, data: CommandFileCreateData, opts?: RpcOpts): Promise<void> {
FileCreateCommand(client: WshClient, data: FileData, opts?: RpcOpts): Promise<void> {
return client.wshRpcCall("filecreate", data, opts);
}
// command "filedelete" [call]
FileDeleteCommand(client: WshClient, data: CommandFileData, opts?: RpcOpts): Promise<void> {
FileDeleteCommand(client: WshClient, data: FileData, opts?: RpcOpts): Promise<void> {
return client.wshRpcCall("filedelete", data, opts);
}
// command "fileinfo" [call]
FileInfoCommand(client: WshClient, data: CommandFileData, opts?: RpcOpts): Promise<WaveFileInfo> {
FileInfoCommand(client: WshClient, data: FileData, opts?: RpcOpts): Promise<FileInfo> {
return client.wshRpcCall("fileinfo", data, opts);
}
// command "filelist" [call]
FileListCommand(client: WshClient, data: CommandFileListData, opts?: RpcOpts): Promise<WaveFileInfo[]> {
FileListCommand(client: WshClient, data: FileListData, opts?: RpcOpts): Promise<FileInfo[]> {
return client.wshRpcCall("filelist", data, opts);
}
// command "fileliststream" [responsestream]
FileListStreamCommand(client: WshClient, data: FileListData, opts?: RpcOpts): AsyncGenerator<CommandRemoteListEntriesRtnData, void, boolean> {
return client.wshRpcStream("fileliststream", data, opts);
}
// command "filemkdir" [call]
FileMkdirCommand(client: WshClient, data: FileData, opts?: RpcOpts): Promise<void> {
return client.wshRpcCall("filemkdir", data, opts);
}
// command "filemove" [call]
FileMoveCommand(client: WshClient, data: CommandFileCopyData, opts?: RpcOpts): Promise<void> {
return client.wshRpcCall("filemove", data, opts);
}
// command "fileread" [call]
FileReadCommand(client: WshClient, data: CommandFileData, opts?: RpcOpts): Promise<string> {
FileReadCommand(client: WshClient, data: FileData, opts?: RpcOpts): Promise<FileData> {
return client.wshRpcCall("fileread", data, opts);
}
// command "filestreamtar" [responsestream]
FileStreamTarCommand(client: WshClient, data: CommandRemoteStreamTarData, opts?: RpcOpts): AsyncGenerator<string, void, boolean> {
return client.wshRpcStream("filestreamtar", data, opts);
}
// command "filewrite" [call]
FileWriteCommand(client: WshClient, data: CommandFileData, opts?: RpcOpts): Promise<void> {
FileWriteCommand(client: WshClient, data: FileData, opts?: RpcOpts): Promise<void> {
return client.wshRpcCall("filewrite", data, opts);
}
@ -192,6 +217,11 @@ class RpcApiType {
return client.wshRpcCall("focuswindow", data, opts);
}
// command "getfullconfig" [call]
GetFullConfigCommand(client: WshClient, opts?: RpcOpts): Promise<FullConfigType> {
return client.wshRpcCall("getfullconfig", null, opts);
}
// command "getmeta" [call]
GetMetaCommand(client: WshClient, data: CommandGetMetaData, opts?: RpcOpts): Promise<MetaType> {
return client.wshRpcCall("getmeta", data, opts);
@ -222,6 +252,11 @@ class RpcApiType {
return client.wshRpcCall("path", data, opts);
}
// command "remotefilecopy" [call]
RemoteFileCopyCommand(client: WshClient, data: CommandRemoteFileCopyData, opts?: RpcOpts): Promise<void> {
return client.wshRpcCall("remotefilecopy", data, opts);
}
// command "remotefiledelete" [call]
RemoteFileDeleteCommand(client: WshClient, data: string, opts?: RpcOpts): Promise<void> {
return client.wshRpcCall("remotefiledelete", data, opts);
@ -237,9 +272,9 @@ class RpcApiType {
return client.wshRpcCall("remotefilejoin", data, opts);
}
// command "remotefilerename" [call]
RemoteFileRenameCommand(client: WshClient, data: string[], opts?: RpcOpts): Promise<void> {
return client.wshRpcCall("remotefilerename", data, opts);
// command "remotefilemove" [call]
RemoteFileMoveCommand(client: WshClient, data: CommandRemoteFileCopyData, opts?: RpcOpts): Promise<void> {
return client.wshRpcCall("remotefilemove", data, opts);
}
// command "remotefiletouch" [call]
@ -257,6 +292,11 @@ class RpcApiType {
return client.wshRpcCall("remoteinstallrcfiles", null, opts);
}
// command "remotelistentries" [responsestream]
RemoteListEntriesCommand(client: WshClient, data: CommandRemoteListEntriesData, opts?: RpcOpts): AsyncGenerator<CommandRemoteListEntriesRtnData, void, boolean> {
return client.wshRpcStream("remotelistentries", data, opts);
}
// command "remotemkdir" [call]
RemoteMkdirCommand(client: WshClient, data: string, opts?: RpcOpts): Promise<void> {
return client.wshRpcCall("remotemkdir", data, opts);
@ -268,12 +308,17 @@ class RpcApiType {
}
// command "remotestreamfile" [responsestream]
RemoteStreamFileCommand(client: WshClient, data: CommandRemoteStreamFileData, opts?: RpcOpts): AsyncGenerator<CommandRemoteStreamFileRtnData, void, boolean> {
RemoteStreamFileCommand(client: WshClient, data: CommandRemoteStreamFileData, opts?: RpcOpts): AsyncGenerator<FileData, void, boolean> {
return client.wshRpcStream("remotestreamfile", data, opts);
}
// command "remotetarstream" [responsestream]
RemoteTarStreamCommand(client: WshClient, data: CommandRemoteStreamTarData, opts?: RpcOpts): AsyncGenerator<string, void, boolean> {
return client.wshRpcStream("remotetarstream", data, opts);
}
// command "remotewritefile" [call]
RemoteWriteFileCommand(client: WshClient, data: CommandRemoteWriteFileData, opts?: RpcOpts): Promise<void> {
RemoteWriteFileCommand(client: WshClient, data: FileData, opts?: RpcOpts): Promise<void> {
return client.wshRpcCall("remotewritefile", data, opts);
}
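A minimal sketch (not part of the diff) of how a frontend view would use the regenerated client with the new FileData shape; it assumes TabRpcClient and base64ToString, both of which already appear in this commit's frontend imports, and the URI is illustrative:

import { RpcApi } from "@/app/store/wshclientapi";
import { TabRpcClient } from "@/app/store/wshrpcutil";
import { base64ToString } from "@/util/util";

// Sketch only: FileReadCommand now takes and returns FileData; file bytes travel as base64.
async function readRemoteText(uri: string): Promise<string> {
    const file = await RpcApi.FileReadCommand(TabRpcClient, { info: { path: uri } });
    return base64ToString(file?.data64 ?? "");
}

// e.g. readRemoteText("wsh://user@ec2/home/user/config.txt")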

View File

@ -4,11 +4,12 @@
import { Button } from "@/app/element/button";
import { Input } from "@/app/element/input";
import { ContextMenuModel } from "@/app/store/contextmenu";
import { PLATFORM, atoms, createBlock, getApi, globalStore } from "@/app/store/global";
import { FileService } from "@/app/store/services";
import { PLATFORM, atoms, createBlock, getApi } from "@/app/store/global";
import { RpcApi } from "@/app/store/wshclientapi";
import { TabRpcClient } from "@/app/store/wshrpcutil";
import type { PreviewModel } from "@/app/view/preview/preview";
import { checkKeyPressed, isCharacterKeyEvent } from "@/util/keyutil";
import { base64ToString, fireAndForget, isBlank } from "@/util/util";
import { fireAndForget, isBlank } from "@/util/util";
import { offset, useDismiss, useFloating, useInteractions } from "@floating-ui/react";
import {
Column,
@ -67,7 +68,7 @@ const displaySuffixes = {
};
function getBestUnit(bytes: number, si: boolean = false, sigfig: number = 3): string {
if (bytes < 0) {
if (bytes === undefined || bytes < 0) {
return "-";
}
const units = si ? ["kB", "MB", "GB", "TB"] : ["KiB", "MiB", "GiB", "TiB"];
@ -290,11 +291,17 @@ function DirectoryTable({
onSave: (newName: string) => {
let newPath: string;
if (newName !== fileName) {
newPath = path.replace(fileName, newName);
const lastInstance = path.lastIndexOf(fileName);
newPath = path.substring(0, lastInstance) + newName;
console.log(`replacing ${fileName} with ${newName}: ${path}`);
fireAndForget(async () => {
const connection = await globalStore.get(model.connection);
await FileService.Rename(connection, path, newPath);
await RpcApi.FileMoveCommand(TabRpcClient, {
srcuri: await model.formatRemoteUri(path),
desturi: await model.formatRemoteUri(newPath),
opts: {
recursive: true,
},
});
model.refreshCallback();
});
}
@ -603,7 +610,7 @@ function TableBody({
meta: {
controller: "shell",
view: "term",
"cmd:cwd": finfo.path,
"cmd:cwd": await model.formatRemoteUri(finfo.path),
},
};
await createBlock(termBlockDef);
@ -618,7 +625,11 @@ function TableBody({
label: "Delete",
click: () => {
fireAndForget(async () => {
await FileService.DeleteFile(conn, finfo.path).catch((e) => console.log(e));
await RpcApi.FileDeleteCommand(TabRpcClient, {
info: {
path: await model.formatRemoteUri(finfo.path),
},
}).catch((e) => console.log(e));
setRefreshVersion((current) => current + 1);
});
},
@ -711,22 +722,28 @@ function DirectoryPreview({ model }: DirectoryPreviewProps) {
useEffect(() => {
const getContent = async () => {
const file = await FileService.ReadFile(conn, dirPath);
const serializedContent = base64ToString(file?.data64);
const content: FileInfo[] = JSON.parse(serializedContent);
setUnfilteredData(content);
const file = await RpcApi.FileReadCommand(
TabRpcClient,
{
info: {
path: await model.formatRemoteUri(dirPath),
},
},
null
);
setUnfilteredData(file.entries);
};
getContent();
}, [conn, dirPath, refreshVersion]);
useEffect(() => {
const filtered = unfilteredData.filter((fileInfo) => {
const filtered = unfilteredData?.filter((fileInfo) => {
if (!showHiddenFiles && fileInfo.name.startsWith(".") && fileInfo.name != "..") {
return false;
}
return fileInfo.name.toLowerCase().includes(searchText);
});
setFilteredData(filtered);
setFilteredData(filtered ?? []);
}, [unfilteredData, showHiddenFiles, searchText]);
useEffect(() => {
@ -765,7 +782,6 @@ function DirectoryPreview({ model }: DirectoryPreviewProps) {
!blockData?.meta?.connection
) {
getApi().onQuicklook(selectedPath);
console.log(selectedPath);
return true;
}
if (isCharacterKeyEvent(waveEvent)) {
@ -805,8 +821,15 @@ function DirectoryPreview({ model }: DirectoryPreviewProps) {
onSave: (newName: string) => {
console.log(`newFile: ${newName}`);
fireAndForget(async () => {
const connection = await globalStore.get(model.connection);
await FileService.TouchFile(connection, `${dirPath}/${newName}`);
await RpcApi.FileCreateCommand(
TabRpcClient,
{
info: {
path: await model.formatRemoteUri(`${dirPath}/${newName}`),
},
},
null
);
model.refreshCallback();
});
setEntryManagerProps(undefined);
@ -819,8 +842,11 @@ function DirectoryPreview({ model }: DirectoryPreviewProps) {
onSave: (newName: string) => {
console.log(`newDirectory: ${newName}`);
fireAndForget(async () => {
const connection = await globalStore.get(model.connection);
await FileService.Mkdir(connection, `${dirPath}/${newName}`);
await RpcApi.FileMkdirCommand(TabRpcClient, {
info: {
path: await model.formatRemoteUri(`${dirPath}/${newName}`),
},
});
model.refreshCallback();
});
setEntryManagerProps(undefined);

View File

@ -139,7 +139,7 @@ export class PreviewModel implements ViewModel {
loadableFileInfo: Atom<Loadable<FileInfo>>;
connection: Atom<Promise<string>>;
statFile: Atom<Promise<FileInfo>>;
fullFile: Atom<Promise<FullFile>>;
fullFile: Atom<Promise<FileData>>;
fileMimeType: Atom<Promise<string>>;
fileMimeTypeLoadable: Atom<Loadable<string>>;
fileContentSaved: PrimitiveAtom<string | null>;
@ -368,8 +368,12 @@ export class PreviewModel implements ViewModel {
if (fileName == null) {
return null;
}
const conn = (await get(this.connection)) ?? "";
const statFile = await services.FileService.StatFile(conn, fileName);
const statFile = await RpcApi.FileInfoCommand(TabRpcClient, {
info: {
path: await this.formatRemoteUri(fileName),
},
});
console.log("stat file", statFile);
return statFile;
});
this.fileMimeType = atom<Promise<string>>(async (get) => {
@ -380,13 +384,17 @@ export class PreviewModel implements ViewModel {
this.newFileContent = atom(null) as PrimitiveAtom<string | null>;
this.goParentDirectory = this.goParentDirectory.bind(this);
const fullFileAtom = atom<Promise<FullFile>>(async (get) => {
const fullFileAtom = atom<Promise<FileData>>(async (get) => {
const fileName = get(this.metaFilePath);
if (fileName == null) {
return null;
}
const conn = (await get(this.connection)) ?? "";
const file = await services.FileService.ReadFile(conn, fileName);
const file = await RpcApi.FileReadCommand(TabRpcClient, {
info: {
path: await this.formatRemoteUri(fileName),
},
});
console.log("full file", file);
return file;
});
@ -405,7 +413,7 @@ export class PreviewModel implements ViewModel {
const fullFile = await get(fullFileAtom);
return base64ToString(fullFile?.data64);
},
(get, set, update: string) => {
(_, set, update: string) => {
set(this.fileContentSaved, update);
}
);
@ -438,7 +446,6 @@ export class PreviewModel implements ViewModel {
const connErr = getFn(this.connectionError);
const editMode = getFn(this.editMode);
const parentFileInfo = await this.getParentInfo(fileInfo);
console.log(parentFileInfo);
if (connErr != "") {
return { errorStr: `Connection Error: ${connErr}` };
@ -515,6 +522,7 @@ export class PreviewModel implements ViewModel {
const parentFileInfo = await RpcApi.RemoteFileJoinCommand(TabRpcClient, [fileInfo.path, ".."], {
route: makeConnRoute(conn),
});
console.log("parent file info", parentFileInfo);
return parentFileInfo;
} catch {
return undefined;
@ -541,7 +549,6 @@ export class PreviewModel implements ViewModel {
this.goParentDirectory({ fileInfo: newFileInfo });
return;
}
console.log(newFileInfo.path);
this.updateOpenFileModalAndError(false);
await this.goHistory(newFileInfo.path);
refocusNode(this.blockId);
@ -591,9 +598,13 @@ export class PreviewModel implements ViewModel {
console.log("not saving file, newFileContent is null");
return;
}
const conn = (await globalStore.get(this.connection)) ?? "";
try {
await services.FileService.SaveFile(conn, filePath, stringToBase64(newFileContent));
await RpcApi.FileWriteCommand(TabRpcClient, {
info: {
path: await this.formatRemoteUri(filePath),
},
data64: stringToBase64(newFileContent),
});
globalStore.set(this.fileContent, newFileContent);
globalStore.set(this.newFileContent, null);
console.log("saved file", filePath);
@ -765,6 +776,11 @@ export class PreviewModel implements ViewModel {
}
return false;
}
async formatRemoteUri(path: string): Promise<string> {
const conn = (await globalStore.get(this.connection)) ?? "local";
return `wsh://${conn}/${path}`;
}
}
function makePreviewModel(blockId: string, nodeModel: BlockNodeModel): PreviewModel {

View File

@ -74,7 +74,7 @@ declare global {
tabid: string;
workspaceid: string;
block: Block;
files: WaveFile[];
files: FileInfo[];
};
// webcmd.BlockInputWSCommand
@ -178,35 +178,11 @@ declare global {
maxitems: number;
};
// wshrpc.CommandFileCreateData
type CommandFileCreateData = {
zoneid: string;
filename: string;
meta?: {[key: string]: any};
opts?: FileOptsType;
};
// wshrpc.CommandFileData
type CommandFileData = {
zoneid: string;
filename: string;
data64?: string;
at?: CommandFileDataAt;
};
// wshrpc.CommandFileDataAt
type CommandFileDataAt = {
offset: number;
size?: number;
};
// wshrpc.CommandFileListData
type CommandFileListData = {
zoneid: string;
prefix?: string;
all?: boolean;
offset?: number;
limit?: number;
// wshrpc.CommandFileCopyData
type CommandFileCopyData = {
srcuri: string;
desturi: string;
opts?: FileCopyOpts;
};
// wshrpc.CommandGetMetaData
@ -220,23 +196,34 @@ declare global {
message: string;
};
// wshrpc.CommandRemoteFileCopyData
type CommandRemoteFileCopyData = {
srcuri: string;
desturi: string;
opts?: FileCopyOpts;
};
// wshrpc.CommandRemoteListEntriesData
type CommandRemoteListEntriesData = {
path: string;
opts?: FileListOpts;
};
// wshrpc.CommandRemoteListEntriesRtnData
type CommandRemoteListEntriesRtnData = {
fileinfo?: FileInfo[];
};
// wshrpc.CommandRemoteStreamFileData
type CommandRemoteStreamFileData = {
path: string;
byterange?: string;
};
// wshrpc.CommandRemoteStreamFileRtnData
type CommandRemoteStreamFileRtnData = {
fileinfo?: FileInfo[];
data64?: string;
};
// wshrpc.CommandRemoteWriteFileData
type CommandRemoteWriteFileData = {
// wshrpc.CommandRemoteStreamTarData
type CommandRemoteStreamTarData = {
path: string;
data64: string;
createmode?: number;
opts?: FileCopyOpts;
};
// wshrpc.CommandResolveIdsData
@ -305,10 +292,11 @@ declare global {
logblockid?: string;
};
// wshrpc.ConnKeywords
// wconfig.ConnKeywords
type ConnKeywords = {
"conn:wshenabled"?: boolean;
"conn:askbeforewshinstall"?: boolean;
"conn:overrideconfig"?: boolean;
"conn:wshpath"?: string;
"conn:shellpath"?: string;
"conn:ignoresshconfig"?: boolean;
@ -322,7 +310,6 @@ declare global {
"ssh:hostname"?: string;
"ssh:port"?: string;
"ssh:identityfile"?: string[];
"ssh:identitiesonly"?: boolean;
"ssh:batchmode"?: boolean;
"ssh:pubkeyauthentication"?: boolean;
"ssh:passwordauthentication"?: boolean;
@ -330,6 +317,7 @@ declare global {
"ssh:preferredauthentications"?: string[];
"ssh:addkeystoagent"?: boolean;
"ssh:identityagent"?: string;
"ssh:identitiesonly"?: boolean;
"ssh:proxyjump"?: string[];
"ssh:userknownhostsfile"?: string[];
"ssh:globalknownhostsfile"?: string[];
@ -372,6 +360,28 @@ declare global {
height: number;
};
// wshrpc.FileCopyOpts
type FileCopyOpts = {
overwrite?: boolean;
recursive?: boolean;
merge?: boolean;
timeout?: number;
};
// wshrpc.FileData
type FileData = {
info?: FileInfo;
data64?: string;
entries?: FileInfo[];
at?: FileDataAt;
};
// wshrpc.FileDataAt
type FileDataAt = {
offset: number;
size?: number;
};
// waveobj.FileDef
type FileDef = {
content?: string;
@ -381,24 +391,42 @@ declare global {
// wshrpc.FileInfo
type FileInfo = {
path: string;
dir: string;
name: string;
dir?: string;
name?: string;
notfound?: boolean;
size: number;
mode: number;
modestr: string;
modtime: number;
opts?: FileOpts;
size?: number;
meta?: {[key: string]: any};
mode?: number;
modestr?: string;
modtime?: number;
isdir?: boolean;
supportsmkdir?: boolean;
mimetype?: string;
readonly?: boolean;
};
// filestore.FileOptsType
type FileOptsType = {
// wshrpc.FileListData
type FileListData = {
path: string;
opts?: FileListOpts;
};
// wshrpc.FileListOpts
type FileListOpts = {
all?: boolean;
offset?: number;
limit?: number;
};
// wshrpc.FileOpts
type FileOpts = {
maxsize?: number;
circular?: boolean;
ijson?: boolean;
ijsonbudget?: number;
truncate?: boolean;
append?: boolean;
};
// wconfig.FullConfigType
@ -413,12 +441,6 @@ declare global {
configerrors: ConfigError[];
};
// fileservice.FullFile
type FullFile = {
info: FileInfo;
data64: string;
};
// waveobj.LayoutActionData
type LayoutActionData = {
actiontype: string;
@ -1069,25 +1091,13 @@ declare global {
type WaveFile = {
zoneid: string;
name: string;
opts: FileOptsType;
opts: FileOpts;
createdts: number;
size: number;
modts: number;
meta: {[key: string]: any};
};
// wshrpc.WaveFileInfo
type WaveFileInfo = {
zoneid: string;
name: string;
opts?: FileOptsType;
size?: number;
createdts?: number;
modts?: number;
meta?: {[key: string]: any};
isdir?: boolean;
};
// wshrpc.WaveInfoData
type WaveInfoData = {
version: string;

View File

@ -302,6 +302,10 @@ function makeConnRoute(conn: string): string {
return "conn:" + conn;
}
function sleep(ms: number): Promise<void> {
return new Promise((resolve) => setTimeout(resolve, ms));
}
export {
atomWithDebounce,
atomWithThrottle,
@ -321,6 +325,7 @@ export {
makeConnRoute,
makeExternLink,
makeIconClass,
sleep,
stringToBase64,
useAtomValueSafe,
};

View File

@ -9,7 +9,6 @@ import {
registerGlobalKeys,
} from "@/app/store/keymodel";
import { modalsModel } from "@/app/store/modalmodel";
import { FileService } from "@/app/store/services";
import { RpcApi } from "@/app/store/wshclientapi";
import { initWshrpc, TabRpcClient } from "@/app/store/wshrpcutil";
import { loadMonaco } from "@/app/view/codeeditor/codeeditor";
@ -185,7 +184,7 @@ async function initWave(initOpts: WaveInitOpts) {
registerElectronReinjectKeyHandler();
registerControlShiftStateUpdateHandler();
setTimeout(loadMonaco, 30);
const fullConfig = await FileService.GetFullConfig();
const fullConfig = await RpcApi.GetFullConfigCommand(TabRpcClient);
console.log("fullconfig", fullConfig);
globalStore.set(atoms.fullConfigAtom, fullConfig);
console.log("Wave First Render");

19
go.mod
View File

@ -4,6 +4,10 @@ go 1.23.4
require (
github.com/alexflint/go-filemutex v1.3.0
github.com/aws/aws-sdk-go-v2 v1.33.0
github.com/aws/aws-sdk-go-v2/config v1.29.1
github.com/aws/aws-sdk-go-v2/service/s3 v1.73.2
github.com/aws/smithy-go v1.22.1
github.com/creack/pty v1.1.21
github.com/fsnotify/fsnotify v1.8.0
github.com/golang-jwt/jwt/v5 v5.2.1
@ -30,6 +34,7 @@ require (
golang.org/x/sys v0.29.0
golang.org/x/term v0.28.0
google.golang.org/api v0.217.0
gopkg.in/ini.v1 v1.67.0
)
require (
@ -39,6 +44,20 @@ require (
cloud.google.com/go/auth/oauth2adapt v0.2.7 // indirect
cloud.google.com/go/compute/metadata v0.6.0 // indirect
cloud.google.com/go/longrunning v0.5.7 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.17.54 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.24 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.28 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.28 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.28 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.2 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.9 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.9 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.24.11 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.10 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.33.9 // indirect
github.com/ebitengine/purego v0.8.1 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/go-logr/logr v1.4.2 // indirect

38
go.sum
View File

@ -16,6 +16,42 @@ github.com/0xrawsec/golang-utils v1.3.2 h1:ww4jrtHRSnX9xrGzJYbalx5nXoZewy4zPxiY+
github.com/0xrawsec/golang-utils v1.3.2/go.mod h1:m7AzHXgdSAkFCD9tWWsApxNVxMlyy7anpPVOyT/yM7E=
github.com/alexflint/go-filemutex v1.3.0 h1:LgE+nTUWnQCyRKbpoceKZsPQbs84LivvgwUymZXdOcM=
github.com/alexflint/go-filemutex v1.3.0/go.mod h1:U0+VA/i30mGBlLCrFPGtTe9y6wGQfNAWPBTekHQ+c8A=
github.com/aws/aws-sdk-go-v2 v1.33.0 h1:Evgm4DI9imD81V0WwD+TN4DCwjUMdc94TrduMLbgZJs=
github.com/aws/aws-sdk-go-v2 v1.33.0/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 h1:lL7IfaFzngfx0ZwUGOZdsFFnQ5uLvR0hWqqhyE7Q9M8=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7/go.mod h1:QraP0UcVlQJsmHfioCrveWOC1nbiWUl3ej08h4mXWoc=
github.com/aws/aws-sdk-go-v2/config v1.29.1 h1:JZhGawAyZ/EuJeBtbQYnaoftczcb2drR2Iq36Wgz4sQ=
github.com/aws/aws-sdk-go-v2/config v1.29.1/go.mod h1:7bR2YD5euaxBhzt2y/oDkt3uNRb6tjFp98GlTFueRwk=
github.com/aws/aws-sdk-go-v2/credentials v1.17.54 h1:4UmqeOqJPvdvASZWrKlhzpRahAulBfyTJQUaYy4+hEI=
github.com/aws/aws-sdk-go-v2/credentials v1.17.54/go.mod h1:RTdfo0P0hbbTxIhmQrOsC/PquBZGabEPnCaxxKRPSnI=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.24 h1:5grmdTdMsovn9kPZPI23Hhvp0ZyNm5cRO+IZFIYiAfw=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.24/go.mod h1:zqi7TVKTswH3Ozq28PkmBmgzG1tona7mo9G2IJg4Cis=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.28 h1:igORFSiH3bfq4lxKFkTSYDhJEUCYo6C8VKiWJjYwQuQ=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.28/go.mod h1:3So8EA/aAYm36L7XIvCVwLa0s5N0P7o2b1oqnx/2R4g=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.28 h1:1mOW9zAUMhTSrMDssEHS/ajx8JcAj/IcftzcmNlmVLI=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.28/go.mod h1:kGlXVIWDfvt2Ox5zEaNglmq0hXPHgQFNMix33Tw22jA=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.28 h1:7kpeALOUeThs2kEjlAxlADAVfxKmkYAedlpZ3kdoSJ4=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.28/go.mod h1:pyaOYEdp1MJWgtXLy6q80r3DhsVdOIOZNB9hdTcJIvI=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 h1:iXtILhvDxB6kPvEXgsDhGaZCSC6LQET5ZHSdJozeI0Y=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1/go.mod h1:9nu0fVANtYiAePIBh2/pFUSwtJ402hLnp854CNoDOeE=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.2 h1:e6um6+DWYQP1XCa+E9YVtG/9v1qk5lyAOelMOVwSyO8=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.2/go.mod h1:dIW8puxSbYLSPv/ju0d9A3CpwXdtqvJtYKDMVmPLOWE=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.9 h1:TQmKDyETFGiXVhZfQ/I0cCFziqqX58pi4tKJGYGFSz0=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.9/go.mod h1:HVLPK2iHQBUx7HfZeOQSEu3v2ubZaAY2YPbAm5/WUyY=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.9 h1:2aInXbh02XsbO0KobPGMNXyv2QP73VDKsWPNJARj/+4=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.9/go.mod h1:dgXS1i+HgWnYkPXqNoPIPKeUsUUYHaUbThC90aDnNiE=
github.com/aws/aws-sdk-go-v2/service/s3 v1.73.2 h1:F3h8VYq9ZLBXYurmwrT8W0SPhgCcU0q+0WZJfT1dFt0=
github.com/aws/aws-sdk-go-v2/service/s3 v1.73.2/go.mod h1:jGJ/v7FIi7Ys9t54tmEFnrxuaWeJLpwNgKp2DXAVhOU=
github.com/aws/aws-sdk-go-v2/service/sso v1.24.11 h1:kuIyu4fTT38Kj7YCC7ouNbVZSSpqkZ+LzIfhCr6Dg+I=
github.com/aws/aws-sdk-go-v2/service/sso v1.24.11/go.mod h1:Ro744S4fKiCCuZECXgOi760TiYylUM8ZBf6OGiZzJtY=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.10 h1:l+dgv/64iVlQ3WsBbnn+JSbkj01jIi+SM0wYsj3y/hY=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.10/go.mod h1:Fzsj6lZEb8AkTE5S68OhcbBqeWPsR8RnGuKPr8Todl8=
github.com/aws/aws-sdk-go-v2/service/sts v1.33.9 h1:BRVDbewN6VZcwr+FBOszDKvYeXY1kJ+GGMCcpghlw0U=
github.com/aws/aws-sdk-go-v2/service/sts v1.33.9/go.mod h1:f6vjfZER1M17Fokn0IzssOTMT2N8ZSq+7jnNF0tArvw=
github.com/aws/smithy-go v1.22.1 h1:/HPHZQ0g7f4eUeK6HKglFz8uwVfZKgoI25rb/J+dnro=
github.com/aws/smithy-go v1.22.1/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@ -174,6 +210,8 @@ google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7
google.golang.org/protobuf v1.36.2 h1:R8FeyR1/eLmkutZOM5CWghmo5itiG9z0ktFlTVLuTmU=
google.golang.org/protobuf v1.36.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=

View File

@ -321,7 +321,7 @@ func (bc *BlockController) setupAndStartShellProcess(logCtx context.Context, rc
// create a circular blockfile for the output
ctx, cancelFn := context.WithTimeout(context.Background(), 2*time.Second)
defer cancelFn()
fsErr := filestore.WFS.MakeFile(ctx, bc.BlockId, BlockFile_Term, nil, filestore.FileOptsType{MaxSize: DefaultTermMaxFileSize, Circular: true})
fsErr := filestore.WFS.MakeFile(ctx, bc.BlockId, BlockFile_Term, nil, wshrpc.FileOpts{MaxSize: DefaultTermMaxFileSize, Circular: true})
if fsErr != nil && fsErr != fs.ErrExist {
return nil, fmt.Errorf("error creating blockfile: %w", fsErr)
}
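A minimal sketch of the new wshrpc.FileOpts call shape used above, assuming a process where the filestore DB has already been initialized; the zone id and the "purpose" meta key are placeholders, not part of this commit:

package main

import (
	"context"
	"io/fs"
	"log"
	"time"

	"github.com/wavetermdev/waveterm/pkg/filestore"
	"github.com/wavetermdev/waveterm/pkg/wshrpc"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	// placeholder zone id and meta key; FileOpts/FileMeta now live in wshrpc
	err := filestore.WFS.MakeFile(ctx, "zone-placeholder", "term",
		wshrpc.FileMeta{"purpose": "terminal-output"},
		wshrpc.FileOpts{MaxSize: 256 * 1024, Circular: true})
	if err != nil && err != fs.ErrExist {
		log.Fatal(err)
	}
}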
@ -414,7 +414,7 @@ func (bc *BlockController) setupAndStartShellProcess(logCtx context.Context, rc
if err != nil {
return nil, err
}
conn := conncontroller.GetConn(credentialCtx, opts, &wshrpc.ConnKeywords{})
conn := conncontroller.GetConn(credentialCtx, opts, &wconfig.ConnKeywords{})
connStatus := conn.DeriveConnStatus()
if connStatus.Status != conncontroller.Status_Connected {
return nil, fmt.Errorf("not connected, cannot start shellproc")
@ -776,7 +776,7 @@ func CheckConnStatus(blockId string) error {
if err != nil {
return fmt.Errorf("error parsing connection name: %w", err)
}
conn := conncontroller.GetConn(context.Background(), opts, &wshrpc.ConnKeywords{})
conn := conncontroller.GetConn(context.Background(), opts, &wconfig.ConnKeywords{})
connStatus := conn.DeriveConnStatus()
if connStatus.Status != conncontroller.Status_Connected {
return fmt.Errorf("not connected: %s", connStatus.Status)

View File

@ -12,12 +12,14 @@ import (
"fmt"
"io/fs"
"log"
"math"
"sync"
"sync/atomic"
"time"
"github.com/wavetermdev/waveterm/pkg/ijson"
"github.com/wavetermdev/waveterm/pkg/panichandler"
"github.com/wavetermdev/waveterm/pkg/wshrpc"
)
const (
@ -49,26 +51,17 @@ var WFS *FileStore = &FileStore{
Cache: make(map[cacheKey]*CacheEntry),
}
type FileOptsType struct {
MaxSize int64 `json:"maxsize,omitempty"`
Circular bool `json:"circular,omitempty"`
IJson bool `json:"ijson,omitempty"`
IJsonBudget int `json:"ijsonbudget,omitempty"`
}
type FileMeta = map[string]any
type WaveFile struct {
// these fields are static (not updated)
ZoneId string `json:"zoneid"`
Name string `json:"name"`
Opts FileOptsType `json:"opts"`
CreatedTs int64 `json:"createdts"`
ZoneId string `json:"zoneid"`
Name string `json:"name"`
Opts wshrpc.FileOpts `json:"opts"`
CreatedTs int64 `json:"createdts"`
// these fields are mutable
Size int64 `json:"size"`
ModTs int64 `json:"modts"`
Meta FileMeta `json:"meta"` // only top-level keys can be updated (lower levels are immutable)
Size int64 `json:"size"`
ModTs int64 `json:"modts"`
Meta wshrpc.FileMeta `json:"meta"` // only top-level keys can be updated (lower levels are immutable)
}
// for regular files this is just Size
@ -90,8 +83,8 @@ func (f WaveFile) DataStartIdx() int64 {
}
// this works because lower levels are immutable
func copyMeta(meta FileMeta) FileMeta {
newMeta := make(FileMeta)
func copyMeta(meta wshrpc.FileMeta) wshrpc.FileMeta {
newMeta := make(wshrpc.FileMeta)
for k, v := range meta {
newMeta[k] = v
}
@ -119,7 +112,7 @@ type FileData struct {
func (FileData) UseDBMap() {}
// synchronous (does not interact with the cache)
func (s *FileStore) MakeFile(ctx context.Context, zoneId string, name string, meta FileMeta, opts FileOptsType) error {
func (s *FileStore) MakeFile(ctx context.Context, zoneId string, name string, meta wshrpc.FileMeta, opts wshrpc.FileOpts) error {
if opts.MaxSize < 0 {
return fmt.Errorf("max size must be non-negative")
}
@ -210,7 +203,7 @@ func (s *FileStore) ListFiles(ctx context.Context, zoneId string) ([]*WaveFile,
return files, nil
}
func (s *FileStore) WriteMeta(ctx context.Context, zoneId string, name string, meta FileMeta, merge bool) error {
func (s *FileStore) WriteMeta(ctx context.Context, zoneId string, name string, meta wshrpc.FileMeta, merge bool) error {
return withLock(s, zoneId, name, func(entry *CacheEntry) error {
err := entry.loadFileIntoCache(ctx)
if err != nil {
@ -289,7 +282,7 @@ func (s *FileStore) AppendData(ctx context.Context, zoneId string, name string,
func metaIncrement(file *WaveFile, key string, amount int) int {
if file.Meta == nil {
file.Meta = make(FileMeta)
file.Meta = make(wshrpc.FileMeta)
}
val, ok := file.Meta[key].(int)
if !ok {
@ -375,6 +368,9 @@ func (s *FileStore) GetAllZoneIds(ctx context.Context) ([]string, error) {
// returns (offset, data, error)
// we return the offset because the offset may have been adjusted if the size was too big (for circular files)
func (s *FileStore) ReadAt(ctx context.Context, zoneId string, name string, offset int64, size int64) (rtnOffset int64, rtnData []byte, rtnErr error) {
if size < 0 || size > math.MaxInt {
return 0, nil, fmt.Errorf("size must be non-negative and less than MaxInt")
}
withLock(s, zoneId, name, func(entry *CacheEntry) error {
rtnOffset, rtnData, rtnErr = entry.readAt(ctx, offset, size, false)
return nil

View File

@ -18,6 +18,7 @@ import (
"github.com/google/uuid"
"github.com/wavetermdev/waveterm/pkg/ijson"
"github.com/wavetermdev/waveterm/pkg/wshrpc"
)
func initDb(t *testing.T) {
@ -82,7 +83,7 @@ func TestCreate(t *testing.T) {
ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second)
defer cancelFn()
zoneId := uuid.NewString()
err := WFS.MakeFile(ctx, zoneId, "testfile", nil, FileOptsType{})
err := WFS.MakeFile(ctx, zoneId, "testfile", nil, wshrpc.FileOpts{})
if err != nil {
t.Fatalf("error creating file: %v", err)
}
@ -156,7 +157,7 @@ func TestDelete(t *testing.T) {
ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second)
defer cancelFn()
zoneId := uuid.NewString()
err := WFS.MakeFile(ctx, zoneId, "testfile", nil, FileOptsType{})
err := WFS.MakeFile(ctx, zoneId, "testfile", nil, wshrpc.FileOpts{})
if err != nil {
t.Fatalf("error creating file: %v", err)
}
@ -170,11 +171,11 @@ func TestDelete(t *testing.T) {
}
// create two files in same zone, use DeleteZone to delete
err = WFS.MakeFile(ctx, zoneId, "testfile1", nil, FileOptsType{})
err = WFS.MakeFile(ctx, zoneId, "testfile1", nil, wshrpc.FileOpts{})
if err != nil {
t.Fatalf("error creating file: %v", err)
}
err = WFS.MakeFile(ctx, zoneId, "testfile2", nil, FileOptsType{})
err = WFS.MakeFile(ctx, zoneId, "testfile2", nil, wshrpc.FileOpts{})
if err != nil {
t.Fatalf("error creating file: %v", err)
}
@ -219,7 +220,7 @@ func TestSetMeta(t *testing.T) {
ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second)
defer cancelFn()
zoneId := uuid.NewString()
err := WFS.MakeFile(ctx, zoneId, "testfile", nil, FileOptsType{})
err := WFS.MakeFile(ctx, zoneId, "testfile", nil, wshrpc.FileOpts{})
if err != nil {
t.Fatalf("error creating file: %v", err)
}
@ -323,7 +324,7 @@ func TestAppend(t *testing.T) {
defer cancelFn()
zoneId := uuid.NewString()
fileName := "t2"
err := WFS.MakeFile(ctx, zoneId, fileName, nil, FileOptsType{})
err := WFS.MakeFile(ctx, zoneId, fileName, nil, wshrpc.FileOpts{})
if err != nil {
t.Fatalf("error creating file: %v", err)
}
@ -351,7 +352,7 @@ func TestWriteFile(t *testing.T) {
defer cancelFn()
zoneId := uuid.NewString()
fileName := "t3"
err := WFS.MakeFile(ctx, zoneId, fileName, nil, FileOptsType{})
err := WFS.MakeFile(ctx, zoneId, fileName, nil, wshrpc.FileOpts{})
if err != nil {
t.Fatalf("error creating file: %v", err)
}
@ -372,7 +373,7 @@ func TestWriteFile(t *testing.T) {
checkFileData(t, ctx, zoneId, fileName, "hello")
// circular file
err = WFS.MakeFile(ctx, zoneId, "c1", nil, FileOptsType{Circular: true, MaxSize: 50})
err = WFS.MakeFile(ctx, zoneId, "c1", nil, wshrpc.FileOpts{Circular: true, MaxSize: 50})
if err != nil {
t.Fatalf("error creating file: %v", err)
}
@ -394,7 +395,7 @@ func TestCircularWrites(t *testing.T) {
ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second)
defer cancelFn()
zoneId := uuid.NewString()
err := WFS.MakeFile(ctx, zoneId, "c1", nil, FileOptsType{Circular: true, MaxSize: 50})
err := WFS.MakeFile(ctx, zoneId, "c1", nil, wshrpc.FileOpts{Circular: true, MaxSize: 50})
if err != nil {
t.Fatalf("error creating file: %v", err)
}
@ -483,7 +484,7 @@ func TestMultiPart(t *testing.T) {
zoneId := uuid.NewString()
fileName := "m2"
data := makeText(80)
err := WFS.MakeFile(ctx, zoneId, fileName, nil, FileOptsType{})
err := WFS.MakeFile(ctx, zoneId, fileName, nil, wshrpc.FileOpts{})
if err != nil {
t.Fatalf("error creating file: %v", err)
}
@ -537,7 +538,7 @@ func TestComputePartMap(t *testing.T) {
testIntMapsEq(t, "map5", m, map[int]int{8: 80, 9: 100, 10: 100, 11: 60})
// now test circular
file = &WaveFile{Opts: FileOptsType{Circular: true, MaxSize: 1000}}
file = &WaveFile{Opts: wshrpc.FileOpts{Circular: true, MaxSize: 1000}}
m = file.computePartMap(10, 250)
testIntMapsEq(t, "map6", m, map[int]int{0: 90, 1: 100, 2: 60})
m = file.computePartMap(990, 40)
@ -558,7 +559,7 @@ func TestSimpleDBFlush(t *testing.T) {
defer cancelFn()
zoneId := uuid.NewString()
fileName := "t1"
err := WFS.MakeFile(ctx, zoneId, fileName, nil, FileOptsType{})
err := WFS.MakeFile(ctx, zoneId, fileName, nil, wshrpc.FileOpts{})
if err != nil {
t.Fatalf("error creating file: %v", err)
}
@ -590,7 +591,7 @@ func TestConcurrentAppend(t *testing.T) {
defer cancelFn()
zoneId := uuid.NewString()
fileName := "t1"
err := WFS.MakeFile(ctx, zoneId, fileName, nil, FileOptsType{})
err := WFS.MakeFile(ctx, zoneId, fileName, nil, wshrpc.FileOpts{})
if err != nil {
t.Fatalf("error creating file: %v", err)
}
@ -678,7 +679,7 @@ func TestIJson(t *testing.T) {
defer cancelFn()
zoneId := uuid.NewString()
fileName := "ij1"
err := WFS.MakeFile(ctx, zoneId, fileName, nil, FileOptsType{IJson: true})
err := WFS.MakeFile(ctx, zoneId, fileName, nil, wshrpc.FileOpts{IJson: true})
if err != nil {
t.Fatalf("error creating file: %v", err)
}

View File

@ -0,0 +1,148 @@
// Package awsconn provides helpers for creating and configuring connections to AWS services.
package awsconn
import (
"context"
"errors"
"fmt"
"log"
"os"
"regexp"
"strings"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/aws/smithy-go"
"github.com/wavetermdev/waveterm/pkg/waveobj"
"github.com/wavetermdev/waveterm/pkg/wconfig"
"gopkg.in/ini.v1"
)
const (
ProfileConfigKey = "profile:config"
ProfileCredentialsKey = "profile:credentials"
ProfilePrefix = "aws:"
TempFilePattern = "waveterm-awsconfig-%s"
)
var connectionRe = regexp.MustCompile(`^(.*):\w+:\/\/.*$`)
var tempfiles map[string]string = make(map[string]string)
func GetConfig(ctx context.Context, profile string) (*aws.Config, error) {
optfns := []func(*config.LoadOptions) error{}
// If profile is empty, use default config
if profile != "" {
connMatch := connectionRe.FindStringSubmatch(profile)
if connMatch == nil {
return nil, fmt.Errorf("invalid connection string: %s)", profile)
}
profile = connMatch[1]
log.Printf("GetConfig: profile=%s", profile)
profiles, cerrs := wconfig.ReadWaveHomeConfigFile(wconfig.ProfilesFile)
if len(cerrs) > 0 {
return nil, fmt.Errorf("error reading config file: %v", cerrs[0])
}
if profiles[profile] != nil {
configfilepath, _ := getTempFileFromConfig(profiles, ProfileConfigKey, profile)
credentialsfilepath, _ := getTempFileFromConfig(profiles, ProfileCredentialsKey, profile)
if configfilepath != "" {
log.Printf("configfilepath: %s", configfilepath)
optfns = append(optfns, config.WithSharedConfigFiles([]string{configfilepath}))
tempfiles[profile+"_config"] = configfilepath
}
if credentialsfilepath != "" {
log.Printf("credentialsfilepath: %s", credentialsfilepath)
optfns = append(optfns, config.WithSharedCredentialsFiles([]string{credentialsfilepath}))
tempfiles[profile+"_credentials"] = credentialsfilepath
}
}
trimmedProfile := strings.TrimPrefix(profile, ProfilePrefix)
optfns = append(optfns, config.WithSharedConfigProfile(trimmedProfile))
}
cfg, err := config.LoadDefaultConfig(ctx, optfns...)
if err != nil {
return nil, fmt.Errorf("error loading config: %v", err)
}
return &cfg, nil
}
func getTempFileFromConfig(config waveobj.MetaMapType, key string, profile string) (string, error) {
connectionconfig := config.GetMap(profile)
if connectionconfig[key] != "" {
awsConfig := connectionconfig.GetString(key, "")
if awsConfig != "" {
tempfile, err := os.CreateTemp("", fmt.Sprintf(TempFilePattern, profile))
if err != nil {
return "", fmt.Errorf("error creating temp file: %v", err)
}
_, err = tempfile.WriteString(awsConfig)
if err != nil {
return "", fmt.Errorf("error writing to temp file: %v", err)
}
return tempfile.Name(), nil
}
}
return "", nil
}
func ParseProfiles() map[string]struct{} {
profiles := make(map[string]struct{})
fname := config.DefaultSharedConfigFilename() // Get aws.config default shared configuration file name
f, err := ini.Load(fname) // Load ini file
if err != nil {
log.Printf("error reading aws config file: %v", err)
return nil
}
for _, v := range f.Sections() {
if len(v.Keys()) != 0 { // Get only the sections having Keys
parts := strings.Split(v.Name(), " ")
if len(parts) == 2 && parts[0] == "profile" { // skip default
profiles[ProfilePrefix+parts[1]] = struct{}{}
}
}
}
fname = config.DefaultSharedCredentialsFilename()
f, err = ini.Load(fname)
if err != nil {
log.Printf("error reading aws credentials file: %v", err)
if profiles == nil {
profiles = make(map[string]struct{})
}
return profiles
}
for _, v := range f.Sections() {
profiles[ProfilePrefix+v.Name()] = struct{}{}
}
return profiles
}
func ListBuckets(ctx context.Context, client *s3.Client) ([]types.Bucket, error) {
var err error
var output *s3.ListBucketsOutput
var buckets []types.Bucket
region := client.Options().Region
bucketPaginator := s3.NewListBucketsPaginator(client, &s3.ListBucketsInput{BucketRegion: &region})
for bucketPaginator.HasMorePages() {
output, err = bucketPaginator.NextPage(ctx)
log.Printf("output: %v", output)
if err != nil {
var apiErr smithy.APIError
if errors.As(err, &apiErr) && apiErr.ErrorCode() == "AccessDenied" {
fmt.Println("You don't have permission to list buckets for this account.")
err = apiErr
} else {
return nil, fmt.Errorf("Couldn't list buckets for your account. Here's why: %v\n", err)
}
break
}
if output == nil {
break
}
buckets = append(buckets, output.Buckets...)
}
return buckets, nil
}
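A usage sketch for the helpers above, assuming valid AWS credentials on the machine; "aws:dev" is a placeholder profile name (ParseProfiles exposes profiles from ~/.aws/config and ~/.aws/credentials with the "aws:" prefix):

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/wavetermdev/waveterm/pkg/remote/awsconn"
)

func main() {
	ctx := context.Background()
	// "aws:dev" is a placeholder profile; the trailing s3:// part is what
	// GetConfig's connectionRe strips off to recover the profile name
	cfg, err := awsconn.GetConfig(ctx, "aws:dev:s3://my-bucket/")
	if err != nil {
		log.Fatal(err)
	}
	buckets, err := awsconn.ListBuckets(ctx, s3.NewFromConfig(*cfg))
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range buckets {
		if b.Name != nil {
			log.Println(*b.Name)
		}
	}
}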

View File

@ -528,7 +528,7 @@ func (conn *SSHConn) WaitForConnect(ctx context.Context) error {
}
// does not return an error since that error is stored inside of SSHConn
func (conn *SSHConn) Connect(ctx context.Context, connFlags *wshrpc.ConnKeywords) error {
func (conn *SSHConn) Connect(ctx context.Context, connFlags *wconfig.ConnKeywords) error {
blocklogger.Infof(ctx, "\n")
var connectAllowed bool
conn.WithLock(func() {
@ -698,11 +698,11 @@ func (conn *SSHConn) tryEnableWsh(ctx context.Context, clientDisplayName string)
}
}
func (conn *SSHConn) getConnectionConfig() (wshrpc.ConnKeywords, bool) {
func (conn *SSHConn) getConnectionConfig() (wconfig.ConnKeywords, bool) {
config := wconfig.GetWatcher().GetFullConfig()
connSettings, ok := config.Connections[conn.GetName()]
if !ok {
return wshrpc.ConnKeywords{}, false
return wconfig.ConnKeywords{}, false
}
return connSettings, true
}
@ -729,7 +729,7 @@ func (conn *SSHConn) persistWshInstalled(ctx context.Context, result WshCheckRes
}
// returns (connect-error)
func (conn *SSHConn) connectInternal(ctx context.Context, connFlags *wshrpc.ConnKeywords) error {
func (conn *SSHConn) connectInternal(ctx context.Context, connFlags *wconfig.ConnKeywords) error {
conn.Infof(ctx, "connectInternal %s\n", conn.GetName())
client, _, err := remote.ConnectToClient(ctx, conn.Opts, nil, 0, connFlags)
if err != nil {
@ -812,7 +812,7 @@ func getConnInternal(opts *remote.SSHOpts) *SSHConn {
}
// does NOT connect, can return nil if connection does not exist
func GetConn(ctx context.Context, opts *remote.SSHOpts, connFlags *wshrpc.ConnKeywords) *SSHConn {
func GetConn(ctx context.Context, opts *remote.SSHOpts, connFlags *wconfig.ConnKeywords) *SSHConn {
conn := getConnInternal(opts)
return conn
}
@ -826,7 +826,7 @@ func EnsureConnection(ctx context.Context, connName string) error {
if err != nil {
return fmt.Errorf("error parsing connection name: %w", err)
}
conn := GetConn(ctx, connOpts, &wshrpc.ConnKeywords{})
conn := GetConn(ctx, connOpts, &wconfig.ConnKeywords{})
if conn == nil {
return fmt.Errorf("connection not found: %s", connName)
}
@ -837,7 +837,7 @@ func EnsureConnection(ctx context.Context, connName string) error {
case Status_Connecting:
return conn.WaitForConnect(ctx)
case Status_Init, Status_Disconnected:
return conn.Connect(ctx, &wshrpc.ConnKeywords{})
return conn.Connect(ctx, &wconfig.ConnKeywords{})
case Status_Error:
return fmt.Errorf("connection error: %s", connStatus.Error)
default:

View File

@ -0,0 +1,143 @@
// Copyright 2025, Command Line Inc.
// SPDX-License-Identifier: Apache-2.0
package connparse
import (
"context"
"fmt"
"strings"
"github.com/wavetermdev/waveterm/pkg/wshrpc"
"github.com/wavetermdev/waveterm/pkg/wshutil"
)
const (
ConnectionTypeWsh = "wsh"
ConnectionTypeS3 = "s3"
ConnectionTypeWave = "wavefile"
ConnHostCurrent = "current"
ConnHostWaveSrv = "wavesrv"
)
type Connection struct {
Scheme string
Host string
Path string
}
func (c *Connection) GetSchemeParts() []string {
return strings.Split(c.Scheme, ":")
}
func (c *Connection) GetType() string {
lastInd := strings.LastIndex(c.Scheme, ":")
if lastInd == -1 {
return c.Scheme
}
return c.Scheme[lastInd+1:]
}
func (c *Connection) GetPathWithHost() string {
if c.Host == "" {
return ""
}
if strings.HasPrefix(c.Path, "/") {
return c.Host + c.Path
}
return c.Host + "/" + c.Path
}
func (c *Connection) GetFullURI() string {
return c.Scheme + "://" + c.GetPathWithHost()
}
func ParseURIAndReplaceCurrentHost(ctx context.Context, uri string) (*Connection, error) {
conn, err := ParseURI(uri)
if err != nil {
return nil, fmt.Errorf("error parsing connection: %v", err)
}
if conn.Host == ConnHostCurrent {
source, err := GetConnNameFromContext(ctx)
if err != nil {
return nil, fmt.Errorf("error getting connection name from context: %v", err)
}
// RPC context connection is empty for local connections
if source == "" {
source = wshrpc.LocalConnName
}
conn.Host = source
}
return conn, nil
}
func GetConnNameFromContext(ctx context.Context) (string, error) {
handler := wshutil.GetRpcResponseHandlerFromContext(ctx)
if handler == nil {
return "", fmt.Errorf("error getting rpc response handler from context")
}
return handler.GetRpcContext().Conn, nil
}
// ParseURI parses a connection URI and returns the connection type, host/path, and parameters.
func ParseURI(uri string) (*Connection, error) {
split := strings.SplitN(uri, "://", 2)
var scheme string
var rest string
if len(split) > 1 {
scheme = split[0]
rest = split[1]
} else {
rest = split[0]
}
var host string
var remotePath string
if scheme == "" {
scheme = ConnectionTypeWsh
if strings.HasPrefix(rest, "//") {
rest = strings.TrimPrefix(rest, "//")
split = strings.SplitN(rest, "/", 2)
if len(split) > 1 {
host = split[0]
remotePath = "/" + split[1]
} else {
host = split[0]
remotePath = "/"
}
} else if strings.HasPrefix(rest, "/~") {
host = wshrpc.LocalConnName
remotePath = rest
} else {
host = ConnHostCurrent
remotePath = rest
}
} else {
split = strings.SplitN(rest, "/", 2)
if len(split) > 1 {
host = split[0]
remotePath = "/" + split[1]
} else {
host = split[0]
remotePath = "/"
}
}
if scheme == ConnectionTypeWsh {
if host == "" {
host = wshrpc.LocalConnName
}
if strings.HasPrefix(remotePath, "/~") {
remotePath = strings.TrimPrefix(remotePath, "/")
}
}
conn := &Connection{
Scheme: scheme,
Host: host,
Path: remotePath,
}
return conn, nil
}
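A small sketch of how the shorthand forms handled above resolve; the expected URIs follow directly from the parsing rules and the tests below:

package main

import (
	"fmt"
	"log"

	"github.com/wavetermdev/waveterm/pkg/remote/connparse"
)

func main() {
	// no scheme and no host: resolves to the current connection over wsh
	c, err := connparse.ParseURI("~/Downloads")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(c.GetFullURI()) // wsh://current/~/Downloads

	// a //host prefix selects an explicit remote wsh connection
	c, err = connparse.ParseURI("//user@myhost:22/var/log")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(c.GetFullURI()) // wsh://user@myhost:22/var/log
}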

View File

@ -0,0 +1,262 @@
package connparse_test
import (
"testing"
"github.com/wavetermdev/waveterm/pkg/remote/connparse"
)
func TestParseURI_WSHWithScheme(t *testing.T) {
t.Parallel()
// Test with localhost
cstr := "wsh://user@localhost:8080/path/to/file"
c, err := connparse.ParseURI(cstr)
if err != nil {
t.Fatalf("failed to parse URI: %v", err)
}
expected := "/path/to/file"
if c.Path != expected {
t.Fatalf("expected path to be %q, got %q", expected, c.Path)
}
expected = "user@localhost:8080"
if c.Host != expected {
t.Fatalf("expected host to be %q, got %q", expected, c.Host)
}
expected = "user@localhost:8080/path/to/file"
pathWithHost := c.GetPathWithHost()
if pathWithHost != expected {
t.Fatalf("expected path with host to be %q, got %q", expected, pathWithHost)
}
expected = "wsh"
if c.Scheme != expected {
t.Fatalf("expected scheme to be %q, got %q", expected, c.Scheme)
}
if len(c.GetSchemeParts()) != 1 {
t.Fatalf("expected scheme parts to be 1, got %d", len(c.GetSchemeParts()))
}
// Test with an IP address
cstr = "wsh://user@192.168.0.1:22/path/to/file"
c, err = connparse.ParseURI(cstr)
if err != nil {
t.Fatalf("failed to parse URI: %v", err)
}
expected = "/path/to/file"
if c.Path != expected {
t.Fatalf("expected path to be %q, got %q", expected, c.Path)
}
expected = "user@192.168.0.1:22"
if c.Host != expected {
t.Fatalf("expected host to be %q, got %q", expected, c.Host)
}
expected = "user@192.168.0.1:22/path/to/file"
pathWithHost = c.GetPathWithHost()
if pathWithHost != expected {
t.Fatalf("expected path with host to be %q, got %q", expected, pathWithHost)
}
expected = "wsh"
if c.GetType() != expected {
t.Fatalf("expected conn type to be %q, got %q", expected, c.Scheme)
}
if len(c.GetSchemeParts()) != 1 {
t.Fatalf("expected scheme parts to be 1, got %d", len(c.GetSchemeParts()))
}
got := c.GetFullURI()
if got != cstr {
t.Fatalf("expected full URI to be %q, got %q", cstr, got)
}
}
func TestParseURI_WSHRemoteShorthand(t *testing.T) {
t.Parallel()
// Test with a simple remote path
cstr := "//conn/path/to/file"
c, err := connparse.ParseURI(cstr)
if err != nil {
t.Fatalf("failed to parse URI: %v", err)
}
expected := "/path/to/file"
if c.Path != expected {
t.Fatalf("expected path to be %q, got %q", expected, c.Path)
}
if c.Host != "conn" {
t.Fatalf("expected host to be empty, got %q", c.Host)
}
expected = "wsh"
if c.Scheme != expected {
t.Fatalf("expected scheme to be %q, got %q", expected, c.Scheme)
}
expected = "wsh://conn/path/to/file"
if c.GetFullURI() != expected {
t.Fatalf("expected full URI to be %q, got %q", expected, c.GetFullURI())
}
// Test with a complex remote path
cstr = "//user@localhost:8080/path/to/file"
c, err = connparse.ParseURI(cstr)
if err != nil {
t.Fatalf("failed to parse URI: %v", err)
}
expected = "/path/to/file"
if c.Path != expected {
t.Fatalf("expected path to be %q, got %q", expected, c.Path)
}
expected = "user@localhost:8080"
if c.Host != expected {
t.Fatalf("expected host to be %q, got %q", expected, c.Host)
}
expected = "wsh"
if c.Scheme != expected {
t.Fatalf("expected scheme to be %q, got %q", expected, c.Scheme)
}
expected = "wsh://user@localhost:8080/path/to/file"
if c.GetFullURI() != expected {
t.Fatalf("expected full URI to be %q, got %q", expected, c.GetFullURI())
}
// Test with an IP address
cstr = "//user@192.168.0.1:8080/path/to/file"
c, err = connparse.ParseURI(cstr)
if err != nil {
t.Fatalf("failed to parse URI: %v", err)
}
expected = "/path/to/file"
if c.Path != expected {
t.Fatalf("expected path to be %q, got %q", expected, c.Path)
}
expected = "user@192.168.0.1:8080"
if c.Host != expected {
t.Fatalf("expected host to be %q, got %q", expected, c.Host)
}
expected = "wsh"
if c.Scheme != expected {
t.Fatalf("expected scheme to be %q, got %q", expected, c.Scheme)
}
expected = "wsh://user@192.168.0.1:8080/path/to/file"
if c.GetFullURI() != expected {
t.Fatalf("expected full URI to be %q, got %q", expected, c.GetFullURI())
}
}
func TestParseURI_WSHCurrentPathShorthand(t *testing.T) {
t.Parallel()
// Test with a relative path to home
cstr := "~/path/to/file"
c, err := connparse.ParseURI(cstr)
if err != nil {
t.Fatalf("failed to parse URI: %v", err)
}
expected := "~/path/to/file"
if c.Path != expected {
t.Fatalf("expected path to be %q, got %q", expected, c.Path)
}
expected = "current"
if c.Host != expected {
t.Fatalf("expected host to be %q, got %q", expected, c.Host)
}
expected = "wsh"
if c.Scheme != expected {
t.Fatalf("expected scheme to be %q, got %q", expected, c.Scheme)
}
expected = "wsh://current/~/path/to/file"
if c.GetFullURI() != expected {
t.Fatalf("expected full URI to be %q, got %q", expected, c.GetFullURI())
}
// Test with an absolute path
cstr = "/path/to/file"
c, err = connparse.ParseURI(cstr)
if err != nil {
t.Fatalf("expected nil, got %v", err)
}
expected = "/path/to/file"
if c.Path != expected {
t.Fatalf("expected path to be %q, got %q", expected, c.Path)
}
expected = "current"
if c.Host != expected {
t.Fatalf("expected host to be %q, got %q", expected, c.Host)
}
expected = "wsh"
if c.Scheme != expected {
t.Fatalf("expected scheme to be %q, got %q", expected, c.Scheme)
}
expected = "wsh://current/path/to/file"
if c.GetFullURI() != expected {
t.Fatalf("expected full URI to be %q, got %q", expected, c.GetFullURI())
}
}
func TestParseURI_WSHLocalShorthand(t *testing.T) {
t.Parallel()
cstr := "/~/path/to/file"
c, err := connparse.ParseURI(cstr)
if err != nil {
t.Fatalf("failed to parse URI: %v", err)
}
expected := "~/path/to/file"
if c.Path != expected {
t.Fatalf("expected path to be %q, got %q", expected, c.Path)
}
if c.Host != "local" {
t.Fatalf("expected host to be empty, got %q", c.Host)
}
expected = "wsh"
if c.Scheme != expected {
t.Fatalf("expected scheme to be %q, got %q", expected, c.Scheme)
}
cstr = "wsh:///~/path/to/file"
c, err = connparse.ParseURI(cstr)
if err != nil {
t.Fatalf("failed to parse URI: %v", err)
}
expected = "~/path/to/file"
if c.Path != expected {
t.Fatalf("expected path to be %q, got %q", expected, c.Path)
}
if c.Host != "local" {
t.Fatalf("expected host to be empty, got %q", c.Host)
}
expected = "wsh"
if c.Scheme != expected {
t.Fatalf("expected scheme to be %q, got %q", expected, c.Scheme)
}
expected = "wsh://local/~/path/to/file"
if c.GetFullURI() != expected {
t.Fatalf("expected full URI to be %q, got %q", expected, c.GetFullURI())
}
}
func TestParseURI_BasicS3(t *testing.T) {
t.Parallel()
cstr := "profile:s3://bucket/path/to/file"
c, err := connparse.ParseURI(cstr)
if err != nil {
t.Fatalf("failed to parse URI: %v", err)
}
expected := "/path/to/file"
if c.Path != expected {
t.Fatalf("expected path to be %q, got %q", expected, c.Path)
}
expected = "bucket"
if c.Host != expected {
t.Fatalf("expected host to be %q, got %q", expected, c.Host)
}
expected = "bucket/path/to/file"
pathWithHost := c.GetPathWithHost()
if pathWithHost != expected {
t.Fatalf("expected path with host to be %q, got %q", expected, pathWithHost)
}
expected = "s3"
if c.GetType() != expected {
t.Fatalf("expected conn type to be %q, got %q", expected, c.GetType())
}
if len(c.GetSchemeParts()) != 2 {
t.Fatalf("expected scheme parts to be 2, got %d", len(c.GetSchemeParts()))
}
}

View File

@ -19,8 +19,11 @@ import (
"github.com/wavetermdev/waveterm/pkg/blocklogger"
"github.com/wavetermdev/waveterm/pkg/genconn"
"github.com/wavetermdev/waveterm/pkg/remote/awsconn"
"github.com/wavetermdev/waveterm/pkg/util/iterfn"
"github.com/wavetermdev/waveterm/pkg/util/shellutil"
"github.com/wavetermdev/waveterm/pkg/wavebase"
"github.com/wavetermdev/waveterm/pkg/wconfig"
"golang.org/x/crypto/ssh"
)
@ -187,3 +190,18 @@ func NormalizeConfigPattern(pattern string) string {
}
return fmt.Sprintf("%s%s%s", userName, pattern, port)
}
func ParseProfiles() []string {
connfile, cerrs := wconfig.ReadWaveHomeConfigFile(wconfig.ProfilesFile)
if len(cerrs) > 0 {
log.Printf("error reading config file: %v", cerrs[0])
return nil
}
awsProfiles := awsconn.ParseProfiles()
for profile := range awsProfiles {
connfile[profile] = struct{}{}
}
return iterfn.MapKeysToSorted(connfile)
}

View File

@ -0,0 +1,173 @@
package fileshare
import (
"context"
"fmt"
"log"
"github.com/wavetermdev/waveterm/pkg/remote/awsconn"
"github.com/wavetermdev/waveterm/pkg/remote/connparse"
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/fstype"
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/s3fs"
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/wavefs"
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/wshfs"
"github.com/wavetermdev/waveterm/pkg/wshrpc"
"github.com/wavetermdev/waveterm/pkg/wshutil"
)
const (
ErrorParsingConnection = "error creating fileshare client, could not parse connection %s"
)
// CreateFileShareClient creates a fileshare client based on the connection string
// Returns the client and the parsed connection
func CreateFileShareClient(ctx context.Context, connection string) (fstype.FileShareClient, *connparse.Connection) {
log.Printf("CreateFileShareClient: connection=%s", connection)
conn, err := connparse.ParseURIAndReplaceCurrentHost(ctx, connection)
if err != nil {
log.Printf("error parsing connection: %v", err)
return nil, nil
}
conntype := conn.GetType()
log.Printf("CreateFileShareClient: conntype=%s", conntype)
if conntype == connparse.ConnectionTypeS3 {
config, err := awsconn.GetConfig(ctx, connection)
if err != nil {
log.Printf("error getting aws config: %v", err)
return nil, nil
}
return s3fs.NewS3Client(config), conn
} else if conntype == connparse.ConnectionTypeWave {
return wavefs.NewWaveClient(), conn
} else if conntype == connparse.ConnectionTypeWsh {
return wshfs.NewWshClient(), conn
} else {
log.Printf("unsupported connection type: %s", conntype)
return nil, nil
}
}
func Read(ctx context.Context, data wshrpc.FileData) (*wshrpc.FileData, error) {
log.Printf("Read: path=%s", data.Info.Path)
client, conn := CreateFileShareClient(ctx, data.Info.Path)
if conn == nil || client == nil {
return nil, fmt.Errorf(ErrorParsingConnection, data.Info.Path)
}
return client.Read(ctx, conn, data)
}
func ReadStream(ctx context.Context, data wshrpc.FileData) <-chan wshrpc.RespOrErrorUnion[wshrpc.FileData] {
log.Printf("ReadStream: path=%s", data.Info.Path)
client, conn := CreateFileShareClient(ctx, data.Info.Path)
if conn == nil || client == nil {
return wshutil.SendErrCh[wshrpc.FileData](fmt.Errorf(ErrorParsingConnection, data.Info.Path))
}
return client.ReadStream(ctx, conn, data)
}
func ReadTarStream(ctx context.Context, data wshrpc.CommandRemoteStreamTarData) <-chan wshrpc.RespOrErrorUnion[[]byte] {
log.Printf("ReadTarStream: path=%s", data.Path)
client, conn := CreateFileShareClient(ctx, data.Path)
if conn == nil || client == nil {
return wshutil.SendErrCh[[]byte](fmt.Errorf(ErrorParsingConnection, data.Path))
}
return client.ReadTarStream(ctx, conn, data.Opts)
}
func ListEntries(ctx context.Context, path string, opts *wshrpc.FileListOpts) ([]*wshrpc.FileInfo, error) {
log.Printf("ListEntries: path=%s", path)
client, conn := CreateFileShareClient(ctx, path)
if conn == nil || client == nil {
return nil, fmt.Errorf(ErrorParsingConnection, path)
}
return client.ListEntries(ctx, conn, opts)
}
func ListEntriesStream(ctx context.Context, path string, opts *wshrpc.FileListOpts) <-chan wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData] {
log.Printf("ListEntriesStream: path=%s", path)
client, conn := CreateFileShareClient(ctx, path)
if conn == nil || client == nil {
return wshutil.SendErrCh[wshrpc.CommandRemoteListEntriesRtnData](fmt.Errorf(ErrorParsingConnection, path))
}
return client.ListEntriesStream(ctx, conn, opts)
}
func Stat(ctx context.Context, path string) (*wshrpc.FileInfo, error) {
log.Printf("Stat: path=%s", path)
client, conn := CreateFileShareClient(ctx, path)
if conn == nil || client == nil {
return nil, fmt.Errorf(ErrorParsingConnection, path)
}
return client.Stat(ctx, conn)
}
func PutFile(ctx context.Context, data wshrpc.FileData) error {
log.Printf("PutFile: path=%s", data.Info.Path)
client, conn := CreateFileShareClient(ctx, data.Info.Path)
if conn == nil || client == nil {
return fmt.Errorf(ErrorParsingConnection, data.Info.Path)
}
return client.PutFile(ctx, conn, data)
}
func Mkdir(ctx context.Context, path string) error {
log.Printf("Mkdir: path=%s", path)
client, conn := CreateFileShareClient(ctx, path)
if conn == nil || client == nil {
return fmt.Errorf(ErrorParsingConnection, path)
}
return client.Mkdir(ctx, conn)
}
func Move(ctx context.Context, data wshrpc.CommandFileCopyData) error {
log.Printf("Move: src=%s, dest=%s", data.SrcUri, data.DestUri)
srcConn, err := connparse.ParseURIAndReplaceCurrentHost(ctx, data.SrcUri)
if err != nil {
return fmt.Errorf("error parsing source connection %s: %v", data.SrcUri, err)
}
destClient, destConn := CreateFileShareClient(ctx, data.DestUri)
if destConn == nil || destClient == nil {
return fmt.Errorf("error creating fileshare client, could not parse connection %s or %s", data.SrcUri, data.DestUri)
}
return destClient.Move(ctx, srcConn, destConn, data.Opts)
}
func Copy(ctx context.Context, data wshrpc.CommandFileCopyData) error {
log.Printf("Copy: src=%s, dest=%s", data.SrcUri, data.DestUri)
srcConn, err := connparse.ParseURIAndReplaceCurrentHost(ctx, data.SrcUri)
if err != nil {
return fmt.Errorf("error parsing source connection %s: %v", data.SrcUri, err)
}
destClient, destConn := CreateFileShareClient(ctx, data.DestUri)
if destConn == nil || destClient == nil {
return fmt.Errorf("error creating fileshare client, could not parse connection %s or %s", data.SrcUri, data.DestUri)
}
return destClient.Copy(ctx, srcConn, destConn, data.Opts)
}
func Delete(ctx context.Context, path string) error {
log.Printf("Delete: path=%s", path)
client, conn := CreateFileShareClient(ctx, path)
if conn == nil || client == nil {
return fmt.Errorf(ErrorParsingConnection, path)
}
return client.Delete(ctx, conn)
}
func Join(ctx context.Context, path string, parts ...string) (string, error) {
log.Printf("Join: path=%s, parts=%v", path, parts)
client, conn := CreateFileShareClient(ctx, path)
if conn == nil || client == nil {
return "", fmt.Errorf(ErrorParsingConnection, path)
}
return client.Join(ctx, conn, parts...)
}
func Append(ctx context.Context, data wshrpc.FileData) error {
log.Printf("Append: path=%s", data.Info.Path)
client, conn := CreateFileShareClient(ctx, data.Info.Path)
if conn == nil || client == nil {
return fmt.Errorf(ErrorParsingConnection, data.Info.Path)
}
return client.AppendFile(ctx, conn, data)
}
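A sketch of how a caller drives the dispatcher above; it assumes it runs inside a process where wshfs.RpcClient has already been wired up (as in the wavesrv/connserver changes earlier in this commit), and the file path is a placeholder:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/wavetermdev/waveterm/pkg/remote/fileshare"
	"github.com/wavetermdev/waveterm/pkg/wshrpc"
)

func main() {
	ctx := context.Background()
	// placeholder path; the URI scheme picks the backend (wshfs here),
	// and the same call shape covers wavefile:// and s3 URIs
	data := wshrpc.FileData{Info: &wshrpc.FileInfo{Path: "wsh://local/~/notes.txt"}}
	rtn, err := fileshare.Read(ctx, data)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read %d base64 chars\n", len(rtn.Data64))
}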

View File

@ -0,0 +1,42 @@
// Copyright 2025, Command Line Inc.
// SPDX-License-Identifier: Apache-2.0
package fstype
import (
"context"
"github.com/wavetermdev/waveterm/pkg/remote/connparse"
"github.com/wavetermdev/waveterm/pkg/wshrpc"
)
type FileShareClient interface {
// Stat returns the file info at the given parsed connection path
Stat(ctx context.Context, conn *connparse.Connection) (*wshrpc.FileInfo, error)
// Read returns the file data at the given path; if the path is a directory, the list of entries is returned instead
Read(ctx context.Context, conn *connparse.Connection, data wshrpc.FileData) (*wshrpc.FileData, error)
// ReadStream streams the file data at the given path; if the path is a directory, the entries are streamed instead
ReadStream(ctx context.Context, conn *connparse.Connection, data wshrpc.FileData) <-chan wshrpc.RespOrErrorUnion[wshrpc.FileData]
// ReadTarStream returns a stream of tar data at the given path
ReadTarStream(ctx context.Context, conn *connparse.Connection, opts *wshrpc.FileCopyOpts) <-chan wshrpc.RespOrErrorUnion[[]byte]
// ListEntries returns the list of entries at the given path, or nothing if the path is a file
ListEntries(ctx context.Context, conn *connparse.Connection, opts *wshrpc.FileListOpts) ([]*wshrpc.FileInfo, error)
// ListEntriesStream returns a stream of entries at the given path
ListEntriesStream(ctx context.Context, conn *connparse.Connection, opts *wshrpc.FileListOpts) <-chan wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData]
// PutFile writes the given data to the file at the given path
PutFile(ctx context.Context, conn *connparse.Connection, data wshrpc.FileData) error
// AppendFile appends the given data to the file at the given path
AppendFile(ctx context.Context, conn *connparse.Connection, data wshrpc.FileData) error
// Mkdir creates a directory at the given path
Mkdir(ctx context.Context, conn *connparse.Connection) error
// Move moves the file from srcConn to destConn
Move(ctx context.Context, srcConn, destConn *connparse.Connection, opts *wshrpc.FileCopyOpts) error
// Copy copies the file from srcConn to destConn
Copy(ctx context.Context, srcConn, destConn *connparse.Connection, opts *wshrpc.FileCopyOpts) error
// Delete deletes the entry at the given path
Delete(ctx context.Context, conn *connparse.Connection) error
// Join joins the given parts to the connection path
Join(ctx context.Context, conn *connparse.Connection, parts ...string) (string, error)
// GetConnectionType returns the type of connection for the fileshare
GetConnectionType() string
}

View File

@ -0,0 +1,118 @@
// Copyright 2025, Command Line Inc.
// SPDX-License-Identifier: Apache-2.0
package s3fs
import (
"context"
"log"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/wavetermdev/waveterm/pkg/remote/awsconn"
"github.com/wavetermdev/waveterm/pkg/remote/connparse"
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/fstype"
"github.com/wavetermdev/waveterm/pkg/wshrpc"
"github.com/wavetermdev/waveterm/pkg/wshutil"
)
type S3Client struct {
client *s3.Client
}
var _ fstype.FileShareClient = S3Client{}
func NewS3Client(config *aws.Config) *S3Client {
return &S3Client{
client: s3.NewFromConfig(*config),
}
}
func (c S3Client) Read(ctx context.Context, conn *connparse.Connection, data wshrpc.FileData) (*wshrpc.FileData, error) {
return nil, nil
}
func (c S3Client) ReadStream(ctx context.Context, conn *connparse.Connection, data wshrpc.FileData) <-chan wshrpc.RespOrErrorUnion[wshrpc.FileData] {
return nil
}
func (c S3Client) ReadTarStream(ctx context.Context, conn *connparse.Connection, opts *wshrpc.FileCopyOpts) <-chan wshrpc.RespOrErrorUnion[[]byte] {
return nil
}
func (c S3Client) ListEntriesStream(ctx context.Context, conn *connparse.Connection, opts *wshrpc.FileListOpts) <-chan wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData] {
ch := make(chan wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData], 16)
go func() {
defer close(ch)
list, err := c.ListEntries(ctx, conn, opts)
if err != nil {
ch <- wshutil.RespErr[wshrpc.CommandRemoteListEntriesRtnData](err)
return
}
if list == nil {
ch <- wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData]{Response: wshrpc.CommandRemoteListEntriesRtnData{}}
return
}
for i := 0; i < len(list); i += wshrpc.DirChunkSize {
ch <- wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData]{Response: wshrpc.CommandRemoteListEntriesRtnData{FileInfo: list[i:min(i+wshrpc.DirChunkSize, len(list))]}}
}
}()
return ch
}
func (c S3Client) ListEntries(ctx context.Context, conn *connparse.Connection, opts *wshrpc.FileListOpts) ([]*wshrpc.FileInfo, error) {
if conn.Path == "" || conn.Path == "/" {
buckets, err := awsconn.ListBuckets(ctx, c.client)
if err != nil {
return nil, err
}
var entries []*wshrpc.FileInfo
for _, bucket := range buckets {
log.Printf("bucket: %v", *bucket.Name)
if bucket.Name != nil {
entries = append(entries, &wshrpc.FileInfo{
Path: *bucket.Name,
IsDir: true,
})
}
}
return entries, nil
}
return nil, nil
}
func (c S3Client) Stat(ctx context.Context, conn *connparse.Connection) (*wshrpc.FileInfo, error) {
return nil, nil
}
func (c S3Client) PutFile(ctx context.Context, conn *connparse.Connection, data wshrpc.FileData) error {
return nil
}
func (c S3Client) AppendFile(ctx context.Context, conn *connparse.Connection, data wshrpc.FileData) error {
return nil
}
func (c S3Client) Mkdir(ctx context.Context, conn *connparse.Connection) error {
return nil
}
func (c S3Client) Move(ctx context.Context, srcConn, destConn *connparse.Connection, opts *wshrpc.FileCopyOpts) error {
return nil
}
func (c S3Client) Copy(ctx context.Context, srcConn, destConn *connparse.Connection, opts *wshrpc.FileCopyOpts) error {
return nil
}
func (c S3Client) Delete(ctx context.Context, conn *connparse.Connection) error {
return nil
}
func (c S3Client) Join(ctx context.Context, conn *connparse.Connection, parts ...string) (string, error) {
return "", nil
}
func (c S3Client) GetConnectionType() string {
return connparse.ConnectionTypeS3
}

View File

@ -0,0 +1,419 @@
// Copyright 2025, Command Line Inc.
// SPDX-License-Identifier: Apache-2.0
package wavefs
import (
"context"
"encoding/base64"
"errors"
"fmt"
"io/fs"
"path"
"strings"
"github.com/wavetermdev/waveterm/pkg/filestore"
"github.com/wavetermdev/waveterm/pkg/remote/connparse"
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/fstype"
"github.com/wavetermdev/waveterm/pkg/util/wavefileutil"
"github.com/wavetermdev/waveterm/pkg/waveobj"
"github.com/wavetermdev/waveterm/pkg/wps"
"github.com/wavetermdev/waveterm/pkg/wshrpc"
"github.com/wavetermdev/waveterm/pkg/wshutil"
)
type WaveClient struct{}
var _ fstype.FileShareClient = WaveClient{}
func NewWaveClient() *WaveClient {
return &WaveClient{}
}
func (c WaveClient) ReadStream(ctx context.Context, conn *connparse.Connection, data wshrpc.FileData) <-chan wshrpc.RespOrErrorUnion[wshrpc.FileData] {
ch := make(chan wshrpc.RespOrErrorUnion[wshrpc.FileData], 16)
go func() {
defer close(ch)
rtnData, err := c.Read(ctx, conn, data)
if err != nil {
ch <- wshutil.RespErr[wshrpc.FileData](err)
return
}
dataLen := len(rtnData.Data64)
if !rtnData.Info.IsDir {
for i := 0; i < dataLen; i += wshrpc.FileChunkSize {
if ctx.Err() != nil {
ch <- wshutil.RespErr[wshrpc.FileData](ctx.Err())
return
}
dataEnd := min(i+wshrpc.FileChunkSize, dataLen)
ch <- wshrpc.RespOrErrorUnion[wshrpc.FileData]{Response: wshrpc.FileData{Data64: rtnData.Data64[i:dataEnd], Info: rtnData.Info, At: &wshrpc.FileDataAt{Offset: int64(i), Size: dataEnd - i}}}
}
} else {
for i := 0; i < len(rtnData.Entries); i += wshrpc.DirChunkSize {
if ctx.Err() != nil {
ch <- wshutil.RespErr[wshrpc.FileData](ctx.Err())
return
}
ch <- wshrpc.RespOrErrorUnion[wshrpc.FileData]{Response: wshrpc.FileData{Entries: rtnData.Entries[i:min(i+wshrpc.DirChunkSize, len(rtnData.Entries))], Info: rtnData.Info}}
}
}
}()
return ch
}
func (c WaveClient) Read(ctx context.Context, conn *connparse.Connection, data wshrpc.FileData) (*wshrpc.FileData, error) {
zoneId := conn.Host
if zoneId == "" {
return nil, fmt.Errorf("zoneid not found in connection")
}
fileName, err := cleanPath(conn.Path)
if err != nil {
return nil, fmt.Errorf("error cleaning path: %w", err)
}
if data.At != nil {
_, dataBuf, err := filestore.WFS.ReadAt(ctx, zoneId, fileName, data.At.Offset, int64(data.At.Size))
if err == nil {
return &wshrpc.FileData{Info: data.Info, Data64: base64.StdEncoding.EncodeToString(dataBuf)}, nil
} else if errors.Is(err, fs.ErrNotExist) {
return nil, fmt.Errorf("NOTFOUND: %w", err)
} else {
return nil, fmt.Errorf("error reading blockfile: %w", err)
}
} else {
_, dataBuf, err := filestore.WFS.ReadFile(ctx, zoneId, fileName)
if err == nil {
return &wshrpc.FileData{Info: data.Info, Data64: base64.StdEncoding.EncodeToString(dataBuf)}, nil
} else if !errors.Is(err, fs.ErrNotExist) {
return nil, fmt.Errorf("error reading blockfile: %w", err)
}
}
list, err := c.ListEntries(ctx, conn, nil)
if err != nil {
return nil, fmt.Errorf("error listing blockfiles: %w", err)
}
return &wshrpc.FileData{Info: data.Info, Entries: list}, nil
}
func (c WaveClient) ReadTarStream(ctx context.Context, conn *connparse.Connection, opts *wshrpc.FileCopyOpts) <-chan wshrpc.RespOrErrorUnion[[]byte] {
return nil
}
func (c WaveClient) ListEntriesStream(ctx context.Context, conn *connparse.Connection, opts *wshrpc.FileListOpts) <-chan wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData] {
ch := make(chan wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData], 16)
go func() {
defer close(ch)
list, err := c.ListEntries(ctx, conn, opts)
if err != nil {
ch <- wshutil.RespErr[wshrpc.CommandRemoteListEntriesRtnData](err)
return
}
for i := 0; i < len(list); i += wshrpc.DirChunkSize {
ch <- wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData]{Response: wshrpc.CommandRemoteListEntriesRtnData{FileInfo: list[i:min(i+wshrpc.DirChunkSize, len(list))]}}
}
}()
return ch
}
func (c WaveClient) ListEntries(ctx context.Context, conn *connparse.Connection, opts *wshrpc.FileListOpts) ([]*wshrpc.FileInfo, error) {
zoneId := conn.Host
if zoneId == "" {
return nil, fmt.Errorf("zoneid not found in connection")
}
prefix, err := cleanPath(conn.Path)
if err != nil {
return nil, fmt.Errorf("error cleaning path: %w", err)
}
fileListOrig, err := filestore.WFS.ListFiles(ctx, zoneId)
if err != nil {
return nil, fmt.Errorf("error listing blockfiles: %w", err)
}
var fileList []*wshrpc.FileInfo
for _, wf := range fileListOrig {
fileList = append(fileList, wavefileutil.WaveFileToFileInfo(wf))
}
if prefix != "" {
var filteredList []*wshrpc.FileInfo
for _, file := range fileList {
if strings.HasPrefix(file.Name, prefix) {
filteredList = append(filteredList, file)
}
}
fileList = filteredList
}
	if opts == nil {
		opts = &wshrpc.FileListOpts{}
	}
	if !opts.All {
var filteredList []*wshrpc.FileInfo
		dirMap := make(map[string]any) // used as a set of synthetic directory names
		for _, file := range fileList {
			// entries nested under a sub-"directory" are collapsed into a single synthetic dir entry:
			// strip the prefix, then check whether the remainder still contains a "/"
			relPath := strings.TrimPrefix(file.Name, prefix)
if strings.Contains(relPath, "/") {
dirPath := strings.Split(relPath, "/")[0]
dirMap[dirPath] = struct{}{}
continue
}
filteredList = append(filteredList, file)
}
for dir := range dirMap {
dirName := prefix + dir + "/"
filteredList = append(filteredList, &wshrpc.FileInfo{
Path: fmt.Sprintf(wavefileutil.WaveFilePathPattern, zoneId, dirName),
Name: dirName,
Dir: dirName,
Size: 0,
IsDir: true,
SupportsMkdir: false,
})
}
fileList = filteredList
}
if opts.Offset > 0 {
if opts.Offset >= len(fileList) {
fileList = nil
} else {
fileList = fileList[opts.Offset:]
}
}
if opts.Limit > 0 {
if opts.Limit < len(fileList) {
fileList = fileList[:opts.Limit]
}
}
return fileList, nil
}
func (c WaveClient) Stat(ctx context.Context, conn *connparse.Connection) (*wshrpc.FileInfo, error) {
zoneId := conn.Host
if zoneId == "" {
return nil, fmt.Errorf("zoneid not found in connection")
}
fileName, err := cleanPath(conn.Path)
if err != nil {
return nil, fmt.Errorf("error cleaning path: %w", err)
}
fileInfo, err := filestore.WFS.Stat(ctx, zoneId, fileName)
if err != nil {
if errors.Is(err, fs.ErrNotExist) {
return nil, fmt.Errorf("NOTFOUND: %w", err)
}
return nil, fmt.Errorf("error getting file info: %w", err)
}
return wavefileutil.WaveFileToFileInfo(fileInfo), nil
}
func (c WaveClient) PutFile(ctx context.Context, conn *connparse.Connection, data wshrpc.FileData) error {
dataBuf, err := base64.StdEncoding.DecodeString(data.Data64)
if err != nil {
return fmt.Errorf("error decoding data64: %w", err)
}
zoneId := conn.Host
if zoneId == "" {
return fmt.Errorf("zoneid not found in connection")
}
fileName, err := cleanPath(conn.Path)
if err != nil {
return fmt.Errorf("error cleaning path: %w", err)
}
_, err = filestore.WFS.Stat(ctx, zoneId, fileName)
if err != nil {
if !errors.Is(err, fs.ErrNotExist) {
return fmt.Errorf("error getting blockfile info: %w", err)
}
var opts wshrpc.FileOpts
var meta wshrpc.FileMeta
if data.Info != nil {
if data.Info.Opts != nil {
opts = *data.Info.Opts
}
if data.Info.Meta != nil {
meta = *data.Info.Meta
}
}
err := filestore.WFS.MakeFile(ctx, zoneId, fileName, meta, opts)
if err != nil {
return fmt.Errorf("error making blockfile: %w", err)
}
}
if data.At != nil && data.At.Offset >= 0 {
err = filestore.WFS.WriteAt(ctx, zoneId, fileName, data.At.Offset, dataBuf)
if errors.Is(err, fs.ErrNotExist) {
return fmt.Errorf("NOTFOUND: %w", err)
}
if err != nil {
return fmt.Errorf("error writing to blockfile: %w", err)
}
} else {
err = filestore.WFS.WriteFile(ctx, zoneId, fileName, dataBuf)
if errors.Is(err, fs.ErrNotExist) {
return fmt.Errorf("NOTFOUND: %w", err)
}
if err != nil {
return fmt.Errorf("error writing to blockfile: %w", err)
}
}
wps.Broker.Publish(wps.WaveEvent{
Event: wps.Event_BlockFile,
Scopes: []string{waveobj.MakeORef(waveobj.OType_Block, zoneId).String()},
Data: &wps.WSFileEventData{
ZoneId: zoneId,
FileName: fileName,
FileOp: wps.FileOp_Invalidate,
},
})
return nil
}
/*
path := data.Info.Path
log.Printf("Append: path=%s", path)
client, conn := CreateFileShareClient(ctx, path)
if conn == nil || client == nil {
return fmt.Errorf(ErrorParsingConnection, path)
}
finfo, err := client.Stat(ctx, conn)
if err != nil {
return err
}
if data.Info == nil {
data.Info = &wshrpc.FileInfo{}
}
oldInfo := data.Info
data.Info = finfo
if oldInfo.Opts != nil {
data.Info.Opts = oldInfo.Opts
}
data.At = &wshrpc.FileDataAt{
Offset: finfo.Size,
}
log.Printf("Append: offset=%d", data.At.Offset)
return client.PutFile(ctx, conn, data)
*/
func (c WaveClient) AppendFile(ctx context.Context, conn *connparse.Connection, data wshrpc.FileData) error {
dataBuf, err := base64.StdEncoding.DecodeString(data.Data64)
if err != nil {
return fmt.Errorf("error decoding data64: %w", err)
}
zoneId := conn.Host
if zoneId == "" {
return fmt.Errorf("zoneid not found in connection")
}
fileName, err := cleanPath(conn.Path)
if err != nil {
return fmt.Errorf("error cleaning path: %w", err)
}
_, err = filestore.WFS.Stat(ctx, zoneId, fileName)
if err != nil {
if !errors.Is(err, fs.ErrNotExist) {
return fmt.Errorf("error getting blockfile info: %w", err)
}
var opts wshrpc.FileOpts
var meta wshrpc.FileMeta
if data.Info != nil {
if data.Info.Opts != nil {
opts = *data.Info.Opts
}
if data.Info.Meta != nil {
meta = *data.Info.Meta
}
}
err := filestore.WFS.MakeFile(ctx, zoneId, fileName, meta, opts)
if err != nil {
return fmt.Errorf("error making blockfile: %w", err)
}
}
err = filestore.WFS.AppendData(ctx, zoneId, fileName, dataBuf)
if errors.Is(err, fs.ErrNotExist) {
return fmt.Errorf("NOTFOUND: %w", err)
}
if err != nil {
return fmt.Errorf("error writing to blockfile: %w", err)
}
wps.Broker.Publish(wps.WaveEvent{
Event: wps.Event_BlockFile,
Scopes: []string{waveobj.MakeORef(waveobj.OType_Block, zoneId).String()},
Data: &wps.WSFileEventData{
ZoneId: zoneId,
FileName: fileName,
FileOp: wps.FileOp_Invalidate,
},
})
return nil
}
// WaveFile does not support directories, only prefix-based listing
func (c WaveClient) Mkdir(ctx context.Context, conn *connparse.Connection) error {
return nil
}
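// Move and Copy are not yet implemented for wavefiles; both currently return nil without doing any work.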
func (c WaveClient) Move(ctx context.Context, srcConn, destConn *connparse.Connection, opts *wshrpc.FileCopyOpts) error {
return nil
}
func (c WaveClient) Copy(ctx context.Context, srcConn, destConn *connparse.Connection, opts *wshrpc.FileCopyOpts) error {
return nil
}
func (c WaveClient) Delete(ctx context.Context, conn *connparse.Connection) error {
zoneId := conn.Host
if zoneId == "" {
return fmt.Errorf("zoneid not found in connection")
}
fileName, err := cleanPath(conn.Path)
if err != nil {
return fmt.Errorf("error cleaning path: %w", err)
}
err = filestore.WFS.DeleteFile(ctx, zoneId, fileName)
if err != nil {
return fmt.Errorf("error deleting blockfile: %w", err)
}
wps.Broker.Publish(wps.WaveEvent{
Event: wps.Event_BlockFile,
Scopes: []string{waveobj.MakeORef(waveobj.OType_Block, zoneId).String()},
Data: &wps.WSFileEventData{
ZoneId: zoneId,
FileName: fileName,
FileOp: wps.FileOp_Delete,
},
})
return nil
}
func (c WaveClient) Join(ctx context.Context, conn *connparse.Connection, parts ...string) (string, error) {
newPath := path.Join(append([]string{conn.Path}, parts...)...)
newPath, err := cleanPath(newPath)
if err != nil {
return "", fmt.Errorf("error cleaning path: %w", err)
}
return newPath, nil
}
func cleanPath(path string) (string, error) {
if path == "" {
return "", fmt.Errorf("path is empty")
}
if strings.HasPrefix(path, "/") {
path = path[1:]
}
if strings.HasPrefix(path, "~") || strings.HasPrefix(path, ".") || strings.HasPrefix(path, "..") {
return "", fmt.Errorf("wavefile path cannot start with ~, ., or ..")
}
var newParts []string
for _, part := range strings.Split(path, "/") {
if part == ".." {
if len(newParts) > 0 {
newParts = newParts[:len(newParts)-1]
}
} else if part != "." {
newParts = append(newParts, part)
}
}
return strings.Join(newParts, "/"), nil
}
func (c WaveClient) GetConnectionType() string {
return connparse.ConnectionTypeWave
}
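// Illustrative sketch: how Join and cleanPath normalize wavefile paths.
// The zone id and path segments below are hypothetical.
func exampleWaveClientJoin() {
	c := WaveClient{}
	conn := &connparse.Connection{Host: "block-zone-id", Path: "/notes"}
	joined, err := c.Join(context.Background(), conn, "sub", "..", "file.txt")
	if err != nil {
		fmt.Printf("join error: %v\n", err)
		return
	}
	// cleanPath strips the leading "/" and resolves the ".." segment, so this prints "notes/file.txt"
	fmt.Println(joined)
}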

View File

@ -0,0 +1,184 @@
// Copyright 2025, Command Line Inc.
// SPDX-License-Identifier: Apache-2.0
package wshfs
import (
"bytes"
"context"
"encoding/base64"
"fmt"
"io"
"github.com/wavetermdev/waveterm/pkg/remote/connparse"
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/fstype"
"github.com/wavetermdev/waveterm/pkg/wshrpc"
"github.com/wavetermdev/waveterm/pkg/wshrpc/wshclient"
"github.com/wavetermdev/waveterm/pkg/wshutil"
)
const (
ThirtySeconds = 30 * 1000
)
// This needs to be set by whoever initializes the client, either main-server or wshcmd-connserver
var RpcClient *wshutil.WshRpc
type WshClient struct{}
var _ fstype.FileShareClient = WshClient{}
func NewWshClient() *WshClient {
return &WshClient{}
}
func (c WshClient) Read(ctx context.Context, conn *connparse.Connection, data wshrpc.FileData) (*wshrpc.FileData, error) {
rtnCh := c.ReadStream(ctx, conn, data)
var fileData *wshrpc.FileData
firstPk := true
isDir := false
var fileBuf bytes.Buffer
for respUnion := range rtnCh {
if respUnion.Error != nil {
return nil, respUnion.Error
}
resp := respUnion.Response
if firstPk {
firstPk = false
// first packet has the fileinfo
if resp.Info == nil {
return nil, fmt.Errorf("stream file protocol error, first pk fileinfo is empty")
}
fileData = &resp
if fileData.Info.IsDir {
isDir = true
}
continue
}
if isDir {
if len(resp.Entries) == 0 {
continue
}
fileData.Entries = append(fileData.Entries, resp.Entries...)
} else {
if resp.Data64 == "" {
continue
}
decoder := base64.NewDecoder(base64.StdEncoding, bytes.NewReader([]byte(resp.Data64)))
_, err := io.Copy(&fileBuf, decoder)
if err != nil {
return nil, fmt.Errorf("stream file, failed to decode base64 data: %w", err)
}
}
}
	if fileData == nil {
		return nil, fmt.Errorf("stream file protocol error, no data received")
	}
	if !isDir {
fileData.Data64 = base64.StdEncoding.EncodeToString(fileBuf.Bytes())
}
return fileData, nil
}
func (c WshClient) ReadStream(ctx context.Context, conn *connparse.Connection, data wshrpc.FileData) <-chan wshrpc.RespOrErrorUnion[wshrpc.FileData] {
byteRange := ""
if data.At != nil && data.At.Size > 0 {
byteRange = fmt.Sprintf("%d-%d", data.At.Offset, data.At.Offset+int64(data.At.Size))
}
streamFileData := wshrpc.CommandRemoteStreamFileData{Path: conn.Path, ByteRange: byteRange}
return wshclient.RemoteStreamFileCommand(RpcClient, streamFileData, &wshrpc.RpcOpts{Route: wshutil.MakeConnectionRouteId(conn.Host)})
}
func (c WshClient) ReadTarStream(ctx context.Context, conn *connparse.Connection, opts *wshrpc.FileCopyOpts) <-chan wshrpc.RespOrErrorUnion[[]byte] {
timeout := opts.Timeout
if timeout == 0 {
timeout = ThirtySeconds
}
return wshclient.RemoteTarStreamCommand(RpcClient, wshrpc.CommandRemoteStreamTarData{Path: conn.Path, Opts: opts}, &wshrpc.RpcOpts{Route: wshutil.MakeConnectionRouteId(conn.Host), Timeout: timeout})
}
func (c WshClient) ListEntries(ctx context.Context, conn *connparse.Connection, opts *wshrpc.FileListOpts) ([]*wshrpc.FileInfo, error) {
var entries []*wshrpc.FileInfo
rtnCh := c.ListEntriesStream(ctx, conn, opts)
for respUnion := range rtnCh {
if respUnion.Error != nil {
return nil, respUnion.Error
}
resp := respUnion.Response
entries = append(entries, resp.FileInfo...)
}
return entries, nil
}
func (c WshClient) ListEntriesStream(ctx context.Context, conn *connparse.Connection, opts *wshrpc.FileListOpts) <-chan wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData] {
return wshclient.RemoteListEntriesCommand(RpcClient, wshrpc.CommandRemoteListEntriesData{Path: conn.Path, Opts: opts}, &wshrpc.RpcOpts{Route: wshutil.MakeConnectionRouteId(conn.Host)})
}
func (c WshClient) Stat(ctx context.Context, conn *connparse.Connection) (*wshrpc.FileInfo, error) {
return wshclient.RemoteFileInfoCommand(RpcClient, conn.Path, &wshrpc.RpcOpts{Route: wshutil.MakeConnectionRouteId(conn.Host)})
}
func (c WshClient) PutFile(ctx context.Context, conn *connparse.Connection, data wshrpc.FileData) error {
info := data.Info
if info == nil {
info = &wshrpc.FileInfo{Opts: &wshrpc.FileOpts{}}
} else if info.Opts == nil {
info.Opts = &wshrpc.FileOpts{}
}
info.Path = conn.Path
info.Opts.Truncate = true
data.Info = info
return wshclient.RemoteWriteFileCommand(RpcClient, data, &wshrpc.RpcOpts{Route: wshutil.MakeConnectionRouteId(conn.Host)})
}
func (c WshClient) AppendFile(ctx context.Context, conn *connparse.Connection, data wshrpc.FileData) error {
info := data.Info
if info == nil {
info = &wshrpc.FileInfo{Path: conn.Path, Opts: &wshrpc.FileOpts{}}
} else if info.Opts == nil {
info.Opts = &wshrpc.FileOpts{}
}
info.Path = conn.Path
info.Opts.Append = true
data.Info = info
return wshclient.RemoteWriteFileCommand(RpcClient, data, &wshrpc.RpcOpts{Route: wshutil.MakeConnectionRouteId(conn.Host)})
}
func (c WshClient) Mkdir(ctx context.Context, conn *connparse.Connection) error {
return wshclient.RemoteMkdirCommand(RpcClient, conn.Path, &wshrpc.RpcOpts{Route: wshutil.MakeConnectionRouteId(conn.Host)})
}
func (c WshClient) Move(ctx context.Context, srcConn, destConn *connparse.Connection, opts *wshrpc.FileCopyOpts) error {
if opts == nil {
opts = &wshrpc.FileCopyOpts{}
}
timeout := opts.Timeout
if timeout == 0 {
timeout = ThirtySeconds
}
return wshclient.RemoteFileMoveCommand(RpcClient, wshrpc.CommandRemoteFileCopyData{SrcUri: srcConn.GetFullURI(), DestUri: destConn.GetFullURI(), Opts: opts}, &wshrpc.RpcOpts{Route: wshutil.MakeConnectionRouteId(destConn.Host), Timeout: timeout})
}
func (c WshClient) Copy(ctx context.Context, srcConn, destConn *connparse.Connection, opts *wshrpc.FileCopyOpts) error {
if opts == nil {
opts = &wshrpc.FileCopyOpts{}
}
timeout := opts.Timeout
if timeout == 0 {
timeout = ThirtySeconds
}
return wshclient.RemoteFileCopyCommand(RpcClient, wshrpc.CommandRemoteFileCopyData{SrcUri: srcConn.GetFullURI(), DestUri: destConn.GetFullURI(), Opts: opts}, &wshrpc.RpcOpts{Route: wshutil.MakeConnectionRouteId(destConn.Host), Timeout: timeout})
}
func (c WshClient) Delete(ctx context.Context, conn *connparse.Connection) error {
return wshclient.RemoteFileDeleteCommand(RpcClient, conn.Path, &wshrpc.RpcOpts{Route: wshutil.MakeConnectionRouteId(conn.Host)})
}
func (c WshClient) Join(ctx context.Context, conn *connparse.Connection, parts ...string) (string, error) {
finfo, err := wshclient.RemoteFileJoinCommand(RpcClient, append([]string{conn.Path}, parts...), &wshrpc.RpcOpts{Route: wshutil.MakeConnectionRouteId(conn.Host)})
if err != nil {
return "", err
}
return finfo.Path, nil
}
func (c WshClient) GetConnectionType() string {
return connparse.ConnectionTypeWsh
}
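// Illustrative sketch: RpcClient must be assigned before any WshClient method is used.
// someRpc stands in for whatever *wshutil.WshRpc the host process already holds.
func exampleInitWshFs(someRpc *wshutil.WshRpc) {
	RpcClient = someRpc
	c := NewWshClient()
	_ = c.GetConnectionType() // returns connparse.ConnectionTypeWsh; file ops now route through RpcClient
}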

View File

@ -31,7 +31,6 @@ import (
"github.com/wavetermdev/waveterm/pkg/util/utilfn"
"github.com/wavetermdev/waveterm/pkg/wavebase"
"github.com/wavetermdev/waveterm/pkg/wconfig"
"github.com/wavetermdev/waveterm/pkg/wshrpc"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/agent"
xknownhosts "golang.org/x/crypto/ssh/knownhosts"
@ -115,7 +114,7 @@ func createDummySigner() ([]ssh.Signer, error) {
// they were successes. An error in this function prevents any other
// keys from being attempted. But if there's an error because of a dummy
// file, the library can still try again with a new key.
func createPublicKeyCallback(connCtx context.Context, sshKeywords *wshrpc.ConnKeywords, authSockSignersExt []ssh.Signer, agentClient agent.ExtendedAgent, debugInfo *ConnectionDebugInfo) func() ([]ssh.Signer, error) {
func createPublicKeyCallback(connCtx context.Context, sshKeywords *wconfig.ConnKeywords, authSockSignersExt []ssh.Signer, agentClient agent.ExtendedAgent, debugInfo *ConnectionDebugInfo) func() ([]ssh.Signer, error) {
var identityFiles []string
existingKeys := make(map[string][]byte)
@ -420,7 +419,7 @@ func lineContainsMatch(line []byte, matches [][]byte) bool {
return false
}
func createHostKeyCallback(ctx context.Context, sshKeywords *wshrpc.ConnKeywords) (ssh.HostKeyCallback, HostKeyAlgorithms, error) {
func createHostKeyCallback(ctx context.Context, sshKeywords *wconfig.ConnKeywords) (ssh.HostKeyCallback, HostKeyAlgorithms, error) {
globalKnownHostsFiles := sshKeywords.SshGlobalKnownHostsFile
userKnownHostsFiles := sshKeywords.SshUserKnownHostsFile
@ -592,7 +591,7 @@ func createHostKeyCallback(ctx context.Context, sshKeywords *wshrpc.ConnKeywords
return waveHostKeyCallback, hostKeyAlgorithms, nil
}
func createClientConfig(connCtx context.Context, sshKeywords *wshrpc.ConnKeywords, debugInfo *ConnectionDebugInfo) (*ssh.ClientConfig, error) {
func createClientConfig(connCtx context.Context, sshKeywords *wconfig.ConnKeywords, debugInfo *ConnectionDebugInfo) (*ssh.ClientConfig, error) {
chosenUser := utilfn.SafeDeref(sshKeywords.SshUser)
chosenHostName := utilfn.SafeDeref(sshKeywords.SshHostName)
chosenPort := utilfn.SafeDeref(sshKeywords.SshPort)
@ -604,7 +603,7 @@ func createClientConfig(connCtx context.Context, sshKeywords *wshrpc.ConnKeyword
var authSockSigners []ssh.Signer
var agentClient agent.ExtendedAgent
// IdentitiesOnly indicates that only the keys listed in IdentityFile should be used, even if there are matches in the SSH Agent, PKCS11Provider, or SecurityKeyProvider. See https://man.openbsd.org/ssh_config#IdentitiesOnly
// IdentitiesOnly indicates that only the keys listed in the identity and certificate files or passed as arguments should be used, even if there are matches in the SSH Agent, PKCS11Provider, or SecurityKeyProvider. See https://man.openbsd.org/ssh_config#IdentitiesOnly
// TODO: Update if we decide to support PKCS11Provider and SecurityKeyProvider
if !utilfn.SafeDeref(sshKeywords.SshIdentitiesOnly) {
conn, err := net.Dial("unix", utilfn.SafeDeref(sshKeywords.SshIdentityAgent))
@ -689,7 +688,7 @@ func connectInternal(ctx context.Context, networkAddr string, clientConfig *ssh.
return ssh.NewClient(c, chans, reqs), nil
}
func ConnectToClient(connCtx context.Context, opts *SSHOpts, currentClient *ssh.Client, jumpNum int32, connFlags *wshrpc.ConnKeywords) (*ssh.Client, int32, error) {
func ConnectToClient(connCtx context.Context, opts *SSHOpts, currentClient *ssh.Client, jumpNum int32, connFlags *wconfig.ConnKeywords) (*ssh.Client, int32, error) {
blocklogger.Infof(connCtx, "[conndebug] ConnectToClient %s (jump:%d)...\n", opts.String(), jumpNum)
debugInfo := &ConnectionDebugInfo{
CurrentClient: currentClient,
@ -704,10 +703,10 @@ func ConnectToClient(connCtx context.Context, opts *SSHOpts, currentClient *ssh.
fullConfig := wconfig.GetWatcher().GetFullConfig()
internalSshConfigKeywords, ok := fullConfig.Connections[rawName]
if !ok {
internalSshConfigKeywords = wshrpc.ConnKeywords{}
internalSshConfigKeywords = wconfig.ConnKeywords{}
}
var sshConfigKeywords *wshrpc.ConnKeywords
var sshConfigKeywords *wconfig.ConnKeywords
if utilfn.SafeDeref(internalSshConfigKeywords.ConnIgnoreSshConfig) {
var err error
sshConfigKeywords, err = findSshDefaults(opts.SSHHost)
@ -724,7 +723,7 @@ func ConnectToClient(connCtx context.Context, opts *SSHOpts, currentClient *ssh.
}
}
parsedKeywords := &wshrpc.ConnKeywords{}
parsedKeywords := &wconfig.ConnKeywords{}
if opts.SSHUser != "" {
parsedKeywords.SshUser = &opts.SSHUser
}
@ -760,7 +759,7 @@ func ConnectToClient(connCtx context.Context, opts *SSHOpts, currentClient *ssh.
}
// do not apply supplied keywords to proxies - ssh config must be used for that
debugInfo.CurrentClient, jumpNum, err = ConnectToClient(connCtx, proxyOpts, debugInfo.CurrentClient, jumpNum, &wshrpc.ConnKeywords{})
debugInfo.CurrentClient, jumpNum, err = ConnectToClient(connCtx, proxyOpts, debugInfo.CurrentClient, jumpNum, &wconfig.ConnKeywords{})
if err != nil {
// do not add a context on a recursive call
// (this can cause a recursive nested context that's arbitrarily deep)
@ -782,7 +781,7 @@ func ConnectToClient(connCtx context.Context, opts *SSHOpts, currentClient *ssh.
// note that a `var == "yes"` will default to false
// but `var != "no"` will default to true
// when given unexpected strings
func findSshConfigKeywords(hostPattern string) (connKeywords *wshrpc.ConnKeywords, outErr error) {
func findSshConfigKeywords(hostPattern string) (connKeywords *wconfig.ConnKeywords, outErr error) {
defer func() {
panicErr := panichandler.PanicHandler("sshclient:find-ssh-config-keywords", recover())
if panicErr != nil {
@ -790,7 +789,7 @@ func findSshConfigKeywords(hostPattern string) (connKeywords *wshrpc.ConnKeyword
}
}()
WaveSshConfigUserSettings().ReloadConfigs()
sshKeywords := &wshrpc.ConnKeywords{}
sshKeywords := &wconfig.ConnKeywords{}
var err error
userRaw, err := WaveSshConfigUserSettings().GetStrict(hostPattern, "User")
@ -920,8 +919,8 @@ func findSshConfigKeywords(hostPattern string) (connKeywords *wshrpc.ConnKeyword
return sshKeywords, nil
}
func findSshDefaults(hostPattern string) (connKeywords *wshrpc.ConnKeywords, outErr error) {
sshKeywords := &wshrpc.ConnKeywords{}
func findSshDefaults(hostPattern string) (connKeywords *wconfig.ConnKeywords, outErr error) {
sshKeywords := &wconfig.ConnKeywords{}
userDetails, err := user.Current()
if err != nil {
@ -963,9 +962,9 @@ func (opts SSHOpts) String() string {
return stringRepr
}
func mergeKeywords(oldKeywords *wshrpc.ConnKeywords, newKeywords *wshrpc.ConnKeywords) *wshrpc.ConnKeywords {
func mergeKeywords(oldKeywords *wconfig.ConnKeywords, newKeywords *wconfig.ConnKeywords) *wconfig.ConnKeywords {
if oldKeywords == nil {
oldKeywords = &wshrpc.ConnKeywords{}
oldKeywords = &wconfig.ConnKeywords{}
}
if newKeywords == nil {
return oldKeywords

View File

@ -54,12 +54,12 @@ func (bs *BlockService) SaveTerminalState(ctx context.Context, blockId string, s
return fmt.Errorf("invalid state type: %q", stateType)
}
// ignore MakeFile error (already exists is ok)
filestore.WFS.MakeFile(ctx, blockId, "cache:term:"+stateType, nil, filestore.FileOptsType{})
filestore.WFS.MakeFile(ctx, blockId, "cache:term:"+stateType, nil, wshrpc.FileOpts{})
err = filestore.WFS.WriteFile(ctx, blockId, "cache:term:"+stateType, []byte(state))
if err != nil {
return fmt.Errorf("cannot save terminal state: %w", err)
}
fileMeta := filestore.FileMeta{
fileMeta := wshrpc.FileMeta{
"ptyoffset": ptyOffset,
"termsize": termSize,
}
@ -84,7 +84,7 @@ func (bs *BlockService) SaveWaveAiData(ctx context.Context, blockId string, hist
return fmt.Errorf("unable to serialize ai history: %v", err)
}
// ignore MakeFile error (already exists is ok)
filestore.WFS.MakeFile(ctx, blockId, "aidata", nil, filestore.FileOptsType{})
filestore.WFS.MakeFile(ctx, blockId, "aidata", nil, wshrpc.FileOpts{})
err = filestore.WFS.WriteFile(ctx, blockId, "aidata", historyBytes)
if err != nil {
return fmt.Errorf("cannot save terminal state: %w", err)

View File

@ -1,186 +0,0 @@
package fileservice
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"time"
"github.com/wavetermdev/waveterm/pkg/filestore"
"github.com/wavetermdev/waveterm/pkg/tsgen/tsgenmeta"
"github.com/wavetermdev/waveterm/pkg/wconfig"
"github.com/wavetermdev/waveterm/pkg/wshrpc"
"github.com/wavetermdev/waveterm/pkg/wshrpc/wshclient"
"github.com/wavetermdev/waveterm/pkg/wshrpc/wshserver"
"github.com/wavetermdev/waveterm/pkg/wshutil"
)
const MaxFileSize = 10 * 1024 * 1024 // 10M
const DefaultTimeout = 2 * time.Second
type FileService struct{}
type FullFile struct {
Info *wshrpc.FileInfo `json:"info"`
Data64 string `json:"data64"` // base64 encoded
}
func (fs *FileService) SaveFile_Meta() tsgenmeta.MethodMeta {
return tsgenmeta.MethodMeta{
Desc: "save file",
ArgNames: []string{"connection", "path", "data64"},
}
}
func (fs *FileService) SaveFile(connection string, path string, data64 string) error {
if connection == "" {
connection = wshrpc.LocalConnName
}
connRoute := wshutil.MakeConnectionRouteId(connection)
client := wshserver.GetMainRpcClient()
writeData := wshrpc.CommandRemoteWriteFileData{Path: path, Data64: data64}
return wshclient.RemoteWriteFileCommand(client, writeData, &wshrpc.RpcOpts{Route: connRoute})
}
func (fs *FileService) StatFile_Meta() tsgenmeta.MethodMeta {
return tsgenmeta.MethodMeta{
Desc: "get file info",
ArgNames: []string{"connection", "path"},
}
}
func (fs *FileService) StatFile(connection string, path string) (*wshrpc.FileInfo, error) {
if connection == "" {
connection = wshrpc.LocalConnName
}
connRoute := wshutil.MakeConnectionRouteId(connection)
client := wshserver.GetMainRpcClient()
return wshclient.RemoteFileInfoCommand(client, path, &wshrpc.RpcOpts{Route: connRoute})
}
func (fs *FileService) Mkdir(connection string, path string) error {
if connection == "" {
connection = wshrpc.LocalConnName
}
connRoute := wshutil.MakeConnectionRouteId(connection)
client := wshserver.GetMainRpcClient()
return wshclient.RemoteMkdirCommand(client, path, &wshrpc.RpcOpts{Route: connRoute})
}
func (fs *FileService) TouchFile(connection string, path string) error {
if connection == "" {
connection = wshrpc.LocalConnName
}
connRoute := wshutil.MakeConnectionRouteId(connection)
client := wshserver.GetMainRpcClient()
return wshclient.RemoteFileTouchCommand(client, path, &wshrpc.RpcOpts{Route: connRoute})
}
func (fs *FileService) Rename(connection string, path string, newPath string) error {
if connection == "" {
connection = wshrpc.LocalConnName
}
connRoute := wshutil.MakeConnectionRouteId(connection)
client := wshserver.GetMainRpcClient()
return wshclient.RemoteFileRenameCommand(client, [2]string{path, newPath}, &wshrpc.RpcOpts{Route: connRoute})
}
func (fs *FileService) ReadFile_Meta() tsgenmeta.MethodMeta {
return tsgenmeta.MethodMeta{
Desc: "read file",
ArgNames: []string{"connection", "path"},
}
}
func (fs *FileService) ReadFile(connection string, path string) (*FullFile, error) {
if connection == "" {
connection = wshrpc.LocalConnName
}
connRoute := wshutil.MakeConnectionRouteId(connection)
client := wshserver.GetMainRpcClient()
streamFileData := wshrpc.CommandRemoteStreamFileData{Path: path}
rtnCh := wshclient.RemoteStreamFileCommand(client, streamFileData, &wshrpc.RpcOpts{Route: connRoute})
fullFile := &FullFile{}
firstPk := true
isDir := false
var fileBuf bytes.Buffer
var fileInfoArr []*wshrpc.FileInfo
for respUnion := range rtnCh {
if respUnion.Error != nil {
return nil, respUnion.Error
}
resp := respUnion.Response
if firstPk {
firstPk = false
// first packet has the fileinfo
if len(resp.FileInfo) != 1 {
return nil, fmt.Errorf("stream file protocol error, first pk fileinfo len=%d", len(resp.FileInfo))
}
fullFile.Info = resp.FileInfo[0]
if fullFile.Info.IsDir {
isDir = true
}
continue
}
if isDir {
if len(resp.FileInfo) == 0 {
continue
}
fileInfoArr = append(fileInfoArr, resp.FileInfo...)
} else {
if resp.Data64 == "" {
continue
}
decoder := base64.NewDecoder(base64.StdEncoding, bytes.NewReader([]byte(resp.Data64)))
_, err := io.Copy(&fileBuf, decoder)
if err != nil {
return nil, fmt.Errorf("stream file, failed to decode base64 data %q: %w", resp.Data64, err)
}
}
}
if isDir {
fiBytes, err := json.Marshal(fileInfoArr)
if err != nil {
return nil, fmt.Errorf("unable to serialize files %s", path)
}
fullFile.Data64 = base64.StdEncoding.EncodeToString(fiBytes)
} else {
// we can avoid this re-encoding if we ensure the remote side always encodes chunks of 3 bytes so we don't get padding chars
fullFile.Data64 = base64.StdEncoding.EncodeToString(fileBuf.Bytes())
}
return fullFile, nil
}
func (fs *FileService) GetWaveFile(id string, path string) (any, error) {
ctx, cancelFn := context.WithTimeout(context.Background(), DefaultTimeout)
defer cancelFn()
file, err := filestore.WFS.Stat(ctx, id, path)
if err != nil {
return nil, fmt.Errorf("error getting file: %w", err)
}
return file, nil
}
func (fs *FileService) DeleteFile_Meta() tsgenmeta.MethodMeta {
return tsgenmeta.MethodMeta{
Desc: "delete file",
ArgNames: []string{"connection", "path"},
}
}
func (fs *FileService) DeleteFile(connection string, path string) error {
if connection == "" {
connection = wshrpc.LocalConnName
}
connRoute := wshutil.MakeConnectionRouteId(connection)
client := wshserver.GetMainRpcClient()
return wshclient.RemoteFileDeleteCommand(client, path, &wshrpc.RpcOpts{Route: connRoute})
}
func (fs *FileService) GetFullConfig() wconfig.FullConfigType {
watcher := wconfig.GetWatcher()
return watcher.GetFullConfig()
}

View File

@ -11,7 +11,6 @@ import (
"github.com/wavetermdev/waveterm/pkg/service/blockservice"
"github.com/wavetermdev/waveterm/pkg/service/clientservice"
"github.com/wavetermdev/waveterm/pkg/service/fileservice"
"github.com/wavetermdev/waveterm/pkg/service/objectservice"
"github.com/wavetermdev/waveterm/pkg/service/userinputservice"
"github.com/wavetermdev/waveterm/pkg/service/windowservice"
@ -25,7 +24,6 @@ import (
var ServiceMap = map[string]any{
"block": blockservice.BlockServiceInstance,
"object": &objectservice.ObjectService{},
"file": &fileservice.FileService{},
"client": &clientservice.ClientService{},
"window": &windowservice.WindowService{},
"workspace": &workspaceservice.WorkspaceService{},

View File

@ -11,6 +11,69 @@ import (
// formatFn is a function that converts a value of type T to its string representation
type formatFn[T any] func(T) (string, error)
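// formatFnArray is a function that converts a value of type T to its multi-string representation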
type formatFnArray[T any] func(T) ([]string, error)
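// PrintColumnsArray prints values in columns where each value may format to multiple strings;
// it buffers up to sampleSize values to pick a column width, then streams the remaining values.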
func PrintColumnsArray[T any](values <-chan T, numCols int, sampleSize int, format formatFnArray[T], w io.Writer) error {
// Get first batch and determine column width
maxLen := 0
var samples []T
for v := range values {
samples = append(samples, v)
str, err := format(v)
if err != nil {
return err
}
for _, s := range str {
if len(s) > maxLen {
maxLen = len(s)
}
}
if len(samples) >= sampleSize {
break
}
}
colWidth := maxLen + 2 // Add minimum padding
if colWidth < 1 {
colWidth = 1
}
// Print in columns using our determined width
col := 0
for _, v := range samples {
str, err := format(v)
if err != nil {
return err
}
for _, s := range str {
if err := printColHelper(s, colWidth, &col, numCols, w); err != nil {
return err
}
}
}
// Continue with any remaining values
for v := range values {
str, err := format(v)
if err != nil {
return err
}
for _, s := range str {
if err := printColHelper(s, colWidth, &col, numCols, w); err != nil {
return err
}
}
}
if col > 0 {
if _, err := fmt.Fprint(w, "\n"); err != nil {
return err
}
}
return nil
}
// PrintColumns prints values in columns, adapting to long values by letting them span multiple columns
func PrintColumns[T any](values <-chan T, numCols int, sampleSize int, format formatFn[T], w io.Writer) error {
// Get first batch and determine column width

View File

@ -0,0 +1,115 @@
package fileutil
import (
"io"
"io/fs"
"log"
"mime"
"net/http"
"os"
"path/filepath"
"strings"
"github.com/wavetermdev/waveterm/pkg/wavebase"
)
func FixPath(path string) (string, error) {
if strings.HasPrefix(path, "~") {
return filepath.Join(wavebase.GetHomeDir(), path[1:]), nil
} else if !filepath.IsAbs(path) {
log.Printf("FixPath: path is not absolute: %s", path)
path, err := filepath.Abs(path)
if err != nil {
return "", err
}
log.Printf("FixPath: fixed path: %s", path)
return path, nil
} else {
return path, nil
}
}
const (
winFlagSoftlink = uint32(0x8000) // FILE_ATTRIBUTE_REPARSE_POINT
winFlagJunction = uint32(0x80) // FILE_ATTRIBUTE_JUNCTION
)
func WinSymlinkDir(path string, bits os.FileMode) bool {
// Windows compatibility layer doesn't expose symlink target type through fileInfo
// so we need to check file attributes and extension patterns
isFileSymlink := func(filepath string) bool {
if len(filepath) == 0 {
return false
}
return strings.LastIndex(filepath, ".") > strings.LastIndex(filepath, "/")
}
flags := uint32(bits >> 12)
if flags == winFlagSoftlink {
return !isFileSymlink(path)
} else if flags == winFlagJunction {
return true
} else {
return false
}
}
// on error just returns ""
// does not return "application/octet-stream" as this is considered a detection failure
// can pass an existing fileInfo to avoid re-statting the file
// falls back to text/plain for 0 byte files
func DetectMimeType(path string, fileInfo fs.FileInfo, extended bool) string {
if fileInfo == nil {
statRtn, err := os.Stat(path)
if err != nil {
return ""
}
fileInfo = statRtn
}
if fileInfo.IsDir() || WinSymlinkDir(path, fileInfo.Mode()) {
return "directory"
}
if fileInfo.Mode()&os.ModeNamedPipe == os.ModeNamedPipe {
return "pipe"
}
charDevice := os.ModeDevice | os.ModeCharDevice
if fileInfo.Mode()&charDevice == charDevice {
return "character-special"
}
if fileInfo.Mode()&os.ModeDevice == os.ModeDevice {
return "block-special"
}
ext := filepath.Ext(path)
if mimeType, ok := StaticMimeTypeMap[ext]; ok {
return mimeType
}
if mimeType := mime.TypeByExtension(ext); mimeType != "" {
return mimeType
}
if fileInfo.Size() == 0 {
return "text/plain"
}
if !extended {
return ""
}
fd, err := os.Open(path)
if err != nil {
return ""
}
defer fd.Close()
buf := make([]byte, 512)
// ignore the error (EOF / UnexpectedEOF is fine, just process how much we got back)
n, _ := io.ReadAtLeast(fd, buf, 512)
if n == 0 {
return ""
}
buf = buf[:n]
rtn := http.DetectContentType(buf)
if rtn == "application/octet-stream" {
return ""
}
return rtn
}
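// Illustrative sketch (hypothetical path): passing a nil fileInfo makes DetectMimeType stat the
// path itself, and extended=true enables content sniffing when the extension is unknown.
func exampleDetectMimeType() {
	mt := DetectMimeType("/tmp/example.json", nil, true)
	log.Printf("detected mime type: %q", mt) // typically "application/json" via the extension maps
}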

View File

@ -1,7 +1,7 @@
// Copyright 2025, Command Line Inc.
// SPDX-License-Identifier: Apache-2.0
package utilfn
package fileutil
var StaticMimeTypeMap = map[string]string{
".a2l": "application/A2L",

pkg/util/iochan/iochan.go Normal file
View File

@ -0,0 +1,90 @@
// Copyright 2025, Command Line Inc.
// SPDX-License-Identifier: Apache-2.0
// Package iochan provides helpers for streaming an io.Reader into a channel and for draining such a channel into an io.Writer
package iochan
import (
"context"
"errors"
"fmt"
"io"
"log"
"github.com/wavetermdev/waveterm/pkg/wshrpc"
"github.com/wavetermdev/waveterm/pkg/wshutil"
)
// ReaderChan reads from an io.Reader and sends the data to a channel
func ReaderChan(ctx context.Context, r io.Reader, chunkSize int64, callback func()) chan wshrpc.RespOrErrorUnion[[]byte] {
ch := make(chan wshrpc.RespOrErrorUnion[[]byte], 32)
go func() {
defer func() {
log.Printf("ReaderChan: closing channel")
close(ch)
callback()
}()
buf := make([]byte, chunkSize)
for {
select {
case <-ctx.Done():
if ctx.Err() == context.Canceled {
return
}
log.Printf("ReaderChan: context error: %v", ctx.Err())
return
default:
if n, err := r.Read(buf); err != nil {
if errors.Is(err, io.EOF) {
log.Printf("ReaderChan: EOF")
return
}
ch <- wshutil.RespErr[[]byte](fmt.Errorf("ReaderChan: read error: %v", err))
log.Printf("ReaderChan: read error: %v", err)
return
} else if n > 0 {
// log.Printf("ReaderChan: read %d bytes", n)
ch <- wshrpc.RespOrErrorUnion[[]byte]{Response: buf[:n]}
}
}
}
}()
return ch
}
// WriterChan reads from a channel and writes the data to an io.Writer
func WriterChan(ctx context.Context, w io.Writer, ch <-chan wshrpc.RespOrErrorUnion[[]byte], callback func(), errCallback func(error)) {
go func() {
defer func() {
log.Printf("WriterChan: closing channel")
callback()
drainChannel(ch)
}()
for {
select {
case <-ctx.Done():
return
case resp, ok := <-ch:
if !ok {
return
}
if resp.Error != nil {
log.Printf("WriterChan: error: %v", resp.Error)
errCallback(resp.Error)
return
}
if _, err := w.Write(resp.Response); err != nil {
log.Printf("WriterChan: write error: %v", err)
errCallback(err)
return
} else {
// log.Printf("WriterChan: wrote %d bytes", n)
}
}
}
}()
}
func drainChannel(ch <-chan wshrpc.RespOrErrorUnion[[]byte]) {
	for range ch {
	}
}

View File

@ -0,0 +1,69 @@
// Copyright 2025, Command Line Inc.
// SPDX-License-Identifier: Apache-2.0
package iochan_test
import (
"context"
"io"
"testing"
"time"
"github.com/wavetermdev/waveterm/pkg/util/iochan"
)
const (
buflen = 1024
)
func TestIochan_Basic(t *testing.T) {
// Write the packet to the source pipe from a goroutine
srcPipeReader, srcPipeWriter := io.Pipe()
packet := []byte("hello world")
go func() {
srcPipeWriter.Write(packet)
srcPipeWriter.Close()
}()
// Initialize the reader channel
readerChanCallbackCalled := false
readerChanCallback := func() {
srcPipeReader.Close()
readerChanCallbackCalled = true
}
defer readerChanCallback() // Ensure the callback is called
ioch := iochan.ReaderChan(context.TODO(), srcPipeReader, buflen, readerChanCallback)
// Initialize the destination pipe and the writer channel
destPipeReader, destPipeWriter := io.Pipe()
writerChanCallbackCalled := false
writerChanCallback := func() {
destPipeReader.Close()
destPipeWriter.Close()
writerChanCallbackCalled = true
}
defer writerChanCallback() // Ensure the callback is called
iochan.WriterChan(context.TODO(), destPipeWriter, ioch, writerChanCallback, func(err error) {})
// Read the packet from the destination pipe and compare it to the original packet
buf := make([]byte, buflen)
n, err := destPipeReader.Read(buf)
if err != nil {
t.Fatalf("Read failed: %v", err)
}
if n != len(packet) {
t.Fatalf("Read length mismatch: %d != %d", n, len(packet))
}
if string(buf[:n]) != string(packet) {
t.Fatalf("Read data mismatch: %s != %s", buf[:n], packet)
}
// Give the callbacks a chance to run before checking if they were called
time.Sleep(10 * time.Millisecond)
if !readerChanCallbackCalled {
t.Fatalf("ReaderChan callback not called")
}
if !writerChanCallbackCalled {
t.Fatalf("WriterChan callback not called")
}
}

pkg/util/iterfn/iterfn.go Normal file
View File

@ -0,0 +1,29 @@
package iterfn
import (
"cmp"
"iter"
"maps"
"slices"
)
func CollectSeqToSorted[T cmp.Ordered](seq iter.Seq[T]) []T {
rtn := []T{}
for v := range seq {
rtn = append(rtn, v)
}
slices.Sort(rtn)
return rtn
}
func CollectSeq[T any](seq iter.Seq[T]) []T {
rtn := []T{}
for v := range seq {
rtn = append(rtn, v)
}
return rtn
}
func MapKeysToSorted[K cmp.Ordered, V any](m map[K]V) []K {
return CollectSeqToSorted(maps.Keys(m))
}

View File

@ -0,0 +1,51 @@
package iterfn_test
import (
"maps"
"slices"
"testing"
"github.com/wavetermdev/waveterm/pkg/util/iterfn"
)
func TestCollectSeqToSorted(t *testing.T) {
t.Parallel()
// Test code here
m := map[int]struct{}{1: {}, 3: {}, 2: {}}
got := iterfn.CollectSeqToSorted(maps.Keys(m))
want := []int{1, 2, 3}
if !slices.Equal(got, want) {
t.Errorf("got %v, want %v", got, want)
}
}
func TestCollectSeq(t *testing.T) {
t.Parallel()
// Test code here
m := map[int]struct{}{1: {}, 3: {}, 2: {}}
got := iterfn.CollectSeq(maps.Keys(m))
i := 0
for _, v := range got {
if _, ok := m[v]; !ok {
t.Errorf("collected value %v not in original map", v)
}
i++
}
if i != len(m) {
t.Errorf("collected array length %v, want %v", i, len(m))
}
}
func TestMapKeysToSorted(t *testing.T) {
t.Parallel()
// Test code here
m := map[int]struct{}{1: {}, 3: {}, 2: {}}
got := iterfn.MapKeysToSorted(m)
want := []int{1, 2, 3}
if !slices.Equal(got, want) {
t.Errorf("got %v, want %v", got, want)
}
}

View File

@ -14,14 +14,10 @@ import (
"errors"
"fmt"
"io"
"io/fs"
"math"
mathrand "math/rand"
"mime"
"net/http"
"os"
"os/exec"
"path/filepath"
"reflect"
"regexp"
"sort"
@ -618,91 +614,6 @@ func CopyToChannel(outputCh chan<- []byte, reader io.Reader) error {
}
}
const (
winFlagSoftlink = uint32(0x8000) // FILE_ATTRIBUTE_REPARSE_POINT
winFlagJunction = uint32(0x80) // FILE_ATTRIBUTE_JUNCTION
)
func WinSymlinkDir(path string, bits os.FileMode) bool {
// Windows compatibility layer doesn't expose symlink target type through fileInfo
// so we need to check file attributes and extension patterns
isFileSymlink := func(filepath string) bool {
if len(filepath) == 0 {
return false
}
return strings.LastIndex(filepath, ".") > strings.LastIndex(filepath, "/")
}
flags := uint32(bits >> 12)
if flags == winFlagSoftlink {
return !isFileSymlink(path)
} else if flags == winFlagJunction {
return true
} else {
return false
}
}
// on error just returns ""
// does not return "application/octet-stream" as this is considered a detection failure
// can pass an existing fileInfo to avoid re-statting the file
// falls back to text/plain for 0 byte files
func DetectMimeType(path string, fileInfo fs.FileInfo, extended bool) string {
if fileInfo == nil {
statRtn, err := os.Stat(path)
if err != nil {
return ""
}
fileInfo = statRtn
}
if fileInfo.IsDir() || WinSymlinkDir(path, fileInfo.Mode()) {
return "directory"
}
if fileInfo.Mode()&os.ModeNamedPipe == os.ModeNamedPipe {
return "pipe"
}
charDevice := os.ModeDevice | os.ModeCharDevice
if fileInfo.Mode()&charDevice == charDevice {
return "character-special"
}
if fileInfo.Mode()&os.ModeDevice == os.ModeDevice {
return "block-special"
}
ext := filepath.Ext(path)
if mimeType, ok := StaticMimeTypeMap[ext]; ok {
return mimeType
}
if mimeType := mime.TypeByExtension(ext); mimeType != "" {
return mimeType
}
if fileInfo.Size() == 0 {
return "text/plain"
}
if !extended {
return ""
}
fd, err := os.Open(path)
if err != nil {
return ""
}
defer fd.Close()
buf := make([]byte, 512)
// ignore the error (EOF / UnexpectedEOF is fine, just process how much we got back)
n, _ := io.ReadAtLeast(fd, buf, 512)
if n == 0 {
return ""
}
buf = buf[:n]
rtn := http.DetectContentType(buf)
if rtn == "application/octet-stream" {
return ""
}
return rtn
}
func GetCmdExitCode(cmd *exec.Cmd, err error) int {
if cmd == nil || cmd.ProcessState == nil {
return GetExitCode(err)

View File

@ -0,0 +1,32 @@
package wavefileutil
import (
"fmt"
"github.com/wavetermdev/waveterm/pkg/filestore"
"github.com/wavetermdev/waveterm/pkg/wshrpc"
)
const (
WaveFilePathPattern = "wavefile://%s/%s"
)
func WaveFileToFileInfo(wf *filestore.WaveFile) *wshrpc.FileInfo {
path := fmt.Sprintf(WaveFilePathPattern, wf.ZoneId, wf.Name)
return &wshrpc.FileInfo{
Path: path,
Name: wf.Name,
Opts: &wf.Opts,
Size: wf.Size,
Meta: &wf.Meta,
SupportsMkdir: false,
}
}
func WaveFileListToFileInfoList(wfList []*filestore.WaveFile) []*wshrpc.FileInfo {
var fileInfoList []*wshrpc.FileInfo
for _, wf := range wfList {
fileInfoList = append(fileInfoList, WaveFileToFileInfo(wf))
}
return fileInfoList
}
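// Illustrative note: the generated Path follows WaveFilePathPattern, so a wavefile named
// "term:state" in zone "abc" (hypothetical values) maps to "wavefile://abc/term:state".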

View File

@ -19,11 +19,11 @@ import (
"github.com/wavetermdev/waveterm/pkg/wavebase"
"github.com/wavetermdev/waveterm/pkg/waveobj"
"github.com/wavetermdev/waveterm/pkg/wconfig/defaultconfig"
"github.com/wavetermdev/waveterm/pkg/wshrpc"
)
const SettingsFile = "settings.json"
const ConnectionsFile = "connections.json"
const ProfilesFile = "profiles.json"
const AnySchema = `
{
@ -132,9 +132,41 @@ type FullConfigType struct {
Widgets map[string]WidgetConfigType `json:"widgets"`
Presets map[string]waveobj.MetaMapType `json:"presets"`
TermThemes map[string]TermThemeType `json:"termthemes"`
Connections map[string]wshrpc.ConnKeywords `json:"connections"`
Connections map[string]ConnKeywords `json:"connections"`
ConfigErrors []ConfigError `json:"configerrors" configfile:"-"`
}
type ConnKeywords struct {
ConnWshEnabled *bool `json:"conn:wshenabled,omitempty"`
ConnAskBeforeWshInstall *bool `json:"conn:askbeforewshinstall,omitempty"`
ConnOverrideConfig bool `json:"conn:overrideconfig,omitempty"`
ConnWshPath string `json:"conn:wshpath,omitempty"`
ConnShellPath string `json:"conn:shellpath,omitempty"`
ConnIgnoreSshConfig *bool `json:"conn:ignoresshconfig,omitempty"`
DisplayHidden *bool `json:"display:hidden,omitempty"`
DisplayOrder float32 `json:"display:order,omitempty"`
TermClear bool `json:"term:*,omitempty"`
TermFontSize float64 `json:"term:fontsize,omitempty"`
TermFontFamily string `json:"term:fontfamily,omitempty"`
TermTheme string `json:"term:theme,omitempty"`
SshUser *string `json:"ssh:user,omitempty"`
SshHostName *string `json:"ssh:hostname,omitempty"`
SshPort *string `json:"ssh:port,omitempty"`
SshIdentityFile []string `json:"ssh:identityfile,omitempty"`
SshBatchMode *bool `json:"ssh:batchmode,omitempty"`
SshPubkeyAuthentication *bool `json:"ssh:pubkeyauthentication,omitempty"`
SshPasswordAuthentication *bool `json:"ssh:passwordauthentication,omitempty"`
SshKbdInteractiveAuthentication *bool `json:"ssh:kbdinteractiveauthentication,omitempty"`
SshPreferredAuthentications []string `json:"ssh:preferredauthentications,omitempty"`
SshAddKeysToAgent *bool `json:"ssh:addkeystoagent,omitempty"`
SshIdentityAgent *string `json:"ssh:identityagent,omitempty"`
SshIdentitiesOnly *bool `json:"ssh:identitiesonly,omitempty"`
SshProxyJump []string `json:"ssh:proxyjump,omitempty"`
SshUserKnownHostsFile []string `json:"ssh:userknownhostsfile,omitempty"`
SshGlobalKnownHostsFile []string `json:"ssh:globalknownhostsfile,omitempty"`
}
func DefaultBoolPtr(arg *bool, def bool) bool {
if arg == nil {

View File

@ -83,7 +83,7 @@ func CreateBlock(ctx context.Context, tabId string, blockDef *waveobj.BlockDef,
// upload the files if present
if len(blockDef.Files) > 0 {
for fileName, fileDef := range blockDef.Files {
err := filestore.WFS.MakeFile(ctx, newBlockOID, fileName, fileDef.Meta, filestore.FileOptsType{})
err := filestore.WFS.MakeFile(ctx, newBlockOID, fileName, fileDef.Meta, wshrpc.FileOpts{})
if err != nil {
return nil, fmt.Errorf("error making blockfile %q: %w", fileName, err)
}

View File

@ -267,10 +267,10 @@ func handleRemoteStreamFile(w http.ResponseWriter, _ *http.Request, conn string,
}
if firstPk {
firstPk = false
if len(respUnion.Response.FileInfo) != 1 {
return fmt.Errorf("stream file protocol error, first pk fileinfo len=%d", len(respUnion.Response.FileInfo))
if respUnion.Response.Info == nil {
return fmt.Errorf("stream file protocol error, fileinfo is empty")
}
fileInfo = respUnion.Response.FileInfo[0]
fileInfo = respUnion.Response.Info
if fileInfo.NotFound {
if no404 {
serveTransparentGIF(w)

View File

@ -8,6 +8,7 @@ package wshclient
import (
"github.com/wavetermdev/waveterm/pkg/wshutil"
"github.com/wavetermdev/waveterm/pkg/wshrpc"
"github.com/wavetermdev/waveterm/pkg/wconfig"
"github.com/wavetermdev/waveterm/pkg/waveobj"
"github.com/wavetermdev/waveterm/pkg/wps"
"github.com/wavetermdev/waveterm/pkg/vdom"
@ -182,7 +183,7 @@ func EventUnsubAllCommand(w *wshutil.WshRpc, opts *wshrpc.RpcOpts) error {
}
// command "fileappend", wshserver.FileAppendCommand
func FileAppendCommand(w *wshutil.WshRpc, data wshrpc.CommandFileData, opts *wshrpc.RpcOpts) error {
func FileAppendCommand(w *wshutil.WshRpc, data wshrpc.FileData, opts *wshrpc.RpcOpts) error {
_, err := sendRpcRequestCallHelper[any](w, "fileappend", data, opts)
return err
}
@ -193,38 +194,66 @@ func FileAppendIJsonCommand(w *wshutil.WshRpc, data wshrpc.CommandAppendIJsonDat
return err
}
// command "filecopy", wshserver.FileCopyCommand
func FileCopyCommand(w *wshutil.WshRpc, data wshrpc.CommandFileCopyData, opts *wshrpc.RpcOpts) error {
_, err := sendRpcRequestCallHelper[any](w, "filecopy", data, opts)
return err
}
// command "filecreate", wshserver.FileCreateCommand
func FileCreateCommand(w *wshutil.WshRpc, data wshrpc.CommandFileCreateData, opts *wshrpc.RpcOpts) error {
func FileCreateCommand(w *wshutil.WshRpc, data wshrpc.FileData, opts *wshrpc.RpcOpts) error {
_, err := sendRpcRequestCallHelper[any](w, "filecreate", data, opts)
return err
}
// command "filedelete", wshserver.FileDeleteCommand
func FileDeleteCommand(w *wshutil.WshRpc, data wshrpc.CommandFileData, opts *wshrpc.RpcOpts) error {
func FileDeleteCommand(w *wshutil.WshRpc, data wshrpc.FileData, opts *wshrpc.RpcOpts) error {
_, err := sendRpcRequestCallHelper[any](w, "filedelete", data, opts)
return err
}
// command "fileinfo", wshserver.FileInfoCommand
func FileInfoCommand(w *wshutil.WshRpc, data wshrpc.CommandFileData, opts *wshrpc.RpcOpts) (*wshrpc.WaveFileInfo, error) {
resp, err := sendRpcRequestCallHelper[*wshrpc.WaveFileInfo](w, "fileinfo", data, opts)
func FileInfoCommand(w *wshutil.WshRpc, data wshrpc.FileData, opts *wshrpc.RpcOpts) (*wshrpc.FileInfo, error) {
resp, err := sendRpcRequestCallHelper[*wshrpc.FileInfo](w, "fileinfo", data, opts)
return resp, err
}
// command "filelist", wshserver.FileListCommand
func FileListCommand(w *wshutil.WshRpc, data wshrpc.CommandFileListData, opts *wshrpc.RpcOpts) ([]*wshrpc.WaveFileInfo, error) {
resp, err := sendRpcRequestCallHelper[[]*wshrpc.WaveFileInfo](w, "filelist", data, opts)
func FileListCommand(w *wshutil.WshRpc, data wshrpc.FileListData, opts *wshrpc.RpcOpts) ([]*wshrpc.FileInfo, error) {
resp, err := sendRpcRequestCallHelper[[]*wshrpc.FileInfo](w, "filelist", data, opts)
return resp, err
}
// command "fileliststream", wshserver.FileListStreamCommand
func FileListStreamCommand(w *wshutil.WshRpc, data wshrpc.FileListData, opts *wshrpc.RpcOpts) chan wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData] {
return sendRpcRequestResponseStreamHelper[wshrpc.CommandRemoteListEntriesRtnData](w, "fileliststream", data, opts)
}
// command "filemkdir", wshserver.FileMkdirCommand
func FileMkdirCommand(w *wshutil.WshRpc, data wshrpc.FileData, opts *wshrpc.RpcOpts) error {
_, err := sendRpcRequestCallHelper[any](w, "filemkdir", data, opts)
return err
}
// command "filemove", wshserver.FileMoveCommand
func FileMoveCommand(w *wshutil.WshRpc, data wshrpc.CommandFileCopyData, opts *wshrpc.RpcOpts) error {
_, err := sendRpcRequestCallHelper[any](w, "filemove", data, opts)
return err
}
// command "fileread", wshserver.FileReadCommand
func FileReadCommand(w *wshutil.WshRpc, data wshrpc.CommandFileData, opts *wshrpc.RpcOpts) (string, error) {
resp, err := sendRpcRequestCallHelper[string](w, "fileread", data, opts)
func FileReadCommand(w *wshutil.WshRpc, data wshrpc.FileData, opts *wshrpc.RpcOpts) (*wshrpc.FileData, error) {
resp, err := sendRpcRequestCallHelper[*wshrpc.FileData](w, "fileread", data, opts)
return resp, err
}
// command "filestreamtar", wshserver.FileStreamTarCommand
func FileStreamTarCommand(w *wshutil.WshRpc, data wshrpc.CommandRemoteStreamTarData, opts *wshrpc.RpcOpts) chan wshrpc.RespOrErrorUnion[[]uint8] {
return sendRpcRequestResponseStreamHelper[[]uint8](w, "filestreamtar", data, opts)
}
// command "filewrite", wshserver.FileWriteCommand
func FileWriteCommand(w *wshutil.WshRpc, data wshrpc.CommandFileData, opts *wshrpc.RpcOpts) error {
func FileWriteCommand(w *wshutil.WshRpc, data wshrpc.FileData, opts *wshrpc.RpcOpts) error {
_, err := sendRpcRequestCallHelper[any](w, "filewrite", data, opts)
return err
}
@ -235,6 +264,12 @@ func FocusWindowCommand(w *wshutil.WshRpc, data string, opts *wshrpc.RpcOpts) er
return err
}
// command "getfullconfig", wshserver.GetFullConfigCommand
func GetFullConfigCommand(w *wshutil.WshRpc, opts *wshrpc.RpcOpts) (wconfig.FullConfigType, error) {
resp, err := sendRpcRequestCallHelper[wconfig.FullConfigType](w, "getfullconfig", nil, opts)
return resp, err
}
// command "getmeta", wshserver.GetMetaCommand
func GetMetaCommand(w *wshutil.WshRpc, data wshrpc.CommandGetMetaData, opts *wshrpc.RpcOpts) (waveobj.MetaMapType, error) {
resp, err := sendRpcRequestCallHelper[waveobj.MetaMapType](w, "getmeta", data, opts)
@ -271,6 +306,12 @@ func PathCommand(w *wshutil.WshRpc, data wshrpc.PathCommandData, opts *wshrpc.Rp
return resp, err
}
// command "remotefilecopy", wshserver.RemoteFileCopyCommand
func RemoteFileCopyCommand(w *wshutil.WshRpc, data wshrpc.CommandRemoteFileCopyData, opts *wshrpc.RpcOpts) error {
_, err := sendRpcRequestCallHelper[any](w, "remotefilecopy", data, opts)
return err
}
// command "remotefiledelete", wshserver.RemoteFileDeleteCommand
func RemoteFileDeleteCommand(w *wshutil.WshRpc, data string, opts *wshrpc.RpcOpts) error {
_, err := sendRpcRequestCallHelper[any](w, "remotefiledelete", data, opts)
@ -289,9 +330,9 @@ func RemoteFileJoinCommand(w *wshutil.WshRpc, data []string, opts *wshrpc.RpcOpt
return resp, err
}
// command "remotefilerename", wshserver.RemoteFileRenameCommand
func RemoteFileRenameCommand(w *wshutil.WshRpc, data [2]string, opts *wshrpc.RpcOpts) error {
_, err := sendRpcRequestCallHelper[any](w, "remotefilerename", data, opts)
// command "remotefilemove", wshserver.RemoteFileMoveCommand
func RemoteFileMoveCommand(w *wshutil.WshRpc, data wshrpc.CommandRemoteFileCopyData, opts *wshrpc.RpcOpts) error {
_, err := sendRpcRequestCallHelper[any](w, "remotefilemove", data, opts)
return err
}
@ -313,6 +354,11 @@ func RemoteInstallRcFilesCommand(w *wshutil.WshRpc, opts *wshrpc.RpcOpts) error
return err
}
// command "remotelistentries", wshserver.RemoteListEntriesCommand
func RemoteListEntriesCommand(w *wshutil.WshRpc, data wshrpc.CommandRemoteListEntriesData, opts *wshrpc.RpcOpts) chan wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData] {
return sendRpcRequestResponseStreamHelper[wshrpc.CommandRemoteListEntriesRtnData](w, "remotelistentries", data, opts)
}
// command "remotemkdir", wshserver.RemoteMkdirCommand
func RemoteMkdirCommand(w *wshutil.WshRpc, data string, opts *wshrpc.RpcOpts) error {
_, err := sendRpcRequestCallHelper[any](w, "remotemkdir", data, opts)
@ -325,12 +371,17 @@ func RemoteStreamCpuDataCommand(w *wshutil.WshRpc, opts *wshrpc.RpcOpts) chan ws
}
// command "remotestreamfile", wshserver.RemoteStreamFileCommand
func RemoteStreamFileCommand(w *wshutil.WshRpc, data wshrpc.CommandRemoteStreamFileData, opts *wshrpc.RpcOpts) chan wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteStreamFileRtnData] {
return sendRpcRequestResponseStreamHelper[wshrpc.CommandRemoteStreamFileRtnData](w, "remotestreamfile", data, opts)
func RemoteStreamFileCommand(w *wshutil.WshRpc, data wshrpc.CommandRemoteStreamFileData, opts *wshrpc.RpcOpts) chan wshrpc.RespOrErrorUnion[wshrpc.FileData] {
return sendRpcRequestResponseStreamHelper[wshrpc.FileData](w, "remotestreamfile", data, opts)
}
// command "remotetarstream", wshserver.RemoteTarStreamCommand
func RemoteTarStreamCommand(w *wshutil.WshRpc, data wshrpc.CommandRemoteStreamTarData, opts *wshrpc.RpcOpts) chan wshrpc.RespOrErrorUnion[[]uint8] {
return sendRpcRequestResponseStreamHelper[[]uint8](w, "remotetarstream", data, opts)
}
// command "remotewritefile", wshserver.RemoteWriteFileCommand
func RemoteWriteFileCommand(w *wshutil.WshRpc, data wshrpc.CommandRemoteWriteFileData, opts *wshrpc.RpcOpts) error {
func RemoteWriteFileCommand(w *wshutil.WshRpc, data wshrpc.FileData, opts *wshrpc.RpcOpts) error {
_, err := sendRpcRequestCallHelper[any](w, "remotewritefile", data, opts)
return err
}

View File

@ -52,7 +52,7 @@ func sendRpcRequestResponseStreamHelper[T any](w *wshutil.WshRpc, command string
if opts == nil {
opts = &wshrpc.RpcOpts{}
}
respChan := make(chan wshrpc.RespOrErrorUnion[T])
respChan := make(chan wshrpc.RespOrErrorUnion[T], 32)
if w == nil {
rtnErr(respChan, errors.New("nil wshrpc passed to wshclient"))
return respChan

View File

@ -4,6 +4,7 @@
package wshremote
import (
"archive/tar"
"context"
"encoding/base64"
"errors"
@ -14,18 +15,17 @@ import (
"os"
"path/filepath"
"strings"
"time"
"github.com/wavetermdev/waveterm/pkg/remote/connparse"
"github.com/wavetermdev/waveterm/pkg/util/fileutil"
"github.com/wavetermdev/waveterm/pkg/util/iochan"
"github.com/wavetermdev/waveterm/pkg/util/utilfn"
"github.com/wavetermdev/waveterm/pkg/wavebase"
"github.com/wavetermdev/waveterm/pkg/wshrpc"
"github.com/wavetermdev/waveterm/pkg/wshutil"
)
const MaxFileSize = 50 * 1024 * 1024 // 50M
const MaxDirSize = 1024
const FileChunkSize = 16 * 1024
const DirChunkSize = 128
type ServerImpl struct {
LogWriter io.Writer
}
@ -45,10 +45,6 @@ func (impl *ServerImpl) MessageCommand(ctx context.Context, data wshrpc.CommandM
return nil
}
func respErr(err error) wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteStreamFileRtnData] {
return wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteStreamFileRtnData]{Error: err}
}
type ByteRangeType struct {
All bool
Start int64
@ -70,24 +66,25 @@ func parseByteRange(rangeStr string) (ByteRangeType, error) {
return ByteRangeType{Start: start, End: end}, nil
}
func (impl *ServerImpl) remoteStreamFileDir(ctx context.Context, path string, byteRange ByteRangeType, dataCallback func(fileInfo []*wshrpc.FileInfo, data []byte)) error {
func (impl *ServerImpl) remoteStreamFileDir(ctx context.Context, path string, byteRange ByteRangeType, dataCallback func(fileInfo []*wshrpc.FileInfo, data []byte, byteRange ByteRangeType)) error {
innerFilesEntries, err := os.ReadDir(path)
if err != nil {
return fmt.Errorf("cannot open dir %q: %w", path, err)
}
if byteRange.All {
if len(innerFilesEntries) > MaxDirSize {
innerFilesEntries = innerFilesEntries[:MaxDirSize]
if len(innerFilesEntries) > wshrpc.MaxDirSize {
innerFilesEntries = innerFilesEntries[:wshrpc.MaxDirSize]
}
} else {
if byteRange.Start >= int64(len(innerFilesEntries)) {
return nil
if byteRange.Start < int64(len(innerFilesEntries)) {
realEnd := byteRange.End
if realEnd > int64(len(innerFilesEntries)) {
realEnd = int64(len(innerFilesEntries))
}
innerFilesEntries = innerFilesEntries[byteRange.Start:realEnd]
} else {
innerFilesEntries = []os.DirEntry{}
}
realEnd := byteRange.End
if realEnd > int64(len(innerFilesEntries)) {
realEnd = int64(len(innerFilesEntries))
}
innerFilesEntries = innerFilesEntries[byteRange.Start:realEnd]
}
var fileInfoArr []*wshrpc.FileInfo
parent := filepath.Dir(path)
@ -107,19 +104,20 @@ func (impl *ServerImpl) remoteStreamFileDir(ctx context.Context, path string, by
}
innerFileInfo := statToFileInfo(filepath.Join(path, innerFileInfoInt.Name()), innerFileInfoInt, false)
fileInfoArr = append(fileInfoArr, innerFileInfo)
if len(fileInfoArr) >= DirChunkSize {
dataCallback(fileInfoArr, nil)
if len(fileInfoArr) >= wshrpc.DirChunkSize {
log.Printf("sending %d entries\n", len(fileInfoArr))
dataCallback(fileInfoArr, nil, byteRange)
fileInfoArr = nil
}
}
if len(fileInfoArr) > 0 {
dataCallback(fileInfoArr, nil)
log.Printf("sending %d entries\n", len(fileInfoArr))
dataCallback(fileInfoArr, nil, byteRange)
}
return nil
}
// TODO make sure the read is in chunks of 3 bytes (so 4 bytes of base64) in order to make decoding more efficient
func (impl *ServerImpl) remoteStreamFileRegular(ctx context.Context, path string, byteRange ByteRangeType, dataCallback func(fileInfo []*wshrpc.FileInfo, data []byte)) error {
func (impl *ServerImpl) remoteStreamFileRegular(ctx context.Context, path string, byteRange ByteRangeType, dataCallback func(fileInfo []*wshrpc.FileInfo, data []byte, byteRange ByteRangeType)) error {
fd, err := os.Open(path)
if err != nil {
return fmt.Errorf("cannot open file %q: %w", path, err)
@ -133,7 +131,7 @@ func (impl *ServerImpl) remoteStreamFileRegular(ctx context.Context, path string
}
filePos = byteRange.Start
}
buf := make([]byte, FileChunkSize)
buf := make([]byte, wshrpc.FileChunkSize)
for {
if ctx.Err() != nil {
return ctx.Err()
@ -144,7 +142,7 @@ func (impl *ServerImpl) remoteStreamFileRegular(ctx context.Context, path string
n = int(byteRange.End - filePos)
}
filePos += int64(n)
dataCallback(nil, buf[:n])
dataCallback(nil, buf[:n], byteRange)
}
if !byteRange.All && filePos >= byteRange.End {
break
@ -159,7 +157,7 @@ func (impl *ServerImpl) remoteStreamFileRegular(ctx context.Context, path string
return nil
}
func (impl *ServerImpl) remoteStreamFileInternal(ctx context.Context, data wshrpc.CommandRemoteStreamFileData, dataCallback func(fileInfo []*wshrpc.FileInfo, data []byte)) error {
func (impl *ServerImpl) remoteStreamFileInternal(ctx context.Context, data wshrpc.CommandRemoteStreamFileData, dataCallback func(fileInfo []*wshrpc.FileInfo, data []byte, byteRange ByteRangeType)) error {
byteRange, err := parseByteRange(data.ByteRange)
if err != nil {
return err
@ -172,11 +170,11 @@ func (impl *ServerImpl) remoteStreamFileInternal(ctx context.Context, data wshrp
if err != nil {
return fmt.Errorf("cannot stat file %q: %w", path, err)
}
dataCallback([]*wshrpc.FileInfo{finfo}, nil)
dataCallback([]*wshrpc.FileInfo{finfo}, nil, byteRange)
if finfo.NotFound {
return nil
}
if finfo.Size > MaxFileSize {
if finfo.Size > wshrpc.MaxFileSize {
return fmt.Errorf("file %q is too large to read, use /wave/stream-file", path)
}
if finfo.IsDir {
@ -186,37 +184,367 @@ func (impl *ServerImpl) remoteStreamFileInternal(ctx context.Context, data wshrp
}
}
func (impl *ServerImpl) RemoteStreamFileCommand(ctx context.Context, data wshrpc.CommandRemoteStreamFileData) chan wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteStreamFileRtnData] {
ch := make(chan wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteStreamFileRtnData], 16)
func (impl *ServerImpl) RemoteStreamFileCommand(ctx context.Context, data wshrpc.CommandRemoteStreamFileData) chan wshrpc.RespOrErrorUnion[wshrpc.FileData] {
ch := make(chan wshrpc.RespOrErrorUnion[wshrpc.FileData], 16)
go func() {
defer close(ch)
err := impl.remoteStreamFileInternal(ctx, data, func(fileInfo []*wshrpc.FileInfo, data []byte) {
resp := wshrpc.CommandRemoteStreamFileRtnData{}
resp.FileInfo = fileInfo
firstPk := true
err := impl.remoteStreamFileInternal(ctx, data, func(fileInfo []*wshrpc.FileInfo, data []byte, byteRange ByteRangeType) {
resp := wshrpc.FileData{}
fileInfoLen := len(fileInfo)
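// The first packet with a single FileInfo carries the stat result in Info; later packets (or multi-entry packets) carry directory entries.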
if fileInfoLen > 1 || !firstPk {
resp.Entries = fileInfo
} else if fileInfoLen == 1 {
resp.Info = fileInfo[0]
}
if firstPk {
firstPk = false
}
if len(data) > 0 {
resp.Data64 = base64.StdEncoding.EncodeToString(data)
resp.At = &wshrpc.FileDataAt{Offset: byteRange.Start, Size: len(data)}
}
ch <- wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteStreamFileRtnData]{Response: resp}
log.Printf("callback -- sending response %d\n", len(resp.Data64))
ch <- wshrpc.RespOrErrorUnion[wshrpc.FileData]{Response: resp}
})
if err != nil {
ch <- respErr(err)
ch <- wshutil.RespErr[wshrpc.FileData](err)
}
}()
return ch
}
func (impl *ServerImpl) RemoteTarStreamCommand(ctx context.Context, data wshrpc.CommandRemoteStreamTarData) <-chan wshrpc.RespOrErrorUnion[[]byte] {
path := data.Path
opts := data.Opts
if opts == nil {
opts = &wshrpc.FileCopyOpts{}
}
recursive := opts.Recursive
log.Printf("RemoteTarStreamCommand: path=%s\n", path)
path, err := wavebase.ExpandHomeDir(path)
if err != nil {
return wshutil.SendErrCh[[]byte](fmt.Errorf("cannot expand path %q: %w", path, err))
}
cleanedPath := filepath.Clean(wavebase.ExpandHomeDirSafe(path))
finfo, err := os.Stat(cleanedPath)
if err != nil {
return wshutil.SendErrCh[[]byte](fmt.Errorf("cannot stat file %q: %w", path, err))
}
pipeReader, pipeWriter := io.Pipe()
tarWriter := tar.NewWriter(pipeWriter)
timeout := time.Millisecond * 100
if opts.Timeout > 0 {
timeout = time.Duration(opts.Timeout) * time.Millisecond
}
readerCtx, _ := context.WithTimeout(context.Background(), timeout)
rtn := iochan.ReaderChan(readerCtx, pipeReader, wshrpc.FileChunkSize, func() {
pipeReader.Close()
pipeWriter.Close()
})
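// Compute the prefix to strip from walked paths so tar header names stay relative to the copied root.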
var pathPrefix string
if finfo.IsDir() && strings.HasSuffix(cleanedPath, "/") {
pathPrefix = cleanedPath
} else {
pathPrefix = filepath.Dir(cleanedPath)
}
go func() {
if readerCtx.Err() != nil {
return
}
defer tarWriter.Close()
log.Printf("creating tar stream for %q\n", path)
if finfo.IsDir() {
log.Printf("%q is a directory, recursive: %v\n", path, recursive)
if !recursive {
rtn <- wshutil.RespErr[[]byte](fmt.Errorf("cannot create tar stream for %q: directory copy requires recursive option", path))
return
}
}
err := filepath.Walk(path, func(file string, fi os.FileInfo, err error) error {
// propagate walk errors first; fi may be nil when err is set
if err != nil {
return err
}
// generate tar header
header, err := tar.FileInfoHeader(fi, file)
if err != nil {
return err
}
header.Name = strings.TrimPrefix(file, pathPrefix)
if header.Name == "" {
return nil
}
// write header
if err := tarWriter.WriteHeader(header); err != nil {
return err
}
// if not a dir, write file content
if !fi.IsDir() {
data, err := os.Open(file)
if err != nil {
return err
}
if n, err := io.Copy(tarWriter, data); err != nil {
log.Printf("error copying file %q: %v\n", file, err)
return err
} else {
log.Printf("wrote %d bytes to tar stream\n", n)
}
}
time.Sleep(time.Millisecond * 10)
return nil
})
if err != nil {
rtn <- wshutil.RespErr[[]byte](fmt.Errorf("cannot create tar stream for %q: %w", path, err))
}
log.Printf("returning tar stream\n")
}()
log.Printf("returning channel\n")
return rtn
}
func (impl *ServerImpl) RemoteFileCopyCommand(ctx context.Context, data wshrpc.CommandRemoteFileCopyData) error {
log.Printf("RemoteFileCopyCommand: src=%s, dest=%s\n", data.SrcUri, data.DestUri)
opts := data.Opts
if opts == nil {
opts = &wshrpc.FileCopyOpts{}
}
destUri := data.DestUri
srcUri := data.SrcUri
// merge := opts.Merge
overwrite := opts.Overwrite
destConn, err := connparse.ParseURIAndReplaceCurrentHost(ctx, destUri)
if err != nil {
return fmt.Errorf("cannot parse destination URI %q: %w", srcUri, err)
}
destPathCleaned := filepath.Clean(wavebase.ExpandHomeDirSafe(destConn.Path))
destinfo, err := os.Stat(destPathCleaned)
if err == nil {
if !destinfo.IsDir() {
if !overwrite {
return fmt.Errorf("destination %q already exists, use overwrite option", destPathCleaned)
} else {
err := os.Remove(destPathCleaned)
if err != nil {
return fmt.Errorf("cannot remove file %q: %w", destPathCleaned, err)
}
}
}
} else if !errors.Is(err, fs.ErrNotExist) {
return fmt.Errorf("cannot stat destination %q: %w", destPathCleaned, err)
}
log.Printf("copying %q to %q\n", srcUri, destUri)
srcConn, err := connparse.ParseURIAndReplaceCurrentHost(ctx, srcUri)
if err != nil {
return fmt.Errorf("cannot parse source URI %q: %w", srcUri, err)
}
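// Same-host copies currently just call os.Rename (the source is moved); cross-connection copies will use the tar stream path below once it is enabled.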
if srcConn.Host == destConn.Host {
log.Printf("same host, copying file\n")
srcPathCleaned := filepath.Clean(wavebase.ExpandHomeDirSafe(srcConn.Path))
err := os.Rename(srcPathCleaned, destPathCleaned)
if err != nil {
return fmt.Errorf("cannot copy file %q to %q: %w", srcPathCleaned, destPathCleaned, err)
}
} else {
return fmt.Errorf("cannot copy file %q to %q: source and destination must be on the same host", srcUri, destPathCleaned)
}
/* TODO: uncomment once ready for cross-connection copy
timeout := time.Millisecond * 100
if opts.Timeout > 0 {
timeout = time.Duration(opts.Timeout) * time.Millisecond
}
readCtx, _ := context.WithTimeout(ctx, timeout)
readCtx, cancel := context.WithCancelCause(readCtx)
ioch := fileshare.ReadTarStream(readCtx, wshrpc.CommandRemoteStreamTarData{Path: srcUri, Opts: opts})
pipeReader, pipeWriter := io.Pipe()
iochan.WriterChan(readCtx, pipeWriter, ioch, func() {
log.Printf("closing pipe writer\n")
pipeWriter.Close()
pipeReader.Close()
}, cancel)
defer cancel(nil)
tarReader := tar.NewReader(pipeReader)
for {
select {
case <-readCtx.Done():
if readCtx.Err() != nil {
return context.Cause(readCtx)
}
return nil
default:
next, err := tarReader.Next()
if err != nil {
if errors.Is(err, io.EOF) {
// Do one more check for context error before returning
if readCtx.Err() != nil {
return context.Cause(readCtx)
}
return nil
}
return fmt.Errorf("cannot read tar stream: %w", err)
}
// Check for directory traversal
if strings.Contains(next.Name, "..") {
log.Printf("skipping file with unsafe path: %q\n", next.Name)
continue
}
finfo := next.FileInfo()
nextPath := filepath.Join(destPathCleaned, next.Name)
destinfo, err = os.Stat(nextPath)
if err != nil && !errors.Is(err, fs.ErrNotExist) {
return fmt.Errorf("cannot stat file %q: %w", nextPath, err)
}
log.Printf("new file: name %q; dest %q\n", next.Name, nextPath)
if destinfo != nil {
if destinfo.IsDir() {
if !finfo.IsDir() {
if !overwrite {
return fmt.Errorf("cannot create directory %q, file exists at path, overwrite not specified", nextPath)
} else {
err := os.Remove(nextPath)
if err != nil {
return fmt.Errorf("cannot remove file %q: %w", nextPath, err)
}
}
} else if !merge && !overwrite {
return fmt.Errorf("cannot create directory %q, directory exists at path, neither overwrite nor merge specified", nextPath)
} else if overwrite {
err := os.RemoveAll(nextPath)
if err != nil {
return fmt.Errorf("cannot remove directory %q: %w", nextPath, err)
}
}
} else {
if finfo.IsDir() {
if !overwrite {
return fmt.Errorf("cannot create file %q, directory exists at path, overwrite not specified", nextPath)
} else {
err := os.RemoveAll(nextPath)
if err != nil {
return fmt.Errorf("cannot remove directory %q: %w", nextPath, err)
}
}
} else if !overwrite {
return fmt.Errorf("cannot create file %q, file exists at path, overwrite not specified", nextPath)
} else {
err := os.Remove(nextPath)
if err != nil {
return fmt.Errorf("cannot remove file %q: %w", nextPath, err)
}
}
}
} else {
if finfo.IsDir() {
log.Printf("creating directory %q\n", nextPath)
err := os.MkdirAll(nextPath, finfo.Mode())
if err != nil {
return fmt.Errorf("cannot create directory %q: %w", nextPath, err)
}
} else {
err := os.MkdirAll(filepath.Dir(nextPath), 0755)
if err != nil {
return fmt.Errorf("cannot create parent directory %q: %w", filepath.Dir(nextPath), err)
}
file, err := os.Create(nextPath)
if err != nil {
return fmt.Errorf("cannot create new file %q: %w", nextPath, err)
}
_, err = io.Copy(file, tarReader)
if err != nil {
return fmt.Errorf("cannot write file %q: %w", nextPath, err)
}
file.Chmod(finfo.Mode())
file.Close()
}
}
}
}*/
return nil
}
func (impl *ServerImpl) RemoteListEntriesCommand(ctx context.Context, data wshrpc.CommandRemoteListEntriesData) chan wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData] {
ch := make(chan wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData], 16)
go func() {
defer close(ch)
path, err := wavebase.ExpandHomeDir(data.Path)
if err != nil {
ch <- wshutil.RespErr[wshrpc.CommandRemoteListEntriesRtnData](err)
return
}
innerFilesEntries := []os.DirEntry{}
seen := 0
// data.Opts may be omitted by the caller, so default it before reading Offset/Limit
if data.Opts == nil {
data.Opts = &wshrpc.FileListOpts{}
}
if data.Opts.Limit == 0 {
data.Opts.Limit = wshrpc.MaxDirSize
}
if data.Opts.All {
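// Walk the whole tree, skipping entries before Offset and returning io.EOF to stop once Offset+Limit entries have been seen; directories themselves are not listed.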
fs.WalkDir(os.DirFS(path), ".", func(path string, d fs.DirEntry, err error) error {
defer func() {
seen++
}()
if seen < data.Opts.Offset {
return nil
}
if seen >= data.Opts.Offset+data.Opts.Limit {
return io.EOF
}
if err != nil {
return err
}
if d.IsDir() {
return nil
}
innerFilesEntries = append(innerFilesEntries, d)
return nil
})
} else {
innerFilesEntries, err = os.ReadDir(path)
if err != nil {
ch <- wshutil.RespErr[wshrpc.CommandRemoteListEntriesRtnData](fmt.Errorf("cannot open dir %q: %w", path, err))
return
}
}
var fileInfoArr []*wshrpc.FileInfo
for _, innerFileEntry := range innerFilesEntries {
if ctx.Err() != nil {
ch <- wshutil.RespErr[wshrpc.CommandRemoteListEntriesRtnData](ctx.Err())
return
}
innerFileInfoInt, err := innerFileEntry.Info()
if err != nil {
log.Printf("cannot stat file %q: %v\n", innerFileEntry.Name(), err)
continue
}
innerFileInfo := statToFileInfo(filepath.Join(path, innerFileInfoInt.Name()), innerFileInfoInt, false)
fileInfoArr = append(fileInfoArr, innerFileInfo)
if len(fileInfoArr) >= wshrpc.DirChunkSize {
resp := wshrpc.CommandRemoteListEntriesRtnData{FileInfo: fileInfoArr}
ch <- wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData]{Response: resp}
fileInfoArr = nil
}
}
if len(fileInfoArr) > 0 {
resp := wshrpc.CommandRemoteListEntriesRtnData{FileInfo: fileInfoArr}
ch <- wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData]{Response: resp}
}
}()
return ch
}
func statToFileInfo(fullPath string, finfo fs.FileInfo, extended bool) *wshrpc.FileInfo {
mimeType := utilfn.DetectMimeType(fullPath, finfo, extended)
mimeType := fileutil.DetectMimeType(fullPath, finfo, extended)
rtn := &wshrpc.FileInfo{
Path: wavebase.ReplaceHomeDir(fullPath),
Dir: computeDirPart(fullPath, finfo.IsDir()),
Name: finfo.Name(),
Size: finfo.Size(),
Mode: finfo.Mode(),
ModeStr: finfo.Mode().String(),
ModTime: finfo.ModTime().UnixMilli(),
IsDir: finfo.IsDir(),
MimeType: mimeType,
Path: wavebase.ReplaceHomeDir(fullPath),
Dir: computeDirPart(fullPath, finfo.IsDir()),
Name: finfo.Name(),
Size: finfo.Size(),
Mode: finfo.Mode(),
ModeStr: finfo.Mode().String(),
ModTime: finfo.ModTime().UnixMilli(),
IsDir: finfo.IsDir(),
MimeType: mimeType,
SupportsMkdir: true,
}
if finfo.IsDir() {
rtn.Size = -1
@ -269,10 +597,11 @@ func (*ServerImpl) fileInfoInternal(path string, extended bool) (*wshrpc.FileInf
finfo, err := os.Stat(cleanedPath)
if os.IsNotExist(err) {
return &wshrpc.FileInfo{
Path: wavebase.ReplaceHomeDir(path),
Dir: computeDirPart(path, false),
NotFound: true,
ReadOnly: checkIsReadOnly(cleanedPath, finfo, false),
Path: wavebase.ReplaceHomeDir(path),
Dir: computeDirPart(path, false),
NotFound: true,
ReadOnly: checkIsReadOnly(cleanedPath, finfo, false),
SupportsMkdir: true,
}, nil
}
if err != nil {
@ -324,16 +653,49 @@ func (impl *ServerImpl) RemoteFileTouchCommand(ctx context.Context, path string)
return nil
}
func (impl *ServerImpl) RemoteFileRenameCommand(ctx context.Context, pathTuple [2]string) error {
path := pathTuple[0]
newPath := pathTuple[1]
cleanedPath := filepath.Clean(wavebase.ExpandHomeDirSafe(path))
cleanedNewPath := filepath.Clean(wavebase.ExpandHomeDirSafe(newPath))
if _, err := os.Stat(cleanedNewPath); err == nil {
return fmt.Errorf("destination file path %q already exists", path)
func (impl *ServerImpl) RemoteFileMoveCommand(ctx context.Context, data wshrpc.CommandRemoteFileCopyData) error {
log.Printf("RemoteFileCopyCommand: src=%s, dest=%s\n", data.SrcUri, data.DestUri)
opts := data.Opts
destUri := data.DestUri
srcUri := data.SrcUri
overwrite := opts != nil && opts.Overwrite
destConn, err := connparse.ParseURIAndReplaceCurrentHost(ctx, destUri)
if err != nil {
return fmt.Errorf("cannot parse destination URI %q: %w", srcUri, err)
}
if err := os.Rename(cleanedPath, cleanedNewPath); err != nil {
return fmt.Errorf("cannot rename file %q to %q: %w", cleanedPath, cleanedNewPath, err)
destPathCleaned := filepath.Clean(wavebase.ExpandHomeDirSafe(destConn.Path))
destinfo, err := os.Stat(destPathCleaned)
if err == nil {
if !destinfo.IsDir() {
if !overwrite {
return fmt.Errorf("destination %q already exists, use overwrite option", destUri)
} else {
err := os.Remove(destPathCleaned)
if err != nil {
return fmt.Errorf("cannot remove file %q: %w", destUri, err)
}
}
}
} else if !errors.Is(err, fs.ErrNotExist) {
return fmt.Errorf("cannot stat destination %q: %w", destUri, err)
}
log.Printf("moving %q to %q\n", srcUri, destUri)
srcConn, err := connparse.ParseURIAndReplaceCurrentHost(ctx, srcUri)
if err != nil {
return fmt.Errorf("cannot parse source URI %q: %w", srcUri, err)
}
log.Printf("source host: %q, destination host: %q\n", srcConn.Host, destConn.Host)
if srcConn.Host == destConn.Host {
log.Printf("moving file on same host\n")
srcPathCleaned := filepath.Clean(wavebase.ExpandHomeDirSafe(srcConn.Path))
log.Printf("moving %q to %q\n", srcPathCleaned, destPathCleaned)
err := os.Rename(srcPathCleaned, destPathCleaned)
if err != nil {
return fmt.Errorf("cannot move file %q to %q: %w", srcPathCleaned, destPathCleaned, err)
}
} else {
return fmt.Errorf("cannot move file %q to %q: source and destination must be on the same host", srcUri, destUri)
}
return nil
}
@ -353,12 +715,12 @@ func (impl *ServerImpl) RemoteMkdirCommand(ctx context.Context, path string) err
return nil
}
func (*ServerImpl) RemoteWriteFileCommand(ctx context.Context, data wshrpc.CommandRemoteWriteFileData) error {
path, err := wavebase.ExpandHomeDir(data.Path)
func (*ServerImpl) RemoteWriteFileCommand(ctx context.Context, data wshrpc.FileData) error {
path, err := wavebase.ExpandHomeDir(data.Info.Path)
if err != nil {
return err
}
createMode := data.CreateMode
createMode := data.Info.Mode
if createMode == 0 {
createMode = 0644
}
@ -368,10 +730,42 @@ func (*ServerImpl) RemoteWriteFileCommand(ctx context.Context, data wshrpc.Comma
if err != nil {
return fmt.Errorf("cannot decode base64 data: %w", err)
}
err = os.WriteFile(path, dataBytes[:n], createMode)
if err != nil {
return fmt.Errorf("cannot write file %q: %w", path, err)
finfo, err := os.Stat(path)
if err != nil && !errors.Is(err, fs.ErrNotExist) {
return fmt.Errorf("cannot stat file %q: %w", path, err)
}
fileSize := int64(0)
if finfo != nil {
fileSize = finfo.Size()
}
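// Default to writing at the current end of the file; an explicit At.Offset is clamped to the existing size.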
offset := fileSize
if data.At != nil {
if data.At.Offset > 0 {
offset = min(data.At.Offset, fileSize)
}
}
openFlags := os.O_CREATE | os.O_WRONLY
// Info.Opts may be omitted by the caller, so guard before reading the truncate/append flags
fileOpts := data.Info.Opts
if fileOpts == nil {
fileOpts = &wshrpc.FileOpts{}
}
if fileOpts.Truncate {
openFlags |= os.O_TRUNC
}
if fileOpts.Append {
openFlags |= os.O_APPEND
}
file, err := os.OpenFile(path, openFlags, createMode)
if err != nil {
return fmt.Errorf("cannot open file %q: %w", path, err)
}
if offset > 0 && !fileOpts.Append {
n, err = file.WriteAt(dataBytes[:n], offset)
} else {
n, err = file.Write(dataBytes[:n])
}
if err != nil {
return fmt.Errorf("cannot write to file %q: %w", path, err)
}
log.Printf("wrote %d bytes to file %q at offset %q\n", n, path, offset)
return nil
}

View File

@ -12,13 +12,24 @@ import (
"os"
"reflect"
"github.com/wavetermdev/waveterm/pkg/filestore"
"github.com/wavetermdev/waveterm/pkg/ijson"
"github.com/wavetermdev/waveterm/pkg/vdom"
"github.com/wavetermdev/waveterm/pkg/waveobj"
"github.com/wavetermdev/waveterm/pkg/wconfig"
"github.com/wavetermdev/waveterm/pkg/wps"
)
const (
// MaxFileSize is the maximum file size that can be read
MaxFileSize = 50 * 1024 * 1024 // 50M
// MaxDirSize is the maximum number of entries that can be read in a directory
MaxDirSize = 1024
// FileChunkSize is the size of the file chunk to read
FileChunkSize = 16 * 1024
// DirChunkSize is the size of the directory chunk to read
DirChunkSize = 128
)
const LocalConnName = "local"
const (
@ -45,12 +56,15 @@ const (
Command_ControllerResync = "controllerresync"
Command_FileAppend = "fileappend"
Command_FileAppendIJson = "fileappendijson"
Command_Mkdir = "mkdir"
Command_ResolveIds = "resolveids"
Command_BlockInfo = "blockinfo"
Command_CreateBlock = "createblock"
Command_DeleteBlock = "deleteblock"
Command_FileWrite = "filewrite"
Command_FileRead = "fileread"
Command_FileMove = "filemove"
Command_FileCopy = "filecopy"
Command_EventPublish = "eventpublish"
Command_EventRecv = "eventrecv"
Command_EventSub = "eventsub"
@ -63,10 +77,13 @@ const (
Command_Test = "test"
Command_SetConfig = "setconfig"
Command_SetConnectionsConfig = "connectionsconfig"
Command_GetFullConfig = "getfullconfig"
Command_RemoteStreamFile = "remotestreamfile"
Command_RemoteTarStream = "remotetarstream"
Command_RemoteFileInfo = "remotefileinfo"
Command_RemoteFileTouch = "remotefiletouch"
Command_RemoteWriteFile = "remotewritefile"
Command_RemoteFileDelete = "remotefiledelete"
Command_RemoteFileJoin = "remotefilejoin"
Command_WaveInfo = "waveinfo"
@ -131,14 +148,19 @@ type WshRpcInterface interface {
DeleteBlockCommand(ctx context.Context, data CommandDeleteBlockData) error
DeleteSubBlockCommand(ctx context.Context, data CommandDeleteBlockData) error
WaitForRouteCommand(ctx context.Context, data CommandWaitForRouteData) (bool, error)
FileCreateCommand(ctx context.Context, data CommandFileCreateData) error
FileDeleteCommand(ctx context.Context, data CommandFileData) error
FileAppendCommand(ctx context.Context, data CommandFileData) error
FileMkdirCommand(ctx context.Context, data FileData) error
FileCreateCommand(ctx context.Context, data FileData) error
FileDeleteCommand(ctx context.Context, data FileData) error
FileAppendCommand(ctx context.Context, data FileData) error
FileAppendIJsonCommand(ctx context.Context, data CommandAppendIJsonData) error
FileWriteCommand(ctx context.Context, data CommandFileData) error
FileReadCommand(ctx context.Context, data CommandFileData) (string, error)
FileInfoCommand(ctx context.Context, data CommandFileData) (*WaveFileInfo, error)
FileListCommand(ctx context.Context, data CommandFileListData) ([]*WaveFileInfo, error)
FileWriteCommand(ctx context.Context, data FileData) error
FileReadCommand(ctx context.Context, data FileData) (*FileData, error)
FileStreamTarCommand(ctx context.Context, data CommandRemoteStreamTarData) <-chan RespOrErrorUnion[[]byte]
FileMoveCommand(ctx context.Context, data CommandFileCopyData) error
FileCopyCommand(ctx context.Context, data CommandFileCopyData) error
FileInfoCommand(ctx context.Context, data FileData) (*FileInfo, error)
FileListCommand(ctx context.Context, data FileListData) ([]*FileInfo, error)
FileListStreamCommand(ctx context.Context, data FileListData) <-chan RespOrErrorUnion[CommandRemoteListEntriesRtnData]
EventPublishCommand(ctx context.Context, data wps.WaveEvent) error
EventSubCommand(ctx context.Context, data wps.SubscriptionRequest) error
EventUnsubCommand(ctx context.Context, data string) error
@ -150,6 +172,7 @@ type WshRpcInterface interface {
TestCommand(ctx context.Context, data string) error
SetConfigCommand(ctx context.Context, data MetaSettingsType) error
SetConnectionsConfigCommand(ctx context.Context, data ConnConfigRequest) error
GetFullConfigCommand(ctx context.Context) (wconfig.FullConfigType, error)
BlockInfoCommand(ctx context.Context, blockId string) (*BlockInfoData, error)
WaveInfoCommand(ctx context.Context) (*WaveInfoData, error)
WshActivityCommand(ct context.Context, data map[string]int) error
@ -175,12 +198,15 @@ type WshRpcInterface interface {
EventRecvCommand(ctx context.Context, data wps.WaveEvent) error
// remotes
RemoteStreamFileCommand(ctx context.Context, data CommandRemoteStreamFileData) chan RespOrErrorUnion[CommandRemoteStreamFileRtnData]
RemoteStreamFileCommand(ctx context.Context, data CommandRemoteStreamFileData) chan RespOrErrorUnion[FileData]
RemoteTarStreamCommand(ctx context.Context, data CommandRemoteStreamTarData) <-chan RespOrErrorUnion[[]byte]
RemoteFileCopyCommand(ctx context.Context, data CommandRemoteFileCopyData) error
RemoteListEntriesCommand(ctx context.Context, data CommandRemoteListEntriesData) chan RespOrErrorUnion[CommandRemoteListEntriesRtnData]
RemoteFileInfoCommand(ctx context.Context, path string) (*FileInfo, error)
RemoteFileTouchCommand(ctx context.Context, path string) error
RemoteFileRenameCommand(ctx context.Context, pathTuple [2]string) error
RemoteFileMoveCommand(ctx context.Context, data CommandRemoteFileCopyData) error
RemoteFileDeleteCommand(ctx context.Context, path string) error
RemoteWriteFileCommand(ctx context.Context, data CommandRemoteWriteFileData) error
RemoteWriteFileCommand(ctx context.Context, data FileData) error
RemoteFileJoinCommand(ctx context.Context, paths []string) (*FileInfo, error)
RemoteMkdirCommand(ctx context.Context, path string) error
RemoteStreamCpuDataCommand(ctx context.Context) chan RespOrErrorUnion[TimeSeriesData]
@ -213,7 +239,7 @@ type WshServerCommandMeta struct {
}
type RpcOpts struct {
Timeout int `json:"timeout,omitempty"`
Timeout int64 `json:"timeout,omitempty"`
NoResponse bool `json:"noresponse,omitempty"`
Route string `json:"route,omitempty"`
@ -341,42 +367,63 @@ type CommandBlockInputData struct {
TermSize *waveobj.TermSize `json:"termsize,omitempty"`
}
type CommandFileDataAt struct {
type FileDataAt struct {
Offset int64 `json:"offset"`
Size int64 `json:"size,omitempty"`
Size int `json:"size,omitempty"`
}
type CommandFileData struct {
ZoneId string `json:"zoneid" wshcontext:"BlockId"`
FileName string `json:"filename"`
Data64 string `json:"data64,omitempty"`
At *CommandFileDataAt `json:"at,omitempty"` // if set, this turns read/write ops to ReadAt/WriteAt ops (len is only used for ReadAt)
type FileData struct {
Info *FileInfo `json:"info,omitempty"`
Data64 string `json:"data64,omitempty"`
Entries []*FileInfo `json:"entries,omitempty"`
At *FileDataAt `json:"at,omitempty"` // if set, this turns read/write ops to ReadAt/WriteAt ops (len is only used for ReadAt)
}
type WaveFileInfo struct {
ZoneId string `json:"zoneid"`
Name string `json:"name"`
Opts filestore.FileOptsType `json:"opts,omitempty"`
Size int64 `json:"size,omitempty"`
CreatedTs int64 `json:"createdts,omitempty"`
ModTs int64 `json:"modts,omitempty"`
Meta map[string]any `json:"meta,omitempty"`
IsDir bool `json:"isdir,omitempty"`
type FileInfo struct {
Path string `json:"path"` // cleaned path (may have "~")
Dir string `json:"dir,omitempty"` // returns the directory part of the path (if this is a directory, it will be equal to Path). "~" will be expanded, and separators will be normalized to "/"
Name string `json:"name,omitempty"`
NotFound bool `json:"notfound,omitempty"`
Opts *FileOpts `json:"opts,omitempty"`
Size int64 `json:"size,omitempty"`
Meta *FileMeta `json:"meta,omitempty"`
Mode os.FileMode `json:"mode,omitempty"`
ModeStr string `json:"modestr,omitempty"`
ModTime int64 `json:"modtime,omitempty"`
IsDir bool `json:"isdir,omitempty"`
SupportsMkdir bool `json:"supportsmkdir,omitempty"`
MimeType string `json:"mimetype,omitempty"`
ReadOnly bool `json:"readonly,omitempty"` // this is not set for fileinfo's returned from directory listings
}
type CommandFileListData struct {
ZoneId string `json:"zoneid"`
Prefix string `json:"prefix,omitempty"`
All bool `json:"all,omitempty"`
Offset int `json:"offset,omitempty"`
Limit int `json:"limit,omitempty"`
type FileOpts struct {
MaxSize int64 `json:"maxsize,omitempty"`
Circular bool `json:"circular,omitempty"`
IJson bool `json:"ijson,omitempty"`
IJsonBudget int `json:"ijsonbudget,omitempty"`
Truncate bool `json:"truncate,omitempty"`
Append bool `json:"append,omitempty"`
}
type CommandFileCreateData struct {
ZoneId string `json:"zoneid"`
FileName string `json:"filename"`
Meta map[string]any `json:"meta,omitempty"`
Opts *filestore.FileOptsType `json:"opts,omitempty"`
type FileMeta = map[string]any
type FileListStreamResponse <-chan RespOrErrorUnion[CommandRemoteListEntriesRtnData]
type FileListData struct {
Path string `json:"path"`
Opts *FileListOpts `json:"opts,omitempty"`
}
type FileListOpts struct {
All bool `json:"all,omitempty"`
Offset int `json:"offset,omitempty"`
Limit int `json:"limit,omitempty"`
}
type FileCreateData struct {
Path string `json:"path"`
Meta map[string]any `json:"meta,omitempty"`
Opts *FileOpts `json:"opts,omitempty"`
}
type CommandAppendIJsonData struct {
@ -451,18 +498,28 @@ type CpuDataType struct {
Value float64 `json:"value"`
}
type FileInfo struct {
Path string `json:"path"` // cleaned path (may have "~")
Dir string `json:"dir"` // returns the directory part of the path (if this is a directory, it will be equal to Path). "~" will be expanded, and separators will be normalized to "/"
Name string `json:"name"`
NotFound bool `json:"notfound,omitempty"`
Size int64 `json:"size"`
Mode os.FileMode `json:"mode"`
ModeStr string `json:"modestr"`
ModTime int64 `json:"modtime"`
IsDir bool `json:"isdir,omitempty"`
MimeType string `json:"mimetype,omitempty"`
ReadOnly bool `json:"readonly,omitempty"` // this is not set for fileinfo's returned from directory listings
type CommandFileCopyData struct {
SrcUri string `json:"srcuri"`
DestUri string `json:"desturi"`
Opts *FileCopyOpts `json:"opts,omitempty"`
}
type CommandRemoteFileCopyData struct {
SrcUri string `json:"srcuri"`
DestUri string `json:"desturi"`
Opts *FileCopyOpts `json:"opts,omitempty"`
}
type CommandRemoteStreamTarData struct {
Path string `json:"path"`
Opts *FileCopyOpts `json:"opts,omitempty"`
}
type FileCopyOpts struct {
Overwrite bool `json:"overwrite,omitempty"`
Recursive bool `json:"recursive,omitempty"`
Merge bool `json:"merge,omitempty"`
Timeout int64 `json:"timeout,omitempty"`
}
type CommandRemoteStreamFileData struct {
@ -470,53 +527,19 @@ type CommandRemoteStreamFileData struct {
ByteRange string `json:"byterange,omitempty"`
}
type CommandRemoteStreamFileRtnData struct {
type CommandRemoteListEntriesData struct {
Path string `json:"path"`
Opts *FileListOpts `json:"opts,omitempty"`
}
type CommandRemoteListEntriesRtnData struct {
FileInfo []*FileInfo `json:"fileinfo,omitempty"`
Data64 string `json:"data64,omitempty"`
}
type CommandRemoteWriteFileData struct {
Path string `json:"path"`
Data64 string `json:"data64"`
CreateMode os.FileMode `json:"createmode,omitempty"`
}
type ConnKeywords struct {
ConnWshEnabled *bool `json:"conn:wshenabled,omitempty"`
ConnAskBeforeWshInstall *bool `json:"conn:askbeforewshinstall,omitempty"`
ConnWshPath string `json:"conn:wshpath,omitempty"`
ConnShellPath string `json:"conn:shellpath,omitempty"`
ConnIgnoreSshConfig *bool `json:"conn:ignoresshconfig,omitempty"`
DisplayHidden *bool `json:"display:hidden,omitempty"`
DisplayOrder float32 `json:"display:order,omitempty"`
TermClear bool `json:"term:*,omitempty"`
TermFontSize float64 `json:"term:fontsize,omitempty"`
TermFontFamily string `json:"term:fontfamily,omitempty"`
TermTheme string `json:"term:theme,omitempty"`
SshUser *string `json:"ssh:user,omitempty"`
SshHostName *string `json:"ssh:hostname,omitempty"`
SshPort *string `json:"ssh:port,omitempty"`
SshIdentityFile []string `json:"ssh:identityfile,omitempty"`
SshIdentitiesOnly *bool `json:"ssh:identitiesonly,omitempty"`
SshBatchMode *bool `json:"ssh:batchmode,omitempty"`
SshPubkeyAuthentication *bool `json:"ssh:pubkeyauthentication,omitempty"`
SshPasswordAuthentication *bool `json:"ssh:passwordauthentication,omitempty"`
SshKbdInteractiveAuthentication *bool `json:"ssh:kbdinteractiveauthentication,omitempty"`
SshPreferredAuthentications []string `json:"ssh:preferredauthentications,omitempty"`
SshAddKeysToAgent *bool `json:"ssh:addkeystoagent,omitempty"`
SshIdentityAgent *string `json:"ssh:identityagent,omitempty"`
SshProxyJump []string `json:"ssh:proxyjump,omitempty"`
SshUserKnownHostsFile []string `json:"ssh:userknownhostsfile,omitempty"`
SshGlobalKnownHostsFile []string `json:"ssh:globalknownhostsfile,omitempty"`
}
type ConnRequest struct {
Host string `json:"host"`
Keywords ConnKeywords `json:"keywords,omitempty"`
LogBlockId string `json:"logblockid,omitempty"`
Host string `json:"host"`
Keywords wconfig.ConnKeywords `json:"keywords,omitempty"`
LogBlockId string `json:"logblockid,omitempty"`
}
type RemoteInfo struct {
@ -586,11 +609,11 @@ type CommandWebSelectorData struct {
}
type BlockInfoData struct {
BlockId string `json:"blockid"`
TabId string `json:"tabid"`
WorkspaceId string `json:"workspaceid"`
Block *waveobj.Block `json:"block"`
Files []*filestore.WaveFile `json:"files"`
BlockId string `json:"blockid"`
TabId string `json:"tabid"`
WorkspaceId string `json:"workspaceid"`
Block *waveobj.Block `json:"block"`
Files []*FileInfo `json:"files"`
}
type WaveNotificationOptions struct {

View File

@ -25,9 +25,11 @@ import (
"github.com/wavetermdev/waveterm/pkg/panichandler"
"github.com/wavetermdev/waveterm/pkg/remote"
"github.com/wavetermdev/waveterm/pkg/remote/conncontroller"
"github.com/wavetermdev/waveterm/pkg/remote/fileshare"
"github.com/wavetermdev/waveterm/pkg/telemetry"
"github.com/wavetermdev/waveterm/pkg/util/envutil"
"github.com/wavetermdev/waveterm/pkg/util/utilfn"
"github.com/wavetermdev/waveterm/pkg/util/wavefileutil"
"github.com/wavetermdev/waveterm/pkg/waveai"
"github.com/wavetermdev/waveterm/pkg/wavebase"
"github.com/wavetermdev/waveterm/pkg/waveobj"
@ -93,7 +95,7 @@ func MakePlotData(ctx context.Context, blockId string) error {
if viewName != "cpuplot" && viewName != "sysinfo" {
return fmt.Errorf("invalid view type: %s", viewName)
}
return filestore.WFS.MakeFile(ctx, blockId, "cpuplotdata", nil, filestore.FileOptsType{})
return filestore.WFS.MakeFile(ctx, blockId, "cpuplotdata", nil, wshrpc.FileOpts{})
}
func SavePlotData(ctx context.Context, blockId string, history string) error {
@ -278,216 +280,63 @@ func (ws *WshServer) ControllerAppendOutputCommand(ctx context.Context, data wsh
return nil
}
func (ws *WshServer) FileCreateCommand(ctx context.Context, data wshrpc.CommandFileCreateData) error {
var fileOpts filestore.FileOptsType
if data.Opts != nil {
fileOpts = *data.Opts
}
err := filestore.WFS.MakeFile(ctx, data.ZoneId, data.FileName, data.Meta, fileOpts)
func (ws *WshServer) FileCreateCommand(ctx context.Context, data wshrpc.FileData) error {
data.Data64 = ""
err := fileshare.PutFile(ctx, data)
if err != nil {
return fmt.Errorf("error creating blockfile: %w", err)
return fmt.Errorf("error creating file: %w", err)
}
wps.Broker.Publish(wps.WaveEvent{
Event: wps.Event_BlockFile,
Scopes: []string{waveobj.MakeORef(waveobj.OType_Block, data.ZoneId).String()},
Data: &wps.WSFileEventData{
ZoneId: data.ZoneId,
FileName: data.FileName,
FileOp: wps.FileOp_Create,
},
})
return nil
}
func (ws *WshServer) FileDeleteCommand(ctx context.Context, data wshrpc.CommandFileData) error {
err := filestore.WFS.DeleteFile(ctx, data.ZoneId, data.FileName)
if err != nil {
return fmt.Errorf("error deleting blockfile: %w", err)
}
wps.Broker.Publish(wps.WaveEvent{
Event: wps.Event_BlockFile,
Scopes: []string{waveobj.MakeORef(waveobj.OType_Block, data.ZoneId).String()},
Data: &wps.WSFileEventData{
ZoneId: data.ZoneId,
FileName: data.FileName,
FileOp: wps.FileOp_Delete,
},
})
return nil
func (ws *WshServer) FileMkdirCommand(ctx context.Context, data wshrpc.FileData) error {
return fileshare.Mkdir(ctx, data.Info.Path)
}
func waveFileToWaveFileInfo(wf *filestore.WaveFile) *wshrpc.WaveFileInfo {
return &wshrpc.WaveFileInfo{
ZoneId: wf.ZoneId,
Name: wf.Name,
Opts: wf.Opts,
Size: wf.Size,
CreatedTs: wf.CreatedTs,
ModTs: wf.ModTs,
Meta: wf.Meta,
}
func (ws *WshServer) FileDeleteCommand(ctx context.Context, data wshrpc.FileData) error {
return fileshare.Delete(ctx, data.Info.Path)
}
func (ws *WshServer) FileInfoCommand(ctx context.Context, data wshrpc.CommandFileData) (*wshrpc.WaveFileInfo, error) {
fileInfo, err := filestore.WFS.Stat(ctx, data.ZoneId, data.FileName)
if err != nil {
if err == fs.ErrNotExist {
return nil, fmt.Errorf("NOTFOUND: %w", err)
}
return nil, fmt.Errorf("error getting file info: %w", err)
}
return waveFileToWaveFileInfo(fileInfo), nil
func (ws *WshServer) FileInfoCommand(ctx context.Context, data wshrpc.FileData) (*wshrpc.FileInfo, error) {
return fileshare.Stat(ctx, data.Info.Path)
}
func (ws *WshServer) FileListCommand(ctx context.Context, data wshrpc.CommandFileListData) ([]*wshrpc.WaveFileInfo, error) {
fileListOrig, err := filestore.WFS.ListFiles(ctx, data.ZoneId)
if err != nil {
return nil, fmt.Errorf("error listing blockfiles: %w", err)
}
var fileList []*wshrpc.WaveFileInfo
for _, wf := range fileListOrig {
fileList = append(fileList, waveFileToWaveFileInfo(wf))
}
if data.Prefix != "" {
var filteredList []*wshrpc.WaveFileInfo
for _, file := range fileList {
if strings.HasPrefix(file.Name, data.Prefix) {
filteredList = append(filteredList, file)
}
}
fileList = filteredList
}
if !data.All {
var filteredList []*wshrpc.WaveFileInfo
dirMap := make(map[string]int64) // the value is max modtime
for _, file := range fileList {
// if there is an extra "/" after the prefix, don't include it
// first strip the prefix
relPath := strings.TrimPrefix(file.Name, data.Prefix)
// then check if there is a "/" after the prefix
if strings.Contains(relPath, "/") {
dirPath := strings.Split(relPath, "/")[0]
modTime := dirMap[dirPath]
if file.ModTs > modTime {
dirMap[dirPath] = file.ModTs
}
continue
}
filteredList = append(filteredList, file)
}
for dir := range dirMap {
filteredList = append(filteredList, &wshrpc.WaveFileInfo{
ZoneId: data.ZoneId,
Name: data.Prefix + dir + "/",
Size: 0,
Meta: nil,
ModTs: dirMap[dir],
CreatedTs: dirMap[dir],
IsDir: true,
})
}
fileList = filteredList
}
if data.Offset > 0 {
if data.Offset >= len(fileList) {
fileList = nil
} else {
fileList = fileList[data.Offset:]
}
}
if data.Limit > 0 {
if data.Limit < len(fileList) {
fileList = fileList[:data.Limit]
}
}
return fileList, nil
func (ws *WshServer) FileListCommand(ctx context.Context, data wshrpc.FileListData) ([]*wshrpc.FileInfo, error) {
return fileshare.ListEntries(ctx, data.Path, data.Opts)
}
func (ws *WshServer) FileWriteCommand(ctx context.Context, data wshrpc.CommandFileData) error {
dataBuf, err := base64.StdEncoding.DecodeString(data.Data64)
if err != nil {
return fmt.Errorf("error decoding data64: %w", err)
}
if data.At != nil {
err = filestore.WFS.WriteAt(ctx, data.ZoneId, data.FileName, data.At.Offset, dataBuf)
if err == fs.ErrNotExist {
return fmt.Errorf("NOTFOUND: %w", err)
}
if err != nil {
return fmt.Errorf("error writing to blockfile: %w", err)
}
} else {
err = filestore.WFS.WriteFile(ctx, data.ZoneId, data.FileName, dataBuf)
if err == fs.ErrNotExist {
return fmt.Errorf("NOTFOUND: %w", err)
}
if err != nil {
return fmt.Errorf("error writing to blockfile: %w", err)
}
}
wps.Broker.Publish(wps.WaveEvent{
Event: wps.Event_BlockFile,
Scopes: []string{waveobj.MakeORef(waveobj.OType_Block, data.ZoneId).String()},
Data: &wps.WSFileEventData{
ZoneId: data.ZoneId,
FileName: data.FileName,
FileOp: wps.FileOp_Invalidate,
},
})
return nil
func (ws *WshServer) FileListStreamCommand(ctx context.Context, data wshrpc.FileListData) <-chan wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData] {
return fileshare.ListEntriesStream(ctx, data.Path, data.Opts)
}
func (ws *WshServer) FileReadCommand(ctx context.Context, data wshrpc.CommandFileData) (string, error) {
if data.At != nil {
_, dataBuf, err := filestore.WFS.ReadAt(ctx, data.ZoneId, data.FileName, data.At.Offset, data.At.Size)
if err == fs.ErrNotExist {
return "", fmt.Errorf("NOTFOUND: %w", err)
}
if err != nil {
return "", fmt.Errorf("error reading blockfile: %w", err)
}
return base64.StdEncoding.EncodeToString(dataBuf), nil
} else {
_, dataBuf, err := filestore.WFS.ReadFile(ctx, data.ZoneId, data.FileName)
if err == fs.ErrNotExist {
return "", fmt.Errorf("NOTFOUND: %w", err)
}
if err != nil {
return "", fmt.Errorf("error reading blockfile: %w", err)
}
return base64.StdEncoding.EncodeToString(dataBuf), nil
}
func (ws *WshServer) FileWriteCommand(ctx context.Context, data wshrpc.FileData) error {
return fileshare.PutFile(ctx, data)
}
func (ws *WshServer) FileAppendCommand(ctx context.Context, data wshrpc.CommandFileData) error {
dataBuf, err := base64.StdEncoding.DecodeString(data.Data64)
if err != nil {
return fmt.Errorf("error decoding data64: %w", err)
}
err = filestore.WFS.AppendData(ctx, data.ZoneId, data.FileName, dataBuf)
if err == fs.ErrNotExist {
return fmt.Errorf("NOTFOUND: %w", err)
}
if err != nil {
return fmt.Errorf("error appending to blockfile: %w", err)
}
wps.Broker.Publish(wps.WaveEvent{
Event: wps.Event_BlockFile,
Scopes: []string{waveobj.MakeORef(waveobj.OType_Block, data.ZoneId).String()},
Data: &wps.WSFileEventData{
ZoneId: data.ZoneId,
FileName: data.FileName,
FileOp: wps.FileOp_Append,
Data64: base64.StdEncoding.EncodeToString(dataBuf),
},
})
return nil
func (ws *WshServer) FileReadCommand(ctx context.Context, data wshrpc.FileData) (*wshrpc.FileData, error) {
return fileshare.Read(ctx, data)
}
func (ws *WshServer) FileCopyCommand(ctx context.Context, data wshrpc.CommandFileCopyData) error {
return fileshare.Copy(ctx, data)
}
func (ws *WshServer) FileMoveCommand(ctx context.Context, data wshrpc.CommandFileCopyData) error {
return fileshare.Move(ctx, data)
}
func (ws *WshServer) FileStreamTarCommand(ctx context.Context, data wshrpc.CommandRemoteStreamTarData) <-chan wshrpc.RespOrErrorUnion[[]byte] {
return fileshare.ReadTarStream(ctx, data)
}
func (ws *WshServer) FileAppendCommand(ctx context.Context, data wshrpc.FileData) error {
return fileshare.Append(ctx, data)
}
func (ws *WshServer) FileAppendIJsonCommand(ctx context.Context, data wshrpc.CommandAppendIJsonData) error {
tryCreate := true
if data.FileName == blockcontroller.BlockFile_VDom && tryCreate {
err := filestore.WFS.MakeFile(ctx, data.ZoneId, data.FileName, nil, filestore.FileOptsType{MaxSize: blockcontroller.DefaultHtmlMaxFileSize, IJson: true})
err := filestore.WFS.MakeFile(ctx, data.ZoneId, data.FileName, nil, wshrpc.FileOpts{MaxSize: blockcontroller.DefaultHtmlMaxFileSize, IJson: true})
if err != nil && err != fs.ErrExist {
return fmt.Errorf("error creating blockfile[vdom]: %w", err)
}
@ -604,6 +453,11 @@ func (ws *WshServer) SetConnectionsConfigCommand(ctx context.Context, data wshrp
return wconfig.SetConnectionsConfigValue(data.Host, data.MetaMapType)
}
func (ws *WshServer) GetFullConfigCommand(ctx context.Context) (wconfig.FullConfigType, error) {
watcher := wconfig.GetWatcher()
return watcher.GetFullConfig(), nil
}
func (ws *WshServer) ConnStatusCommand(ctx context.Context) ([]wshrpc.ConnStatus, error) {
rtn := conncontroller.GetAllConnStatus()
return rtn, nil
@ -652,7 +506,7 @@ func (ws *WshServer) ConnDisconnectCommand(ctx context.Context, connName string)
if err != nil {
return fmt.Errorf("error parsing connection name: %w", err)
}
conn := conncontroller.GetConn(ctx, connOpts, &wshrpc.ConnKeywords{})
conn := conncontroller.GetConn(ctx, connOpts, &wconfig.ConnKeywords{})
if conn == nil {
return fmt.Errorf("connection not found: %s", connName)
}
@ -698,7 +552,7 @@ func (ws *WshServer) ConnReinstallWshCommand(ctx context.Context, data wshrpc.Co
if err != nil {
return fmt.Errorf("error parsing connection name: %w", err)
}
conn := conncontroller.GetConn(ctx, connOpts, &wshrpc.ConnKeywords{})
conn := conncontroller.GetConn(ctx, connOpts, &wconfig.ConnKeywords{})
if conn == nil {
return fmt.Errorf("connection not found: %s", connName)
}
@ -735,7 +589,7 @@ func (ws *WshServer) ConnUpdateWshCommand(ctx context.Context, remoteInfo wshrpc
if err != nil {
return false, fmt.Errorf("error parsing connection name: %w", err)
}
conn := conncontroller.GetConn(ctx, connOpts, &wshrpc.ConnKeywords{})
conn := conncontroller.GetConn(ctx, connOpts, &wconfig.ConnKeywords{})
if conn == nil {
return false, fmt.Errorf("connection not found: %s", connName)
}
@ -823,12 +677,13 @@ func (ws *WshServer) BlockInfoCommand(ctx context.Context, blockId string) (*wsh
if err != nil {
return nil, fmt.Errorf("error listing blockfiles: %w", err)
}
fileInfoList := wavefileutil.WaveFileListToFileInfoList(fileList)
return &wshrpc.BlockInfoData{
BlockId: blockId,
TabId: tabId,
WorkspaceId: workspaceId,
Block: blockData,
Files: fileList,
Files: fileInfoList,
}, nil
}
@ -911,7 +766,7 @@ func (ws *WshServer) SetVarCommand(ctx context.Context, data wshrpc.CommandVarDa
_, fileData, err := filestore.WFS.ReadFile(ctx, data.ZoneId, data.FileName)
if err == fs.ErrNotExist {
fileData = []byte{}
err = filestore.WFS.MakeFile(ctx, data.ZoneId, data.FileName, nil, filestore.FileOptsType{})
err = filestore.WFS.MakeFile(ctx, data.ZoneId, data.FileName, nil, wshrpc.FileOpts{})
if err != nil {
return fmt.Errorf("error creating blockfile: %w", err)
}

View File

@ -19,10 +19,18 @@ import (
"github.com/wavetermdev/waveterm/pkg/wshrpc"
)
const DefaultRoute = "wavesrv"
const UpstreamRoute = "upstream"
const SysRoute = "sys" // this route doesn't exist, just a placeholder for system messages
const ElectronRoute = "electron"
const (
DefaultRoute = "wavesrv"
UpstreamRoute = "upstream"
SysRoute = "sys" // this route doesn't exist, just a placeholder for system messages
ElectronRoute = "electron"
RoutePrefix_Conn = "conn:"
RoutePrefix_Controller = "controller:"
RoutePrefix_Proc = "proc:"
RoutePrefix_Tab = "tab:"
RoutePrefix_FeBlock = "feblock:"
)
// this works like a network switch
@ -188,20 +196,26 @@ func (router *WshRouter) getAnnouncedRoute(routeId string) string {
func (router *WshRouter) sendRoutedMessage(msgBytes []byte, routeId string) bool {
rpc := router.GetRpc(routeId)
if rpc != nil {
// log.Printf("[router] sending message to %q via rpc\n", routeId)
rpc.SendRpcMessage(msgBytes)
return true
}
upstream := router.GetUpstreamClient()
if upstream != nil {
log.Printf("[router] sending message to %q via upstream\n", routeId)
upstream.SendRpcMessage(msgBytes)
return true
} else {
log.Printf("[router] sending message to %q via announced route\n", routeId)
// we are the upstream, so consult our announced routes map
localRouteId := router.getAnnouncedRoute(routeId)
log.Printf("[router] local route id: %q\n", localRouteId)
rpc := router.GetRpc(localRouteId)
if rpc == nil {
log.Printf("[router] no rpc for local route id %q\n", localRouteId)
return false
}
log.Printf("[router] sending message to %q via local route\n", localRouteId)
rpc.SendRpcMessage(msgBytes)
return true
}

View File

@ -24,6 +24,7 @@ import (
const DefaultTimeoutMs = 5000
const RespChSize = 32
const DefaultMessageChSize = 32
const CtxDoneChSize = 10
type ResponseFnType = func(any) error
@ -44,6 +45,7 @@ type WshRpc struct {
clientId string
InputCh chan []byte
OutputCh chan []byte
CtxDoneCh chan string // for context cancellation, value is ResId
RpcContext *atomic.Pointer[wshrpc.RpcContext]
AuthToken string
RpcMap map[string]*rpcData
@ -52,6 +54,7 @@ type WshRpc struct {
ResponseHandlerMap map[string]*RpcResponseHandler // reqId => handler
Debug bool
DebugName string
ServerDone bool
}
type wshRpcContextKey struct{}
@ -110,7 +113,7 @@ type RpcMessage struct {
Command string `json:"command,omitempty"`
ReqId string `json:"reqid,omitempty"`
ResId string `json:"resid,omitempty"`
Timeout int `json:"timeout,omitempty"`
Timeout int64 `json:"timeout,omitempty"`
Route string `json:"route,omitempty"` // to route/forward requests to alternate servers
AuthToken string `json:"authtoken,omitempty"` // needed for routing unauthenticated requests (WshRpcMultiProxy)
Source string `json:"source,omitempty"` // source route id
@ -206,6 +209,7 @@ func MakeWshRpc(inputCh chan []byte, outputCh chan []byte, rpcCtx wshrpc.RpcCont
clientId: uuid.New().String(),
InputCh: inputCh,
OutputCh: outputCh,
CtxDoneCh: make(chan string, CtxDoneChSize),
RpcMap: make(map[string]*rpcData),
RpcContext: &atomic.Pointer[wshrpc.RpcContext]{},
EventListener: MakeEventListener(),
@ -326,11 +330,33 @@ func (w *WshRpc) handleRequest(req *RpcMessage) {
}
func (w *WshRpc) runServer() {
defer close(w.OutputCh)
for msgBytes := range w.InputCh {
if w.Debug {
log.Printf("[%s] received message: %s\n", w.DebugName, string(msgBytes))
defer func() {
panichandler.PanicHandler("wshrpc.runServer", recover())
close(w.OutputCh)
w.setServerDone()
}()
outer:
for {
var msgBytes []byte
var inputChMore bool
var resIdTimeout string
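// Service both incoming messages and request-timeout notifications, so a timed-out request can be unregistered without blocking the main loop.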
select {
case msgBytes, inputChMore = <-w.InputCh:
if !inputChMore {
break outer
}
if w.Debug {
log.Printf("[%s] received message: %s\n", w.DebugName, string(msgBytes))
}
case resIdTimeout = <-w.CtxDoneCh:
if w.Debug {
log.Printf("[%s] received request timeout: %s\n", w.DebugName, resIdTimeout)
}
w.unregisterRpc(resIdTimeout, fmt.Errorf("EC-TIME: timeout waiting for response"))
continue
}
var msg RpcMessage
err := json.Unmarshal(msgBytes, &msg)
if err != nil {
@ -396,7 +422,7 @@ func (w *WshRpc) registerRpc(ctx context.Context, reqId string) chan *RpcMessage
panichandler.PanicHandler("registerRpc:timeout", recover())
}()
<-ctx.Done()
w.unregisterRpc(reqId, fmt.Errorf("EC-TIME: timeout waiting for response"))
w.retrySendTimeout(reqId)
}()
return rpcCh
}
@ -413,7 +439,13 @@ func (w *WshRpc) unregisterRpc(reqId string, err error) {
ResId: reqId,
Error: err.Error(),
}
rd.ResCh <- errResp
// non-blocking send since we're about to close anyway
// likely the channel isn't being actively read
// this also prevents us from blocking the main loop (and holding the lock)
select {
case rd.ResCh <- errResp:
default:
}
}
delete(w.RpcMap, reqId)
close(rd.ResCh)
@ -642,6 +674,9 @@ func (handler *RpcResponseHandler) IsDone() bool {
}
func (w *WshRpc) SendComplexRequest(command string, data any, opts *wshrpc.RpcOpts) (rtnHandler *RpcRequestHandler, rtnErr error) {
if w.IsServerDone() {
return nil, errors.New("server is no longer running, cannot send new requests")
}
if opts == nil {
opts = &wshrpc.RpcOpts{}
}
@ -681,3 +716,42 @@ func (w *WshRpc) SendComplexRequest(command string, data any, opts *wshrpc.RpcOp
w.OutputCh <- barr
return handler, nil
}
func (w *WshRpc) IsServerDone() bool {
w.Lock.Lock()
defer w.Lock.Unlock()
return w.ServerDone
}
func (w *WshRpc) setServerDone() {
w.Lock.Lock()
defer w.Lock.Unlock()
w.ServerDone = true
close(w.CtxDoneCh)
for range w.CtxDoneCh {
// drain channel
}
}
func (w *WshRpc) retrySendTimeout(resId string) {
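// Keep trying to queue the id of the timed-out request onto CtxDoneCh without blocking; give up once the server loop has exited.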
for {
done := func() bool {
w.Lock.Lock()
defer w.Lock.Unlock()
if w.ServerDone {
return true
}
select {
case w.CtxDoneCh <- resId:
return true
default:
return false
}
}()
if done {
return
}
time.Sleep(100 * time.Millisecond)
}
}

View File

@ -573,3 +573,14 @@ func InstallRcFiles() error {
wshBinDir := filepath.Join(waveDir, wavebase.RemoteWshBinDirName)
return shellutil.InitRcFiles(waveDir, wshBinDir)
}
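// SendErrCh returns a buffered, already-closed channel holding a single error response, for returning an error from a streaming command.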
func SendErrCh[T any](err error) <-chan wshrpc.RespOrErrorUnion[T] {
ch := make(chan wshrpc.RespOrErrorUnion[T], 1)
ch <- RespErr[T](err)
close(ch)
return ch
}
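// RespErr wraps err in a RespOrErrorUnion with no response payload.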
func RespErr[T any](err error) wshrpc.RespOrErrorUnion[T] {
return wshrpc.RespOrErrorUnion[T]{Error: err}
}

View File

@ -641,11 +641,11 @@ func (conn *WslConn) tryEnableWsh(ctx context.Context, clientDisplayName string)
}
}
func (conn *WslConn) getConnectionConfig() (wshrpc.ConnKeywords, bool) {
func (conn *WslConn) getConnectionConfig() (wconfig.ConnKeywords, bool) {
config := wconfig.GetWatcher().GetFullConfig()
connSettings, ok := config.Connections[conn.GetName()]
if !ok {
return wshrpc.ConnKeywords{}, false
return wconfig.ConnKeywords{}, false
}
return connSettings, true
}