mirror of
https://github.com/wavetermdev/waveterm.git
synced 2025-02-28 03:42:50 +01:00
merge branch 'main' into sylvie/hujson-config
This commit is contained in:
commit
661913a718
9
.github/workflows/build-helper.yml
vendored
9
.github/workflows/build-helper.yml
vendored
@ -38,6 +38,9 @@ jobs:
|
||||
sudo apt-get update
|
||||
sudo apt-get install --no-install-recommends -y libarchive-tools libopenjp2-tools rpm squashfs-tools
|
||||
sudo snap install snapcraft --classic
|
||||
sudo snap install lxd
|
||||
sudo lxd init --auto
|
||||
sudo snap refresh
|
||||
- name: Install Zig (not Mac)
|
||||
if: matrix.platform != 'darwin'
|
||||
uses: mlugg/setup-zig@v1
|
||||
@ -159,6 +162,12 @@ jobs:
|
||||
with:
|
||||
name: ${{ matrix.runner }}
|
||||
path: make
|
||||
- name: Upload Snapcraft logs on failure
|
||||
if: failure()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: ${{ matrix.runner }}-log
|
||||
path: /home/runner/.local/state/snapcraft/log
|
||||
create-release:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build-app
|
||||
|
1
BUILD.md
1
BUILD.md
@ -45,6 +45,7 @@ For packaging, the following additional packages are required:
|
||||
- `lxd` — [Installation instructions](https://canonical.com/lxd/install)
|
||||
- `snapcraft` — Run `sudo snap install snapcraft --classic`
|
||||
- `libarchive-tools` — Install via your package manager
|
||||
- `binutils` — Install via your package manager
|
||||
- `libopenjp2-tools` — Install via your package manager
|
||||
- `squashfs-tools` — Install via your package manager
|
||||
|
||||
|
@ -32,7 +32,7 @@ We have a set of recommended Visual Studio Code extensions to enforce our style
|
||||
|
||||
- For minor changes, you are welcome to [open a pull request](https://github.com/wavetermdev/waveterm/pulls).
|
||||
- For major changes, please [create an issue](https://github.com/wavetermdev/waveterm/issues/new) first.
|
||||
- If you are looking for a place to start take a look at [open issues](https://github.com/wavetermdev/waveterm/issues).
|
||||
- If you are looking for a place to start take a look at [Good First Issues](https://github.com/wavetermdev/waveterm/issues?q=is:issue%20state:open%20label:%22good%20first%20issue%22).
|
||||
- Join the [Discord channel](https://discord.gg/XfvZ334gwU) to collaborate with the community on your contribution.
|
||||
|
||||
### Development Environment
|
||||
|
@ -21,6 +21,7 @@ import (
|
||||
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/wshfs"
|
||||
"github.com/wavetermdev/waveterm/pkg/util/packetparser"
|
||||
"github.com/wavetermdev/waveterm/pkg/util/sigutil"
|
||||
"github.com/wavetermdev/waveterm/pkg/util/utilfn"
|
||||
"github.com/wavetermdev/waveterm/pkg/wavebase"
|
||||
"github.com/wavetermdev/waveterm/pkg/wshrpc"
|
||||
"github.com/wavetermdev/waveterm/pkg/wshrpc/wshclient"
|
||||
@ -162,9 +163,7 @@ func serverRunRouter(jwtToken string) error {
|
||||
// just ignore and drain the rawCh (stdin)
|
||||
// when stdin is closed, shutdown
|
||||
defer wshutil.DoShutdown("", 0, true)
|
||||
for range rawCh {
|
||||
// ignore
|
||||
}
|
||||
utilfn.DrainChannelSafe(rawCh, "serverRunRouter:stdin")
|
||||
}()
|
||||
go func() {
|
||||
for msg := range termProxy.FromRemoteCh {
|
||||
|
@ -31,12 +31,33 @@ var debugSendTelemetryCmd = &cobra.Command{
|
||||
Hidden: true,
|
||||
}
|
||||
|
||||
var debugGetTabCmd = &cobra.Command{
|
||||
Use: "gettab",
|
||||
Short: "get tab",
|
||||
RunE: debugGetTabRun,
|
||||
Hidden: true,
|
||||
}
|
||||
|
||||
func init() {
|
||||
debugCmd.AddCommand(debugBlockIdsCmd)
|
||||
debugCmd.AddCommand(debugSendTelemetryCmd)
|
||||
debugCmd.AddCommand(debugGetTabCmd)
|
||||
rootCmd.AddCommand(debugCmd)
|
||||
}
|
||||
|
||||
func debugGetTabRun(cmd *cobra.Command, args []string) error {
|
||||
tab, err := wshclient.GetTabCommand(RpcClient, RpcContext.TabId, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
barr, err := json.MarshalIndent(tab, "", " ")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
WriteStdout("%s\n", string(barr))
|
||||
return nil
|
||||
}
|
||||
|
||||
func debugSendTelemetryRun(cmd *cobra.Command, args []string) error {
|
||||
err := wshclient.SendTelemetryCommand(RpcClient, nil)
|
||||
return err
|
||||
|
@ -4,6 +4,7 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io"
|
||||
@ -11,6 +12,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/wavetermdev/waveterm/pkg/remote/connparse"
|
||||
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/fsutil"
|
||||
"github.com/wavetermdev/waveterm/pkg/util/fileutil"
|
||||
"github.com/wavetermdev/waveterm/pkg/util/wavefileutil"
|
||||
"github.com/wavetermdev/waveterm/pkg/wshrpc"
|
||||
@ -27,15 +29,15 @@ func convertNotFoundErr(err error) error {
|
||||
return err
|
||||
}
|
||||
|
||||
func ensureFile(origName string, fileData wshrpc.FileData) (*wshrpc.FileInfo, error) {
|
||||
info, err := wshclient.FileInfoCommand(RpcClient, fileData, &wshrpc.RpcOpts{Timeout: DefaultFileTimeout})
|
||||
func ensureFile(fileData wshrpc.FileData) (*wshrpc.FileInfo, error) {
|
||||
info, err := wshclient.FileInfoCommand(RpcClient, fileData, &wshrpc.RpcOpts{Timeout: fileTimeout})
|
||||
err = convertNotFoundErr(err)
|
||||
if err == fs.ErrNotExist {
|
||||
err = wshclient.FileCreateCommand(RpcClient, fileData, &wshrpc.RpcOpts{Timeout: DefaultFileTimeout})
|
||||
err = wshclient.FileCreateCommand(RpcClient, fileData, &wshrpc.RpcOpts{Timeout: fileTimeout})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("creating file: %w", err)
|
||||
}
|
||||
info, err = wshclient.FileInfoCommand(RpcClient, fileData, &wshrpc.RpcOpts{Timeout: DefaultFileTimeout})
|
||||
info, err = wshclient.FileInfoCommand(RpcClient, fileData, &wshrpc.RpcOpts{Timeout: fileTimeout})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("getting file info: %w", err)
|
||||
}
|
||||
@ -51,12 +53,12 @@ func streamWriteToFile(fileData wshrpc.FileData, reader io.Reader) error {
|
||||
// First truncate the file with an empty write
|
||||
emptyWrite := fileData
|
||||
emptyWrite.Data64 = ""
|
||||
err := wshclient.FileWriteCommand(RpcClient, emptyWrite, &wshrpc.RpcOpts{Timeout: DefaultFileTimeout})
|
||||
err := wshclient.FileWriteCommand(RpcClient, emptyWrite, &wshrpc.RpcOpts{Timeout: fileTimeout})
|
||||
if err != nil {
|
||||
return fmt.Errorf("initializing file with empty write: %w", err)
|
||||
}
|
||||
|
||||
const chunkSize = 32 * 1024 // 32KB chunks
|
||||
const chunkSize = wshrpc.FileChunkSize // 32KB chunks
|
||||
buf := make([]byte, chunkSize)
|
||||
totalWritten := int64(0)
|
||||
|
||||
@ -89,40 +91,9 @@ func streamWriteToFile(fileData wshrpc.FileData, reader io.Reader) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func streamReadFromFile(fileData wshrpc.FileData, size int64, writer io.Writer) error {
|
||||
const chunkSize = 32 * 1024 // 32KB chunks
|
||||
for offset := int64(0); offset < size; offset += chunkSize {
|
||||
// Calculate the length of this chunk
|
||||
length := chunkSize
|
||||
if offset+int64(length) > size {
|
||||
length = int(size - offset)
|
||||
}
|
||||
|
||||
// Set up the ReadAt request
|
||||
fileData.At = &wshrpc.FileDataAt{
|
||||
Offset: offset,
|
||||
Size: length,
|
||||
}
|
||||
|
||||
// Read the chunk
|
||||
data, err := wshclient.FileReadCommand(RpcClient, fileData, &wshrpc.RpcOpts{Timeout: int64(fileTimeout)})
|
||||
if err != nil {
|
||||
return fmt.Errorf("reading chunk at offset %d: %w", offset, err)
|
||||
}
|
||||
|
||||
// Decode and write the chunk
|
||||
chunk, err := base64.StdEncoding.DecodeString(data.Data64)
|
||||
if err != nil {
|
||||
return fmt.Errorf("decoding chunk at offset %d: %w", offset, err)
|
||||
}
|
||||
|
||||
_, err = writer.Write(chunk)
|
||||
if err != nil {
|
||||
return fmt.Errorf("writing chunk at offset %d: %w", offset, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
func streamReadFromFile(ctx context.Context, fileData wshrpc.FileData, writer io.Writer) error {
|
||||
ch := wshclient.FileReadStreamCommand(RpcClient, fileData, &wshrpc.RpcOpts{Timeout: fileTimeout})
|
||||
return fsutil.ReadFileStreamToWriter(ctx, ch, writer)
|
||||
}
|
||||
|
||||
type fileListResult struct {
|
||||
|
@ -9,7 +9,6 @@ import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"log"
|
||||
"os"
|
||||
"path"
|
||||
@ -31,8 +30,7 @@ const (
|
||||
WaveFileScheme = "wavefile"
|
||||
WaveFilePrefix = "wavefile://"
|
||||
|
||||
DefaultFileTimeout = 5000
|
||||
TimeoutYear = int64(365) * 24 * 60 * 60 * 1000
|
||||
TimeoutYear = int64(365) * 24 * 60 * 60 * 1000
|
||||
|
||||
UriHelpText = `
|
||||
|
||||
@ -83,12 +81,12 @@ Wave Terminal is capable of managing files from remote SSH hosts, S3-compatible
|
||||
systems, and the internal Wave filesystem. Files are addressed via URIs, which
|
||||
vary depending on the storage system.` + UriHelpText}
|
||||
|
||||
var fileTimeout int
|
||||
var fileTimeout int64
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(fileCmd)
|
||||
|
||||
fileCmd.PersistentFlags().IntVarP(&fileTimeout, "timeout", "t", 15000, "timeout in milliseconds for long operations")
|
||||
fileCmd.PersistentFlags().Int64VarP(&fileTimeout, "timeout", "t", 15000, "timeout in milliseconds for long operations")
|
||||
|
||||
fileListCmd.Flags().BoolP("recursive", "r", false, "list subdirectories recursively")
|
||||
fileListCmd.Flags().BoolP("long", "l", false, "use long listing format")
|
||||
@ -103,7 +101,6 @@ func init() {
|
||||
fileCmd.AddCommand(fileInfoCmd)
|
||||
fileCmd.AddCommand(fileAppendCmd)
|
||||
fileCpCmd.Flags().BoolP("merge", "m", false, "merge directories")
|
||||
fileCpCmd.Flags().BoolP("recursive", "r", false, "copy directories recursively")
|
||||
fileCpCmd.Flags().BoolP("force", "f", false, "force overwrite of existing files")
|
||||
fileCmd.AddCommand(fileCpCmd)
|
||||
fileMvCmd.Flags().BoolP("recursive", "r", false, "move directories recursively")
|
||||
@ -174,7 +171,7 @@ var fileAppendCmd = &cobra.Command{
|
||||
var fileCpCmd = &cobra.Command{
|
||||
Use: "cp [source-uri] [destination-uri]" + UriHelpText,
|
||||
Aliases: []string{"copy"},
|
||||
Short: "copy files between storage systems",
|
||||
Short: "copy files between storage systems, recursively if needed",
|
||||
Long: "Copy files between different storage systems." + UriHelpText,
|
||||
Example: " wsh file cp wavefile://block/config.txt ./local-config.txt\n wsh file cp ./local-config.txt wavefile://block/config.txt\n wsh file cp wsh://user@ec2/home/user/config.txt wavefile://client/config.txt",
|
||||
Args: cobra.ExactArgs(2),
|
||||
@ -202,17 +199,7 @@ func fileCatRun(cmd *cobra.Command, args []string) error {
|
||||
Info: &wshrpc.FileInfo{
|
||||
Path: path}}
|
||||
|
||||
// Get file info first to check existence and get size
|
||||
info, err := wshclient.FileInfoCommand(RpcClient, fileData, &wshrpc.RpcOpts{Timeout: 2000})
|
||||
err = convertNotFoundErr(err)
|
||||
if err == fs.ErrNotExist {
|
||||
return fmt.Errorf("%s: no such file", path)
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting file info: %w", err)
|
||||
}
|
||||
|
||||
err = streamReadFromFile(fileData, info.Size, os.Stdout)
|
||||
err = streamReadFromFile(cmd.Context(), fileData, os.Stdout)
|
||||
if err != nil {
|
||||
return fmt.Errorf("reading file: %w", err)
|
||||
}
|
||||
@ -229,7 +216,7 @@ func fileInfoRun(cmd *cobra.Command, args []string) error {
|
||||
Info: &wshrpc.FileInfo{
|
||||
Path: path}}
|
||||
|
||||
info, err := wshclient.FileInfoCommand(RpcClient, fileData, &wshrpc.RpcOpts{Timeout: DefaultFileTimeout})
|
||||
info, err := wshclient.FileInfoCommand(RpcClient, fileData, &wshrpc.RpcOpts{Timeout: fileTimeout})
|
||||
err = convertNotFoundErr(err)
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting file info: %w", err)
|
||||
@ -265,20 +252,8 @@ func fileRmRun(cmd *cobra.Command, args []string) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fileData := wshrpc.FileData{
|
||||
Info: &wshrpc.FileInfo{
|
||||
Path: path}}
|
||||
|
||||
_, err = wshclient.FileInfoCommand(RpcClient, fileData, &wshrpc.RpcOpts{Timeout: DefaultFileTimeout})
|
||||
err = convertNotFoundErr(err)
|
||||
if err == fs.ErrNotExist {
|
||||
return fmt.Errorf("%s: no such file", path)
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting file info: %w", err)
|
||||
}
|
||||
|
||||
err = wshclient.FileDeleteCommand(RpcClient, wshrpc.CommandDeleteFileData{Path: path, Recursive: recursive}, &wshrpc.RpcOpts{Timeout: DefaultFileTimeout})
|
||||
err = wshclient.FileDeleteCommand(RpcClient, wshrpc.CommandDeleteFileData{Path: path, Recursive: recursive}, &wshrpc.RpcOpts{Timeout: fileTimeout})
|
||||
if err != nil {
|
||||
return fmt.Errorf("removing file: %w", err)
|
||||
}
|
||||
@ -295,14 +270,31 @@ func fileWriteRun(cmd *cobra.Command, args []string) error {
|
||||
Info: &wshrpc.FileInfo{
|
||||
Path: path}}
|
||||
|
||||
_, err = ensureFile(path, fileData)
|
||||
capability, err := wshclient.FileShareCapabilityCommand(RpcClient, fileData.Info.Path, &wshrpc.RpcOpts{Timeout: fileTimeout})
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("getting fileshare capability: %w", err)
|
||||
}
|
||||
|
||||
err = streamWriteToFile(fileData, WrappedStdin)
|
||||
if err != nil {
|
||||
return fmt.Errorf("writing file: %w", err)
|
||||
if capability.CanAppend {
|
||||
err = streamWriteToFile(fileData, WrappedStdin)
|
||||
if err != nil {
|
||||
return fmt.Errorf("writing file: %w", err)
|
||||
}
|
||||
} else {
|
||||
buf := make([]byte, MaxFileSize)
|
||||
n, err := WrappedStdin.Read(buf)
|
||||
if err != nil && err != io.EOF {
|
||||
return fmt.Errorf("reading input: %w", err)
|
||||
}
|
||||
if int64(n) == MaxFileSize {
|
||||
if _, err := WrappedStdin.Read(make([]byte, 1)); err != io.EOF {
|
||||
return fmt.Errorf("input exceeds maximum file size of %d bytes", MaxFileSize)
|
||||
}
|
||||
}
|
||||
fileData.Data64 = base64.StdEncoding.EncodeToString(buf[:n])
|
||||
err = wshclient.FileWriteCommand(RpcClient, fileData, &wshrpc.RpcOpts{Timeout: fileTimeout})
|
||||
if err != nil {
|
||||
return fmt.Errorf("writing file: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
@ -317,7 +309,7 @@ func fileAppendRun(cmd *cobra.Command, args []string) error {
|
||||
Info: &wshrpc.FileInfo{
|
||||
Path: path}}
|
||||
|
||||
info, err := ensureFile(path, fileData)
|
||||
info, err := ensureFile(fileData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -346,7 +338,7 @@ func fileAppendRun(cmd *cobra.Command, args []string) error {
|
||||
|
||||
if buf.Len() >= 8192 { // 8KB batch size
|
||||
fileData.Data64 = base64.StdEncoding.EncodeToString(buf.Bytes())
|
||||
err = wshclient.FileAppendCommand(RpcClient, fileData, &wshrpc.RpcOpts{Timeout: int64(fileTimeout)})
|
||||
err = wshclient.FileAppendCommand(RpcClient, fileData, &wshrpc.RpcOpts{Timeout: fileTimeout})
|
||||
if err != nil {
|
||||
return fmt.Errorf("appending to file: %w", err)
|
||||
}
|
||||
@ -357,7 +349,7 @@ func fileAppendRun(cmd *cobra.Command, args []string) error {
|
||||
|
||||
if buf.Len() > 0 {
|
||||
fileData.Data64 = base64.StdEncoding.EncodeToString(buf.Bytes())
|
||||
err = wshclient.FileAppendCommand(RpcClient, fileData, &wshrpc.RpcOpts{Timeout: int64(fileTimeout)})
|
||||
err = wshclient.FileAppendCommand(RpcClient, fileData, &wshrpc.RpcOpts{Timeout: fileTimeout})
|
||||
if err != nil {
|
||||
return fmt.Errorf("appending to file: %w", err)
|
||||
}
|
||||
@ -398,10 +390,6 @@ func getTargetPath(src, dst string) (string, error) {
|
||||
|
||||
func fileCpRun(cmd *cobra.Command, args []string) error {
|
||||
src, dst := args[0], args[1]
|
||||
recursive, err := cmd.Flags().GetBool("recursive")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
merge, err := cmd.Flags().GetBool("merge")
|
||||
if err != nil {
|
||||
return err
|
||||
@ -419,9 +407,9 @@ func fileCpRun(cmd *cobra.Command, args []string) error {
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to parse dest path: %w", err)
|
||||
}
|
||||
log.Printf("Copying %s to %s; recursive: %v, merge: %v, force: %v", srcPath, destPath, recursive, merge, force)
|
||||
log.Printf("Copying %s to %s; merge: %v, force: %v", srcPath, destPath, merge, force)
|
||||
rpcOpts := &wshrpc.RpcOpts{Timeout: TimeoutYear}
|
||||
err = wshclient.FileCopyCommand(RpcClient, wshrpc.CommandFileCopyData{SrcUri: srcPath, DestUri: destPath, Opts: &wshrpc.FileCopyOpts{Recursive: recursive, Merge: merge, Overwrite: force, Timeout: TimeoutYear}}, rpcOpts)
|
||||
err = wshclient.FileCopyCommand(RpcClient, wshrpc.CommandFileCopyData{SrcUri: srcPath, DestUri: destPath, Opts: &wshrpc.FileCopyOpts{Merge: merge, Overwrite: force, Timeout: TimeoutYear}}, rpcOpts)
|
||||
if err != nil {
|
||||
return fmt.Errorf("copying file: %w", err)
|
||||
}
|
||||
@ -449,7 +437,7 @@ func fileMvRun(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
log.Printf("Moving %s to %s; recursive: %v, force: %v", srcPath, destPath, recursive, force)
|
||||
rpcOpts := &wshrpc.RpcOpts{Timeout: TimeoutYear}
|
||||
err = wshclient.FileMoveCommand(RpcClient, wshrpc.CommandFileCopyData{SrcUri: srcPath, DestUri: destPath, Opts: &wshrpc.FileCopyOpts{Recursive: recursive, Overwrite: force, Timeout: TimeoutYear}}, rpcOpts)
|
||||
err = wshclient.FileMoveCommand(RpcClient, wshrpc.CommandFileCopyData{SrcUri: srcPath, DestUri: destPath, Opts: &wshrpc.FileCopyOpts{Overwrite: force, Timeout: TimeoutYear, Recursive: recursive}}, rpcOpts)
|
||||
if err != nil {
|
||||
return fmt.Errorf("moving file: %w", err)
|
||||
}
|
||||
@ -562,10 +550,7 @@ func fileListRun(cmd *cobra.Command, args []string) error {
|
||||
|
||||
filesChan := wshclient.FileListStreamCommand(RpcClient, wshrpc.FileListData{Path: path, Opts: &wshrpc.FileListOpts{All: recursive}}, &wshrpc.RpcOpts{Timeout: 2000})
|
||||
// Drain the channel when done
|
||||
defer func() {
|
||||
for range filesChan {
|
||||
}
|
||||
}()
|
||||
defer utilfn.DrainChannelSafe(filesChan, "fileListRun")
|
||||
if longForm {
|
||||
return filePrintLong(filesChan)
|
||||
}
|
||||
|
@ -90,7 +90,9 @@ func setBgRun(cmd *cobra.Command, args []string) (rtnErr error) {
|
||||
if setBgOpacity < 0 || setBgOpacity > 1 {
|
||||
return fmt.Errorf("opacity must be between 0.0 and 1.0")
|
||||
}
|
||||
if cmd.Flags().Changed("opacity") {
|
||||
if setBgClear {
|
||||
meta["bg:*"] = true
|
||||
} else {
|
||||
meta["bg:opacity"] = setBgOpacity
|
||||
}
|
||||
} else if len(args) > 1 {
|
||||
@ -167,7 +169,11 @@ func setBgRun(cmd *cobra.Command, args []string) (rtnErr error) {
|
||||
}
|
||||
|
||||
// Resolve tab reference
|
||||
oRef, err := resolveSimpleId("tab")
|
||||
id := blockArg
|
||||
if id == "" {
|
||||
id = "tab"
|
||||
}
|
||||
oRef, err := resolveSimpleId(id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -19,6 +19,7 @@ var viewMagnified bool
|
||||
|
||||
var viewCmd = &cobra.Command{
|
||||
Use: "view {file|directory|URL}",
|
||||
Aliases: []string{"preview", "open"},
|
||||
Short: "preview/edit a file or directory",
|
||||
RunE: viewRun,
|
||||
PreRunE: preRunSetupRpcClient,
|
||||
|
@ -6,17 +6,39 @@ title: "Connections"
|
||||
|
||||
# Connections
|
||||
|
||||
Wave allows users to connect to various machines and unify them together in a way that preserves the unique behavior of each. At the moment, this extends to SSH remote connections and local WSL connections.
|
||||
Wave allows users to connect to various machines and unify them together in a way that preserves the unique behavior of each. At the moment, this extends to SSH remote connections, local WSL connections, and AWS S3 buckets.
|
||||
|
||||
## Access a Connection in a Block
|
||||
|
||||
The easiest way to access connections is to click the <i className="fa-sharp fa-laptop"/> icon. From there, you can either type `[user]@[host]` for a desired SSH remote or type `wsl://<distribution name>` for a desired WSL distribution. Alternatively, if the connection already exists in the dropdown list, you can either click it or navigate to it with arrow keys and press enter to connect.
|
||||
The easiest way to access connections is to click the <i className="fa-sharp fa-laptop"/> icon. From there, you can type one of the following to depending on the connection you want:
|
||||
|
||||
For SSH Connections:
|
||||
|
||||
- `[user]@[host]`
|
||||
- `[host]`
|
||||
- `[user]@[host]:[port]`
|
||||
|
||||
For WSL Connections:
|
||||
|
||||
- `wsl://<distribution name>`
|
||||
|
||||
For AWS S3 Connections:
|
||||
|
||||
- `aws:[profile]`
|
||||
|
||||
Alternatively, if the connection already exists in the dropdown list, you can either click it or navigate to it with arrow keys and press enter to connect.
|
||||
|
||||

|
||||
|
||||
## Different Types of Connections
|
||||
|
||||
As there are several different types of connections, not all of the types have access to the same features. For instance, AWS S3 connections can only be used in preview widgets (directory, image viewer, code editor, etc.). Meanwhile, SSH and WSL connections can always work in terminal widgets, and if `wsh` shell extensions are installed, they can also work in preview widgets and the sysinfo widget.
|
||||
|
||||
As such, certain features will not be available for certain types of connections. As an example, AWS S3 connections cannot run startup scripts as they are not capable of running scripts.
|
||||
|
||||
## What are wsh Shell Extensions?
|
||||
|
||||
`wsh` is a small program that helps manage waveterm regardless of which machine you are currently connected to. It is always included on your host machine, but you also have the option to install it when connecting to a remote machine. If it is installed on the remote machine, it is installed at `~/.waveterm/bin/wsh`. Then, when wave connects to your connection (and only when wave connects to your connection), the following happens:
|
||||
`wsh` is a small program that helps manage waveterm regardless of which machine you are currently connected to. It is always included on your host machine, but you also have the option to install it when connecting to SSH and WSL Connections. If it is installed on the connection, it is installed at `~/.waveterm/bin/wsh`. Then, when wave connects to your connection (and only when wave connects to your connection), the following happens:
|
||||
|
||||
- `~/.waveterm/bin` is added to your `PATH` for that individual session. This allows the user to use the `wsh` command without providing the complete path.
|
||||
- Several environment variables are injected into the session to make certain tasks with `wsh` easier. These are [listed below](#additional-environment-variables).
|
||||
@ -64,7 +86,9 @@ The SSH values that are loaded into the dropdown by default are obtained by pars
|
||||
- manually typing your connection into the connection box (if this successfully connects, the connection will be added to the internal `config/connections.json` file)
|
||||
- use `wsh ssh [user]@[host]` in your terminal (if this successfully connects, the connection will be added to the internal `config/connections.json` file)
|
||||
|
||||
WSL values are added by searching the installed WSL distributions as they appear in the Windows Registry.
|
||||
WSL connections are added by searching the installed WSL distributions as they appear in the Windows Registry. They also exist in the `config/connections.json` file similarly to SSH connections.
|
||||
|
||||
AWS S3 Connections are added by parsing the `~/.aws/config` file. Unlike the SSH and WSL connections, these are not stored in the `config/connections.json` file.
|
||||
|
||||
## SSH Config Parsing
|
||||
|
||||
|
@ -4,7 +4,7 @@ id: "keybindings"
|
||||
title: "Key Bindings"
|
||||
---
|
||||
|
||||
import { Kbd } from "@site/src/components/kbd.tsx";
|
||||
import { Kbd, KbdChord } from "@site/src/components/kbd.tsx";
|
||||
import { PlatformProvider, PlatformSelectorButton } from "@site/src/components/platformcontext.tsx";
|
||||
|
||||
<PlatformProvider>
|
||||
@ -15,32 +15,39 @@ Some keybindings are always active. Others are only active for certain types of
|
||||
Note that these are the MacOS keybindings (they use "Cmd"). For Windows and Linux,
|
||||
replace "Cmd" with "Alt" (note that "Ctrl" is "Ctrl" on both Mac, Windows, and Linux).
|
||||
|
||||
Chords are shown with a + between the keys. You have 2 seconds to hit the 2nd chord key after typing the first key. Hitting Escape after an initial chord key will always be a no-op.
|
||||
|
||||
## Global Keybindings
|
||||
|
||||
<PlatformSelectorButton />
|
||||
<div style={{ marginBottom: 20 }}></div>
|
||||
|
||||
| Key | Function |
|
||||
| ---------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| <Kbd k="Cmd:t"/> | Open a new tab |
|
||||
| <Kbd k="Cmd:n"/> | Open a new block (defaults to a terminal block with the same connection and working directory). Switch to launcher using `app:defaultnewblock` setting |
|
||||
| <Kbd k="Cmd:d"/> | Split horizontally, open a new block to the right |
|
||||
| <Kbd k="Cmd:Shift:d"/> | Split vertically, open a new block below |
|
||||
| <Kbd k="Cmd:Shift:n"/> | Open a new window |
|
||||
| <Kbd k="Cmd:w"/> | Close the current block |
|
||||
| <Kbd k="Cmd:Shift:w"/> | Close the current tab |
|
||||
| <Kbd k="Cmd:m"/> | Magnify / Un-Magnify the current block |
|
||||
| <Kbd k="Cmd:g"/> | Open the "connection" switcher |
|
||||
| <Kbd k="Cmd:i"/> | Refocus the current block (useful if the block has lost input focus) |
|
||||
| <Kbd k="Ctrl:Shift"/> | Show block numbers |
|
||||
| <Kbd k="Ctrl:Shift:1-9"/> | Switch to block number |
|
||||
| <Kbd k="Ctrl:Shift:Arrows"/> | Move left, right, up, down between blocks |
|
||||
| <Kbd k="Cmd:1-9"/> | Switch to tab number |
|
||||
| <Kbd k="Cmd:["/> | Switch tab left |
|
||||
| <Kbd k="Cmd:]"/> | Switch tab right |
|
||||
| <Kbd k="Cmd:Ctrl:1-9"/> | Switch to workspace number |
|
||||
| <Kbd k="Cmd:Shift:r"/> | Refresh the UI |
|
||||
| <Kbd k="Ctrl:Shift:i"/> | Toggle terminal multi-input mode |
|
||||
| Key | Function |
|
||||
| ------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| <Kbd k="Cmd:t"/> | Open a new tab |
|
||||
| <Kbd k="Cmd:n"/> | Open a new block (defaults to a terminal block with the same connection and working directory). Switch to launcher using `app:defaultnewblock` setting |
|
||||
| <Kbd k="Cmd:d"/> | Split horizontally, open a new block to the right |
|
||||
| <Kbd k="Cmd:Shift:d"/> | Split vertically, open a new block below |
|
||||
| <KbdChord karr={["Ctrl:Shift:s", "ArrowUp"]}/> | Split vertically, open a new block above |
|
||||
| <KbdChord karr={["Ctrl:Shift:s", "ArrowDown"]}/> | Split vertically, open a new block below |
|
||||
| <KbdChord karr={["Ctrl:Shift:s", "ArrowLeft"]}/> | Split horizontally, open a new block to the left |
|
||||
| <KbdChord karr={["Ctrl:Shift:s", "ArrowRight"]}/> | Split horizontally, open a new block to the right |
|
||||
| <Kbd k="Cmd:Shift:n"/> | Open a new window |
|
||||
| <Kbd k="Cmd:w"/> | Close the current block |
|
||||
| <Kbd k="Cmd:Shift:w"/> | Close the current tab |
|
||||
| <Kbd k="Cmd:m"/> | Magnify / Un-Magnify the current block |
|
||||
| <Kbd k="Cmd:g"/> | Open the "connection" switcher |
|
||||
| <Kbd k="Cmd:i"/> | Refocus the current block (useful if the block has lost input focus) |
|
||||
| <Kbd k="Ctrl:Shift"/> | Show block numbers |
|
||||
| <Kbd k="Ctrl:Shift:1-9"/> | Switch to block number |
|
||||
| <Kbd k="Ctrl:Shift:Arrows"/> | Move left, right, up, down between blocks |
|
||||
| <Kbd k="Ctrl:Shift:k"/> | Replace the current block with a launcher block |
|
||||
| <Kbd k="Cmd:1-9"/> | Switch to tab number |
|
||||
| <Kbd k="Cmd:["/> | Switch tab left |
|
||||
| <Kbd k="Cmd:]"/> | Switch tab right |
|
||||
| <Kbd k="Cmd:Ctrl:1-9"/> | Switch to workspace number |
|
||||
| <Kbd k="Cmd:Shift:r"/> | Refresh the UI |
|
||||
| <Kbd k="Ctrl:Shift:i"/> | Toggle terminal multi-input mode |
|
||||
|
||||
## File Preview Keybindings
|
||||
|
||||
|
130
docs/docs/telemetry-old.mdx
Normal file
130
docs/docs/telemetry-old.mdx
Normal file
@ -0,0 +1,130 @@
|
||||
---
|
||||
id: "telemetry-old"
|
||||
title: "Legacy Telemetry"
|
||||
sidebar_class_name: hidden
|
||||
---
|
||||
|
||||
Wave Terminal collects telemetry data to help us track feature use, direct future product efforts, and generate aggregate metrics on Wave's popularity and usage. We do not collect or store any PII (personal identifiable information) and all metric data is only associated with and aggregated using your randomly generated _ClientId_. You may opt out of collection at any time.
|
||||
|
||||
If you would like to turn telemetry on or off, the first opportunity is a button on the initial welcome page. After this, it can be turned off by adding `"telemetry:enabled": false` to the `config/settings.json` file. It can alternatively be turned on by adding `"telemetry:enabled": true` to the `config/settings.json` file.
|
||||
|
||||
:::info
|
||||
|
||||
You can also change your telemetry setting by running the wsh command:
|
||||
|
||||
```
|
||||
wsh setconfig telemetry:enabled=true
|
||||
```
|
||||
|
||||
:::
|
||||
|
||||
---
|
||||
|
||||
## Sending Telemetry
|
||||
|
||||
Provided that telemetry is enabled, it is sent 10 seconds after Waveterm is first booted and then again every 4 hours thereafter. It can also be sent in response to a few special cases listed below. When telemetry is sent, it is grouped into individual days as determined by your time zone. Any data from a previous day is marked as `Uploaded` so it will not need to be sent again.
|
||||
|
||||
### Sending Once Telemetry is Enabled
|
||||
|
||||
As soon as telemetry is enabled, a telemetry update is sent regardless of how long it has been since the last send. This does not reset the usual timer for telemetry sends.
|
||||
|
||||
### Notifying that Telemetry is Disabled
|
||||
|
||||
As soon as telemetry is disabled, Waveterm sends a special update that notifies us of this change. See [When Telemetry is Turned Off](#when-telemetry-is-turned-off) for more info. The timer still runs in the background but no data is sent.
|
||||
|
||||
### When Waveterm is Closed
|
||||
|
||||
Provided that telemetry is enabled, it will be sent when Waveterm is closed.
|
||||
|
||||
---
|
||||
|
||||
## Telemetry Data
|
||||
|
||||
When telemetry is active, we collect the following data. It is stored in the `telemetry.TelemetryData` type in the source code.
|
||||
|
||||
| Name | Description |
|
||||
| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| ActiveMinutes | The number of minutes that the user has actively used Waveterm on a given day. This requires the terminal window to be in focus while the user is actively interacting with it. |
|
||||
| FgMinutes | The number of minutes that Waveterm has been in the foreground on a given day. This requires the terminal window to be in focus regardless of user interaction. |
|
||||
| OpenMinutes | The number of minutes that Waveterm has been open on a given day. This only requires that the terminal is open, even if the window is out of focus. |
|
||||
| NumBlocks | The number of existing blocks open on a given day |
|
||||
| NumTabs | The number of existing tabs open on a given day. |
|
||||
| NewTab | The number of new tabs created on a given day |
|
||||
| NumWindows | The number of existing windows open on a given day. |
|
||||
| NumWS | The number of existing workspaces on a given day. |
|
||||
| NumWSNamed | The number of named workspaces on a given day. |
|
||||
| NewTab | The number of new tabs opened on a given day. |
|
||||
| NumStartup | The number of times waveterm has been started on a given day. |
|
||||
| NumShutdown | The number of times waveterm has been shut down on a given day. |
|
||||
| SetTabTheme | The number of times the tab theme is changed from the context menu |
|
||||
| NumMagnify | The number of times any block is magnified |
|
||||
| NumPanics | The number of backend (golang) panics caught in the current day |
|
||||
| NumAIReqs | The number of AI requests made in the current day |
|
||||
| NumSSHConn | The number of distinct SSH connections that have been made to distinct hosts |
|
||||
| NumWSLConns | The number of distinct WSL connections that have been made to distinct distros |
|
||||
| Renderers | The number of new block views of each type are open on a given day. |
|
||||
| WshCmds | The number of wsh commands of each type run on a given day |
|
||||
| Blocks | The number of blocks of different view types open on a given day |
|
||||
| Conn | The number of successful remote connections made (and errors) on a given day |
|
||||
|
||||
## Associated Data
|
||||
|
||||
In addition to the telemetry data collected, the following is also reported. It is stored in the `telemetry.ActivityType` type in the source code.
|
||||
|
||||
| Name | Description |
|
||||
| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| Day | The date the telemetry is associated with. It does not include the time. |
|
||||
| Uploaded | A boolean that indicates if the telemetry for this day is finalized. It is false during the day the telemetry is associated with, but gets set true at the first telemetry upload after that. Once it is true, the data for that particular day will not be sent up with the telemetry any more. |
|
||||
| TzName | The code for the timezone the user's OS is reporting (e.g. PST, GMT, JST) |
|
||||
| TzOffset | The offset for the timezone the user's OS is reporting (e.g. -08:00, +00:00, +09:00) |
|
||||
| ClientVersion | Which version of Waveterm is installed. |
|
||||
| ClientArch | This includes the user's operating system (e.g. linux or darwin) and architecture (e.g. x86_64 or arm64). It does not include data for any Connections at this time. |
|
||||
| BuildTime | This serves as a more accurate version number that keeps track of when we built the version. It has no bearing on when that version was installed by you. |
|
||||
| OSRelease | This lists the version of the operating system the user has installed. |
|
||||
| Displays | Display resolutions (added in v0.9.3 to help us understand what screen resolutions to optimize for) |
|
||||
|
||||
## Telemetry Metadata
|
||||
|
||||
Lastly, some data is sent along with the telemetry that describes how to classify it. It is stored in the `wcloud.TelemetryInputType` in the source code.
|
||||
|
||||
| Name | Description |
|
||||
| ----------------- | --------------------------------------------------------------------------------------------------------------------------- |
|
||||
| UserId | Currently Unused. This is an anonymous UUID intended for use in future features. |
|
||||
| ClientId | This is an anonymous UUID created when Waveterm is first launched. It is used for telemetry and sending prompts to Open AI. |
|
||||
| AppType | This is used to differentiate the current version of waveterm from the legacy app. |
|
||||
| AutoUpdateEnabled | Whether or not auto update is turned on. |
|
||||
| AutoUpdateChannel | The type of auto update in use. This specifically refers to whether a latest or beta channel is selected. |
|
||||
| CurDay | The current day (in your time zone) when telemetry is sent. It does not include the time of day. |
|
||||
|
||||
## Geo Data
|
||||
|
||||
We do not store IP addresses in our telemetry table. However, CloudFlare passes us Geo-Location headers. We store these two header values:
|
||||
|
||||
| Name | Description |
|
||||
| ------------ | ----------------------------------------------------------------- |
|
||||
| CFCountry | 2-letter country code (e.g. "US", "FR", or "JP") |
|
||||
| CFRegionCode | region code (often a province, region, or state within a country) |
|
||||
|
||||
---
|
||||
|
||||
## When Telemetry is Turned Off
|
||||
|
||||
When a user disables telemetry, Waveterm sends a notification that their anonymous _ClientId_ has had its telemetry disabled. This is done with the `wcloud.NoTelemetryInputType` type in the source code. Beyond that, no further information is sent unless telemetry is turned on again. If it is turned on again, the previous 30 days of telemetry will be sent.
|
||||
|
||||
---
|
||||
|
||||
## A Note on IP Addresses
|
||||
|
||||
Telemetry is uploaded via https, which means your IP address is known to the telemetry server. We **do not** store your IP address in our telemetry table and **do not** associate it with your _ClientId_.
|
||||
|
||||
---
|
||||
|
||||
## Previously Collected Telemetry Data
|
||||
|
||||
While we believe the data we collect with telemetry is fairly minimal, we cannot make that decision for every user. If you ever change your mind about what has been collected previously, you may request that your data be deleted by emailing us at [support@waveterm.dev](mailto:support@waveterm.dev). If you do, we will need your _ClientId_ to remove it.
|
||||
|
||||
---
|
||||
|
||||
## Privacy Policy
|
||||
|
||||
For a summary of the above, you can take a look at our [Privacy Policy](https://www.waveterm.dev/privacy).
|
@ -1,16 +1,32 @@
|
||||
---
|
||||
sidebar_position: 100
|
||||
title: Telemetry
|
||||
id: "telemetry"
|
||||
title: "Telemetry"
|
||||
---
|
||||
|
||||
Wave Terminal collects telemetry data to help us track feature use, direct future product efforts, and generate aggregate metrics on Wave's popularity and usage. We do not collect or store any PII (personal identifiable information) and all metric data is only associated with and aggregated using your randomly generated _ClientId_. You may opt out of collection at any time.
|
||||
## tl;dr
|
||||
|
||||
Wave Terminal collects telemetry data to help us track feature use, direct future product efforts, and generate aggregate metrics on Wave's popularity and usage. We do NOT collect personal information (PII), keystrokes, file contents, AI prompts, IP addresses, hostnames, or commands. We attach all information to an anonymous, randomly generated _ClientId_ (UUID). You may opt out of collection at any time.
|
||||
|
||||
Here’s a quick summary of what is collected:
|
||||
|
||||
- Basic App/System Info – OS, architecture, app version, update settings
|
||||
- Usage Metrics – App start/shutdown, active minutes, foreground time, tab/block counts/usage
|
||||
- Feature Interactions – When you create tabs, run commands, change settings, etc.
|
||||
- Display Info – Monitor resolution, number of displays
|
||||
- Connection Events – SSH/WSL connection attempts (but NOT hostnames/IPs)
|
||||
- AI Commands – Only which AI backend is used (e.g., OpenAI, Claude) – no text or prompts sent
|
||||
- Error Reports – Crash/panic events with minimal debugging info, but no stack traces or detailed errors
|
||||
|
||||
Telemetry can be disabled at any time in settings. If not disabled it is sent on startup, on shutdown, and every 4-hours.
|
||||
|
||||
## How to Disable Telemetry
|
||||
|
||||
If you would like to turn telemetry on or off, the first opportunity is a button on the initial welcome page. After this, it can be turned off by adding `"telemetry:enabled": false` to the `config/settings.json` file. It can alternatively be turned on by adding `"telemetry:enabled": true` to the `config/settings.json` file.
|
||||
|
||||
:::info
|
||||
:::tip
|
||||
|
||||
You can also change your telemetry setting by running the wsh command:
|
||||
You can also change your telemetry setting (true/false) by running the wsh command:
|
||||
|
||||
```
|
||||
wsh setconfig telemetry:enabled=true
|
||||
@ -18,7 +34,11 @@ wsh setconfig telemetry:enabled=true
|
||||
|
||||
:::
|
||||
|
||||
---
|
||||
:::info
|
||||
|
||||
This document outlines the new telemetry system as of v0.11.1. The previous telemetry documentation is still relevant and can be found in our [Legacy Telemetry Documentation](./telemetry-old.mdx), but in general, the new telemetry is a superset of the old.
|
||||
|
||||
:::
|
||||
|
||||
## Sending Telemetry
|
||||
|
||||
@ -36,74 +56,65 @@ As soon as telemetry is disabled, Waveterm sends a special update that notifies
|
||||
|
||||
Provided that telemetry is enabled, it will be sent when Waveterm is closed.
|
||||
|
||||
---
|
||||
## Event Types
|
||||
|
||||
## Telemetry Data
|
||||
Below is a list of the event types collected in the new telemetry system. More events are likely to be added in the future.
|
||||
|
||||
When telemetry is active, we collect the following data. It is stored in the `telemetry.TelemetryData` type in the source code.
|
||||
| Event Name | Description |
|
||||
| -------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `app:startup` | Logged every time you start the app. Contains basic app information like architecture, version, buildtime, etc. |
|
||||
| `app:shutdown` | Logged on every shutdown |
|
||||
| `app:activity` | Logged once per hour of app activity |
|
||||
| `app:display` | Logged on startup and contains information about size of displays |
|
||||
| `app:counts` | Logged once per hour when app is active, contains basic counts like number of windows, tabs, workspaces, blocks, etc. |
|
||||
| `action:magnify` | Logged each time a block is magnified |
|
||||
| `action:settabtheme` | Logged each time a tab theme is changed |
|
||||
| `action:runaicmd` | Logged each time an AI request is made (no prompt information or text is sent), only sends "ai:backendtype" to know what type of AI backend is being used (OpenAI, Claude, Gemini, etc.) |
|
||||
| `action:createtab` | Logged when a new tab is created |
|
||||
| `action:createblock` | Logged when a new block is created (contains the block view type) |
|
||||
| `wsh:run` | Logged when a wsh command is executed (contains the command type) |
|
||||
| `debug:panic` | Logged when a backend (Go) panic happens. Contains a debugging string that can be used to find which panic was hit in our source code. No data is sent |
|
||||
| `conn:connect` | Logged each time a backend ssh/wsl connection connects (logs the connection type, no hostname or IP is sent) |
|
||||
| `conn:connecterror` | Logged when you try to connect but it fails (logs the connection type, no hostname or IP is sent, and no detailed error information is sent) |
|
||||
|
||||
| Name | Description |
|
||||
| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| ActiveMinutes | The number of minutes that the user has actively used Waveterm on a given day. This requires the terminal window to be in focus while the user is actively interacting with it. |
|
||||
| FgMinutes | The number of minutes that Waveterm has been in the foreground on a given day. This requires the terminal window to be in focus regardless of user interaction. |
|
||||
| OpenMinutes | The number of minutes that Waveterm has been open on a given day. This only requires that the terminal is open, even if the window is out of focus. |
|
||||
| NumBlocks | The number of existing blocks open on a given day |
|
||||
| NumTabs | The number of existing tabs open on a given day. |
|
||||
| NewTab | The number of new tabs created on a given day |
|
||||
| NumWindows | The number of existing windows open on a given day. |
|
||||
| NumWS | The number of existing workspaces on a given day. |
|
||||
| NumWSNamed | The number of named workspaces on a given day. |
|
||||
| NewTab | The number of new tabs opened on a given day. |
|
||||
| NumStartup | The number of times waveterm has been started on a given day. |
|
||||
| NumShutdown | The number of times waveterm has been shut down on a given day. |
|
||||
| SetTabTheme | The number of times the tab theme is changed from the context menu |
|
||||
| NumMagnify | The number of times any block is magnified |
|
||||
| NumPanics | The number of backend (golang) panics caught in the current day |
|
||||
| NumAIReqs | The number of AI requests made in the current day |
|
||||
| NumSSHConn | The number of distinct SSH connections that have been made to distinct hosts |
|
||||
| NumWSLConns | The number of distinct WSL connections that have been made to distinct distros |
|
||||
| Renderers | The number of new block views of each type are open on a given day. |
|
||||
| WshCmds | The number of wsh commands of each type run on a given day |
|
||||
| Blocks | The number of blocks of different view types open on a given day |
|
||||
| Conn | The number of successful remote connections made (and errors) on a given day |
|
||||
## Event Properties
|
||||
|
||||
## Associated Data
|
||||
Each event may contain the following properties that are relevant to the particular events.
|
||||
|
||||
In addition to the telemetry data collected, the following is also reported. It is stored in the `telemetry.ActivityType` type in the source code.
|
||||
|
||||
| Name | Description |
|
||||
| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| Day | The date the telemetry is associated with. It does not include the time. |
|
||||
| Uploaded | A boolean that indicates if the telemetry for this day is finalized. It is false during the day the telemetry is associated with, but gets set true at the first telemetry upload after that. Once it is true, the data for that particular day will not be sent up with the telemetry any more. |
|
||||
| TzName | The code for the timezone the user's OS is reporting (e.g. PST, GMT, JST) |
|
||||
| TzOffset | The offset for the timezone the user's OS is reporting (e.g. -08:00, +00:00, +09:00) |
|
||||
| ClientVersion | Which version of Waveterm is installed. |
|
||||
| ClientArch | This includes the user's operating system (e.g. linux or darwin) and architecture (e.g. x86_64 or arm64). It does not include data for any Connections at this time. |
|
||||
| BuildTime | This serves as a more accurate version number that keeps track of when we built the version. It has no bearing on when that version was installed by you. |
|
||||
| OSRelease | This lists the version of the operating system the user has installed. |
|
||||
| Displays | Display resolutions (added in v0.9.3 to help us understand what screen resolutions to optimize for) |
|
||||
|
||||
## Telemetry Metadata
|
||||
|
||||
Lastly, some data is sent along with the telemetry that describes how to classify it. It is stored in the `wcloud.TelemetryInputType` in the source code.
|
||||
|
||||
| Name | Description |
|
||||
| ----------------- | --------------------------------------------------------------------------------------------------------------------------- |
|
||||
| UserId | Currently Unused. This is an anonymous UUID intended for use in future features. |
|
||||
| ClientId | This is an anonymous UUID created when Waveterm is first launched. It is used for telemetry and sending prompts to Open AI. |
|
||||
| AppType | This is used to differentiate the current version of waveterm from the legacy app. |
|
||||
| AutoUpdateEnabled | Whether or not auto update is turned on. |
|
||||
| AutoUpdateChannel | The type of auto update in use. This specifically refers to whether a latest or beta channel is selected. |
|
||||
| CurDay | The current day (in your time zone) when telemetry is sent. It does not include the time of day. |
|
||||
|
||||
## Geo Data
|
||||
|
||||
We do not store IP addresses in our telemetry table. However, CloudFlare passes us Geo-Location headers. We store these two header values:
|
||||
|
||||
| Name | Description |
|
||||
| ------------ | ----------------------------------------------------------------- |
|
||||
| CFCountry | 2-letter country code (e.g. "US", "FR", or "JP") |
|
||||
| CFRegionCode | region code (often a province, region, or state within a country) |
|
||||
| Property | Description |
|
||||
| ------------------------ | ------------------------------------------------------------------------------------------------------ |
|
||||
| `client:arch` | Wave architecture (darwin, windows, linux) and x64 vs arm64 |
|
||||
| `client:version` | The Wave version (e.g. v0.11.1) |
|
||||
| `client:initial_version` | Initial installed wave version |
|
||||
| `client:buildtime` | The buildtime (more exact wave version) |
|
||||
| `client:osrelease` | A string representing the version of the OS you're running -- different for darwin, windows, and linux |
|
||||
| `client:isdev` | True/False if using the dev build |
|
||||
| `autoupdate:channel` | What auto-update channel you're on (latest vs beta) |
|
||||
| `autoupdate:enabled` | True/False if auto-update is enabled |
|
||||
| `loc:countrycode` | Two character country code (e.g. US, CN, FR, JP) |
|
||||
| `loc:regioncode` | Two character region code (usually the State or Province within a country) |
|
||||
| `activity:activeminutes` | For app:activity, a number between 0-60 of how many minutes were active within the hour |
|
||||
| `activity:fgminutes` | For app:activity, a number between 0-60 of how many minutes Wave was the foreground application |
|
||||
| `activity:openminutes` | For app:activity, a number between 0-60 of how many minutes Wave was open |
|
||||
| `action:initiator` | For certain actions logs if the action was initiated by the UI or the backend |
|
||||
| `debug:panictype` | The string that identifies the panic location within our Go code |
|
||||
| `block:view` | Type of block, e.g. "preview", "waveai", "term", "sysinfo", etc. |
|
||||
| `ai:backendtype` | AI backend type (e.g. OpenAI, Gemini, Anthropic, etc.) |
|
||||
| `wsh:cmd` | The wsh command that was run, e.g. "view", "edit", "run", "editconfig" etc. |
|
||||
| `wsh:haderror` | True/False whether the wsh command returned an error |
|
||||
| `conn:conntype` | Type of connection (ssh / wsl) |
|
||||
| `display:height` | Height of the main display in px |
|
||||
| `display:width` | Width of the main display in px |
|
||||
| `display:dpr` | DPR of the main display |
|
||||
| `display:count` | How many total displays |
|
||||
| `display:all` | JSON for all the displays attached (same attributes as above) |
|
||||
| `count:blocks` | Total number of blocks |
|
||||
| `count:tabs` | Total number of tabs |
|
||||
| `count:windows` | Total number of windows |
|
||||
| `count:workspaces` | Total number of workspaces |
|
||||
| `count:sshconn` | Total number of SSH connections |
|
||||
| `count:wslconn` | Total number of WSL connections |
|
||||
| `count:views` | Counts of the types of blocks (views) |
|
||||
|
||||
---
|
||||
|
||||
|
@ -62,6 +62,10 @@ When looking at a directory, preview will show a file viewer much like MacOS' _F
|
||||
|
||||
The simplest way to view a new file is to double click its row in the file viewer. Alternatively, while the widget is focused, you can use the <Kbd k="ArrowUp" /> and <Kbd k="ArrowDown" /> arrow keys to select a row and press enter to preview the associated file.
|
||||
|
||||
##### Copy a File
|
||||
|
||||
If you have two directory widgets open, you can copy a file or a directory between them. To do this, simply drag the file or directory from one directory preview widget to another that is opened to where you would like it dropped. This even works for copying files and directories across connections.
|
||||
|
||||
##### View the Parent Directory
|
||||
|
||||
In the directory view, this is as simple as opening the `..` file as if it were a regular file. This can be done with the method above. You can also use the keyboard shortcut <Kbd k="Cmd:ArrowUp"/>.
|
||||
|
@ -460,13 +460,13 @@ This allows setting various options in the `config/settings.json` file. It will
|
||||
|
||||
## file
|
||||
|
||||
The `file` command provides a set of subcommands for managing files across different storage systems, such as `wavefile`, `wsh` remote servers, and S3 (not yet supported).
|
||||
The `file` command provides a set of subcommands for managing files across different storage systems, such as `wavefile`, `wsh` remote servers, and S3.
|
||||
|
||||
:::note
|
||||
|
||||
Wave Terminal is capable of managing files from remote SSH hosts, S3-compatible
|
||||
systems, and the internal Wave filesystem. Files are addressed via URIs, which
|
||||
vary depending on the storage system.
|
||||
vary depending on the storage system. If no scheme is specified, the file will be treated as a local connection.
|
||||
|
||||
URI format: `[profile]:[uri-scheme]://[connection]/[path]`
|
||||
|
||||
@ -509,8 +509,6 @@ Supported URI schemes:
|
||||
- `wavefile://temp/...` - stored globally, but removed on startup/shutdown
|
||||
- `wavefile://[uuid]/...` - an entity id (can be a block, tab, workspace, etc.)
|
||||
|
||||
All file operations respect a maximum file size of 10MB.
|
||||
|
||||
:::
|
||||
|
||||
### cat
|
||||
@ -545,7 +543,7 @@ cat config.json | wsh file write //ec2-user@remote01/~/config.json
|
||||
wsh file append [file-uri]
|
||||
```
|
||||
|
||||
Append data from stdin to a file, respecting the 10MB total file size limit. This is useful for log files or accumulating data. For example:
|
||||
Append data from stdin to a file, respecting a 10MB total file size limit. This is useful for log files or accumulating data. For example:
|
||||
|
||||
```sh
|
||||
tail -f app.log | wsh file append wavefile://block/logs.txt
|
||||
|
@ -61,3 +61,15 @@ const KbdInternal = ({ k }: { k: string }) => {
|
||||
export const Kbd = ({ k }: { k: string }) => {
|
||||
return <BrowserOnly fallback={<kbd>{k}</kbd>}>{() => <KbdInternal k={k} />}</BrowserOnly>;
|
||||
};
|
||||
|
||||
export const KbdChord = ({ karr }: { karr: string[] }) => {
|
||||
const elems: React.ReactNode[] = [];
|
||||
for (let i = 0; i < karr.length; i++) {
|
||||
if (i > 0) {
|
||||
elems.push(<span style={{ padding: "0 2px" }}>+</span>);
|
||||
}
|
||||
elems.push(<Kbd key={i} k={karr[i]} />);
|
||||
}
|
||||
const fullElem = <span style={{ whiteSpace: "nowrap" }}>{elems}</span>;
|
||||
return <BrowserOnly fallback={null}>{() => fullElem}</BrowserOnly>;
|
||||
};
|
||||
|
@ -77,10 +77,12 @@ const config = {
|
||||
synopsis: pkg.description,
|
||||
description: null,
|
||||
desktop: {
|
||||
Name: pkg.productName,
|
||||
Comment: pkg.description,
|
||||
Keywords: "developer;terminal;emulator;",
|
||||
category: "Development;Utility;",
|
||||
entry: {
|
||||
Name: pkg.productName,
|
||||
Comment: pkg.description,
|
||||
Keywords: "developer;terminal;emulator;",
|
||||
Categories: "Development;Utility;",
|
||||
},
|
||||
},
|
||||
executableArgs: ["--enable-features", "UseOzonePlatform", "--ozone-platform-hint", "auto"], // Hint Electron to use Ozone abstraction layer for native Wayland support
|
||||
},
|
||||
|
@ -3,6 +3,7 @@
|
||||
|
||||
import { RpcApi } from "@/app/store/wshclientapi";
|
||||
import { adaptFromElectronKeyEvent } from "@/util/keyutil";
|
||||
import { CHORD_TIMEOUT } from "@/util/sharedconst";
|
||||
import { Rectangle, shell, WebContentsView } from "electron";
|
||||
import { getWaveWindowById } from "emain/emain-window";
|
||||
import path from "path";
|
||||
@ -45,6 +46,8 @@ export class WaveTabView extends WebContentsView {
|
||||
isInitialized: boolean = false;
|
||||
isWaveReady: boolean = false;
|
||||
isDestroyed: boolean = false;
|
||||
keyboardChordMode: boolean = false;
|
||||
resetChordModeTimeout: NodeJS.Timeout = null;
|
||||
|
||||
constructor(fullConfig: FullConfigType) {
|
||||
console.log("createBareTabView");
|
||||
@ -91,6 +94,23 @@ export class WaveTabView extends WebContentsView {
|
||||
this._waveTabId = waveTabId;
|
||||
}
|
||||
|
||||
setKeyboardChordMode(mode: boolean) {
|
||||
this.keyboardChordMode = mode;
|
||||
if (mode) {
|
||||
if (this.resetChordModeTimeout) {
|
||||
clearTimeout(this.resetChordModeTimeout);
|
||||
}
|
||||
this.resetChordModeTimeout = setTimeout(() => {
|
||||
this.keyboardChordMode = false;
|
||||
}, CHORD_TIMEOUT);
|
||||
} else {
|
||||
if (this.resetChordModeTimeout) {
|
||||
clearTimeout(this.resetChordModeTimeout);
|
||||
this.resetChordModeTimeout = null;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
positionTabOnScreen(winBounds: Rectangle) {
|
||||
const curBounds = this.getBounds();
|
||||
if (
|
||||
@ -220,6 +240,11 @@ export async function getOrCreateWebViewForTab(waveWindowId: string, tabId: stri
|
||||
// console.log("WIN bie", tabView.waveTabId.substring(0, 8), waveEvent.type, waveEvent.code);
|
||||
handleCtrlShiftState(tabView.webContents, waveEvent);
|
||||
setWasActive(true);
|
||||
if (input.type == "keyDown" && tabView.keyboardChordMode) {
|
||||
e.preventDefault();
|
||||
tabView.setKeyboardChordMode(false);
|
||||
tabView.webContents.send("reinject-key", waveEvent);
|
||||
}
|
||||
});
|
||||
tabView.webContents.on("zoom-changed", (e) => {
|
||||
tabView.webContents.send("zoom-changed");
|
||||
|
@ -87,7 +87,10 @@ export function shFrameNavHandler(event: Electron.Event<Electron.WebContentsWill
|
||||
}
|
||||
if (
|
||||
event.frame.name == "pdfview" &&
|
||||
(url.startsWith("blob:file:///") || url.startsWith(getWebServerEndpoint() + "/wave/stream-file?"))
|
||||
(url.startsWith("blob:file:///") ||
|
||||
url.startsWith(getWebServerEndpoint() + "/wave/stream-file?") ||
|
||||
url.startsWith(getWebServerEndpoint() + "/wave/stream-file/") ||
|
||||
url.startsWith(getWebServerEndpoint() + "/wave/stream-local-file?"))
|
||||
) {
|
||||
// allowed
|
||||
return;
|
||||
|
@ -240,7 +240,9 @@ electron.ipcMain.on("webview-image-contextmenu", (event: electron.IpcMainEvent,
|
||||
});
|
||||
|
||||
electron.ipcMain.on("download", (event, payload) => {
|
||||
const streamingUrl = getWebServerEndpoint() + "/wave/stream-file?path=" + encodeURIComponent(payload.filePath);
|
||||
const baseName = encodeURIComponent(path.basename(payload.filePath));
|
||||
const streamingUrl =
|
||||
getWebServerEndpoint() + "/wave/stream-file/" + baseName + "?path=" + encodeURIComponent(payload.filePath);
|
||||
event.sender.downloadURL(streamingUrl);
|
||||
});
|
||||
|
||||
@ -259,6 +261,16 @@ electron.ipcMain.on("get-cursor-point", (event) => {
|
||||
event.returnValue = retVal;
|
||||
});
|
||||
|
||||
electron.ipcMain.handle("capture-screenshot", async (event, rect) => {
|
||||
const tabView = getWaveTabViewByWebContentsId(event.sender.id);
|
||||
if (!tabView) {
|
||||
throw new Error("No tab view found for the given webContents id");
|
||||
}
|
||||
const image = await tabView.webContents.capturePage(rect);
|
||||
const base64String = image.toPNG().toString("base64");
|
||||
return `data:image/png;base64,${base64String}`;
|
||||
});
|
||||
|
||||
electron.ipcMain.on("get-env", (event, varName) => {
|
||||
event.returnValue = process.env[varName] ?? null;
|
||||
});
|
||||
@ -312,6 +324,12 @@ electron.ipcMain.on("register-global-webview-keys", (event, keys: string[]) => {
|
||||
webviewKeys = keys ?? [];
|
||||
});
|
||||
|
||||
electron.ipcMain.on("set-keyboard-chord-mode", (event) => {
|
||||
event.returnValue = null;
|
||||
const tabView = getWaveTabViewByWebContentsId(event.sender.id);
|
||||
tabView?.setKeyboardChordMode(true);
|
||||
});
|
||||
|
||||
if (unamePlatform !== "darwin") {
|
||||
const fac = new FastAverageColor();
|
||||
|
||||
@ -352,6 +370,7 @@ electron.ipcMain.on("quicklook", (event, filePath: string) => {
|
||||
|
||||
electron.ipcMain.on("open-native-path", (event, filePath: string) => {
|
||||
console.log("open-native-path", filePath);
|
||||
filePath = filePath.replace("~", electronApp.getPath("home"));
|
||||
fireAndForget(() =>
|
||||
callWithOriginalXdgCurrentDesktopAsync(() =>
|
||||
electron.shell.openPath(filePath).then((excuse) => {
|
||||
|
@ -1,7 +1,7 @@
|
||||
// Copyright 2025, Command Line Inc.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
import { contextBridge, ipcRenderer, WebviewTag } from "electron";
|
||||
import { contextBridge, ipcRenderer, Rectangle, WebviewTag } from "electron";
|
||||
|
||||
contextBridge.exposeInMainWorld("api", {
|
||||
getAuthKey: () => ipcRenderer.sendSync("get-auth-key"),
|
||||
@ -51,6 +51,8 @@ contextBridge.exposeInMainWorld("api", {
|
||||
sendLog: (log) => ipcRenderer.send("fe-log", log),
|
||||
onQuicklook: (filePath: string) => ipcRenderer.send("quicklook", filePath),
|
||||
openNativePath: (filePath: string) => ipcRenderer.send("open-native-path", filePath),
|
||||
captureScreenshot: (rect: Rectangle) => ipcRenderer.invoke("capture-screenshot", rect),
|
||||
setKeyboardChordMode: () => ipcRenderer.send("set-keyboard-chord-mode"),
|
||||
});
|
||||
|
||||
// Custom event for "new-window"
|
||||
|
@ -1,105 +1,25 @@
|
||||
// Copyright 2025, Command Line Inc.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
import { getWebServerEndpoint } from "@/util/endpoints";
|
||||
import * as util from "@/util/util";
|
||||
import { PLATFORM, PlatformMacOS } from "@/util/platformutil";
|
||||
import { computeBgStyleFromMeta } from "@/util/waveutil";
|
||||
import useResizeObserver from "@react-hook/resize-observer";
|
||||
import { generate as generateCSS, parse as parseCSS, walk as walkCSS } from "css-tree";
|
||||
import { useAtomValue } from "jotai";
|
||||
import { CSSProperties, useCallback, useLayoutEffect, useRef } from "react";
|
||||
import { debounce } from "throttle-debounce";
|
||||
import { atoms, getApi, PLATFORM, WOS } from "./store/global";
|
||||
import { atoms, getApi, WOS } from "./store/global";
|
||||
import { useWaveObjectValue } from "./store/wos";
|
||||
|
||||
function encodeFileURL(file: string) {
|
||||
const webEndpoint = getWebServerEndpoint();
|
||||
return webEndpoint + `/wave/stream-file?path=${encodeURIComponent(file)}&no404=1`;
|
||||
}
|
||||
|
||||
function processBackgroundUrls(cssText: string): string {
|
||||
if (util.isBlank(cssText)) {
|
||||
return null;
|
||||
}
|
||||
cssText = cssText.trim();
|
||||
if (cssText.endsWith(";")) {
|
||||
cssText = cssText.slice(0, -1);
|
||||
}
|
||||
const attrRe = /^background(-image)?\s*:\s*/i;
|
||||
cssText = cssText.replace(attrRe, "");
|
||||
const ast = parseCSS("background: " + cssText, {
|
||||
context: "declaration",
|
||||
});
|
||||
let hasUnsafeUrl = false;
|
||||
walkCSS(ast, {
|
||||
visit: "Url",
|
||||
enter(node) {
|
||||
const originalUrl = node.value.trim();
|
||||
if (
|
||||
originalUrl.startsWith("http:") ||
|
||||
originalUrl.startsWith("https:") ||
|
||||
originalUrl.startsWith("data:")
|
||||
) {
|
||||
return;
|
||||
}
|
||||
// allow file:/// urls (if they are absolute)
|
||||
if (originalUrl.startsWith("file://")) {
|
||||
const path = originalUrl.slice(7);
|
||||
if (!path.startsWith("/")) {
|
||||
console.log(`Invalid background, contains a non-absolute file URL: ${originalUrl}`);
|
||||
hasUnsafeUrl = true;
|
||||
return;
|
||||
}
|
||||
const newUrl = encodeFileURL(path);
|
||||
node.value = newUrl;
|
||||
return;
|
||||
}
|
||||
// allow absolute paths
|
||||
if (originalUrl.startsWith("/") || originalUrl.startsWith("~/") || /^[a-zA-Z]:(\/|\\)/.test(originalUrl)) {
|
||||
const newUrl = encodeFileURL(originalUrl);
|
||||
node.value = newUrl;
|
||||
return;
|
||||
}
|
||||
hasUnsafeUrl = true;
|
||||
console.log(`Invalid background, contains an unsafe URL scheme: ${originalUrl}`);
|
||||
},
|
||||
});
|
||||
if (hasUnsafeUrl) {
|
||||
return null;
|
||||
}
|
||||
const rtnStyle = generateCSS(ast);
|
||||
if (rtnStyle == null) {
|
||||
return null;
|
||||
}
|
||||
return rtnStyle.replace(/^background:\s*/, "");
|
||||
}
|
||||
|
||||
export function AppBackground() {
|
||||
const bgRef = useRef<HTMLDivElement>(null);
|
||||
const tabId = useAtomValue(atoms.staticTabId);
|
||||
const [tabData] = useWaveObjectValue<Tab>(WOS.makeORef("tab", tabId));
|
||||
const bgAttr = tabData?.meta?.bg;
|
||||
const style: CSSProperties = {};
|
||||
if (!util.isBlank(bgAttr)) {
|
||||
try {
|
||||
const processedBg = processBackgroundUrls(bgAttr);
|
||||
if (!util.isBlank(processedBg)) {
|
||||
const opacity = util.boundNumber(tabData?.meta?.["bg:opacity"], 0, 1) ?? 0.5;
|
||||
style.opacity = opacity;
|
||||
style.background = processedBg;
|
||||
const blendMode = tabData?.meta?.["bg:blendmode"];
|
||||
if (!util.isBlank(blendMode)) {
|
||||
style.backgroundBlendMode = blendMode;
|
||||
}
|
||||
}
|
||||
} catch (e) {
|
||||
console.error("error processing background", e);
|
||||
}
|
||||
}
|
||||
const style: CSSProperties = computeBgStyleFromMeta(tabData?.meta, 0.5) ?? {};
|
||||
const getAvgColor = useCallback(
|
||||
debounce(30, () => {
|
||||
if (
|
||||
bgRef.current &&
|
||||
PLATFORM !== "darwin" &&
|
||||
PLATFORM !== PlatformMacOS &&
|
||||
bgRef.current &&
|
||||
"windowControlsOverlay" in window.navigator
|
||||
) {
|
||||
|
@ -72,11 +72,12 @@ a.plain-link {
|
||||
}
|
||||
|
||||
.error-boundary {
|
||||
white-space: pre-wrap;
|
||||
color: var(--error-color);
|
||||
}
|
||||
|
||||
.error-color {
|
||||
color: var(--error-color);
|
||||
color: var(--error-color);
|
||||
}
|
||||
|
||||
/* OverlayScrollbars styling */
|
||||
|
@ -3,18 +3,11 @@
|
||||
|
||||
import { Workspace } from "@/app/workspace/workspace";
|
||||
import { ContextMenuModel } from "@/store/contextmenu";
|
||||
import {
|
||||
atoms,
|
||||
createBlock,
|
||||
getSettingsPrefixAtom,
|
||||
globalStore,
|
||||
isDev,
|
||||
PLATFORM,
|
||||
removeFlashError,
|
||||
} from "@/store/global";
|
||||
import { atoms, createBlock, getSettingsPrefixAtom, globalStore, isDev, removeFlashError } from "@/store/global";
|
||||
import { appHandleKeyDown, keyboardMouseDownHandler } from "@/store/keymodel";
|
||||
import { getElemAsStr } from "@/util/focusutil";
|
||||
import * as keyutil from "@/util/keyutil";
|
||||
import { PLATFORM } from "@/util/platformutil";
|
||||
import * as util from "@/util/util";
|
||||
import clsx from "clsx";
|
||||
import debug from "debug";
|
||||
@ -29,7 +22,7 @@ import { NotificationBubbles } from "./notification/notificationbubbles";
|
||||
|
||||
import "./app.scss";
|
||||
|
||||
// this should come after app.scss (don't remove the newline above otherwise prettier will reorder these imports)
|
||||
// tailwindsetup.css should come *after* app.scss (don't remove the newline above otherwise prettier will reorder these imports)
|
||||
import "../tailwindsetup.css";
|
||||
|
||||
const dlog = debug("wave:app");
|
||||
|
@ -24,6 +24,7 @@ import { MagnifyIcon } from "@/element/magnify";
|
||||
import { MenuButton } from "@/element/menubutton";
|
||||
import { NodeModel } from "@/layout/index";
|
||||
import * as util from "@/util/util";
|
||||
import { computeBgStyleFromMeta } from "@/util/waveutil";
|
||||
import clsx from "clsx";
|
||||
import * as jotai from "jotai";
|
||||
import { OverlayScrollbarsComponent } from "overlayscrollbars-react";
|
||||
@ -240,6 +241,7 @@ const BlockFrame_Header = ({
|
||||
icon: "link-slash",
|
||||
title: "wsh is not installed for this connection",
|
||||
};
|
||||
const showNoWshButton = manageConnection && wshProblem && !util.isBlank(connName) && !connName.startsWith("aws:");
|
||||
|
||||
return (
|
||||
<div
|
||||
@ -262,9 +264,7 @@ const BlockFrame_Header = ({
|
||||
changeConnModalAtom={changeConnModalAtom}
|
||||
/>
|
||||
)}
|
||||
{manageConnection && wshProblem && (
|
||||
<IconButton decl={wshInstallButton} className="block-frame-header-iconbutton" />
|
||||
)}
|
||||
{showNoWshButton && <IconButton decl={wshInstallButton} className="block-frame-header-iconbutton" />}
|
||||
<div className="block-frame-textelems-wrapper">{headerTextElems}</div>
|
||||
<div className="block-frame-end-icons">{endIconsElem}</div>
|
||||
</div>
|
||||
@ -575,15 +575,9 @@ const BlockFrame_Default_Component = (props: BlockFrameProps) => {
|
||||
}, [manageConnection, blockData]);
|
||||
|
||||
const viewIconElem = getViewIconElem(viewIconUnion, blockData);
|
||||
const innerStyle: React.CSSProperties = {};
|
||||
if (!preview && customBg?.bg != null) {
|
||||
innerStyle.background = customBg.bg;
|
||||
if (customBg["bg:opacity"] != null) {
|
||||
innerStyle.opacity = customBg["bg:opacity"];
|
||||
}
|
||||
if (customBg["bg:blendmode"] != null) {
|
||||
innerStyle.backgroundBlendMode = customBg["bg:blendmode"];
|
||||
}
|
||||
let innerStyle: React.CSSProperties = {};
|
||||
if (!preview) {
|
||||
innerStyle = computeBgStyleFromMeta(customBg);
|
||||
}
|
||||
const previewElem = <div className="block-frame-preview">{viewIconElem}</div>;
|
||||
const headerElem = (
|
||||
@ -609,7 +603,8 @@ const BlockFrame_Default_Component = (props: BlockFrameProps) => {
|
||||
"--magnified-block-blur": `${magnifiedBlockBlur}px`,
|
||||
} as React.CSSProperties
|
||||
}
|
||||
{...({ inert: preview ? "1" : undefined } as any)} // sets insert="1" ... but tricks TS into accepting it
|
||||
// @ts-ignore: inert does exist in the DOM, just not in react
|
||||
inert={preview ? "1" : undefined} //
|
||||
>
|
||||
<BlockMask nodeModel={nodeModel} />
|
||||
{preview || viewModel == null ? null : (
|
||||
@ -643,13 +638,11 @@ const BlockFrame = React.memo((props: BlockFrameProps) => {
|
||||
const blockId = props.nodeModel.blockId;
|
||||
const [blockData] = WOS.useWaveObjectValue<Block>(WOS.makeORef("block", blockId));
|
||||
const tabData = jotai.useAtomValue(atoms.tabAtom);
|
||||
|
||||
if (!blockId || !blockData) {
|
||||
return null;
|
||||
}
|
||||
const FrameElem = BlockFrame_Default;
|
||||
const numBlocks = tabData?.blockids?.length ?? 0;
|
||||
return <FrameElem {...props} numBlocksInTab={numBlocks} />;
|
||||
return <BlockFrame_Default {...props} numBlocksInTab={numBlocks} />;
|
||||
});
|
||||
|
||||
export { BlockFrame, NumActiveConnColors };
|
||||
|
154
frontend/app/element/ansiline.tsx
Normal file
154
frontend/app/element/ansiline.tsx
Normal file
@ -0,0 +1,154 @@
|
||||
// Maps ANSI SGR (Select Graphic Rendition) codes to Tailwind utility classes.
// Code 0 maps to the sentinel string "reset" (the consumer clears all state);
// code 7 (reverse video) is intentionally absent — it is handled specially by
// the consumer, which swaps foreground/background at render time.
export const ANSI_TAILWIND_MAP = {
    // Reset and modifiers
    0: "reset", // special: clear state
    1: "font-bold",
    2: "opacity-75", // faint / dim
    3: "italic",
    4: "underline",
    8: "invisible", // conceal
    9: "line-through", // strikethrough

    // Foreground standard colors (30-37)
    30: "text-ansi-black",
    31: "text-ansi-red",
    32: "text-ansi-green",
    33: "text-ansi-yellow",
    34: "text-ansi-blue",
    35: "text-ansi-magenta",
    36: "text-ansi-cyan",
    37: "text-ansi-white",

    // Foreground bright colors (90-97)
    90: "text-ansi-brightblack",
    91: "text-ansi-brightred",
    92: "text-ansi-brightgreen",
    93: "text-ansi-brightyellow",
    94: "text-ansi-brightblue",
    95: "text-ansi-brightmagenta",
    96: "text-ansi-brightcyan",
    97: "text-ansi-brightwhite",

    // Background standard colors (40-47)
    40: "bg-ansi-black",
    41: "bg-ansi-red",
    42: "bg-ansi-green",
    43: "bg-ansi-yellow",
    44: "bg-ansi-blue",
    45: "bg-ansi-magenta",
    46: "bg-ansi-cyan",
    47: "bg-ansi-white",

    // Background bright colors (100-107)
    100: "bg-ansi-brightblack",
    101: "bg-ansi-brightred",
    102: "bg-ansi-brightgreen",
    103: "bg-ansi-brightyellow",
    104: "bg-ansi-brightblue",
    105: "bg-ansi-brightmagenta",
    106: "bg-ansi-brightcyan",
    107: "bg-ansi-brightwhite",
};
|
||||
|
||||
// Mutable accumulator for the current SGR styling state while scanning a line.
type InternalStateType = {
    modifiers: Set<string>; // non-color classes (bold, italic, underline, ...)
    textColor: string | null; // current foreground class, null = terminal default
    bgColor: string | null; // current background class, null = terminal default
    reverse: boolean; // SGR 7 pending: swap fg/bg when emitting classes
};

// One run of text whose characters all share the same computed class string.
type SegmentType = {
    text: string;
    classes: string;
};
|
||||
|
||||
const makeInitialState: () => InternalStateType = () => ({
|
||||
modifiers: new Set<string>(),
|
||||
textColor: null,
|
||||
bgColor: null,
|
||||
reverse: false,
|
||||
});
|
||||
|
||||
const updateStateWithCodes = (state, codes) => {
|
||||
codes.forEach((code) => {
|
||||
if (code === 0) {
|
||||
// Reset state
|
||||
state.modifiers.clear();
|
||||
state.textColor = null;
|
||||
state.bgColor = null;
|
||||
state.reverse = false;
|
||||
return;
|
||||
}
|
||||
// Instead of swapping immediately, we set a flag
|
||||
if (code === 7) {
|
||||
state.reverse = true;
|
||||
return;
|
||||
}
|
||||
const tailwindClass = ANSI_TAILWIND_MAP[code];
|
||||
if (tailwindClass && tailwindClass !== "reset") {
|
||||
if (tailwindClass.startsWith("text-")) {
|
||||
state.textColor = tailwindClass;
|
||||
} else if (tailwindClass.startsWith("bg-")) {
|
||||
state.bgColor = tailwindClass;
|
||||
} else {
|
||||
state.modifiers.add(tailwindClass);
|
||||
}
|
||||
}
|
||||
});
|
||||
return state;
|
||||
};
|
||||
|
||||
const stateToClasses = (state: InternalStateType) => {
|
||||
const classes = [];
|
||||
classes.push(...Array.from(state.modifiers));
|
||||
|
||||
// Apply reverse: swap text and background colors if flag is set.
|
||||
let textColor = state.textColor;
|
||||
let bgColor = state.bgColor;
|
||||
if (state.reverse) {
|
||||
[textColor, bgColor] = [bgColor, textColor];
|
||||
}
|
||||
if (textColor) classes.push(textColor);
|
||||
if (bgColor) classes.push(bgColor);
|
||||
|
||||
return classes.join(" ");
|
||||
};
|
||||
|
||||
const ansiRegex = /\x1b\[([0-9;]+)m/g;
|
||||
|
||||
const AnsiLine = ({ line }) => {
|
||||
const segments: SegmentType[] = [];
|
||||
let lastIndex = 0;
|
||||
let currentState = makeInitialState();
|
||||
|
||||
let match: RegExpExecArray;
|
||||
while ((match = ansiRegex.exec(line)) !== null) {
|
||||
if (match.index > lastIndex) {
|
||||
segments.push({
|
||||
text: line.substring(lastIndex, match.index),
|
||||
classes: stateToClasses(currentState),
|
||||
});
|
||||
}
|
||||
const codes = match[1].split(";").map(Number);
|
||||
updateStateWithCodes(currentState, codes);
|
||||
lastIndex = ansiRegex.lastIndex;
|
||||
}
|
||||
|
||||
if (lastIndex < line.length) {
|
||||
segments.push({
|
||||
text: line.substring(lastIndex),
|
||||
classes: stateToClasses(currentState),
|
||||
});
|
||||
}
|
||||
|
||||
return (
|
||||
<div>
|
||||
{segments.map((seg, idx) => (
|
||||
<span key={idx} className={seg.classes}>
|
||||
{seg.text}
|
||||
</span>
|
||||
))}
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
export default AnsiLine;
|
@ -4,7 +4,7 @@
|
||||
import { RpcApi } from "@/app/store/wshclientapi";
|
||||
import { TabRpcClient } from "@/app/store/wshrpcutil";
|
||||
import { getWebServerEndpoint } from "@/util/endpoints";
|
||||
import { isBlank, makeConnRoute } from "@/util/util";
|
||||
import { formatRemoteUri } from "@/util/waveutil";
|
||||
import parseSrcSet from "parse-srcset";
|
||||
|
||||
export type MarkdownContentBlockType = {
|
||||
@ -158,19 +158,13 @@ export const resolveRemoteFile = async (filepath: string, resolveOpts: MarkdownR
|
||||
if (!filepath || filepath.startsWith("http://") || filepath.startsWith("https://")) {
|
||||
return filepath;
|
||||
}
|
||||
|
||||
try {
|
||||
const route = makeConnRoute(resolveOpts.connName);
|
||||
const fileInfo = await RpcApi.RemoteFileJoinCommand(TabRpcClient, [resolveOpts.baseDir, filepath], {
|
||||
route: route,
|
||||
});
|
||||
|
||||
const baseDirUri = formatRemoteUri(resolveOpts.baseDir, resolveOpts.connName);
|
||||
const fileInfo = await RpcApi.FileJoinCommand(TabRpcClient, [baseDirUri, filepath]);
|
||||
const remoteUri = formatRemoteUri(fileInfo.path, resolveOpts.connName);
|
||||
console.log("markdown resolve", resolveOpts, filepath, "=>", baseDirUri, remoteUri);
|
||||
const usp = new URLSearchParams();
|
||||
usp.set("path", fileInfo.path);
|
||||
if (!isBlank(resolveOpts.connName)) {
|
||||
usp.set("connection", resolveOpts.connName);
|
||||
}
|
||||
|
||||
usp.set("path", remoteUri);
|
||||
return getWebServerEndpoint() + "/wave/stream-file?" + usp.toString();
|
||||
} catch (err) {
|
||||
console.warn("Failed to resolve remote file:", filepath, err);
|
||||
|
@ -2,7 +2,7 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
import { MagnifyIcon } from "@/app/element/magnify";
|
||||
import { PLATFORM } from "@/app/store/global";
|
||||
import { PLATFORM, PlatformMacOS } from "@/util/platformutil";
|
||||
import "./quicktips.scss";
|
||||
|
||||
const KeyBinding = ({ keyDecl }: { keyDecl: string }) => {
|
||||
@ -10,7 +10,7 @@ const KeyBinding = ({ keyDecl }: { keyDecl: string }) => {
|
||||
const elems: React.ReactNode[] = [];
|
||||
for (let part of parts) {
|
||||
if (part === "Cmd") {
|
||||
if (PLATFORM === "darwin") {
|
||||
if (PLATFORM === PlatformMacOS) {
|
||||
elems.push(
|
||||
<div key="cmd" className="keybinding">
|
||||
⌘ Cmd
|
||||
|
@ -2,7 +2,6 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
import Logo from "@/app/asset/logo.svg";
|
||||
import { LinkButton } from "@/app/element/linkbutton";
|
||||
import { modalsModel } from "@/app/store/modalmodel";
|
||||
import { Modal } from "./modal";
|
||||
|
||||
@ -37,24 +36,30 @@ const AboutModal = ({}: AboutModalProps) => {
|
||||
Update Channel: {updaterChannel}
|
||||
</div>
|
||||
<div className="section links">
|
||||
<LinkButton
|
||||
className="secondary solid"
|
||||
<a
|
||||
href="https://github.com/wavetermdev/waveterm"
|
||||
target="_blank"
|
||||
rel="noopener"
|
||||
className="inline-flex items-center px-4 py-2 rounded border border-border hover:bg-hoverbg transition-colors duration-200"
|
||||
>
|
||||
<i className="fa-brands fa-github"></i>Github
|
||||
</LinkButton>
|
||||
<LinkButton className="secondary solid" href="https://www.waveterm.dev/" target="_blank">
|
||||
<i className="fa-sharp fa-light fa-globe"></i>Website
|
||||
</LinkButton>
|
||||
<LinkButton
|
||||
className="secondary solid"
|
||||
<i className="fa-brands fa-github mr-2"></i>Github
|
||||
</a>
|
||||
<a
|
||||
href="https://www.waveterm.dev/"
|
||||
target="_blank"
|
||||
rel="noopener"
|
||||
className="inline-flex items-center px-4 py-2 rounded border border-border hover:bg-hoverbg transition-colors duration-200"
|
||||
>
|
||||
<i className="fa-sharp fa-light fa-globe mr-2"></i>Website
|
||||
</a>
|
||||
<a
|
||||
href="https://github.com/wavetermdev/waveterm/blob/main/ACKNOWLEDGEMENTS.md"
|
||||
target="_blank"
|
||||
rel={"noopener"}
|
||||
rel="noopener"
|
||||
className="inline-flex items-center px-4 py-2 rounded border border-border hover:bg-hoverbg transition-colors duration-200"
|
||||
>
|
||||
<i className="fa-sharp fa-light fa-heart"></i>Acknowledgements
|
||||
</LinkButton>
|
||||
<i className="fa-sharp fa-light fa-heart mr-2"></i>Acknowledgements
|
||||
</a>
|
||||
</div>
|
||||
<div className="section text-standard">© {currentDate.getFullYear()} Command Line Inc.</div>
|
||||
</div>
|
||||
|
@ -127,12 +127,10 @@ function createS3SuggestionItems(
|
||||
// behavior
|
||||
return s3Profiles.map((profileName) => {
|
||||
const connStatus = connStatusMap.get(profileName);
|
||||
const connColorNum = computeConnColorNum(connStatus);
|
||||
const item: SuggestionConnectionItem = {
|
||||
status: "connected",
|
||||
icon: "arrow-right-arrow-left",
|
||||
iconColor:
|
||||
connStatus?.status == "connected" ? `var(--conn-icon-color-${connColorNum})` : "var(--grey-text-color)",
|
||||
icon: "database",
|
||||
iconColor: "var(--accent-color)",
|
||||
value: profileName,
|
||||
label: profileName,
|
||||
current: profileName == connection,
|
||||
@ -348,6 +346,7 @@ const ChangeConnectionBlockModal = React.memo(
|
||||
const connStatusMap = new Map<string, ConnStatus>();
|
||||
const fullConfig = jotai.useAtomValue(atoms.fullConfigAtom);
|
||||
let filterOutNowsh = util.useAtomValueSafe(viewModel.filterOutNowsh) ?? true;
|
||||
const showS3 = util.useAtomValueSafe(viewModel.showS3) ?? false;
|
||||
|
||||
let maxActiveConnNum = 1;
|
||||
for (const conn of allConnStatus) {
|
||||
@ -377,13 +376,10 @@ const ChangeConnectionBlockModal = React.memo(
|
||||
// typeahead was opened. good candidate for verbose log level.
|
||||
//console.log("unable to load wsl list from backend. using blank list: ", e)
|
||||
});
|
||||
/////////
|
||||
// TODO-S3
|
||||
// this needs an rpc call to generate a list of s3 profiles
|
||||
const newS3List = [];
|
||||
setS3List(newS3List);
|
||||
/////////
|
||||
}, [changeConnModalOpen, setConnList]);
|
||||
RpcApi.ConnListAWSCommand(TabRpcClient, { timeout: 2000 })
|
||||
.then((s3List) => setS3List(s3List ?? []))
|
||||
.catch((e) => console.log("unable to load s3 list from backend:", e));
|
||||
}, [changeConnModalOpen]);
|
||||
|
||||
const changeConnection = React.useCallback(
|
||||
async (connName: string) => {
|
||||
@ -393,10 +389,13 @@ const ChangeConnectionBlockModal = React.memo(
|
||||
if (connName == blockData?.meta?.connection) {
|
||||
return;
|
||||
}
|
||||
const isAws = connName?.startsWith("aws:");
|
||||
const oldCwd = blockData?.meta?.file ?? "";
|
||||
let newCwd: string;
|
||||
if (oldCwd == "") {
|
||||
newCwd = "";
|
||||
} else if (isAws) {
|
||||
newCwd = "/";
|
||||
} else {
|
||||
newCwd = "~";
|
||||
}
|
||||
@ -436,14 +435,17 @@ const ChangeConnectionBlockModal = React.memo(
|
||||
fullConfig,
|
||||
filterOutNowsh
|
||||
);
|
||||
const s3Suggestions = getS3Suggestions(
|
||||
s3List,
|
||||
connection,
|
||||
connSelected,
|
||||
connStatusMap,
|
||||
fullConfig,
|
||||
filterOutNowsh
|
||||
);
|
||||
let s3Suggestions: SuggestionConnectionScope = null;
|
||||
if (showS3) {
|
||||
s3Suggestions = getS3Suggestions(
|
||||
s3List,
|
||||
connection,
|
||||
connSelected,
|
||||
connStatusMap,
|
||||
fullConfig,
|
||||
filterOutNowsh
|
||||
);
|
||||
}
|
||||
const connectionsEditItem = getConnectionsEditItem(changeConnModalAtom, connSelected);
|
||||
const disconnectItem = getDisconnectItem(connection, connStatusMap);
|
||||
const newConnectionSuggestionItem = getNewConnectionSuggestionItem(
|
||||
|
@ -17,6 +17,7 @@ import {
|
||||
} from "@/layout/lib/types";
|
||||
import { getWebServerEndpoint } from "@/util/endpoints";
|
||||
import { fetch } from "@/util/fetchutil";
|
||||
import { setPlatform } from "@/util/platformutil";
|
||||
import { deepCompareReturnPrev, getPrefixedSettings, isBlank } from "@/util/util";
|
||||
import { atom, Atom, PrimitiveAtom, useAtomValue } from "jotai";
|
||||
import { globalStore } from "./jotaiStore";
|
||||
@ -25,7 +26,6 @@ import { ClientService, ObjectService } from "./services";
|
||||
import * as WOS from "./wos";
|
||||
import { getFileSubject, waveEventSubscribe } from "./wps";
|
||||
|
||||
let PLATFORM: NodeJS.Platform = "darwin";
|
||||
let atoms: GlobalAtomsType;
|
||||
let globalEnvironment: "electron" | "renderer";
|
||||
const blockComponentModelMap = new Map<string, BlockComponentModel>();
|
||||
@ -46,10 +46,6 @@ function initGlobal(initOpts: GlobalInitOptions) {
|
||||
initGlobalAtoms(initOpts);
|
||||
}
|
||||
|
||||
function setPlatform(platform: NodeJS.Platform) {
|
||||
PLATFORM = platform;
|
||||
}
|
||||
|
||||
function initGlobalAtoms(initOpts: GlobalInitOptions) {
|
||||
const windowIdAtom = atom(initOpts.windowId) as PrimitiveAtom<string>;
|
||||
const clientIdAtom = atom(initOpts.clientId) as PrimitiveAtom<string>;
|
||||
@ -456,6 +452,9 @@ async function replaceBlock(blockId: string, blockDef: BlockDef): Promise<string
|
||||
const layoutModel = getLayoutModelForTabById(tabId);
|
||||
const rtOpts: RuntimeOpts = { termsize: { rows: 25, cols: 80 } };
|
||||
const newBlockId = await ObjectService.CreateBlock(blockDef, rtOpts);
|
||||
setTimeout(async () => {
|
||||
await ObjectService.DeleteBlock(blockId);
|
||||
}, 300);
|
||||
const targetNodeId = layoutModel.getNodeByBlockId(blockId)?.id;
|
||||
if (targetNodeId == null) {
|
||||
throw new Error(`targetNodeId not found for blockId: ${blockId}`);
|
||||
@ -669,6 +668,17 @@ function getConnStatusAtom(conn: string): PrimitiveAtom<ConnStatus> {
|
||||
wshenabled: false,
|
||||
};
|
||||
rtn = atom(connStatus);
|
||||
} else if (conn.startsWith("aws:")) {
|
||||
const connStatus: ConnStatus = {
|
||||
connection: conn,
|
||||
connected: true,
|
||||
error: null,
|
||||
status: "connected",
|
||||
hasconnected: true,
|
||||
activeconnnum: 0,
|
||||
wshenabled: false,
|
||||
};
|
||||
rtn = atom(connStatus);
|
||||
} else {
|
||||
const connStatus: ConnStatus = {
|
||||
connection: conn,
|
||||
@ -763,6 +773,7 @@ export {
|
||||
getBlockComponentModel,
|
||||
getBlockMetaKeyAtom,
|
||||
getConnStatusAtom,
|
||||
getFocusedBlockId,
|
||||
getHostName,
|
||||
getObjectId,
|
||||
getOverrideConfigAtom,
|
||||
@ -775,7 +786,6 @@ export {
|
||||
isDev,
|
||||
loadConnStatus,
|
||||
openLink,
|
||||
PLATFORM,
|
||||
pushFlashError,
|
||||
pushNotification,
|
||||
recordTEvent,
|
||||
|
@ -10,9 +10,11 @@ import {
|
||||
getAllBlockComponentModels,
|
||||
getApi,
|
||||
getBlockComponentModel,
|
||||
getFocusedBlockId,
|
||||
getSettingsKeyAtom,
|
||||
globalStore,
|
||||
refocusNode,
|
||||
replaceBlock,
|
||||
WOS,
|
||||
} from "@/app/store/global";
|
||||
import {
|
||||
@ -23,12 +25,37 @@ import {
|
||||
} from "@/layout/index";
|
||||
import { getLayoutModelForStaticTab } from "@/layout/lib/layoutModelHooks";
|
||||
import * as keyutil from "@/util/keyutil";
|
||||
import { CHORD_TIMEOUT } from "@/util/sharedconst";
|
||||
import { fireAndForget } from "@/util/util";
|
||||
import * as jotai from "jotai";
|
||||
import { modalsModel } from "./modalmodel";
|
||||
|
||||
type KeyHandler = (event: WaveKeyboardEvent) => boolean;
|
||||
|
||||
const simpleControlShiftAtom = jotai.atom(false);
|
||||
const globalKeyMap = new Map<string, (waveEvent: WaveKeyboardEvent) => boolean>();
|
||||
const globalChordMap = new Map<string, Map<string, KeyHandler>>();
|
||||
|
||||
// track current chord state and timeout (for resetting)
|
||||
let activeChord: string | null = null;
|
||||
let chordTimeout: NodeJS.Timeout = null;
|
||||
|
||||
function resetChord() {
|
||||
activeChord = null;
|
||||
if (chordTimeout) {
|
||||
clearTimeout(chordTimeout);
|
||||
chordTimeout = null;
|
||||
}
|
||||
}
|
||||
|
||||
function setActiveChord(activeChordArg: string) {
|
||||
getApi().setKeyboardChordMode();
|
||||
if (chordTimeout) {
|
||||
clearTimeout(chordTimeout);
|
||||
}
|
||||
activeChord = activeChordArg;
|
||||
chordTimeout = setTimeout(() => resetChord(), CHORD_TIMEOUT);
|
||||
}
|
||||
|
||||
export function keyboardMouseDownHandler(e: MouseEvent) {
|
||||
if (!e.ctrlKey || !e.shiftKey) {
|
||||
@ -69,7 +96,7 @@ function shouldDispatchToBlock(e: WaveKeyboardEvent): boolean {
|
||||
const activeElem = document.activeElement;
|
||||
if (activeElem != null && activeElem instanceof HTMLElement) {
|
||||
if (activeElem.tagName == "INPUT" || activeElem.tagName == "TEXTAREA" || activeElem.contentEditable == "true") {
|
||||
if (activeElem.classList.contains("dummy-focus")) {
|
||||
if (activeElem.classList.contains("dummy-focus") || activeElem.classList.contains("dummy")) {
|
||||
return true;
|
||||
}
|
||||
if (keyutil.isInputEvent(e)) {
|
||||
@ -216,38 +243,73 @@ async function handleCmdN() {
|
||||
await createBlock(blockDef);
|
||||
}
|
||||
|
||||
async function handleSplitHorizontal() {
|
||||
async function handleSplitHorizontal(position: "before" | "after") {
|
||||
const layoutModel = getLayoutModelForStaticTab();
|
||||
const focusedNode = globalStore.get(layoutModel.focusedNode);
|
||||
if (focusedNode == null) {
|
||||
return;
|
||||
}
|
||||
const blockDef = getDefaultNewBlockDef();
|
||||
await createBlockSplitHorizontally(blockDef, focusedNode.data.blockId, "after");
|
||||
await createBlockSplitHorizontally(blockDef, focusedNode.data.blockId, position);
|
||||
}
|
||||
|
||||
async function handleSplitVertical() {
|
||||
async function handleSplitVertical(position: "before" | "after") {
|
||||
const layoutModel = getLayoutModelForStaticTab();
|
||||
const focusedNode = globalStore.get(layoutModel.focusedNode);
|
||||
if (focusedNode == null) {
|
||||
return;
|
||||
}
|
||||
const blockDef = getDefaultNewBlockDef();
|
||||
await createBlockSplitVertically(blockDef, focusedNode.data.blockId, "after");
|
||||
await createBlockSplitVertically(blockDef, focusedNode.data.blockId, position);
|
||||
}
|
||||
|
||||
let lastHandledEvent: KeyboardEvent | null = null;
|
||||
|
||||
// returns [keymatch, T]
|
||||
function checkKeyMap<T>(waveEvent: WaveKeyboardEvent, keyMap: Map<string, T>): [string, T] {
|
||||
for (const key of keyMap.keys()) {
|
||||
if (keyutil.checkKeyPressed(waveEvent, key)) {
|
||||
const val = keyMap.get(key);
|
||||
return [key, val];
|
||||
}
|
||||
}
|
||||
return [null, null];
|
||||
}
|
||||
|
||||
function appHandleKeyDown(waveEvent: WaveKeyboardEvent): boolean {
|
||||
const nativeEvent = (waveEvent as any).nativeEvent;
|
||||
if (lastHandledEvent != null && nativeEvent != null && lastHandledEvent === nativeEvent) {
|
||||
console.log("lastHandledEvent return false");
|
||||
return false;
|
||||
}
|
||||
lastHandledEvent = nativeEvent;
|
||||
const handled = handleGlobalWaveKeyboardEvents(waveEvent);
|
||||
if (handled) {
|
||||
if (activeChord) {
|
||||
console.log("handle activeChord", activeChord);
|
||||
// If we're in chord mode, look for the second key.
|
||||
const chordBindings = globalChordMap.get(activeChord);
|
||||
const [, handler] = checkKeyMap(waveEvent, chordBindings);
|
||||
if (handler) {
|
||||
resetChord();
|
||||
return handler(waveEvent);
|
||||
} else {
|
||||
// invalid chord; reset state and consume key
|
||||
resetChord();
|
||||
return true;
|
||||
}
|
||||
}
|
||||
const [chordKeyMatch] = checkKeyMap(waveEvent, globalChordMap);
|
||||
if (chordKeyMatch) {
|
||||
setActiveChord(chordKeyMatch);
|
||||
return true;
|
||||
}
|
||||
|
||||
const [, globalHandler] = checkKeyMap(waveEvent, globalKeyMap);
|
||||
if (globalHandler) {
|
||||
const handled = globalHandler(waveEvent);
|
||||
if (handled) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
const layoutModel = getLayoutModelForStaticTab();
|
||||
const focusedNode = globalStore.get(layoutModel.focusedNode);
|
||||
const blockId = focusedNode?.data?.blockId;
|
||||
@ -319,11 +381,11 @@ function registerGlobalKeys() {
|
||||
return true;
|
||||
});
|
||||
globalKeyMap.set("Cmd:d", () => {
|
||||
handleSplitHorizontal();
|
||||
handleSplitHorizontal("after");
|
||||
return true;
|
||||
});
|
||||
globalKeyMap.set("Shift:Cmd:d", () => {
|
||||
handleSplitVertical();
|
||||
handleSplitVertical("after");
|
||||
return true;
|
||||
});
|
||||
globalKeyMap.set("Cmd:i", () => {
|
||||
@ -380,6 +442,18 @@ function registerGlobalKeys() {
|
||||
switchBlockInDirection(tabId, NavigateDirection.Right);
|
||||
return true;
|
||||
});
|
||||
globalKeyMap.set("Ctrl:Shift:k", () => {
|
||||
const blockId = getFocusedBlockId();
|
||||
if (blockId == null) {
|
||||
return true;
|
||||
}
|
||||
replaceBlock(blockId, {
|
||||
meta: {
|
||||
view: "launcher",
|
||||
},
|
||||
});
|
||||
return true;
|
||||
});
|
||||
globalKeyMap.set("Cmd:g", () => {
|
||||
const bcm = getBlockComponentModel(getFocusedBlockInStaticTab());
|
||||
if (bcm.openSwitchConnection != null) {
|
||||
@ -445,6 +519,25 @@ function registerGlobalKeys() {
|
||||
// special case keys, handled by web view
|
||||
allKeys.push("Cmd:l", "Cmd:r", "Cmd:ArrowRight", "Cmd:ArrowLeft", "Cmd:o");
|
||||
getApi().registerGlobalWebviewKeys(allKeys);
|
||||
|
||||
const splitBlockKeys = new Map<string, KeyHandler>();
|
||||
splitBlockKeys.set("ArrowUp", () => {
|
||||
handleSplitVertical("before");
|
||||
return true;
|
||||
});
|
||||
splitBlockKeys.set("ArrowDown", () => {
|
||||
handleSplitVertical("after");
|
||||
return true;
|
||||
});
|
||||
splitBlockKeys.set("ArrowLeft", () => {
|
||||
handleSplitHorizontal("before");
|
||||
return true;
|
||||
});
|
||||
splitBlockKeys.set("ArrowRight", () => {
|
||||
handleSplitHorizontal("after");
|
||||
return true;
|
||||
});
|
||||
globalChordMap.set("Ctrl:Shift:s", splitBlockKeys);
|
||||
}
|
||||
|
||||
function getAllGlobalKeyBindings(): string[] {
|
||||
|
@ -52,6 +52,11 @@ class RpcApiType {
|
||||
return client.wshRpcCall("connlist", null, opts);
|
||||
}
|
||||
|
||||
// command "connlistaws" [call]
|
||||
ConnListAWSCommand(client: WshClient, opts?: RpcOpts): Promise<string[]> {
|
||||
return client.wshRpcCall("connlistaws", null, opts);
|
||||
}
|
||||
|
||||
// command "connreinstallwsh" [call]
|
||||
ConnReinstallWshCommand(client: WshClient, data: ConnExtData, opts?: RpcOpts): Promise<void> {
|
||||
return client.wshRpcCall("connreinstallwsh", data, opts);
|
||||
@ -122,6 +127,11 @@ class RpcApiType {
|
||||
return client.wshRpcCall("dispose", data, opts);
|
||||
}
|
||||
|
||||
// command "disposesuggestions" [call]
|
||||
DisposeSuggestionsCommand(client: WshClient, data: string, opts?: RpcOpts): Promise<void> {
|
||||
return client.wshRpcCall("disposesuggestions", data, opts);
|
||||
}
|
||||
|
||||
// command "eventpublish" [call]
|
||||
EventPublishCommand(client: WshClient, data: WaveEvent, opts?: RpcOpts): Promise<void> {
|
||||
return client.wshRpcCall("eventpublish", data, opts);
|
||||
@ -187,6 +197,11 @@ class RpcApiType {
|
||||
return client.wshRpcCall("fileinfo", data, opts);
|
||||
}
|
||||
|
||||
// command "filejoin" [call]
|
||||
FileJoinCommand(client: WshClient, data: string[], opts?: RpcOpts): Promise<FileInfo> {
|
||||
return client.wshRpcCall("filejoin", data, opts);
|
||||
}
|
||||
|
||||
// command "filelist" [call]
|
||||
FileListCommand(client: WshClient, data: FileListData, opts?: RpcOpts): Promise<FileInfo[]> {
|
||||
return client.wshRpcCall("filelist", data, opts);
|
||||
@ -212,6 +227,16 @@ class RpcApiType {
|
||||
return client.wshRpcCall("fileread", data, opts);
|
||||
}
|
||||
|
||||
// command "filereadstream" [responsestream]
|
||||
FileReadStreamCommand(client: WshClient, data: FileData, opts?: RpcOpts): AsyncGenerator<FileData, void, boolean> {
|
||||
return client.wshRpcStream("filereadstream", data, opts);
|
||||
}
|
||||
|
||||
// command "filesharecapability" [call]
|
||||
FileShareCapabilityCommand(client: WshClient, data: string, opts?: RpcOpts): Promise<FileShareCapability> {
|
||||
return client.wshRpcCall("filesharecapability", data, opts);
|
||||
}
|
||||
|
||||
// command "filestreamtar" [responsestream]
|
||||
FileStreamTarCommand(client: WshClient, data: CommandRemoteStreamTarData, opts?: RpcOpts): AsyncGenerator<Packet, void, boolean> {
|
||||
return client.wshRpcStream("filestreamtar", data, opts);
|
||||
@ -237,6 +262,11 @@ class RpcApiType {
|
||||
return client.wshRpcCall("getmeta", data, opts);
|
||||
}
|
||||
|
||||
// command "gettab" [call]
|
||||
GetTabCommand(client: WshClient, data: string, opts?: RpcOpts): Promise<Tab> {
|
||||
return client.wshRpcCall("gettab", data, opts);
|
||||
}
|
||||
|
||||
// command "getupdatechannel" [call]
|
||||
GetUpdateChannelCommand(client: WshClient, opts?: RpcOpts): Promise<string> {
|
||||
return client.wshRpcCall("getupdatechannel", null, opts);
|
||||
@ -268,7 +298,7 @@ class RpcApiType {
|
||||
}
|
||||
|
||||
// command "remotefilecopy" [call]
|
||||
RemoteFileCopyCommand(client: WshClient, data: CommandRemoteFileCopyData, opts?: RpcOpts): Promise<void> {
|
||||
RemoteFileCopyCommand(client: WshClient, data: CommandFileCopyData, opts?: RpcOpts): Promise<boolean> {
|
||||
return client.wshRpcCall("remotefilecopy", data, opts);
|
||||
}
|
||||
|
||||
@ -288,7 +318,7 @@ class RpcApiType {
|
||||
}
|
||||
|
||||
// command "remotefilemove" [call]
|
||||
RemoteFileMoveCommand(client: WshClient, data: CommandRemoteFileCopyData, opts?: RpcOpts): Promise<void> {
|
||||
RemoteFileMoveCommand(client: WshClient, data: CommandFileCopyData, opts?: RpcOpts): Promise<void> {
|
||||
return client.wshRpcCall("remotefilemove", data, opts);
|
||||
}
|
||||
|
||||
|
@ -12,7 +12,7 @@ interface SuggestionControlProps {
|
||||
anchorRef: React.RefObject<HTMLElement>;
|
||||
isOpen: boolean;
|
||||
onClose: () => void;
|
||||
onSelect: (item: SuggestionType, queryStr: string) => void;
|
||||
onSelect: (item: SuggestionType, queryStr: string) => boolean;
|
||||
onTab?: (item: SuggestionType, queryStr: string) => string;
|
||||
fetchSuggestions: SuggestionsFnType;
|
||||
className?: string;
|
||||
@ -30,13 +30,16 @@ const SuggestionControl: React.FC<SuggestionControlProps> = ({
|
||||
isOpen,
|
||||
onClose,
|
||||
onSelect,
|
||||
onTab,
|
||||
fetchSuggestions,
|
||||
className,
|
||||
children,
|
||||
}) => {
|
||||
if (!isOpen || !anchorRef.current || !fetchSuggestions) return null;
|
||||
|
||||
return <SuggestionControlInner {...{ anchorRef, onClose, onSelect, fetchSuggestions, className, children }} />;
|
||||
return (
|
||||
<SuggestionControlInner {...{ anchorRef, onClose, onSelect, onTab, fetchSuggestions, className, children }} />
|
||||
);
|
||||
};
|
||||
|
||||
function highlightPositions(target: string, positions: number[]): ReactNode[] {
|
||||
@ -232,22 +235,44 @@ const SuggestionControlInner: React.FC<SuggestionControlInnerProps> = ({
|
||||
return () => document.removeEventListener("mousedown", handleClickOutside);
|
||||
}, [onClose, anchorRef]);
|
||||
|
||||
useEffect(() => {
|
||||
if (dropdownRef.current) {
|
||||
const children = dropdownRef.current.children;
|
||||
if (children[selectedIndex]) {
|
||||
(children[selectedIndex] as HTMLElement).scrollIntoView({
|
||||
behavior: "auto",
|
||||
block: "nearest",
|
||||
});
|
||||
}
|
||||
}
|
||||
}, [selectedIndex]);
|
||||
|
||||
const handleKeyDown = (e: React.KeyboardEvent) => {
|
||||
if (e.key === "ArrowDown") {
|
||||
e.preventDefault();
|
||||
e.stopPropagation();
|
||||
setSelectedIndex((prev) => Math.min(prev + 1, suggestions.length - 1));
|
||||
} else if (e.key === "ArrowUp") {
|
||||
e.preventDefault();
|
||||
e.stopPropagation();
|
||||
setSelectedIndex((prev) => Math.max(prev - 1, 0));
|
||||
} else if (e.key === "Enter" && selectedIndex >= 0) {
|
||||
} else if (e.key === "Enter") {
|
||||
e.preventDefault();
|
||||
onSelect(suggestions[selectedIndex], query);
|
||||
onClose();
|
||||
e.stopPropagation();
|
||||
let suggestion: SuggestionType = null;
|
||||
if (selectedIndex >= 0 && selectedIndex < suggestions.length) {
|
||||
suggestion = suggestions[selectedIndex];
|
||||
}
|
||||
if (onSelect(suggestion, query)) {
|
||||
onClose();
|
||||
}
|
||||
} else if (e.key === "Escape") {
|
||||
e.preventDefault();
|
||||
e.stopPropagation();
|
||||
onClose();
|
||||
} else if (e.key === "Tab") {
|
||||
e.preventDefault();
|
||||
e.stopPropagation();
|
||||
const suggestion = suggestions[selectedIndex];
|
||||
if (suggestion != null) {
|
||||
const tabResult = onTab?.(suggestion, query);
|
||||
@ -255,6 +280,14 @@ const SuggestionControlInner: React.FC<SuggestionControlInnerProps> = ({
|
||||
setQuery(tabResult);
|
||||
}
|
||||
}
|
||||
} else if (e.key === "PageDown") {
|
||||
e.preventDefault();
|
||||
e.stopPropagation();
|
||||
setSelectedIndex((prev) => Math.min(prev + 10, suggestions.length - 1));
|
||||
} else if (e.key === "PageUp") {
|
||||
e.preventDefault();
|
||||
e.stopPropagation();
|
||||
setSelectedIndex((prev) => Math.max(prev - 10, 0));
|
||||
}
|
||||
};
|
||||
return (
|
||||
|
@ -5,7 +5,8 @@ import { Button } from "@/app/element/button";
|
||||
import { modalsModel } from "@/app/store/modalmodel";
|
||||
import { WindowDrag } from "@/element/windowdrag";
|
||||
import { deleteLayoutModelForTab } from "@/layout/index";
|
||||
import { atoms, createTab, getApi, globalStore, isDev, PLATFORM, setActiveTab } from "@/store/global";
|
||||
import { atoms, createTab, getApi, globalStore, isDev, setActiveTab } from "@/store/global";
|
||||
import { PLATFORM, PlatformMacOS } from "@/util/platformutil";
|
||||
import { fireAndForget } from "@/util/util";
|
||||
import { useAtomValue } from "jotai";
|
||||
import { OverlayScrollbars } from "overlayscrollbars";
|
||||
@ -641,7 +642,7 @@ const TabBar = memo(({ workspace }: TabBarProps) => {
|
||||
</div>
|
||||
) : undefined;
|
||||
const appMenuButton =
|
||||
PLATFORM !== "darwin" && !settings["window:showmenubar"] ? (
|
||||
PLATFORM !== PlatformMacOS && !settings["window:showmenubar"] ? (
|
||||
<div ref={appMenuButtonRef} className="app-menu-button" onClick={onEllipsisClick}>
|
||||
<i className="fa fa-ellipsis" />
|
||||
</div>
|
||||
|
@ -144,7 +144,7 @@ export function CodeEditor({ blockId, text, language, filename, fileinfo, meta,
|
||||
const fileInfo = await RpcApi.RemoteFileJoinCommand(TabRpcClient, [filename], {
|
||||
route: makeConnRoute(meta.connection ?? ""),
|
||||
});
|
||||
setAbsPath(`${fileInfo.dir}/${fileInfo.name}`);
|
||||
setAbsPath(fileInfo.path);
|
||||
} catch (e) {
|
||||
setAbsPath(filename);
|
||||
}
|
||||
|
@ -212,7 +212,7 @@ const LauncherView: React.FC<ViewComponentProps<LauncherViewModel>> = ({ blockId
|
||||
value={searchTerm}
|
||||
onKeyDown={keydownWrapper(model.keyDownHandler.bind(model))}
|
||||
onChange={(e) => setSearchTerm(e.target.value)}
|
||||
className="sr-only"
|
||||
className="sr-only dummy"
|
||||
aria-label="Search widgets"
|
||||
/>
|
||||
|
||||
|
@ -2,16 +2,17 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
import { Button } from "@/app/element/button";
|
||||
import { CopyButton } from "@/app/element/copybutton";
|
||||
import { Input } from "@/app/element/input";
|
||||
import { useDimensionsWithCallbackRef } from "@/app/hook/useDimensions";
|
||||
import { ContextMenuModel } from "@/app/store/contextmenu";
|
||||
import { PLATFORM, atoms, createBlock, getApi, globalStore } from "@/app/store/global";
|
||||
import { atoms, getApi, globalStore } from "@/app/store/global";
|
||||
import { RpcApi } from "@/app/store/wshclientapi";
|
||||
import { TabRpcClient } from "@/app/store/wshrpcutil";
|
||||
import type { PreviewModel } from "@/app/view/preview/preview";
|
||||
import { type PreviewModel } from "@/app/view/preview/preview";
|
||||
import { checkKeyPressed, isCharacterKeyEvent } from "@/util/keyutil";
|
||||
import { fireAndForget, isBlank, makeConnRoute, makeNativeLabel } from "@/util/util";
|
||||
import { PLATFORM, PlatformMacOS } from "@/util/platformutil";
|
||||
import { addOpenMenuItems } from "@/util/previewutil";
|
||||
import { fireAndForget, isBlank } from "@/util/util";
|
||||
import { formatRemoteUri } from "@/util/waveutil";
|
||||
import { offset, useDismiss, useFloating, useInteractions } from "@floating-ui/react";
|
||||
import {
|
||||
Column,
|
||||
@ -28,7 +29,7 @@ import clsx from "clsx";
|
||||
import dayjs from "dayjs";
|
||||
import { PrimitiveAtom, atom, useAtom, useAtomValue, useSetAtom } from "jotai";
|
||||
import { OverlayScrollbarsComponent, OverlayScrollbarsComponentRef } from "overlayscrollbars-react";
|
||||
import React, { Fragment, memo, useCallback, useEffect, useMemo, useRef, useState } from "react";
|
||||
import React, { Fragment, memo, useCallback, useEffect, useLayoutEffect, useMemo, useRef, useState } from "react";
|
||||
import { useDrag, useDrop } from "react-dnd";
|
||||
import { quote as shellQuote } from "shell-quote";
|
||||
import { debounce } from "throttle-debounce";
|
||||
@ -36,15 +37,13 @@ import "./directorypreview.scss";
|
||||
|
||||
const PageJumpSize = 20;
|
||||
|
||||
type FileCopyStatus = {
|
||||
copyData: CommandFileCopyData;
|
||||
copyError: string;
|
||||
allowRetry: boolean;
|
||||
};
|
||||
const recursiveError = "recursive flag must be set for directory operations";
|
||||
const overwriteError = "set overwrite flag to delete the existing file";
|
||||
const mergeError = "set overwrite flag to delete the existing contents or set merge flag to merge the contents";
|
||||
|
||||
declare module "@tanstack/react-table" {
|
||||
interface TableMeta<TData extends RowData> {
|
||||
updateName: (path: string) => void;
|
||||
updateName: (path: string, isDir: boolean) => void;
|
||||
newFile: () => void;
|
||||
newDirectory: () => void;
|
||||
}
|
||||
@ -212,6 +211,7 @@ function DirectoryTable({
|
||||
newDirectory,
|
||||
}: DirectoryTableProps) {
|
||||
const fullConfig = useAtomValue(atoms.fullConfigAtom);
|
||||
const setErrorMsg = useSetAtom(model.errorMsgAtom);
|
||||
const getIconFromMimeType = useCallback(
|
||||
(mimeType: string): string => {
|
||||
while (mimeType.length > 0) {
|
||||
@ -287,7 +287,7 @@ function DirectoryTable({
|
||||
|
||||
const setEntryManagerProps = useSetAtom(entryManagerOverlayPropsAtom);
|
||||
|
||||
const updateName = useCallback((path: string) => {
|
||||
const updateName = useCallback((path: string, isDir: boolean) => {
|
||||
const fileName = path.split("/").at(-1);
|
||||
setEntryManagerProps({
|
||||
entryManagerType: EntryManagerType.EditName,
|
||||
@ -298,16 +298,47 @@ function DirectoryTable({
|
||||
const lastInstance = path.lastIndexOf(fileName);
|
||||
newPath = path.substring(0, lastInstance) + newName;
|
||||
console.log(`replacing ${fileName} with ${newName}: ${path}`);
|
||||
fireAndForget(async () => {
|
||||
await RpcApi.FileMoveCommand(TabRpcClient, {
|
||||
srcuri: await model.formatRemoteUri(path, globalStore.get),
|
||||
desturi: await model.formatRemoteUri(newPath, globalStore.get),
|
||||
opts: {
|
||||
recursive: true,
|
||||
},
|
||||
const handleRename = (recursive: boolean) =>
|
||||
fireAndForget(async () => {
|
||||
try {
|
||||
let srcuri = await model.formatRemoteUri(path, globalStore.get);
|
||||
if (isDir) {
|
||||
srcuri += "/";
|
||||
}
|
||||
await RpcApi.FileMoveCommand(TabRpcClient, {
|
||||
srcuri,
|
||||
desturi: await model.formatRemoteUri(newPath, globalStore.get),
|
||||
opts: {
|
||||
recursive,
|
||||
},
|
||||
});
|
||||
} catch (e) {
|
||||
const errorText = `${e}`;
|
||||
console.warn(`Rename failed: ${errorText}`);
|
||||
let errorMsg: ErrorMsg;
|
||||
if (errorText.includes(recursiveError)) {
|
||||
errorMsg = {
|
||||
status: "Confirm Rename Directory",
|
||||
text: "Renaming a directory requires the recursive flag. Proceed?",
|
||||
level: "warning",
|
||||
buttons: [
|
||||
{
|
||||
text: "Rename Recursively",
|
||||
onClick: () => handleRename(true),
|
||||
},
|
||||
],
|
||||
};
|
||||
} else {
|
||||
errorMsg = {
|
||||
status: "Rename Failed",
|
||||
text: `${e}`,
|
||||
};
|
||||
}
|
||||
setErrorMsg(errorMsg);
|
||||
}
|
||||
model.refreshCallback();
|
||||
});
|
||||
model.refreshCallback();
|
||||
});
|
||||
handleRename(false);
|
||||
}
|
||||
setEntryManagerProps(undefined);
|
||||
},
|
||||
@ -352,7 +383,7 @@ function DirectoryTable({
|
||||
setSelectedPath((allRows[focusIndex]?.getValue("path") as string) ?? null);
|
||||
}, [table, focusIndex, data]);
|
||||
|
||||
useEffect(() => {
|
||||
useLayoutEffect(() => {
|
||||
const rows = table.getRowModel()?.flatRows;
|
||||
for (const row of rows) {
|
||||
if (row.getValue("name") == "..") {
|
||||
@ -360,7 +391,7 @@ function DirectoryTable({
|
||||
return;
|
||||
}
|
||||
}
|
||||
}, [table]);
|
||||
}, [table, data]);
|
||||
const columnSizeVars = useMemo(() => {
|
||||
const headers = table.getFlatHeaders();
|
||||
const colSizes: { [key: string]: number } = {};
|
||||
@ -454,13 +485,6 @@ function DirectoryTable({
|
||||
);
|
||||
}
|
||||
|
||||
function getNormFilePath(finfo: FileInfo): string {
|
||||
if (finfo.isdir) {
|
||||
return finfo.dir;
|
||||
}
|
||||
return finfo.dir + "/" + finfo.name;
|
||||
}
|
||||
|
||||
interface TableBodyProps {
|
||||
bodyRef: React.RefObject<HTMLDivElement>;
|
||||
model: PreviewModel;
|
||||
@ -490,6 +514,7 @@ function TableBody({
|
||||
const warningBoxRef = useRef<HTMLDivElement>();
|
||||
const rowRefs = useRef<HTMLDivElement[]>([]);
|
||||
const conn = useAtomValue(model.connection);
|
||||
const setErrorMsg = useSetAtom(model.errorMsgAtom);
|
||||
|
||||
useEffect(() => {
|
||||
if (focusIndex !== null && rowRefs.current[focusIndex] && bodyRef.current && osRef) {
|
||||
@ -524,17 +549,41 @@ function TableBody({
|
||||
if (finfo == null) {
|
||||
return;
|
||||
}
|
||||
const normPath = getNormFilePath(finfo);
|
||||
const fileName = finfo.path.split("/").pop();
|
||||
let parentFileInfo: FileInfo;
|
||||
try {
|
||||
parentFileInfo = await RpcApi.RemoteFileJoinCommand(TabRpcClient, [normPath, ".."], {
|
||||
route: makeConnRoute(conn),
|
||||
const handleFileDelete = (recursive: boolean) =>
|
||||
fireAndForget(async () => {
|
||||
const path = await model.formatRemoteUri(finfo.path, globalStore.get);
|
||||
try {
|
||||
await RpcApi.FileDeleteCommand(TabRpcClient, {
|
||||
path,
|
||||
recursive,
|
||||
});
|
||||
} catch (e) {
|
||||
const errorText = `${e}`;
|
||||
console.warn(`Delete failed: ${errorText}`);
|
||||
let errorMsg: ErrorMsg;
|
||||
if (errorText.includes(recursiveError)) {
|
||||
errorMsg = {
|
||||
status: "Confirm Delete Directory",
|
||||
text: "Deleting a directory requires the recursive flag. Proceed?",
|
||||
level: "warning",
|
||||
buttons: [
|
||||
{
|
||||
text: "Delete Recursively",
|
||||
onClick: () => handleFileDelete(true),
|
||||
},
|
||||
],
|
||||
};
|
||||
} else {
|
||||
errorMsg = {
|
||||
status: "Delete Failed",
|
||||
text: `${e}`,
|
||||
};
|
||||
}
|
||||
setErrorMsg(errorMsg);
|
||||
}
|
||||
setRefreshVersion((current) => current + 1);
|
||||
});
|
||||
} catch (e) {
|
||||
console.log("could not get parent file info. using child file info as fallback");
|
||||
parentFileInfo = finfo;
|
||||
}
|
||||
const menu: ContextMenuItem[] = [
|
||||
{
|
||||
label: "New File",
|
||||
@ -551,7 +600,7 @@ function TableBody({
|
||||
{
|
||||
label: "Rename",
|
||||
click: () => {
|
||||
table.options.meta.updateName(finfo.path);
|
||||
table.options.meta.updateName(finfo.path, finfo.isdir);
|
||||
},
|
||||
},
|
||||
{
|
||||
@ -573,85 +622,15 @@ function TableBody({
|
||||
label: "Copy Full File Name (Shell Quoted)",
|
||||
click: () => fireAndForget(() => navigator.clipboard.writeText(shellQuote([finfo.path]))),
|
||||
},
|
||||
{
|
||||
type: "separator",
|
||||
},
|
||||
{
|
||||
label: "Download File",
|
||||
click: () => {
|
||||
getApi().downloadFile(normPath);
|
||||
},
|
||||
},
|
||||
{
|
||||
type: "separator",
|
||||
},
|
||||
{
|
||||
label: "Open Preview in New Block",
|
||||
click: () =>
|
||||
fireAndForget(async () => {
|
||||
const blockDef: BlockDef = {
|
||||
meta: {
|
||||
view: "preview",
|
||||
file: finfo.path,
|
||||
connection: conn,
|
||||
},
|
||||
};
|
||||
await createBlock(blockDef);
|
||||
}),
|
||||
},
|
||||
];
|
||||
if (!conn) {
|
||||
menu.push(
|
||||
{
|
||||
type: "separator",
|
||||
},
|
||||
// TODO: resolve correct host path if connection is WSL
|
||||
{
|
||||
label: makeNativeLabel(PLATFORM, finfo.isdir, false),
|
||||
click: () => {
|
||||
getApi().openNativePath(normPath);
|
||||
},
|
||||
},
|
||||
{
|
||||
label: makeNativeLabel(PLATFORM, true, true),
|
||||
click: () => {
|
||||
getApi().openNativePath(parentFileInfo.dir);
|
||||
},
|
||||
}
|
||||
);
|
||||
}
|
||||
if (finfo.mimetype == "directory") {
|
||||
menu.push({
|
||||
label: "Open Terminal in New Block",
|
||||
click: () =>
|
||||
fireAndForget(async () => {
|
||||
const termBlockDef: BlockDef = {
|
||||
meta: {
|
||||
controller: "shell",
|
||||
view: "term",
|
||||
"cmd:cwd": await model.formatRemoteUri(finfo.path, globalStore.get),
|
||||
connection: conn,
|
||||
},
|
||||
};
|
||||
await createBlock(termBlockDef);
|
||||
}),
|
||||
});
|
||||
}
|
||||
addOpenMenuItems(menu, conn, finfo);
|
||||
menu.push(
|
||||
{
|
||||
type: "separator",
|
||||
},
|
||||
{
|
||||
label: "Delete",
|
||||
click: () => {
|
||||
fireAndForget(async () => {
|
||||
await RpcApi.FileDeleteCommand(TabRpcClient, {
|
||||
path: await model.formatRemoteUri(finfo.path, globalStore.get),
|
||||
recursive: false,
|
||||
}).catch((e) => console.log(e));
|
||||
setRefreshVersion((current) => current + 1);
|
||||
});
|
||||
},
|
||||
click: () => handleFileDelete(false),
|
||||
}
|
||||
);
|
||||
ContextMenuModel.showContextMenu(menu, e);
|
||||
@ -683,7 +662,6 @@ function TableBody({
|
||||
setSearch={setSearch}
|
||||
idx={idx}
|
||||
handleFileContextMenu={handleFileContextMenu}
|
||||
ref={(el) => (rowRefs.current[idx] = el)}
|
||||
key={idx}
|
||||
/>
|
||||
))}
|
||||
@ -696,7 +674,6 @@ function TableBody({
|
||||
setSearch={setSearch}
|
||||
idx={idx + table.getTopRows().length}
|
||||
handleFileContextMenu={handleFileContextMenu}
|
||||
ref={(el) => (rowRefs.current[idx] = el)}
|
||||
key={idx}
|
||||
/>
|
||||
))}
|
||||
@ -715,40 +692,29 @@ type TableRowProps = {
|
||||
handleFileContextMenu: (e: any, finfo: FileInfo) => Promise<void>;
|
||||
};
|
||||
|
||||
const TableRow = React.forwardRef(function (
|
||||
{ model, row, focusIndex, setFocusIndex, setSearch, idx, handleFileContextMenu }: TableRowProps,
|
||||
ref: React.RefObject<HTMLDivElement>
|
||||
) {
|
||||
const dirPath = useAtomValue(model.normFilePath);
|
||||
const TableRow = React.forwardRef(function ({
|
||||
model,
|
||||
row,
|
||||
focusIndex,
|
||||
setFocusIndex,
|
||||
setSearch,
|
||||
idx,
|
||||
handleFileContextMenu,
|
||||
}: TableRowProps) {
|
||||
const dirPath = useAtomValue(model.statFilePath);
|
||||
const connection = useAtomValue(model.connection);
|
||||
const formatRemoteUri = useCallback(
|
||||
(path: string) => {
|
||||
let conn: string;
|
||||
if (!connection) {
|
||||
conn = "local";
|
||||
} else {
|
||||
conn = connection;
|
||||
}
|
||||
return `wsh://${conn}/${path}`;
|
||||
},
|
||||
[connection]
|
||||
);
|
||||
|
||||
const dragItem: DraggedFile = {
|
||||
relName: row.getValue("name") as string,
|
||||
absParent: dirPath,
|
||||
uri: formatRemoteUri(row.getValue("path") as string),
|
||||
uri: formatRemoteUri(row.getValue("path") as string, connection),
|
||||
isDir: row.original.isdir,
|
||||
};
|
||||
const [{ isDragging }, drag, dragPreview] = useDrag(
|
||||
const [_, drag] = useDrag(
|
||||
() => ({
|
||||
type: "FILE_ITEM",
|
||||
canDrag: true,
|
||||
item: () => dragItem,
|
||||
collect: (monitor) => {
|
||||
return {
|
||||
isDragging: monitor.isDragging(),
|
||||
};
|
||||
},
|
||||
}),
|
||||
[dragItem]
|
||||
);
|
||||
@ -791,14 +757,14 @@ function DirectoryPreview({ model }: DirectoryPreviewProps) {
|
||||
const [searchText, setSearchText] = useState("");
|
||||
const [focusIndex, setFocusIndex] = useState(0);
|
||||
const [unfilteredData, setUnfilteredData] = useState<FileInfo[]>([]);
|
||||
const [filteredData, setFilteredData] = useState<FileInfo[]>([]);
|
||||
const showHiddenFiles = useAtomValue(model.showHiddenFiles);
|
||||
const [selectedPath, setSelectedPath] = useState("");
|
||||
const [refreshVersion, setRefreshVersion] = useAtom(model.refreshVersion);
|
||||
const conn = useAtomValue(model.connection);
|
||||
const blockData = useAtomValue(model.blockAtom);
|
||||
const dirPath = useAtomValue(model.normFilePath);
|
||||
const [copyStatus, setCopyStatus] = useState<FileCopyStatus>(null);
|
||||
const finfo = useAtomValue(model.statFile);
|
||||
const dirPath = finfo?.path;
|
||||
const setErrorMsg = useSetAtom(model.errorMsgAtom);
|
||||
|
||||
useEffect(() => {
|
||||
model.refreshCallback = () => {
|
||||
@ -809,31 +775,55 @@ function DirectoryPreview({ model }: DirectoryPreviewProps) {
|
||||
};
|
||||
}, [setRefreshVersion]);
|
||||
|
||||
useEffect(() => {
|
||||
const getContent = async () => {
|
||||
const file = await RpcApi.FileReadCommand(
|
||||
TabRpcClient,
|
||||
{
|
||||
info: {
|
||||
path: await model.formatRemoteUri(dirPath, globalStore.get),
|
||||
},
|
||||
},
|
||||
null
|
||||
);
|
||||
setUnfilteredData(file.entries);
|
||||
};
|
||||
getContent();
|
||||
}, [conn, dirPath, refreshVersion]);
|
||||
useEffect(
|
||||
() =>
|
||||
fireAndForget(async () => {
|
||||
let entries: FileInfo[];
|
||||
try {
|
||||
const file = await RpcApi.FileReadCommand(
|
||||
TabRpcClient,
|
||||
{
|
||||
info: {
|
||||
path: await model.formatRemoteUri(dirPath, globalStore.get),
|
||||
},
|
||||
},
|
||||
null
|
||||
);
|
||||
entries = file.entries ?? [];
|
||||
if (file?.info && file.info.dir && file.info?.path !== file.info?.dir) {
|
||||
entries.unshift({
|
||||
name: "..",
|
||||
path: file?.info?.dir,
|
||||
isdir: true,
|
||||
modtime: new Date().getTime(),
|
||||
mimetype: "directory",
|
||||
});
|
||||
}
|
||||
} catch (e) {
|
||||
setErrorMsg({
|
||||
status: "Cannot Read Directory",
|
||||
text: `${e}`,
|
||||
});
|
||||
}
|
||||
setUnfilteredData(entries);
|
||||
}),
|
||||
[conn, dirPath, refreshVersion]
|
||||
);
|
||||
|
||||
useEffect(() => {
|
||||
const filtered = unfilteredData?.filter((fileInfo) => {
|
||||
if (!showHiddenFiles && fileInfo.name.startsWith(".") && fileInfo.name != "..") {
|
||||
return false;
|
||||
}
|
||||
return fileInfo.name.toLowerCase().includes(searchText);
|
||||
});
|
||||
setFilteredData(filtered ?? []);
|
||||
}, [unfilteredData, showHiddenFiles, searchText]);
|
||||
const filteredData = useMemo(
|
||||
() =>
|
||||
unfilteredData?.filter((fileInfo) => {
|
||||
if (fileInfo.name == null) {
|
||||
console.log("fileInfo.name is null", fileInfo);
|
||||
return false;
|
||||
}
|
||||
if (!showHiddenFiles && fileInfo.name.startsWith(".") && fileInfo.name != "..") {
|
||||
return false;
|
||||
}
|
||||
return fileInfo.name.toLowerCase().includes(searchText);
|
||||
}) ?? [],
|
||||
[unfilteredData, showHiddenFiles, searchText]
|
||||
);
|
||||
|
||||
useEffect(() => {
|
||||
model.directoryKeyDownHandler = (waveEvent: WaveKeyboardEvent): boolean => {
|
||||
@ -875,7 +865,7 @@ function DirectoryPreview({ model }: DirectoryPreviewProps) {
|
||||
if (
|
||||
checkKeyPressed(waveEvent, "Space") &&
|
||||
searchText == "" &&
|
||||
PLATFORM == "darwin" &&
|
||||
PLATFORM == PlatformMacOS &&
|
||||
!blockData?.meta?.connection
|
||||
) {
|
||||
getApi().onQuicklook(selectedPath);
|
||||
@ -910,24 +900,48 @@ function DirectoryPreview({ model }: DirectoryPreviewProps) {
|
||||
});
|
||||
|
||||
const handleDropCopy = useCallback(
|
||||
async (data: CommandFileCopyData) => {
|
||||
async (data: CommandFileCopyData, isDir: boolean) => {
|
||||
try {
|
||||
await RpcApi.FileCopyCommand(TabRpcClient, data, { timeout: data.opts.timeout });
|
||||
setCopyStatus(null);
|
||||
} catch (e) {
|
||||
console.log("copy failed:", e);
|
||||
console.warn("Copy failed:", e);
|
||||
const copyError = `${e}`;
|
||||
const allowRetry = copyError.endsWith("overwrite not specified");
|
||||
const copyStatus: FileCopyStatus = {
|
||||
copyError,
|
||||
copyData: data,
|
||||
allowRetry,
|
||||
};
|
||||
setCopyStatus(copyStatus);
|
||||
const allowRetry = copyError.includes(overwriteError) || copyError.includes(mergeError);
|
||||
let errorMsg: ErrorMsg;
|
||||
if (allowRetry) {
|
||||
errorMsg = {
|
||||
status: "Confirm Overwrite File(s)",
|
||||
text: "This copy operation will overwrite an existing file. Would you like to continue?",
|
||||
level: "warning",
|
||||
buttons: [
|
||||
{
|
||||
text: "Delete Then Copy",
|
||||
onClick: async () => {
|
||||
data.opts.overwrite = true;
|
||||
await handleDropCopy(data, isDir);
|
||||
},
|
||||
},
|
||||
{
|
||||
text: "Sync",
|
||||
onClick: async () => {
|
||||
data.opts.merge = true;
|
||||
await handleDropCopy(data, isDir);
|
||||
},
|
||||
},
|
||||
],
|
||||
};
|
||||
} else {
|
||||
errorMsg = {
|
||||
status: "Copy Failed",
|
||||
text: copyError,
|
||||
level: "error",
|
||||
};
|
||||
}
|
||||
setErrorMsg(errorMsg);
|
||||
}
|
||||
model.refreshCallback();
|
||||
},
|
||||
[setCopyStatus, model.refreshCallback]
|
||||
[model.refreshCallback]
|
||||
);
|
||||
|
||||
const [, drop] = useDrop(
|
||||
@ -955,12 +969,12 @@ function DirectoryPreview({ model }: DirectoryPreviewProps) {
|
||||
desturi,
|
||||
opts,
|
||||
};
|
||||
await handleDropCopy(data);
|
||||
await handleDropCopy(data, draggedFile.isDir);
|
||||
}
|
||||
},
|
||||
// TODO: mabe add a hover option?
|
||||
}),
|
||||
[dirPath, model.formatRemoteUri, model.refreshCallback, setCopyStatus]
|
||||
[dirPath, model.formatRemoteUri, model.refreshCallback]
|
||||
);
|
||||
|
||||
useEffect(() => {
|
||||
@ -1030,29 +1044,7 @@ function DirectoryPreview({ model }: DirectoryPreviewProps) {
|
||||
type: "separator",
|
||||
},
|
||||
];
|
||||
if (!conn) {
|
||||
// TODO: resolve correct host path if connection is WSL
|
||||
menu.push({
|
||||
label: makeNativeLabel(PLATFORM, true, true),
|
||||
click: () => {
|
||||
getApi().openNativePath(dirPath);
|
||||
},
|
||||
});
|
||||
}
|
||||
menu.push({
|
||||
label: "Open Terminal in New Block",
|
||||
click: async () => {
|
||||
const termBlockDef: BlockDef = {
|
||||
meta: {
|
||||
controller: "shell",
|
||||
view: "term",
|
||||
"cmd:cwd": dirPath,
|
||||
connection: conn,
|
||||
},
|
||||
};
|
||||
await createBlock(termBlockDef);
|
||||
},
|
||||
});
|
||||
addOpenMenuItems(menu, conn, finfo);
|
||||
|
||||
ContextMenuModel.showContextMenu(menu, e);
|
||||
},
|
||||
@ -1074,13 +1066,6 @@ function DirectoryPreview({ model }: DirectoryPreviewProps) {
|
||||
onContextMenu={(e) => handleFileContextMenu(e)}
|
||||
onClick={() => setEntryManagerProps(undefined)}
|
||||
>
|
||||
{copyStatus != null && (
|
||||
<CopyErrorOverlay
|
||||
copyStatus={copyStatus}
|
||||
setCopyStatus={setCopyStatus}
|
||||
handleDropCopy={handleDropCopy}
|
||||
/>
|
||||
)}
|
||||
<DirectoryTable
|
||||
model={model}
|
||||
data={filteredData}
|
||||
@ -1108,102 +1093,4 @@ function DirectoryPreview({ model }: DirectoryPreviewProps) {
|
||||
);
|
||||
}
|
||||
|
||||
const CopyErrorOverlay = React.memo(
|
||||
({
|
||||
copyStatus,
|
||||
setCopyStatus,
|
||||
handleDropCopy,
|
||||
}: {
|
||||
copyStatus: FileCopyStatus;
|
||||
setCopyStatus: (_: FileCopyStatus) => void;
|
||||
handleDropCopy: (data: CommandFileCopyData) => Promise<void>;
|
||||
}) => {
|
||||
const [overlayRefCallback, _, domRect] = useDimensionsWithCallbackRef(30);
|
||||
const width = domRect?.width;
|
||||
|
||||
const handleRetryCopy = React.useCallback(async () => {
|
||||
if (!copyStatus) {
|
||||
return;
|
||||
}
|
||||
const updatedData = {
|
||||
...copyStatus.copyData,
|
||||
opts: { ...copyStatus.copyData.opts, overwrite: true },
|
||||
};
|
||||
await handleDropCopy(updatedData);
|
||||
}, [copyStatus.copyData]);
|
||||
|
||||
let statusText = "Copy Error";
|
||||
let errorMsg = `error: ${copyStatus?.copyError}`;
|
||||
if (copyStatus?.allowRetry) {
|
||||
statusText = "Confirm Overwrite File(s)";
|
||||
errorMsg = "This copy operation will overwrite an existing file. Would you like to continue?";
|
||||
}
|
||||
|
||||
const buttonClassName = "outlined grey font-size-11 vertical-padding-3 horizontal-padding-7";
|
||||
|
||||
const handleRemoveCopyError = React.useCallback(async () => {
|
||||
setCopyStatus(null);
|
||||
}, [setCopyStatus]);
|
||||
|
||||
const handleCopyToClipboard = React.useCallback(async () => {
|
||||
await navigator.clipboard.writeText(errorMsg);
|
||||
}, [errorMsg]);
|
||||
|
||||
return (
|
||||
<div
|
||||
ref={overlayRefCallback}
|
||||
className="absolute top-[0] left-1.5 right-1.5 z-[var(--zindex-block-mask-inner)] overflow-hidden bg-[var(--conn-status-overlay-bg-color)] backdrop-blur-[50px] rounded-md shadow-lg"
|
||||
>
|
||||
<div className="flex flex-row justify-between p-2.5 pl-3 font-[var(--base-font)] text-[var(--secondary-text-color)]">
|
||||
<div
|
||||
className={clsx("flex flex-row items-center gap-3 grow min-w-0", {
|
||||
"items-start": true,
|
||||
})}
|
||||
>
|
||||
<i className="fa-solid fa-triangle-exclamation text-[#e6ba1e] text-base"></i>
|
||||
|
||||
<div className="flex flex-col items-start gap-1 grow w-full">
|
||||
<div className="max-w-full text-xs font-semibold leading-4 tracking-[0.11px] text-white">
|
||||
{statusText}
|
||||
</div>
|
||||
|
||||
<OverlayScrollbarsComponent
|
||||
className="group text-xs font-normal leading-[15px] tracking-[0.11px] text-wrap max-h-20 rounded-lg py-1.5 pl-0 relative w-full"
|
||||
options={{ scrollbars: { autoHide: "leave" } }}
|
||||
>
|
||||
<CopyButton
|
||||
className="invisible group-hover:visible flex absolute top-0 right-1 rounded backdrop-blur-lg p-1 items-center justify-end gap-1"
|
||||
onClick={handleCopyToClipboard}
|
||||
title="Copy"
|
||||
/>
|
||||
<div>{errorMsg}</div>
|
||||
</OverlayScrollbarsComponent>
|
||||
|
||||
{copyStatus?.allowRetry && (
|
||||
<div className="flex flex-row gap-1.5">
|
||||
<Button className={buttonClassName} onClick={handleRetryCopy}>
|
||||
Override
|
||||
</Button>
|
||||
<Button className={buttonClassName} onClick={handleRemoveCopyError}>
|
||||
Cancel
|
||||
</Button>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
|
||||
{!copyStatus?.allowRetry && (
|
||||
<div className="flex items-start">
|
||||
<Button
|
||||
className={clsx(buttonClassName, "fa-xmark fa-solid")}
|
||||
onClick={handleRemoveCopyError}
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
);
|
||||
|
||||
export { DirectoryPreview };
|
||||
|
@ -3,8 +3,8 @@
|
||||
|
||||
import { BlockNodeModel } from "@/app/block/blocktypes";
|
||||
import { Button } from "@/app/element/button";
|
||||
import { CopyButton } from "@/app/element/copybutton";
|
||||
import { CenteredDiv } from "@/app/element/quickelems";
|
||||
import { TypeAheadModal } from "@/app/modals/typeaheadmodal";
|
||||
import { ContextMenuModel } from "@/app/store/contextmenu";
|
||||
import { tryReinjectKey } from "@/app/store/keymodel";
|
||||
import { RpcApi } from "@/app/store/wshclientapi";
|
||||
@ -12,37 +12,22 @@ import { TabRpcClient } from "@/app/store/wshrpcutil";
|
||||
import { BlockHeaderSuggestionControl } from "@/app/suggestion/suggestion";
|
||||
import { CodeEditor } from "@/app/view/codeeditor/codeeditor";
|
||||
import { Markdown } from "@/element/markdown";
|
||||
import {
|
||||
atoms,
|
||||
createBlock,
|
||||
getApi,
|
||||
getConnStatusAtom,
|
||||
getOverrideConfigAtom,
|
||||
getSettingsKeyAtom,
|
||||
globalStore,
|
||||
PLATFORM,
|
||||
refocusNode,
|
||||
} from "@/store/global";
|
||||
import { getConnStatusAtom, getOverrideConfigAtom, getSettingsKeyAtom, globalStore, refocusNode } from "@/store/global";
|
||||
import * as services from "@/store/services";
|
||||
import * as WOS from "@/store/wos";
|
||||
import { getWebServerEndpoint } from "@/util/endpoints";
|
||||
import { goHistory, goHistoryBack, goHistoryForward } from "@/util/historyutil";
|
||||
import { adaptFromReactOrNativeKeyEvent, checkKeyPressed, keydownWrapper } from "@/util/keyutil";
|
||||
import {
|
||||
base64ToString,
|
||||
fireAndForget,
|
||||
isBlank,
|
||||
jotaiLoadableValue,
|
||||
makeConnRoute,
|
||||
makeNativeLabel,
|
||||
stringToBase64,
|
||||
} from "@/util/util";
|
||||
import { adaptFromReactOrNativeKeyEvent, checkKeyPressed } from "@/util/keyutil";
|
||||
import { addOpenMenuItems } from "@/util/previewutil";
|
||||
import { base64ToString, fireAndForget, isBlank, jotaiLoadableValue, makeConnRoute, stringToBase64 } from "@/util/util";
|
||||
import { formatRemoteUri } from "@/util/waveutil";
|
||||
import { Monaco } from "@monaco-editor/react";
|
||||
import clsx from "clsx";
|
||||
import { Atom, atom, Getter, PrimitiveAtom, useAtomValue, useSetAtom, WritableAtom } from "jotai";
|
||||
import { Atom, atom, Getter, PrimitiveAtom, useAtom, useAtomValue, useSetAtom, WritableAtom } from "jotai";
|
||||
import { loadable } from "jotai/utils";
|
||||
import type * as MonacoTypes from "monaco-editor/esm/vs/editor/editor.api";
|
||||
import { createRef, memo, useCallback, useEffect, useMemo, useState } from "react";
|
||||
import { OverlayScrollbarsComponent } from "overlayscrollbars-react";
|
||||
import { createRef, memo, useCallback, useEffect, useMemo } from "react";
|
||||
import { TransformComponent, TransformWrapper, useControls } from "react-zoom-pan-pinch";
|
||||
import { CSVView } from "./csvview";
|
||||
import { DirectoryPreview } from "./directorypreview";
|
||||
@ -148,8 +133,6 @@ export class PreviewModel implements ViewModel {
|
||||
|
||||
metaFilePath: Atom<string>;
|
||||
statFilePath: Atom<Promise<string>>;
|
||||
normFilePath: Atom<Promise<string>>;
|
||||
loadableStatFilePath: Atom<Loadable<string>>;
|
||||
loadableFileInfo: Atom<Loadable<FileInfo>>;
|
||||
connection: Atom<Promise<string>>;
|
||||
connectionImmediate: Atom<string>;
|
||||
@ -161,6 +144,7 @@ export class PreviewModel implements ViewModel {
|
||||
fileContent: WritableAtom<Promise<string>, [string], void>;
|
||||
newFileContent: PrimitiveAtom<string | null>;
|
||||
connectionError: PrimitiveAtom<string>;
|
||||
errorMsgAtom: PrimitiveAtom<ErrorMsg>;
|
||||
|
||||
openFileModal: PrimitiveAtom<boolean>;
|
||||
openFileModalDelay: PrimitiveAtom<boolean>;
|
||||
@ -177,6 +161,8 @@ export class PreviewModel implements ViewModel {
|
||||
directoryKeyDownHandler: (waveEvent: WaveKeyboardEvent) => boolean;
|
||||
codeEditKeyDownHandler: (waveEvent: WaveKeyboardEvent) => boolean;
|
||||
|
||||
showS3 = atom(true);
|
||||
|
||||
constructor(blockId: string, nodeModel: BlockNodeModel) {
|
||||
this.viewType = "preview";
|
||||
this.blockId = blockId;
|
||||
@ -195,6 +181,7 @@ export class PreviewModel implements ViewModel {
|
||||
this.filterOutNowsh = atom(true);
|
||||
this.monacoRef = createRef();
|
||||
this.connectionError = atom("");
|
||||
this.errorMsgAtom = atom(null) as PrimitiveAtom<ErrorMsg | null>;
|
||||
this.viewIcon = atom((get) => {
|
||||
const blockData = get(this.blockAtom);
|
||||
if (blockData?.meta?.icon) {
|
||||
@ -244,10 +231,12 @@ export class PreviewModel implements ViewModel {
|
||||
if (loadableFileInfo.state == "hasData") {
|
||||
headerPath = loadableFileInfo.data?.path;
|
||||
if (headerPath == "~") {
|
||||
headerPath = `~ (${loadableFileInfo.data?.dir})`;
|
||||
headerPath = `~ (${loadableFileInfo.data?.dir + "/" + loadableFileInfo.data?.name})`;
|
||||
}
|
||||
}
|
||||
|
||||
if (!isBlank(headerPath) && headerPath != "/" && headerPath.endsWith("/")) {
|
||||
headerPath = headerPath.slice(0, -1);
|
||||
}
|
||||
const viewTextChildren: HeaderElem[] = [
|
||||
{
|
||||
elemtype: "text",
|
||||
@ -342,7 +331,6 @@ export class PreviewModel implements ViewModel {
|
||||
const isCeView = loadableSV.state == "hasData" && loadableSV.data.specializedView == "codeedit";
|
||||
if (mimeType == "directory") {
|
||||
const showHiddenFiles = get(this.showHiddenFiles);
|
||||
const settings = get(atoms.settingsAtom);
|
||||
return [
|
||||
{
|
||||
elemtype: "iconbutton",
|
||||
@ -380,17 +368,6 @@ export class PreviewModel implements ViewModel {
|
||||
const fileInfo = await get(this.statFile);
|
||||
return fileInfo?.path;
|
||||
});
|
||||
this.normFilePath = atom<Promise<string>>(async (get) => {
|
||||
const fileInfo = await get(this.statFile);
|
||||
if (fileInfo == null) {
|
||||
return null;
|
||||
}
|
||||
if (fileInfo.isdir) {
|
||||
return fileInfo.dir + "/";
|
||||
}
|
||||
return fileInfo.dir + "/" + fileInfo.name;
|
||||
});
|
||||
this.loadableStatFilePath = loadable(this.statFilePath);
|
||||
this.connection = atom<Promise<string>>(async (get) => {
|
||||
const connName = get(this.blockAtom)?.meta?.connection;
|
||||
try {
|
||||
@ -406,16 +383,24 @@ export class PreviewModel implements ViewModel {
|
||||
});
|
||||
this.statFile = atom<Promise<FileInfo>>(async (get) => {
|
||||
const fileName = get(this.metaFilePath);
|
||||
const path = await this.formatRemoteUri(fileName, get);
|
||||
if (fileName == null) {
|
||||
return null;
|
||||
}
|
||||
const statFile = await RpcApi.FileInfoCommand(TabRpcClient, {
|
||||
info: {
|
||||
path: await this.formatRemoteUri(fileName, get),
|
||||
},
|
||||
});
|
||||
console.log("stat file", statFile);
|
||||
return statFile;
|
||||
try {
|
||||
const statFile = await RpcApi.FileInfoCommand(TabRpcClient, {
|
||||
info: {
|
||||
path,
|
||||
},
|
||||
});
|
||||
return statFile;
|
||||
} catch (e) {
|
||||
const errorStatus: ErrorMsg = {
|
||||
status: "File Read Failed",
|
||||
text: `${e}`,
|
||||
};
|
||||
globalStore.set(this.errorMsgAtom, errorStatus);
|
||||
}
|
||||
});
|
||||
this.fileMimeType = atom<Promise<string>>(async (get) => {
|
||||
const fileInfo = await get(this.statFile);
|
||||
@ -427,22 +412,29 @@ export class PreviewModel implements ViewModel {
|
||||
|
||||
const fullFileAtom = atom<Promise<FileData>>(async (get) => {
|
||||
const fileName = get(this.metaFilePath);
|
||||
const path = await this.formatRemoteUri(fileName, get);
|
||||
if (fileName == null) {
|
||||
return null;
|
||||
}
|
||||
const file = await RpcApi.FileReadCommand(TabRpcClient, {
|
||||
info: {
|
||||
path: await this.formatRemoteUri(fileName, get),
|
||||
},
|
||||
});
|
||||
console.log("full file", file);
|
||||
return file;
|
||||
try {
|
||||
const file = await RpcApi.FileReadCommand(TabRpcClient, {
|
||||
info: {
|
||||
path,
|
||||
},
|
||||
});
|
||||
return file;
|
||||
} catch (e) {
|
||||
const errorStatus: ErrorMsg = {
|
||||
status: "File Read Failed",
|
||||
text: `${e}`,
|
||||
};
|
||||
globalStore.set(this.errorMsgAtom, errorStatus);
|
||||
}
|
||||
});
|
||||
|
||||
this.fileContentSaved = atom(null) as PrimitiveAtom<string | null>;
|
||||
const fileContentAtom = atom(
|
||||
async (get) => {
|
||||
const _ = get(this.metaFilePath);
|
||||
const newContent = get(this.newFileContent);
|
||||
if (newContent != null) {
|
||||
return newContent;
|
||||
@ -489,17 +481,17 @@ export class PreviewModel implements ViewModel {
|
||||
async getSpecializedView(getFn: Getter): Promise<{ specializedView?: string; errorStr?: string }> {
|
||||
const mimeType = await getFn(this.fileMimeType);
|
||||
const fileInfo = await getFn(this.statFile);
|
||||
const fileName = await getFn(this.statFilePath);
|
||||
const fileName = fileInfo?.name;
|
||||
const connErr = getFn(this.connectionError);
|
||||
const editMode = getFn(this.editMode);
|
||||
const parentFileInfo = await this.getParentInfo(fileInfo);
|
||||
const genErr = getFn(this.errorMsgAtom);
|
||||
|
||||
if (!fileInfo) {
|
||||
return { errorStr: `Load Error: ${genErr?.text}` };
|
||||
}
|
||||
if (connErr != "") {
|
||||
return { errorStr: `Connection Error: ${connErr}` };
|
||||
}
|
||||
if (parentFileInfo?.notfound ?? false) {
|
||||
return { errorStr: `Parent Directory Not Found: ${fileInfo.path}` };
|
||||
}
|
||||
if (fileInfo?.notfound) {
|
||||
return { specializedView: "codeedit" };
|
||||
}
|
||||
@ -582,19 +574,6 @@ export class PreviewModel implements ViewModel {
|
||||
globalStore.set(this.newFileContent, null);
|
||||
}
|
||||
|
||||
async getParentInfo(fileInfo: FileInfo): Promise<FileInfo | undefined> {
|
||||
const conn = await globalStore.get(this.connection);
|
||||
try {
|
||||
const parentFileInfo = await RpcApi.RemoteFileJoinCommand(TabRpcClient, [fileInfo.path, ".."], {
|
||||
route: makeConnRoute(conn),
|
||||
});
|
||||
console.log("parent file info", parentFileInfo);
|
||||
return parentFileInfo;
|
||||
} catch {
|
||||
return undefined;
|
||||
}
|
||||
}
|
||||
|
||||
async goParentDirectory({ fileInfo = null }: { fileInfo?: FileInfo | null }) {
|
||||
// optional parameter needed for recursive case
|
||||
const defaultFileInfo = await globalStore.get(this.statFile);
|
||||
@ -605,22 +584,13 @@ export class PreviewModel implements ViewModel {
|
||||
this.updateOpenFileModalAndError(false);
|
||||
return true;
|
||||
}
|
||||
const conn = await globalStore.get(this.connection);
|
||||
try {
|
||||
const newFileInfo = await RpcApi.RemoteFileJoinCommand(TabRpcClient, [fileInfo.path, ".."], {
|
||||
route: makeConnRoute(conn),
|
||||
});
|
||||
if (newFileInfo.path != "" && newFileInfo.notfound) {
|
||||
console.log("does not exist, ", newFileInfo.path);
|
||||
this.goParentDirectory({ fileInfo: newFileInfo });
|
||||
return;
|
||||
}
|
||||
this.updateOpenFileModalAndError(false);
|
||||
await this.goHistory(newFileInfo.path);
|
||||
await this.goHistory(fileInfo.dir);
|
||||
refocusNode(this.blockId);
|
||||
} catch (e) {
|
||||
globalStore.set(this.openFileError, e.message);
|
||||
console.error("Error opening file", [fileInfo.dir, ".."], e);
|
||||
console.error("Error opening file", fileInfo.dir, e);
|
||||
}
|
||||
}
|
||||
|
||||
@ -674,8 +644,12 @@ export class PreviewModel implements ViewModel {
|
||||
globalStore.set(this.fileContent, newFileContent);
|
||||
globalStore.set(this.newFileContent, null);
|
||||
console.log("saved file", filePath);
|
||||
} catch (error) {
|
||||
console.error("Error saving file:", error);
|
||||
} catch (e) {
|
||||
const errorStatus: ErrorMsg = {
|
||||
status: "Save Failed",
|
||||
text: `${e}`,
|
||||
};
|
||||
globalStore.set(this.errorMsgAtom, errorStatus);
|
||||
}
|
||||
}
|
||||
|
||||
@ -686,22 +660,23 @@ export class PreviewModel implements ViewModel {
|
||||
}
|
||||
|
||||
async handleOpenFile(filePath: string) {
|
||||
const conn = globalStore.get(this.connectionImmediate);
|
||||
if (!isBlank(conn) && conn.startsWith("aws:")) {
|
||||
if (!isBlank(filePath) && filePath != "/" && filePath.startsWith("/")) {
|
||||
filePath = filePath.substring(1);
|
||||
}
|
||||
}
|
||||
const fileInfo = await globalStore.get(this.statFile);
|
||||
this.updateOpenFileModalAndError(false);
|
||||
if (fileInfo == null) {
|
||||
this.updateOpenFileModalAndError(false);
|
||||
return true;
|
||||
}
|
||||
const conn = await globalStore.get(this.connection);
|
||||
try {
|
||||
const newFileInfo = await RpcApi.RemoteFileJoinCommand(TabRpcClient, [fileInfo.dir, filePath], {
|
||||
route: makeConnRoute(conn),
|
||||
});
|
||||
this.updateOpenFileModalAndError(false);
|
||||
this.goHistory(newFileInfo.path);
|
||||
this.goHistory(filePath);
|
||||
refocusNode(this.blockId);
|
||||
} catch (e) {
|
||||
globalStore.set(this.openFileError, e.message);
|
||||
console.error("Error opening file", fileInfo.dir, filePath, e);
|
||||
console.error("Error opening file", filePath, e);
|
||||
}
|
||||
}
|
||||
|
||||
@ -716,11 +691,18 @@ export class PreviewModel implements ViewModel {
|
||||
label: "Copy Full Path",
|
||||
click: () =>
|
||||
fireAndForget(async () => {
|
||||
const filePath = await globalStore.get(this.normFilePath);
|
||||
const filePath = await globalStore.get(this.statFilePath);
|
||||
if (filePath == null) {
|
||||
return;
|
||||
}
|
||||
await navigator.clipboard.writeText(filePath);
|
||||
const conn = await globalStore.get(this.connection);
|
||||
if (conn) {
|
||||
// remote path
|
||||
await navigator.clipboard.writeText(formatRemoteUri(filePath, conn));
|
||||
} else {
|
||||
// local path
|
||||
await navigator.clipboard.writeText(filePath);
|
||||
}
|
||||
}),
|
||||
});
|
||||
menuItems.push({
|
||||
@ -734,47 +716,8 @@ export class PreviewModel implements ViewModel {
|
||||
await navigator.clipboard.writeText(fileInfo.name);
|
||||
}),
|
||||
});
|
||||
const mimeType = jotaiLoadableValue(globalStore.get(this.fileMimeTypeLoadable), "");
|
||||
if (mimeType == "directory") {
|
||||
menuItems.push({
|
||||
label: "Open Terminal in New Block",
|
||||
click: () =>
|
||||
fireAndForget(async () => {
|
||||
const conn = await globalStore.get(this.connection);
|
||||
const fileInfo = await globalStore.get(this.statFile);
|
||||
const termBlockDef: BlockDef = {
|
||||
meta: {
|
||||
view: "term",
|
||||
controller: "shell",
|
||||
"cmd:cwd": fileInfo.dir,
|
||||
connection: conn,
|
||||
},
|
||||
};
|
||||
await createBlock(termBlockDef);
|
||||
}),
|
||||
});
|
||||
const conn = globalStore.get(this.connectionImmediate);
|
||||
if (!conn) {
|
||||
menuItems.push({
|
||||
label: makeNativeLabel(PLATFORM, true, true),
|
||||
click: async () => {
|
||||
const fileInfo = await globalStore.get(this.statFile);
|
||||
getApi().openNativePath(fileInfo.dir);
|
||||
},
|
||||
});
|
||||
}
|
||||
} else {
|
||||
const conn = globalStore.get(this.connectionImmediate);
|
||||
if (!conn) {
|
||||
menuItems.push({
|
||||
label: makeNativeLabel(PLATFORM, false, false),
|
||||
click: async () => {
|
||||
const fileInfo = await globalStore.get(this.statFile);
|
||||
getApi().openNativePath(`${fileInfo.dir}/${fileInfo.name}`);
|
||||
},
|
||||
});
|
||||
}
|
||||
}
|
||||
const finfo = jotaiLoadableValue(globalStore.get(this.loadableFileInfo), null);
|
||||
addOpenMenuItems(menuItems, globalStore.get(this.connectionImmediate), finfo);
|
||||
const loadableSV = globalStore.get(this.loadableSpecializedView);
|
||||
const wordWrapAtom = getOverrideConfigAtom(this.blockId, "editor:wordwrap");
|
||||
const wordWrap = globalStore.get(wordWrapAtom) ?? false;
|
||||
@ -864,8 +807,7 @@ export class PreviewModel implements ViewModel {
|
||||
}
|
||||
|
||||
async formatRemoteUri(path: string, get: Getter): Promise<string> {
|
||||
const conn = (await get(this.connection)) ?? "local";
|
||||
return `wsh://${conn}/${path}`;
|
||||
return formatRemoteUri(path, await get(this.connection));
|
||||
}
|
||||
}
|
||||
|
||||
@ -932,13 +874,14 @@ function StreamingPreview({ model }: SpecializedViewProps) {
|
||||
const conn = useAtomValue(model.connection);
|
||||
const fileInfo = useAtomValue(model.statFile);
|
||||
const filePath = fileInfo.path;
|
||||
const remotePath = formatRemoteUri(filePath, conn);
|
||||
const usp = new URLSearchParams();
|
||||
usp.set("path", filePath);
|
||||
usp.set("path", remotePath);
|
||||
if (conn != null) {
|
||||
usp.set("connection", conn);
|
||||
}
|
||||
const streamingUrl = getWebServerEndpoint() + "/wave/stream-file?" + usp.toString();
|
||||
if (fileInfo.mimetype == "application/pdf") {
|
||||
const streamingUrl = `${getWebServerEndpoint()}/wave/stream-file?${usp.toString()}`;
|
||||
if (fileInfo.mimetype === "application/pdf") {
|
||||
return (
|
||||
<div className="view-preview view-preview-pdf">
|
||||
<iframe src={streamingUrl} width="100%" height="100%" name="pdfview" />
|
||||
@ -972,8 +915,8 @@ function StreamingPreview({ model }: SpecializedViewProps) {
|
||||
function CodeEditPreview({ model }: SpecializedViewProps) {
|
||||
const fileContent = useAtomValue(model.fileContent);
|
||||
const setNewFileContent = useSetAtom(model.newFileContent);
|
||||
const fileName = useAtomValue(model.statFilePath);
|
||||
const fileInfo = useAtomValue(model.statFile);
|
||||
const fileName = fileInfo?.name;
|
||||
const blockMeta = useAtomValue(model.blockAtom)?.meta;
|
||||
|
||||
function codeEditKeyDownHandler(e: WaveKeyboardEvent): boolean {
|
||||
@ -1071,6 +1014,7 @@ const SpecializedView = memo(({ parentRef, model }: SpecializedViewProps) => {
|
||||
const specializedView = useAtomValue(model.specializedView);
|
||||
const mimeType = useAtomValue(model.fileMimeType);
|
||||
const setCanPreview = useSetAtom(model.canPreview);
|
||||
const path = useAtomValue(model.statFilePath);
|
||||
|
||||
useEffect(() => {
|
||||
setCanPreview(canPreview(mimeType));
|
||||
@ -1083,7 +1027,7 @@ const SpecializedView = memo(({ parentRef, model }: SpecializedViewProps) => {
|
||||
if (!SpecializedViewComponent) {
|
||||
return <CenteredDiv>Invalid Specialzied View Component ({specializedView.specializedView})</CenteredDiv>;
|
||||
}
|
||||
return <SpecializedViewComponent model={model} parentRef={parentRef} />;
|
||||
return <SpecializedViewComponent key={path} model={model} parentRef={parentRef} />;
|
||||
});
|
||||
|
||||
const fetchSuggestions = async (
|
||||
@ -1091,28 +1035,33 @@ const fetchSuggestions = async (
|
||||
query: string,
|
||||
reqContext: SuggestionRequestContext
|
||||
): Promise<FetchSuggestionsResponse> => {
|
||||
const conn = await globalStore.get(model.connection);
|
||||
let route = makeConnRoute(conn);
|
||||
if (isBlank(conn) || conn.startsWith("aws:")) {
|
||||
route = null;
|
||||
}
|
||||
if (reqContext?.dispose) {
|
||||
RpcApi.DisposeSuggestionsCommand(TabRpcClient, reqContext.widgetid, { noresponse: true, route: route });
|
||||
return null;
|
||||
}
|
||||
const fileInfo = await globalStore.get(model.statFile);
|
||||
if (fileInfo == null) {
|
||||
return null;
|
||||
}
|
||||
const conn = await globalStore.get(model.connection);
|
||||
return await RpcApi.FetchSuggestionsCommand(
|
||||
TabRpcClient,
|
||||
{
|
||||
suggestiontype: "file",
|
||||
"file:cwd": fileInfo.dir,
|
||||
query: query,
|
||||
widgetid: reqContext.widgetid,
|
||||
reqnum: reqContext.reqnum,
|
||||
},
|
||||
{
|
||||
route: makeConnRoute(conn),
|
||||
}
|
||||
);
|
||||
const sdata = {
|
||||
suggestiontype: "file",
|
||||
"file:cwd": fileInfo.path,
|
||||
query: query,
|
||||
widgetid: reqContext.widgetid,
|
||||
reqnum: reqContext.reqnum,
|
||||
"file:connection": conn,
|
||||
};
|
||||
return await RpcApi.FetchSuggestionsCommand(TabRpcClient, sdata, {
|
||||
route: route,
|
||||
});
|
||||
};
|
||||
|
||||
function PreviewView({
|
||||
blockId,
|
||||
blockRef,
|
||||
contentRef,
|
||||
model,
|
||||
@ -1123,14 +1072,35 @@ function PreviewView({
|
||||
model: PreviewModel;
|
||||
}) {
|
||||
const connStatus = useAtomValue(model.connStatus);
|
||||
const [errorMsg, setErrorMsg] = useAtom(model.errorMsgAtom);
|
||||
const connection = useAtomValue(model.connectionImmediate);
|
||||
const fileInfo = useAtomValue(model.statFile);
|
||||
|
||||
useEffect(() => {
|
||||
console.log("fileInfo or connection changed", fileInfo, connection);
|
||||
if (!fileInfo) {
|
||||
return;
|
||||
}
|
||||
setErrorMsg(null);
|
||||
}, [connection, fileInfo]);
|
||||
|
||||
if (connStatus?.status != "connected") {
|
||||
return null;
|
||||
}
|
||||
const handleSelect = (s: SuggestionType) => {
|
||||
const handleSelect = (s: SuggestionType, queryStr: string): boolean => {
|
||||
if (s == null) {
|
||||
if (isBlank(queryStr)) {
|
||||
globalStore.set(model.openFileModal, false);
|
||||
return true;
|
||||
}
|
||||
model.handleOpenFile(queryStr);
|
||||
return true;
|
||||
}
|
||||
model.handleOpenFile(s["file:path"]);
|
||||
return true;
|
||||
};
|
||||
const handleTab = (s: SuggestionType, query: string): string => {
|
||||
if (s["mime:type"] == "directory") {
|
||||
if (s["file:mimetype"] == "directory") {
|
||||
return s["file:name"] + "/";
|
||||
} else {
|
||||
return s["file:name"];
|
||||
@ -1139,10 +1109,11 @@ function PreviewView({
|
||||
const fetchSuggestionsFn = async (query, ctx) => {
|
||||
return await fetchSuggestions(model, query, ctx);
|
||||
};
|
||||
|
||||
return (
|
||||
<>
|
||||
{/* <OpenFileModal blockId={blockId} model={model} blockRef={blockRef} /> */}
|
||||
<div key="fullpreview" className="full-preview scrollbar-hide-until-hover">
|
||||
{errorMsg && <ErrorOverlay errorMsg={errorMsg} resetOverlay={() => setErrorMsg(null)} />}
|
||||
<div ref={contentRef} className="full-preview-content">
|
||||
<SpecializedView parentRef={contentRef} model={model} />
|
||||
</div>
|
||||
@ -1160,70 +1131,80 @@ function PreviewView({
|
||||
);
|
||||
}
|
||||
|
||||
const OpenFileModal = memo(
|
||||
({
|
||||
model,
|
||||
blockRef,
|
||||
blockId,
|
||||
}: {
|
||||
model: PreviewModel;
|
||||
blockRef: React.RefObject<HTMLDivElement>;
|
||||
blockId: string;
|
||||
}) => {
|
||||
const openFileModal = useAtomValue(model.openFileModal);
|
||||
const curFileName = useAtomValue(model.metaFilePath);
|
||||
const [filePath, setFilePath] = useState("");
|
||||
const isNodeFocused = useAtomValue(model.nodeModel.isFocused);
|
||||
const handleKeyDown = useCallback(
|
||||
keydownWrapper((waveEvent: WaveKeyboardEvent): boolean => {
|
||||
if (checkKeyPressed(waveEvent, "Escape")) {
|
||||
model.updateOpenFileModalAndError(false);
|
||||
return true;
|
||||
}
|
||||
const ErrorOverlay = memo(({ errorMsg, resetOverlay }: { errorMsg: ErrorMsg; resetOverlay: () => void }) => {
|
||||
const showDismiss = errorMsg.showDismiss ?? true;
|
||||
const buttonClassName = "outlined grey font-size-11 vertical-padding-3 horizontal-padding-7";
|
||||
|
||||
const handleCommandOperations = async () => {
|
||||
if (checkKeyPressed(waveEvent, "Enter")) {
|
||||
await model.handleOpenFile(filePath);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
};
|
||||
|
||||
handleCommandOperations().catch((error) => {
|
||||
console.error("Error handling key down:", error);
|
||||
model.updateOpenFileModalAndError(true, "An error occurred during operation.");
|
||||
return false;
|
||||
});
|
||||
return false;
|
||||
}),
|
||||
[model, blockId, filePath, curFileName]
|
||||
);
|
||||
const handleFileSuggestionSelect = (value) => {
|
||||
globalStore.set(model.openFileModal, false);
|
||||
};
|
||||
const handleFileSuggestionChange = (value) => {
|
||||
setFilePath(value);
|
||||
};
|
||||
const handleBackDropClick = () => {
|
||||
globalStore.set(model.openFileModal, false);
|
||||
};
|
||||
if (!openFileModal) {
|
||||
return null;
|
||||
}
|
||||
return (
|
||||
<TypeAheadModal
|
||||
label="Open path"
|
||||
blockRef={blockRef}
|
||||
anchorRef={model.previewTextRef}
|
||||
onKeyDown={handleKeyDown}
|
||||
onSelect={handleFileSuggestionSelect}
|
||||
onChange={handleFileSuggestionChange}
|
||||
onClickBackdrop={handleBackDropClick}
|
||||
autoFocus={isNodeFocused}
|
||||
giveFocusRef={model.openFileModalGiveFocusRef}
|
||||
/>
|
||||
);
|
||||
let iconClass = "fa-solid fa-circle-exclamation text-[var(--error-color)] text-base";
|
||||
if (errorMsg.level == "warning") {
|
||||
iconClass = "fa-solid fa-triangle-exclamation text-[var(--warning-color)] text-base";
|
||||
}
|
||||
);
|
||||
|
||||
const handleCopyToClipboard = useCallback(async () => {
|
||||
await navigator.clipboard.writeText(errorMsg.text);
|
||||
}, [errorMsg.text]);
|
||||
|
||||
return (
|
||||
<div className="absolute top-[0] left-1.5 right-1.5 z-[var(--zindex-block-mask-inner)] overflow-hidden bg-[var(--conn-status-overlay-bg-color)] backdrop-blur-[50px] rounded-md shadow-lg">
|
||||
<div className="flex flex-row justify-between p-2.5 pl-3 font-[var(--base-font)] text-[var(--secondary-text-color)]">
|
||||
<div
|
||||
className={clsx("flex flex-row items-center gap-3 grow min-w-0 shrink", {
|
||||
"items-start": true,
|
||||
})}
|
||||
>
|
||||
<i className={iconClass}></i>
|
||||
|
||||
<div className="flex flex-col items-start gap-1 grow w-full shrink min-w-0">
|
||||
<div className="max-w-full text-xs font-semibold leading-4 tracking-[0.11px] text-white overflow-hidden">
|
||||
{errorMsg.status}
|
||||
</div>
|
||||
|
||||
<OverlayScrollbarsComponent
|
||||
className="group text-xs font-normal leading-[15px] tracking-[0.11px] text-wrap max-h-20 rounded-lg py-1.5 pl-0 relative w-full"
|
||||
options={{ scrollbars: { autoHide: "leave" } }}
|
||||
>
|
||||
<CopyButton
|
||||
className="invisible group-hover:visible flex absolute top-0 right-1 rounded backdrop-blur-lg p-1 items-center justify-end gap-1"
|
||||
onClick={handleCopyToClipboard}
|
||||
title="Copy"
|
||||
/>
|
||||
<div>{errorMsg.text}</div>
|
||||
</OverlayScrollbarsComponent>
|
||||
{!!errorMsg.buttons && (
|
||||
<div className="flex flex-row gap-2">
|
||||
{errorMsg.buttons?.map((buttonDef) => (
|
||||
<Button
|
||||
className={buttonClassName}
|
||||
onClick={() => {
|
||||
buttonDef.onClick();
|
||||
resetOverlay();
|
||||
}}
|
||||
key={crypto.randomUUID()}
|
||||
>
|
||||
{buttonDef.text}
|
||||
</Button>
|
||||
))}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
|
||||
{showDismiss && (
|
||||
<div className="flex items-start">
|
||||
<Button
|
||||
className={clsx(buttonClassName, "fa-xmark fa-solid")}
|
||||
onClick={() => {
|
||||
if (errorMsg.closeAction) {
|
||||
errorMsg.closeAction();
|
||||
}
|
||||
resetOverlay();
|
||||
}}
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
});
|
||||
|
||||
export { PreviewView };
|
||||
|
@ -377,7 +377,7 @@ function SysinfoView({ model, blockId }: SysinfoViewProps) {
|
||||
return () => {
|
||||
unsubFn();
|
||||
};
|
||||
}, [connName]);
|
||||
}, [connName, addContinuousData]);
|
||||
if (connStatus?.status != "connected") {
|
||||
return null;
|
||||
}
|
||||
|
@ -27,6 +27,7 @@ import {
|
||||
import * as services from "@/store/services";
|
||||
import * as keyutil from "@/util/keyutil";
|
||||
import { boundNumber, fireAndForget, stringToBase64, useAtomValueSafe } from "@/util/util";
|
||||
import { computeBgStyleFromMeta } from "@/util/waveutil";
|
||||
import { ISearchOptions } from "@xterm/addon-search";
|
||||
import clsx from "clsx";
|
||||
import debug from "debug";
|
||||
@ -1070,8 +1071,11 @@ const TerminalView = ({ blockId, model }: ViewComponentProps<TermViewModel>) =>
|
||||
blockId: blockId,
|
||||
};
|
||||
|
||||
const termBg = computeBgStyleFromMeta(blockData?.meta);
|
||||
|
||||
return (
|
||||
<div className={clsx("view-term", "term-mode-" + termMode)} ref={viewRef}>
|
||||
{termBg && <div className="absolute inset-0 z-0 pointer-events-none" style={termBg} />}
|
||||
<TermResyncHandler blockId={blockId} model={model} />
|
||||
<TermThemeUpdater blockId={blockId} model={model} termRef={model.termRef} />
|
||||
<TermStickers config={stickerConfig} />
|
||||
|
@ -119,7 +119,8 @@ function TermSticker({ sticker, config }: { sticker: StickerType; config: Sticke
|
||||
if (sticker.imgsrc == null) {
|
||||
return null;
|
||||
}
|
||||
const streamingUrl = getWebServerEndpoint() + "/wave/stream-file?path=" + encodeURIComponent(sticker.imgsrc);
|
||||
const streamingUrl =
|
||||
getWebServerEndpoint() + "/wave/stream-local-file?path=" + encodeURIComponent(sticker.imgsrc);
|
||||
return (
|
||||
<div className="term-sticker term-sticker-image" style={style} onClick={clickHandler}>
|
||||
<img src={streamingUrl} />
|
||||
|
@ -5,8 +5,9 @@ import { getFileSubject } from "@/app/store/wps";
|
||||
import { sendWSCommand } from "@/app/store/ws";
|
||||
import { RpcApi } from "@/app/store/wshclientapi";
|
||||
import { TabRpcClient } from "@/app/store/wshrpcutil";
|
||||
import { PLATFORM, WOS, atoms, fetchWaveFile, getSettingsKeyAtom, globalStore, openLink } from "@/store/global";
|
||||
import { WOS, atoms, fetchWaveFile, getSettingsKeyAtom, globalStore, openLink } from "@/store/global";
|
||||
import * as services from "@/store/services";
|
||||
import { PLATFORM, PlatformMacOS } from "@/util/platformutil";
|
||||
import { base64ToArray, fireAndForget } from "@/util/util";
|
||||
import { SearchAddon } from "@xterm/addon-search";
|
||||
import { SerializeAddon } from "@xterm/addon-serialize";
|
||||
@ -44,6 +45,95 @@ type TermWrapOptions = {
|
||||
sendDataHandler?: (data: string) => void;
|
||||
};
|
||||
|
||||
function handleOscWaveCommand(data: string, blockId: string, loaded: boolean): boolean {
|
||||
if (!loaded) {
|
||||
return false;
|
||||
}
|
||||
if (!data || data.length === 0) {
|
||||
console.log("Invalid Wave OSC command received (empty)");
|
||||
return false;
|
||||
}
|
||||
|
||||
// Expected formats:
|
||||
// "setmeta;{JSONDATA}"
|
||||
// "setmeta;[wave-id];{JSONDATA}"
|
||||
const parts = data.split(";");
|
||||
if (parts[0] !== "setmeta") {
|
||||
console.log("Invalid Wave OSC command received (bad command)", data);
|
||||
return false;
|
||||
}
|
||||
let jsonPayload: string;
|
||||
let waveId: string | undefined;
|
||||
if (parts.length === 2) {
|
||||
jsonPayload = parts[1];
|
||||
} else if (parts.length >= 3) {
|
||||
waveId = parts[1];
|
||||
jsonPayload = parts.slice(2).join(";");
|
||||
} else {
|
||||
console.log("Invalid Wave OSC command received (1 part)", data);
|
||||
return false;
|
||||
}
|
||||
|
||||
let meta: any;
|
||||
try {
|
||||
meta = JSON.parse(jsonPayload);
|
||||
} catch (e) {
|
||||
console.error("Invalid JSON in Wave OSC command:", e);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (waveId) {
|
||||
// Resolve the wave id to an ORef using our ResolveIdsCommand.
|
||||
fireAndForget(() => {
|
||||
return RpcApi.ResolveIdsCommand(TabRpcClient, { blockid: blockId, ids: [waveId] })
|
||||
.then((response: { resolvedids: { [key: string]: any } }) => {
|
||||
const oref = response.resolvedids[waveId];
|
||||
if (!oref) {
|
||||
console.error("Failed to resolve wave id:", waveId);
|
||||
return;
|
||||
}
|
||||
services.ObjectService.UpdateObjectMeta(oref, meta);
|
||||
})
|
||||
.catch((err: any) => {
|
||||
console.error("Error resolving wave id", waveId, err);
|
||||
});
|
||||
});
|
||||
} else {
|
||||
// No wave id provided; update using the current block id.
|
||||
fireAndForget(() => {
|
||||
return services.ObjectService.UpdateObjectMeta(WOS.makeORef("block", blockId), meta);
|
||||
});
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
function handleOsc7Command(data: string, blockId: string, loaded: boolean): boolean {
|
||||
if (!loaded) {
|
||||
return false;
|
||||
}
|
||||
if (data == null || data.length == 0) {
|
||||
console.log("Invalid OSC 7 command received (empty)");
|
||||
return false;
|
||||
}
|
||||
if (data.startsWith("file://")) {
|
||||
data = data.substring(7);
|
||||
const nextSlashIdx = data.indexOf("/");
|
||||
if (nextSlashIdx == -1) {
|
||||
console.log("Invalid OSC 7 command received (bad path)", data);
|
||||
return false;
|
||||
}
|
||||
data = data.substring(nextSlashIdx);
|
||||
}
|
||||
setTimeout(() => {
|
||||
fireAndForget(() =>
|
||||
services.ObjectService.UpdateObjectMeta(WOS.makeORef("block", blockId), {
|
||||
"cmd:cwd": data,
|
||||
})
|
||||
);
|
||||
}, 0);
|
||||
return true;
|
||||
}
|
||||
|
||||
export class TermWrap {
|
||||
blockId: string;
|
||||
ptyOffset: number;
|
||||
@ -62,6 +152,7 @@ export class TermWrap {
|
||||
sendDataHandler: (data: string) => void;
|
||||
onSearchResultsDidChange?: (result: { resultIndex: number; resultCount: number }) => void;
|
||||
private toDispose: TermTypes.IDisposable[] = [];
|
||||
pasteActive: boolean = false;
|
||||
|
||||
constructor(
|
||||
blockId: string,
|
||||
@ -77,7 +168,7 @@ export class TermWrap {
|
||||
this.hasResized = false;
|
||||
this.terminal = new Terminal(options);
|
||||
this.fitAddon = new FitAddon();
|
||||
this.fitAddon.noScrollbar = PLATFORM == "darwin";
|
||||
this.fitAddon.noScrollbar = PLATFORM === PlatformMacOS;
|
||||
this.serializeAddon = new SerializeAddon();
|
||||
this.searchAddon = new SearchAddon();
|
||||
this.terminal.loadAddon(this.searchAddon);
|
||||
@ -87,7 +178,7 @@ export class TermWrap {
|
||||
new WebLinksAddon((e, uri) => {
|
||||
e.preventDefault();
|
||||
switch (PLATFORM) {
|
||||
case "darwin":
|
||||
case PlatformMacOS:
|
||||
if (e.metaKey) {
|
||||
fireAndForget(() => openLink(uri));
|
||||
}
|
||||
@ -113,29 +204,12 @@ export class TermWrap {
|
||||
loggedWebGL = true;
|
||||
}
|
||||
}
|
||||
// Register OSC 9283 handler
|
||||
this.terminal.parser.registerOscHandler(9283, (data: string) => {
|
||||
return handleOscWaveCommand(data, this.blockId, this.loaded);
|
||||
});
|
||||
this.terminal.parser.registerOscHandler(7, (data: string) => {
|
||||
if (!this.loaded) {
|
||||
return false;
|
||||
}
|
||||
if (data == null || data.length == 0) {
|
||||
return false;
|
||||
}
|
||||
if (data.startsWith("file://")) {
|
||||
data = data.substring(7);
|
||||
const nextSlashIdx = data.indexOf("/");
|
||||
if (nextSlashIdx == -1) {
|
||||
return false;
|
||||
}
|
||||
data = data.substring(nextSlashIdx);
|
||||
}
|
||||
setTimeout(() => {
|
||||
fireAndForget(() =>
|
||||
services.ObjectService.UpdateObjectMeta(WOS.makeORef("block", this.blockId), {
|
||||
"cmd:cwd": data,
|
||||
})
|
||||
);
|
||||
}, 0);
|
||||
return true;
|
||||
return handleOsc7Command(data, this.blockId, this.loaded);
|
||||
});
|
||||
this.terminal.attachCustomKeyEventHandler(waveOptions.keydownHandler);
|
||||
this.connectElem = connectElem;
|
||||
@ -144,6 +218,19 @@ export class TermWrap {
|
||||
this.handleResize_debounced = debounce(50, this.handleResize.bind(this));
|
||||
this.terminal.open(this.connectElem);
|
||||
this.handleResize();
|
||||
let pasteEventHandler = () => {
|
||||
this.pasteActive = true;
|
||||
setTimeout(() => {
|
||||
this.pasteActive = false;
|
||||
}, 30);
|
||||
};
|
||||
pasteEventHandler = pasteEventHandler.bind(this);
|
||||
this.connectElem.addEventListener("paste", pasteEventHandler, true);
|
||||
this.toDispose.push({
|
||||
dispose: () => {
|
||||
this.connectElem.removeEventListener("paste", pasteEventHandler, true);
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
async initTerminal() {
|
||||
@ -190,6 +277,12 @@ export class TermWrap {
|
||||
if (!this.loaded) {
|
||||
return;
|
||||
}
|
||||
if (this.pasteActive) {
|
||||
this.pasteActive = false;
|
||||
if (this.multiInputCallback) {
|
||||
this.multiInputCallback(data);
|
||||
}
|
||||
}
|
||||
this.sendDataHandler?.(data);
|
||||
}
|
||||
|
||||
|
@ -2,7 +2,7 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
import { BlockNodeModel } from "@/app/block/blocktypes";
|
||||
import { getBlockMetaKeyAtom, globalStore, PLATFORM, WOS } from "@/app/store/global";
|
||||
import { getBlockMetaKeyAtom, globalStore, WOS } from "@/app/store/global";
|
||||
import { makeORef } from "@/app/store/wos";
|
||||
import { waveEventSubscribe } from "@/app/store/wps";
|
||||
import { RpcResponseHelper, WshClient } from "@/app/store/wshclient";
|
||||
@ -13,6 +13,7 @@ import { VDomView } from "@/app/view/vdom/vdom";
|
||||
import { applyCanvasOp, mergeBackendUpdates, restoreVDomElems } from "@/app/view/vdom/vdom-utils";
|
||||
import { getWebServerEndpoint } from "@/util/endpoints";
|
||||
import { adaptFromReactOrNativeKeyEvent, checkKeyPressed } from "@/util/keyutil";
|
||||
import { PLATFORM, PlatformMacOS } from "@/util/platformutil";
|
||||
import debug from "debug";
|
||||
import * as jotai from "jotai";
|
||||
|
||||
@ -73,7 +74,7 @@ function annotateEvent(event: VDomEvent, propName: string, reactEvent: React.Syn
|
||||
movementx: mouseEvent.movementX,
|
||||
movementy: mouseEvent.movementY,
|
||||
};
|
||||
if (PLATFORM == "darwin") {
|
||||
if (PLATFORM == PlatformMacOS) {
|
||||
event.mousedata.cmd = event.mousedata.meta;
|
||||
event.mousedata.option = event.mousedata.alt;
|
||||
} else {
|
||||
|
@ -616,9 +616,10 @@ const BookmarkTypeahead = memo(
|
||||
onClose={() => model.setTypeaheadOpen(false)}
|
||||
onSelect={(suggestion) => {
|
||||
if (suggestion == null || suggestion.type != "url") {
|
||||
return;
|
||||
return true;
|
||||
}
|
||||
model.loadUrl(suggestion["url:url"], "bookmark-typeahead");
|
||||
return true;
|
||||
}}
|
||||
fetchSuggestions={model.fetchBookmarkSuggestions}
|
||||
placeholderText="Open Bookmark..."
|
||||
|
@ -41,4 +41,22 @@
|
||||
--text-default: 14px;
|
||||
|
||||
--radius: 8px;
|
||||
|
||||
/* ANSI Colors (Default Dark Palette) */
|
||||
--ansi-black: #757575;
|
||||
--ansi-red: #cc685c;
|
||||
--ansi-green: #76c266;
|
||||
--ansi-yellow: #cbca9b;
|
||||
--ansi-blue: #85aacb;
|
||||
--ansi-magenta: #cc72ca;
|
||||
--ansi-cyan: #74a7cb;
|
||||
--ansi-white: #c1c1c1;
|
||||
--ansi-brightblack: #727272;
|
||||
--ansi-brightred: #cc9d97;
|
||||
--ansi-brightgreen: #a3dd97;
|
||||
--ansi-brightyellow: #cbcaaa;
|
||||
--ansi-brightblue: #9ab6cb;
|
||||
--ansi-brightmagenta: #cc8ecb;
|
||||
--ansi-brightcyan: #b7b8cb;
|
||||
--ansi-brightwhite: #f0f0f0;
|
||||
}
|
||||
|
20
frontend/types/custom.d.ts
vendored
20
frontend/types/custom.d.ts
vendored
@ -101,6 +101,8 @@ declare global {
|
||||
sendLog: (log: string) => void;
|
||||
onQuicklook: (filePath: string) => void;
|
||||
openNativePath(filePath: string): void;
|
||||
captureScreenshot(rect: Electron.Rectangle): Promise<string>;
|
||||
setKeyboardChordMode: () => void;
|
||||
};
|
||||
|
||||
type ElectronContextMenuItem = {
|
||||
@ -290,6 +292,9 @@ declare global {
|
||||
// If true, filters out 'nowsh' connections (when managing connections)
|
||||
filterOutNowsh?: jotai.Atom<boolean>;
|
||||
|
||||
// if true, show s3 connections in picker
|
||||
showS3?: jotai.Atom<boolean>;
|
||||
|
||||
// If true, removes padding inside the block content area.
|
||||
noPadding?: jotai.Atom<boolean>;
|
||||
|
||||
@ -427,6 +432,21 @@ declare global {
|
||||
uri: string;
|
||||
absParent: string;
|
||||
relName: string;
|
||||
isDir: boolean;
|
||||
};
|
||||
|
||||
type ErrorButtonDef = {
|
||||
text: string;
|
||||
onClick: () => void;
|
||||
};
|
||||
|
||||
type ErrorMsg = {
|
||||
status: string;
|
||||
text: string;
|
||||
level?: "error" | "warning";
|
||||
buttons?: Array<ErrorButtonDef>;
|
||||
closeAction?: () => void;
|
||||
showDismiss?: boolean;
|
||||
};
|
||||
}
|
||||
|
||||
|
13
frontend/types/gotypes.d.ts
vendored
13
frontend/types/gotypes.d.ts
vendored
@ -204,13 +204,6 @@ declare global {
|
||||
message: string;
|
||||
};
|
||||
|
||||
// wshrpc.CommandRemoteFileCopyData
|
||||
type CommandRemoteFileCopyData = {
|
||||
srcuri: string;
|
||||
desturi: string;
|
||||
opts?: FileCopyOpts;
|
||||
};
|
||||
|
||||
// wshrpc.CommandRemoteListEntriesData
|
||||
type CommandRemoteListEntriesData = {
|
||||
path: string;
|
||||
@ -460,6 +453,12 @@ declare global {
|
||||
append?: boolean;
|
||||
};
|
||||
|
||||
// wshrpc.FileShareCapability
|
||||
type FileShareCapability = {
|
||||
canappend: boolean;
|
||||
canmkdir: boolean;
|
||||
};
|
||||
|
||||
// wconfig.FullConfigType
|
||||
type FullConfigType = {
|
||||
settings: SettingsType;
|
||||
|
@ -31,6 +31,35 @@ function keydownWrapper(
|
||||
};
|
||||
}
|
||||
|
||||
function waveEventToKeyDesc(waveEvent: WaveKeyboardEvent): string {
|
||||
let keyDesc: string[] = [];
|
||||
if (waveEvent.cmd) {
|
||||
keyDesc.push("Cmd");
|
||||
}
|
||||
if (waveEvent.option) {
|
||||
keyDesc.push("Option");
|
||||
}
|
||||
if (waveEvent.meta) {
|
||||
keyDesc.push("Meta");
|
||||
}
|
||||
if (waveEvent.control) {
|
||||
keyDesc.push("Ctrl");
|
||||
}
|
||||
if (waveEvent.shift) {
|
||||
keyDesc.push("Shift");
|
||||
}
|
||||
if (waveEvent.key != null && waveEvent.key != "") {
|
||||
if (waveEvent.key == " ") {
|
||||
keyDesc.push("Space");
|
||||
} else {
|
||||
keyDesc.push(waveEvent.key);
|
||||
}
|
||||
} else {
|
||||
keyDesc.push("c{" + waveEvent.code + "}");
|
||||
}
|
||||
return keyDesc.join(":");
|
||||
}
|
||||
|
||||
function parseKey(key: string): { key: string; type: string } {
|
||||
let regexMatch = key.match(KeyTypeCodeRegex);
|
||||
if (regexMatch != null && regexMatch.length > 1) {
|
||||
@ -183,7 +212,7 @@ function checkKeyPressed(event: WaveKeyboardEvent, keyDescription: string): bool
|
||||
}
|
||||
if (keyPress.keyType == KeyTypeKey) {
|
||||
eventKey = event.key;
|
||||
if (eventKey.length == 1 && /[A-Z]/.test(eventKey.charAt(0))) {
|
||||
if (eventKey != null && eventKey.length == 1 && /[A-Z]/.test(eventKey.charAt(0))) {
|
||||
// key is upper case A-Z, this means shift is applied, we want to allow
|
||||
// "Shift:e" as well as "Shift:E" or "E"
|
||||
eventKey = eventKey.toLocaleLowerCase();
|
||||
@ -303,4 +332,5 @@ export {
|
||||
keydownWrapper,
|
||||
parseKeyDescription,
|
||||
setKeyUtilPlatform,
|
||||
waveEventToKeyDesc,
|
||||
};
|
||||
|
27
frontend/util/platformutil.ts
Normal file
27
frontend/util/platformutil.ts
Normal file
@ -0,0 +1,27 @@
|
||||
export const PlatformMacOS = "darwin";
|
||||
export let PLATFORM: NodeJS.Platform = PlatformMacOS;
|
||||
|
||||
export function setPlatform(platform: NodeJS.Platform) {
|
||||
PLATFORM = platform;
|
||||
}
|
||||
|
||||
export function makeNativeLabel(isDirectory: boolean) {
|
||||
let managerName: string;
|
||||
if (!isDirectory) {
|
||||
managerName = "Default Application";
|
||||
} else if (PLATFORM === PlatformMacOS) {
|
||||
managerName = "Finder";
|
||||
} else if (PLATFORM == "win32") {
|
||||
managerName = "Explorer";
|
||||
} else {
|
||||
managerName = "File Manager";
|
||||
}
|
||||
|
||||
let fileAction: string;
|
||||
if (isDirectory) {
|
||||
fileAction = "Reveal";
|
||||
} else {
|
||||
fileAction = "Open File";
|
||||
}
|
||||
return `${fileAction} in ${managerName}`;
|
||||
}
|
77
frontend/util/previewutil.ts
Normal file
77
frontend/util/previewutil.ts
Normal file
@ -0,0 +1,77 @@
|
||||
import { createBlock, getApi } from "@/app/store/global";
|
||||
import { makeNativeLabel } from "./platformutil";
|
||||
import { fireAndForget } from "./util";
|
||||
import { formatRemoteUri } from "./waveutil";
|
||||
|
||||
export function addOpenMenuItems(menu: ContextMenuItem[], conn: string, finfo: FileInfo): ContextMenuItem[] {
|
||||
if (!finfo) {
|
||||
return menu;
|
||||
}
|
||||
menu.push({
|
||||
type: "separator",
|
||||
});
|
||||
if (!conn) {
|
||||
// TODO: resolve correct host path if connection is WSL
|
||||
// if the entry is a directory, reveal it in the file manager, if the entry is a file, reveal its parent directory
|
||||
menu.push({
|
||||
label: makeNativeLabel(true),
|
||||
click: () => {
|
||||
getApi().openNativePath(finfo.isdir ? finfo.path : finfo.dir);
|
||||
},
|
||||
});
|
||||
// if the entry is a file, open it in the default application
|
||||
if (!finfo.isdir) {
|
||||
menu.push({
|
||||
label: makeNativeLabel(false),
|
||||
click: () => {
|
||||
getApi().openNativePath(finfo.path);
|
||||
},
|
||||
});
|
||||
}
|
||||
} else {
|
||||
menu.push({
|
||||
label: "Download File",
|
||||
click: () => {
|
||||
const remoteUri = formatRemoteUri(finfo.path, conn);
|
||||
getApi().downloadFile(remoteUri);
|
||||
},
|
||||
});
|
||||
}
|
||||
menu.push({
|
||||
type: "separator",
|
||||
});
|
||||
if (!finfo.isdir) {
|
||||
menu.push({
|
||||
label: "Open Preview in New Block",
|
||||
click: () =>
|
||||
fireAndForget(async () => {
|
||||
const blockDef: BlockDef = {
|
||||
meta: {
|
||||
view: "preview",
|
||||
file: finfo.path,
|
||||
connection: conn,
|
||||
},
|
||||
};
|
||||
await createBlock(blockDef);
|
||||
}),
|
||||
});
|
||||
}
|
||||
// TODO: improve behavior as we add more connection types
|
||||
if (!conn?.startsWith("aws:")) {
|
||||
menu.push({
|
||||
label: "Open Terminal in New Block",
|
||||
click: () => {
|
||||
const termBlockDef: BlockDef = {
|
||||
meta: {
|
||||
controller: "shell",
|
||||
view: "term",
|
||||
"cmd:cwd": finfo.isdir ? finfo.path : finfo.dir,
|
||||
connection: conn,
|
||||
},
|
||||
};
|
||||
fireAndForget(() => createBlock(termBlockDef));
|
||||
},
|
||||
});
|
||||
}
|
||||
return menu;
|
||||
}
|
4
frontend/util/sharedconst.ts
Normal file
4
frontend/util/sharedconst.ts
Normal file
@ -0,0 +1,4 @@
|
||||
// Copyright 2025, Command Line Inc.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
export const CHORD_TIMEOUT = 2000;
|
@ -306,29 +306,6 @@ function sleep(ms: number): Promise<void> {
|
||||
return new Promise((resolve) => setTimeout(resolve, ms));
|
||||
}
|
||||
|
||||
function makeNativeLabel(platform: string, isDirectory: boolean, isParent: boolean) {
|
||||
let managerName: string;
|
||||
if (!isDirectory && !isParent) {
|
||||
managerName = "Default Application";
|
||||
} else if (platform == "darwin") {
|
||||
managerName = "Finder";
|
||||
} else if (platform == "win32") {
|
||||
managerName = "Explorer";
|
||||
} else {
|
||||
managerName = "File Manager";
|
||||
}
|
||||
|
||||
let fileAction: string;
|
||||
if (isParent) {
|
||||
fileAction = "Reveal";
|
||||
} else if (isDirectory) {
|
||||
fileAction = "Open Directory";
|
||||
} else {
|
||||
fileAction = "Open File";
|
||||
}
|
||||
return `${fileAction} in ${managerName}`;
|
||||
}
|
||||
|
||||
function mergeMeta(meta: MetaType, metaUpdate: MetaType, prefix?: string): MetaType {
|
||||
const rtn: MetaType = {};
|
||||
|
||||
@ -419,7 +396,6 @@ export {
|
||||
makeConnRoute,
|
||||
makeExternLink,
|
||||
makeIconClass,
|
||||
makeNativeLabel,
|
||||
mergeMeta,
|
||||
sleep,
|
||||
stringToBase64,
|
||||
|
102
frontend/util/waveutil.ts
Normal file
102
frontend/util/waveutil.ts
Normal file
@ -0,0 +1,102 @@
|
||||
// Copyright 2025, Command Line Inc.
|
||||
// SPDX-License-Identifier: Apache-2.0s
|
||||
|
||||
import { getWebServerEndpoint } from "@/util/endpoints";
|
||||
import { boundNumber, isBlank } from "@/util/util";
|
||||
import { generate as generateCSS, parse as parseCSS, walk as walkCSS } from "css-tree";
|
||||
|
||||
function encodeFileURL(file: string) {
|
||||
const webEndpoint = getWebServerEndpoint();
|
||||
const fileUri = formatRemoteUri(file, "local");
|
||||
const rtn = webEndpoint + `/wave/stream-file?path=${encodeURIComponent(fileUri)}&no404=1`;
|
||||
return rtn;
|
||||
}
|
||||
|
||||
export function processBackgroundUrls(cssText: string): string {
|
||||
if (isBlank(cssText)) {
|
||||
return null;
|
||||
}
|
||||
cssText = cssText.trim();
|
||||
if (cssText.endsWith(";")) {
|
||||
cssText = cssText.slice(0, -1);
|
||||
}
|
||||
const attrRe = /^background(-image)?\s*:\s*/i;
|
||||
cssText = cssText.replace(attrRe, "");
|
||||
const ast = parseCSS("background: " + cssText, {
|
||||
context: "declaration",
|
||||
});
|
||||
let hasUnsafeUrl = false;
|
||||
walkCSS(ast, {
|
||||
visit: "Url",
|
||||
enter(node) {
|
||||
const originalUrl = node.value.trim();
|
||||
if (
|
||||
originalUrl.startsWith("http:") ||
|
||||
originalUrl.startsWith("https:") ||
|
||||
originalUrl.startsWith("data:")
|
||||
) {
|
||||
return;
|
||||
}
|
||||
// allow file:/// urls (if they are absolute)
|
||||
if (originalUrl.startsWith("file://")) {
|
||||
const path = originalUrl.slice(7);
|
||||
if (!path.startsWith("/")) {
|
||||
console.log(`Invalid background, contains a non-absolute file URL: ${originalUrl}`);
|
||||
hasUnsafeUrl = true;
|
||||
return;
|
||||
}
|
||||
const newUrl = encodeFileURL(path);
|
||||
node.value = newUrl;
|
||||
return;
|
||||
}
|
||||
// allow absolute paths
|
||||
if (originalUrl.startsWith("/") || originalUrl.startsWith("~/") || /^[a-zA-Z]:(\/|\\)/.test(originalUrl)) {
|
||||
const newUrl = encodeFileURL(originalUrl);
|
||||
node.value = newUrl;
|
||||
return;
|
||||
}
|
||||
hasUnsafeUrl = true;
|
||||
console.log(`Invalid background, contains an unsafe URL scheme: ${originalUrl}`);
|
||||
},
|
||||
});
|
||||
if (hasUnsafeUrl) {
|
||||
return null;
|
||||
}
|
||||
const rtnStyle = generateCSS(ast);
|
||||
if (rtnStyle == null) {
|
||||
return null;
|
||||
}
|
||||
return rtnStyle.replace(/^background:\s*/, "");
|
||||
}
|
||||
|
||||
export function computeBgStyleFromMeta(meta: MetaType, defaultOpacity: number = null): React.CSSProperties {
|
||||
const bgAttr = meta?.["bg"];
|
||||
if (isBlank(bgAttr)) {
|
||||
return null;
|
||||
}
|
||||
try {
|
||||
const processedBg = processBackgroundUrls(bgAttr);
|
||||
const rtn: React.CSSProperties = {};
|
||||
rtn.background = processedBg;
|
||||
rtn.opacity = boundNumber(meta["bg:opacity"], 0, 1) ?? defaultOpacity;
|
||||
if (!isBlank(meta?.["bg:blendmode"])) {
|
||||
rtn.backgroundBlendMode = meta["bg:blendmode"];
|
||||
}
|
||||
return rtn;
|
||||
} catch (e) {
|
||||
console.error("error processing background", e);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
export function formatRemoteUri(path: string, connection: string): string {
|
||||
connection = connection ?? "local";
|
||||
// TODO: We need a better way to handle s3 paths
|
||||
let retVal: string;
|
||||
if (connection.startsWith("aws:")) {
|
||||
retVal = `${connection}:s3://${path ?? ""}`;
|
||||
} else {
|
||||
retVal = `wsh://${connection}/${path}`;
|
||||
}
|
||||
return retVal;
|
||||
}
|
28
go.mod
28
go.mod
@ -8,7 +8,7 @@ require (
|
||||
github.com/alexflint/go-filemutex v1.3.0
|
||||
github.com/aws/aws-sdk-go-v2 v1.36.1
|
||||
github.com/aws/aws-sdk-go-v2/config v1.29.6
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.75.1
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.76.1
|
||||
github.com/aws/smithy-go v1.22.2
|
||||
github.com/creack/pty v1.1.21
|
||||
github.com/emirpasic/gods v1.18.1
|
||||
@ -35,11 +35,12 @@ require (
|
||||
github.com/tailscale/hujson v0.0.0-20241010212012-29efb4a0184b
|
||||
github.com/ubuntu/gowsl v0.0.0-20240906163211-049fd49bd93b
|
||||
github.com/wavetermdev/htmltoken v0.2.0
|
||||
golang.org/x/crypto v0.32.0
|
||||
golang.org/x/mod v0.22.0
|
||||
golang.org/x/crypto v0.33.0
|
||||
golang.org/x/mod v0.23.0
|
||||
golang.org/x/sync v0.11.0
|
||||
golang.org/x/sys v0.30.0
|
||||
golang.org/x/term v0.28.0
|
||||
google.golang.org/api v0.220.0
|
||||
golang.org/x/term v0.29.0
|
||||
google.golang.org/api v0.221.0
|
||||
gopkg.in/ini.v1 v1.67.0
|
||||
)
|
||||
|
||||
@ -56,11 +57,11 @@ require (
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.32 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.32 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.30 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.32 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.6.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.13 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.11 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.13 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.24.15 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.14 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.33.14 // indirect
|
||||
@ -96,15 +97,14 @@ require (
|
||||
go.opentelemetry.io/otel/metric v1.34.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.34.0 // indirect
|
||||
go.uber.org/atomic v1.7.0 // indirect
|
||||
golang.org/x/net v0.34.0 // indirect
|
||||
golang.org/x/oauth2 v0.25.0 // indirect
|
||||
golang.org/x/sync v0.11.0 // indirect
|
||||
golang.org/x/net v0.35.0 // indirect
|
||||
golang.org/x/oauth2 v0.26.0 // indirect
|
||||
golang.org/x/text v0.22.0 // indirect
|
||||
golang.org/x/time v0.9.0 // indirect
|
||||
golang.org/x/time v0.10.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250127172529-29210b9bc287 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250207221924-e9438ea467c6 // indirect
|
||||
google.golang.org/grpc v1.70.0 // indirect
|
||||
google.golang.org/protobuf v1.36.4 // indirect
|
||||
google.golang.org/protobuf v1.36.5 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
||||
|
52
go.sum
52
go.sum
@ -32,18 +32,18 @@ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.32 h1:m1GeXHVMJsRsUAqG6H
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.32/go.mod h1:IitoQxGfaKdVLNg0hD8/DXmAqNy0H4K2H2Sf91ti8sI=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 h1:Pg9URiobXy85kgFev3og2CuOZ8JZUBENF+dcgWBaYNk=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.30 h1:yQSv0NQ4CRHoki6AcV/Ldoa4/QCMJauZkF23qznBCPQ=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.30/go.mod h1:jH3z32wDrsducaYX26xnl41ksYFWqjHphIciwIANZkc=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.32 h1:OIHj/nAhVzIXGzbAE+4XmZ8FPvro3THr6NlqErJc3wY=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.32/go.mod h1:LiBEsDo34OJXqdDlRGsilhlIiXR7DL+6Cx2f4p1EgzI=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 h1:D4oz8/CzT9bAEYtVhSBmFj2dNOtaHOtMKc2vHBwYizA=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2/go.mod h1:Za3IHqTQ+yNcRHxu1OFucBh0ACZT4j4VQFF0BqpZcLY=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.4 h1:iwk7v5+lUtA0cIQcQM6EyCXtQJZ9MGIWWaf0JKud5UE=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.4/go.mod h1:o9mSr0x1NwImSmP9q38aTUhjYwcDm277YUURBjXcC2I=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.6.0 h1:kT2WeWcFySdYpPgyqJMSUE7781Qucjtn6wBvrgm9P+M=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.6.0/go.mod h1:WYH1ABybY7JK9TITPnk6ZlP7gQB8psI4c9qDmMsnLSA=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.13 h1:SYVGSFQHlchIcy6e7x12bsrxClCXSP5et8cqVhL8cuw=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.13/go.mod h1:kizuDaLX37bG5WZaoxGPQR/LNFXpxp0vsUnqfkWXfNE=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.11 h1:P8qJcYGVDswlMkVFhMi7SJmlf0jNA0JRbvE/q2PuXD8=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.11/go.mod h1:9yp5x5vYwyhnZZ9cKLBxZmrJTGv99C9iVmG7AKeUvdc=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.75.1 h1:hbTWOPUgAnPpk5+G1jZjYnq4eKCAePwRJEqLN1Tj7Bg=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.75.1/go.mod h1:Mo2xdnRzOyZQkGHEbhOgooG0eIV+GqS/g8LU4B5iftI=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.13 h1:OBsrtam3rk8NfBEq7OLOMm5HtQ9Yyw32X4UQMya/wjw=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.13/go.mod h1:3U4gFA5pmoCOja7aq4nSaIAGbaOHv2Yl2ug018cmC+Q=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.76.1 h1:d4ZG8mELlLeUWFBMCqPtRfEP3J6aQgg/KTC9jLSlkMs=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.76.1/go.mod h1:uZoEIR6PzGOZEjgAZE4hfYfsqK2zOHhq68JLKEvvXj4=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.24.15 h1:/eE3DogBjYlvlbhd2ssWyeuovWunHLxfgw3s/OJa4GQ=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.24.15/go.mod h1:2PCJYpi7EKeA5SkStAmZlF6fi0uUABuhtF8ILHjGc3Y=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.14 h1:M/zwXiL2iXUrHputuXgmO94TVNmcenPHxgLXLutodKE=
|
||||
@ -202,14 +202,14 @@ go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC
|
||||
go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE=
|
||||
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
|
||||
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
|
||||
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
|
||||
golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4=
|
||||
golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
|
||||
golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0=
|
||||
golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
|
||||
golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70=
|
||||
golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
|
||||
golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus=
|
||||
golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M=
|
||||
golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM=
|
||||
golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
|
||||
golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8=
|
||||
golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk=
|
||||
golang.org/x/oauth2 v0.26.0 h1:afQXWNNaeC4nvZ0Ed9XvCCzXM6UHJG7iCg0W4fPqSBE=
|
||||
golang.org/x/oauth2 v0.26.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
|
||||
golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
|
||||
golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@ -222,23 +222,23 @@ golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
|
||||
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg=
|
||||
golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek=
|
||||
golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU=
|
||||
golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s=
|
||||
golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
|
||||
golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
|
||||
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
|
||||
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/time v0.10.0 h1:3usCWA8tQn0L8+hFJQNgzpWbd89begxN66o1Ojdn5L4=
|
||||
golang.org/x/time v0.10.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/api v0.220.0 h1:3oMI4gdBgB72WFVwE1nerDD8W3HUOS4kypK6rRLbGns=
|
||||
google.golang.org/api v0.220.0/go.mod h1:26ZAlY6aN/8WgpCzjPNy18QpYaz7Zgg1h0qe1GkZEmY=
|
||||
google.golang.org/api v0.221.0 h1:qzaJfLhDsbMeFee8zBRdt/Nc+xmOuafD/dbdgGfutOU=
|
||||
google.golang.org/api v0.221.0/go.mod h1:7sOU2+TL4TxUTdbi0gWgAIg7tH5qBXxoyhtL+9x3biQ=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250127172529-29210b9bc287 h1:J1H9f+LEdWAfHcez/4cvaVBox7cOYT+IU6rgqj5x++8=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250127172529-29210b9bc287/go.mod h1:8BS3B93F/U1juMFq9+EDk+qOT5CO1R9IzXxG3PTqiRk=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250207221924-e9438ea467c6 h1:2duwAxN2+k0xLNpjnHTXoMUgnv6VPSp5fiqTuwSxjmI=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250207221924-e9438ea467c6/go.mod h1:8BS3B93F/U1juMFq9+EDk+qOT5CO1R9IzXxG3PTqiRk=
|
||||
google.golang.org/grpc v1.70.0 h1:pWFv03aZoHzlRKHWicjsZytKAiYCtNS0dHbXnIdq7jQ=
|
||||
google.golang.org/grpc v1.70.0/go.mod h1:ofIJqVKDXx/JiXrwr2IG4/zwdH9txy3IlF40RmcJSQw=
|
||||
google.golang.org/protobuf v1.36.4 h1:6A3ZDJHn/eNqc1i+IdefRzy/9PokBTPvcqMySR7NNIM=
|
||||
google.golang.org/protobuf v1.36.4/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
|
||||
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
|
26
package.json
26
package.json
@ -7,7 +7,7 @@
|
||||
"productName": "Wave",
|
||||
"description": "Open-Source AI-Native Terminal Built for Seamless Workflows",
|
||||
"license": "Apache-2.0",
|
||||
"version": "0.11.0",
|
||||
"version": "0.11.1-beta.3",
|
||||
"homepage": "https://waveterm.dev",
|
||||
"build": {
|
||||
"appId": "dev.commandline.waveterm"
|
||||
@ -33,15 +33,15 @@
|
||||
"@chromatic-com/storybook": "^3.2.4",
|
||||
"@eslint/js": "^9.20.0",
|
||||
"@rollup/plugin-node-resolve": "^16.0.0",
|
||||
"@storybook/addon-essentials": "^8.5.6",
|
||||
"@storybook/addon-interactions": "^8.5.6",
|
||||
"@storybook/addon-links": "^8.5.6",
|
||||
"@storybook/blocks": "^8.5.6",
|
||||
"@storybook/builder-vite": "^8.5.6",
|
||||
"@storybook/react": "^8.5.6",
|
||||
"@storybook/react-vite": "^8.5.6",
|
||||
"@storybook/test": "^8.5.6",
|
||||
"@storybook/theming": "^8.5.6",
|
||||
"@storybook/addon-essentials": "^8.5.8",
|
||||
"@storybook/addon-interactions": "^8.5.8",
|
||||
"@storybook/addon-links": "^8.5.8",
|
||||
"@storybook/blocks": "^8.5.8",
|
||||
"@storybook/builder-vite": "^8.5.8",
|
||||
"@storybook/react": "^8.5.8",
|
||||
"@storybook/react-vite": "^8.5.8",
|
||||
"@storybook/test": "^8.5.8",
|
||||
"@storybook/theming": "^8.5.8",
|
||||
"@tailwindcss/vite": "^4.0.6",
|
||||
"@types/color": "^4.2.0",
|
||||
"@types/css-tree": "^2",
|
||||
@ -63,7 +63,7 @@
|
||||
"@vitejs/plugin-react-swc": "^3.8.0",
|
||||
"@vitest/coverage-istanbul": "^3.0.5",
|
||||
"electron": "^34.0.2",
|
||||
"electron-builder": "^25.1.8",
|
||||
"electron-builder": "^26.0",
|
||||
"electron-vite": "^2.3.0",
|
||||
"eslint": "^9.20.1",
|
||||
"eslint-config-prettier": "^10.0.1",
|
||||
@ -74,7 +74,7 @@
|
||||
"rollup-plugin-flow": "^1.1.1",
|
||||
"sass": "^1.84.0",
|
||||
"semver": "^7.7.1",
|
||||
"storybook": "^8.5.6",
|
||||
"storybook": "^8.5.8",
|
||||
"storybook-dark-mode": "^4.0.2",
|
||||
"tailwindcss": "^4.0.6",
|
||||
"tailwindcss-animate": "^1.0.7",
|
||||
@ -115,7 +115,7 @@
|
||||
"css-tree": "^3.1.0",
|
||||
"dayjs": "^1.11.13",
|
||||
"debug": "^4.4.0",
|
||||
"electron-updater": "6.3.9",
|
||||
"electron-updater": "^6.6",
|
||||
"env-paths": "^3.0.0",
|
||||
"fast-average-color": "^9.4.0",
|
||||
"htl": "^0.3.1",
|
||||
|
@ -17,9 +17,9 @@ import (
|
||||
"github.com/aws/aws-sdk-go-v2/config"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3/types"
|
||||
|
||||
"github.com/aws/smithy-go"
|
||||
"github.com/wavetermdev/waveterm/pkg/waveobj"
|
||||
"github.com/wavetermdev/waveterm/pkg/wconfig"
|
||||
"gopkg.in/ini.v1"
|
||||
)
|
||||
|
||||
@ -43,25 +43,27 @@ func GetConfig(ctx context.Context, profile string) (*aws.Config, error) {
|
||||
return nil, fmt.Errorf("invalid connection string: %s)", profile)
|
||||
}
|
||||
profile = connMatch[1]
|
||||
log.Printf("GetConfig: profile=%s", profile)
|
||||
profiles, cerrs := wconfig.ReadWaveHomeConfigFile(wconfig.ProfilesFile)
|
||||
if len(cerrs) > 0 {
|
||||
return nil, fmt.Errorf("error reading config file: %v", cerrs[0])
|
||||
}
|
||||
if profiles[profile] != nil {
|
||||
configfilepath, _ := getTempFileFromConfig(profiles, ProfileConfigKey, profile)
|
||||
credentialsfilepath, _ := getTempFileFromConfig(profiles, ProfileCredentialsKey, profile)
|
||||
if configfilepath != "" {
|
||||
log.Printf("configfilepath: %s", configfilepath)
|
||||
optfns = append(optfns, config.WithSharedConfigFiles([]string{configfilepath}))
|
||||
tempfiles[profile+"_config"] = configfilepath
|
||||
}
|
||||
if credentialsfilepath != "" {
|
||||
log.Printf("credentialsfilepath: %s", credentialsfilepath)
|
||||
optfns = append(optfns, config.WithSharedCredentialsFiles([]string{credentialsfilepath}))
|
||||
tempfiles[profile+"_credentials"] = credentialsfilepath
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: Reimplement generic profile support
|
||||
// profiles, cerrs := wconfig.ReadWaveHomeConfigFile(wconfig.ProfilesFile)
|
||||
// if len(cerrs) > 0 {
|
||||
// return nil, fmt.Errorf("error reading config file: %v", cerrs[0])
|
||||
// }
|
||||
// if profiles[profile] != nil {
|
||||
// configfilepath, _ := getTempFileFromConfig(profiles, ProfileConfigKey, profile)
|
||||
// credentialsfilepath, _ := getTempFileFromConfig(profiles, ProfileCredentialsKey, profile)
|
||||
// if configfilepath != "" {
|
||||
// log.Printf("configfilepath: %s", configfilepath)
|
||||
// optfns = append(optfns, config.WithSharedConfigFiles([]string{configfilepath}))
|
||||
// tempfiles[profile+"_config"] = configfilepath
|
||||
// }
|
||||
// if credentialsfilepath != "" {
|
||||
// log.Printf("credentialsfilepath: %s", credentialsfilepath)
|
||||
// optfns = append(optfns, config.WithSharedCredentialsFiles([]string{credentialsfilepath}))
|
||||
// tempfiles[profile+"_credentials"] = credentialsfilepath
|
||||
// }
|
||||
// }
|
||||
optfns = append(optfns, config.WithRegion("us-west-2"))
|
||||
trimmedProfile := strings.TrimPrefix(profile, ProfilePrefix)
|
||||
optfns = append(optfns, config.WithSharedConfigProfile(trimmedProfile))
|
||||
}
|
||||
@ -93,17 +95,18 @@ func getTempFileFromConfig(config waveobj.MetaMapType, key string, profile strin
|
||||
|
||||
func ParseProfiles() map[string]struct{} {
|
||||
profiles := make(map[string]struct{})
|
||||
fname := config.DefaultSharedConfigFilename() // Get aws.config default shared configuration file name
|
||||
f, err := ini.Load(fname) // Load ini file
|
||||
fname := config.DefaultSharedConfigFilename()
|
||||
errs := []error{}
|
||||
f, err := ini.Load(fname) // Load ini file
|
||||
if err != nil {
|
||||
log.Printf("error reading aws config file: %v", err)
|
||||
return nil
|
||||
}
|
||||
for _, v := range f.Sections() {
|
||||
if len(v.Keys()) != 0 { // Get only the sections having Keys
|
||||
parts := strings.Split(v.Name(), " ")
|
||||
if len(parts) == 2 && parts[0] == "profile" { // skip default
|
||||
profiles[ProfilePrefix+parts[1]] = struct{}{}
|
||||
errs = append(errs, err)
|
||||
} else {
|
||||
for _, v := range f.Sections() {
|
||||
if len(v.Keys()) != 0 { // Get only the sections having Keys
|
||||
parts := strings.Split(v.Name(), " ")
|
||||
if len(parts) == 2 && parts[0] == "profile" { // skip default
|
||||
profiles[ProfilePrefix+parts[1]] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -111,26 +114,40 @@ func ParseProfiles() map[string]struct{} {
|
||||
fname = config.DefaultSharedCredentialsFilename()
|
||||
f, err = ini.Load(fname)
|
||||
if err != nil {
|
||||
log.Printf("error reading aws credentials file: %v", err)
|
||||
if profiles == nil {
|
||||
profiles = make(map[string]struct{})
|
||||
errs = append(errs, err)
|
||||
} else {
|
||||
for _, v := range f.Sections() {
|
||||
profiles[ProfilePrefix+v.Name()] = struct{}{}
|
||||
}
|
||||
return profiles
|
||||
}
|
||||
for _, v := range f.Sections() {
|
||||
profiles[ProfilePrefix+v.Name()] = struct{}{}
|
||||
if len(errs) > 0 {
|
||||
log.Printf("error reading aws config/credentials file: %v", errs)
|
||||
}
|
||||
return profiles
|
||||
}
|
||||
|
||||
func ListBuckets(ctx context.Context, client *s3.Client) ([]types.Bucket, error) {
|
||||
output, err := client.ListBuckets(ctx, &s3.ListBucketsInput{})
|
||||
if err != nil {
|
||||
var apiErr smithy.APIError
|
||||
if errors.As(err, &apiErr) {
|
||||
return nil, fmt.Errorf("error listing buckets: %v", apiErr)
|
||||
var err error
|
||||
var output *s3.ListBucketsOutput
|
||||
var buckets []types.Bucket
|
||||
bucketPaginator := s3.NewListBucketsPaginator(client, &s3.ListBucketsInput{})
|
||||
for bucketPaginator.HasMorePages() {
|
||||
output, err = bucketPaginator.NextPage(ctx)
|
||||
if err != nil {
|
||||
CheckAccessDeniedErr(&err)
|
||||
return nil, fmt.Errorf("error listing buckets: %v", err)
|
||||
} else {
|
||||
buckets = append(buckets, output.Buckets...)
|
||||
}
|
||||
return nil, fmt.Errorf("error listing buckets: %v", err)
|
||||
}
|
||||
return output.Buckets, nil
|
||||
return buckets, nil
|
||||
}
|
||||
|
||||
func CheckAccessDeniedErr(err *error) bool {
|
||||
var apiErr smithy.APIError
|
||||
if err != nil && errors.As(*err, &apiErr) && apiErr.ErrorCode() == "AccessDenied" {
|
||||
*err = apiErr
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
@ -47,6 +47,9 @@ func (c *Connection) GetPathWithHost() string {
|
||||
if c.Host == "" {
|
||||
return ""
|
||||
}
|
||||
if c.Path == "" {
|
||||
return c.Host
|
||||
}
|
||||
if strings.HasPrefix(c.Path, "/") {
|
||||
return c.Host + c.Path
|
||||
}
|
||||
@ -91,12 +94,12 @@ func GetConnNameFromContext(ctx context.Context) (string, error) {
|
||||
|
||||
// ParseURI parses a connection URI and returns the connection type, host/path, and parameters.
|
||||
func ParseURI(uri string) (*Connection, error) {
|
||||
split := strings.SplitN(uri, "//", 2)
|
||||
split := strings.SplitN(uri, "://", 2)
|
||||
var scheme string
|
||||
var rest string
|
||||
if len(split) > 1 {
|
||||
scheme = strings.TrimSuffix(split[0], ":")
|
||||
rest = split[1]
|
||||
scheme = split[0]
|
||||
rest = strings.TrimPrefix(split[1], "//")
|
||||
} else {
|
||||
rest = split[0]
|
||||
}
|
||||
@ -107,16 +110,13 @@ func ParseURI(uri string) (*Connection, error) {
|
||||
parseGenericPath := func() {
|
||||
split = strings.SplitN(rest, "/", 2)
|
||||
host = split[0]
|
||||
if len(split) > 1 {
|
||||
if len(split) > 1 && split[1] != "" {
|
||||
remotePath = split[1]
|
||||
} else if strings.HasSuffix(rest, "/") {
|
||||
// preserve trailing slash
|
||||
remotePath = "/"
|
||||
} else {
|
||||
split = strings.SplitN(rest, "/", 2)
|
||||
host = split[0]
|
||||
if len(split) > 1 {
|
||||
remotePath = split[1]
|
||||
} else {
|
||||
remotePath = "/"
|
||||
}
|
||||
remotePath = ""
|
||||
}
|
||||
}
|
||||
parseWshPath := func() {
|
||||
|
@ -17,20 +17,20 @@ func TestParseURI_WSHWithScheme(t *testing.T) {
|
||||
}
|
||||
expected := "/path/to/file"
|
||||
if c.Path != expected {
|
||||
t.Fatalf("expected path to be %q, got %q", expected, c.Path)
|
||||
t.Fatalf("expected path to be \"%q\", got \"%q\"", expected, c.Path)
|
||||
}
|
||||
expected = "user@localhost:8080"
|
||||
if c.Host != expected {
|
||||
t.Fatalf("expected host to be %q, got %q", expected, c.Host)
|
||||
t.Fatalf("expected host to be \"%q\", got \"%q\"", expected, c.Host)
|
||||
}
|
||||
expected = "user@localhost:8080/path/to/file"
|
||||
pathWithHost := c.GetPathWithHost()
|
||||
if pathWithHost != expected {
|
||||
t.Fatalf("expected path with host to be %q, got %q", expected, pathWithHost)
|
||||
t.Fatalf("expected path with host to be \"%q\", got \"%q\"", expected, pathWithHost)
|
||||
}
|
||||
expected = "wsh"
|
||||
if c.Scheme != expected {
|
||||
t.Fatalf("expected scheme to be %q, got %q", expected, c.Scheme)
|
||||
t.Fatalf("expected scheme to be \"%q\", got \"%q\"", expected, c.Scheme)
|
||||
}
|
||||
if len(c.GetSchemeParts()) != 1 {
|
||||
t.Fatalf("expected scheme parts to be 1, got %d", len(c.GetSchemeParts()))
|
||||
@ -44,27 +44,27 @@ func TestParseURI_WSHWithScheme(t *testing.T) {
|
||||
}
|
||||
expected = "/path/to/file"
|
||||
if c.Path != expected {
|
||||
t.Fatalf("expected path to be %q, got %q", expected, c.Path)
|
||||
t.Fatalf("expected path to be \"%q\", got \"%q\"", expected, c.Path)
|
||||
}
|
||||
expected = "user@192.168.0.1:22"
|
||||
if c.Host != expected {
|
||||
t.Fatalf("expected host to be %q, got %q", expected, c.Host)
|
||||
t.Fatalf("expected host to be \"%q\", got \"%q\"", expected, c.Host)
|
||||
}
|
||||
expected = "user@192.168.0.1:22/path/to/file"
|
||||
pathWithHost = c.GetPathWithHost()
|
||||
if pathWithHost != expected {
|
||||
t.Fatalf("expected path with host to be %q, got %q", expected, pathWithHost)
|
||||
t.Fatalf("expected path with host to be \"%q\", got \"%q\"", expected, pathWithHost)
|
||||
}
|
||||
expected = "wsh"
|
||||
if c.GetType() != expected {
|
||||
t.Fatalf("expected conn type to be %q, got %q", expected, c.Scheme)
|
||||
t.Fatalf("expected conn type to be \"%q\", got \"%q\"", expected, c.Scheme)
|
||||
}
|
||||
if len(c.GetSchemeParts()) != 1 {
|
||||
t.Fatalf("expected scheme parts to be 1, got %d", len(c.GetSchemeParts()))
|
||||
}
|
||||
got := c.GetFullURI()
|
||||
if got != cstr {
|
||||
t.Fatalf("expected full URI to be %q, got %q", cstr, got)
|
||||
t.Fatalf("expected full URI to be \"%q\", got \"%q\"", cstr, got)
|
||||
}
|
||||
}
|
||||
|
||||
@ -77,20 +77,20 @@ func TestParseURI_WSHRemoteShorthand(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("failed to parse URI: %v", err)
|
||||
}
|
||||
expected := "/path/to/file"
|
||||
expected := "path/to/file"
|
||||
if c.Path != expected {
|
||||
t.Fatalf("expected path to be %q, got %q", expected, c.Path)
|
||||
t.Fatalf("expected path to be \"%q\", got \"%q\"", expected, c.Path)
|
||||
}
|
||||
if c.Host != "conn" {
|
||||
t.Fatalf("expected host to be empty, got %q", c.Host)
|
||||
t.Fatalf("expected host to be empty, got \"%q\"", c.Host)
|
||||
}
|
||||
expected = "wsh"
|
||||
if c.Scheme != expected {
|
||||
t.Fatalf("expected scheme to be %q, got %q", expected, c.Scheme)
|
||||
t.Fatalf("expected scheme to be \"%q\", got \"%q\"", expected, c.Scheme)
|
||||
}
|
||||
expected = "wsh://conn/path/to/file"
|
||||
if c.GetFullURI() != expected {
|
||||
t.Fatalf("expected full URI to be %q, got %q", expected, c.GetFullURI())
|
||||
t.Fatalf("expected full URI to be \"%q\", got \"%q\"", expected, c.GetFullURI())
|
||||
}
|
||||
|
||||
// Test with a complex remote path
|
||||
@ -99,21 +99,21 @@ func TestParseURI_WSHRemoteShorthand(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("failed to parse URI: %v", err)
|
||||
}
|
||||
expected = "/path/to/file"
|
||||
expected = "path/to/file"
|
||||
if c.Path != expected {
|
||||
t.Fatalf("expected path to be %q, got %q", expected, c.Path)
|
||||
t.Fatalf("expected path to be \"%q\", got \"%q\"", expected, c.Path)
|
||||
}
|
||||
expected = "user@localhost:8080"
|
||||
if c.Host != expected {
|
||||
t.Fatalf("expected host to be %q, got %q", expected, c.Host)
|
||||
t.Fatalf("expected host to be \"%q\", got \"%q\"", expected, c.Host)
|
||||
}
|
||||
expected = "wsh"
|
||||
if c.Scheme != expected {
|
||||
t.Fatalf("expected scheme to be %q, got %q", expected, c.Scheme)
|
||||
t.Fatalf("expected scheme to be \"%q\", got \"%q\"", expected, c.Scheme)
|
||||
}
|
||||
expected = "wsh://user@localhost:8080/path/to/file"
|
||||
if c.GetFullURI() != expected {
|
||||
t.Fatalf("expected full URI to be %q, got %q", expected, c.GetFullURI())
|
||||
t.Fatalf("expected full URI to be \"%q\", got \"%q\"", expected, c.GetFullURI())
|
||||
}
|
||||
|
||||
// Test with an IP address
|
||||
@ -122,21 +122,21 @@ func TestParseURI_WSHRemoteShorthand(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("failed to parse URI: %v", err)
|
||||
}
|
||||
expected = "/path/to/file"
|
||||
expected = "path/to/file"
|
||||
if c.Path != expected {
|
||||
t.Fatalf("expected path to be %q, got %q", expected, c.Path)
|
||||
t.Fatalf("expected path to be \"%q\", got \"%q\"", expected, c.Path)
|
||||
}
|
||||
expected = "user@192.168.0.1:8080"
|
||||
if c.Host != expected {
|
||||
t.Fatalf("expected host to be %q, got %q", expected, c.Host)
|
||||
t.Fatalf("expected host to be \"%q\", got \"%q\"", expected, c.Host)
|
||||
}
|
||||
expected = "wsh"
|
||||
if c.Scheme != expected {
|
||||
t.Fatalf("expected scheme to be %q, got %q", expected, c.Scheme)
|
||||
t.Fatalf("expected scheme to be \"%q\", got \"%q\"", expected, c.Scheme)
|
||||
}
|
||||
expected = "wsh://user@192.168.0.1:8080/path/to/file"
|
||||
if c.GetFullURI() != expected {
|
||||
t.Fatalf("expected full URI to be %q, got %q", expected, c.GetFullURI())
|
||||
t.Fatalf("expected full URI to be \"%q\", got \"%q\"", expected, c.GetFullURI())
|
||||
}
|
||||
}
|
||||
|
||||
@ -151,19 +151,19 @@ func TestParseURI_WSHCurrentPathShorthand(t *testing.T) {
|
||||
}
|
||||
expected := "~/path/to/file"
|
||||
if c.Path != expected {
|
||||
t.Fatalf("expected path to be %q, got %q", expected, c.Path)
|
||||
t.Fatalf("expected path to be \"%q\", got \"%q\"", expected, c.Path)
|
||||
}
|
||||
expected = "current"
|
||||
if c.Host != expected {
|
||||
t.Fatalf("expected host to be %q, got %q", expected, c.Host)
|
||||
t.Fatalf("expected host to be \"%q\", got \"%q\"", expected, c.Host)
|
||||
}
|
||||
expected = "wsh"
|
||||
if c.Scheme != expected {
|
||||
t.Fatalf("expected scheme to be %q, got %q", expected, c.Scheme)
|
||||
t.Fatalf("expected scheme to be \"%q\", got \"%q\"", expected, c.Scheme)
|
||||
}
|
||||
expected = "wsh://current/~/path/to/file"
|
||||
if c.GetFullURI() != expected {
|
||||
t.Fatalf("expected full URI to be %q, got %q", expected, c.GetFullURI())
|
||||
t.Fatalf("expected full URI to be \"%q\", got \"%q\"", expected, c.GetFullURI())
|
||||
}
|
||||
|
||||
// Test with a absolute path
|
||||
@ -174,19 +174,19 @@ func TestParseURI_WSHCurrentPathShorthand(t *testing.T) {
|
||||
}
|
||||
expected = "/path/to/file"
|
||||
if c.Path != expected {
|
||||
t.Fatalf("expected path to be %q, got %q", expected, c.Path)
|
||||
t.Fatalf("expected path to be \"%q\", got \"%q\"", expected, c.Path)
|
||||
}
|
||||
expected = "current"
|
||||
if c.Host != expected {
|
||||
t.Fatalf("expected host to be %q, got %q", expected, c.Host)
|
||||
t.Fatalf("expected host to be \"%q\", got \"%q\"", expected, c.Host)
|
||||
}
|
||||
expected = "wsh"
|
||||
if c.Scheme != expected {
|
||||
t.Fatalf("expected scheme to be %q, got %q", expected, c.Scheme)
|
||||
t.Fatalf("expected scheme to be \"%q\", got \"%q\"", expected, c.Scheme)
|
||||
}
|
||||
expected = "wsh://current/path/to/file"
|
||||
if c.GetFullURI() != expected {
|
||||
t.Fatalf("expected full URI to be %q, got %q", expected, c.GetFullURI())
|
||||
t.Fatalf("expected full URI to be \"%q\", got \"%q\"", expected, c.GetFullURI())
|
||||
}
|
||||
}
|
||||
|
||||
@ -198,19 +198,19 @@ func TestParseURI_WSHCurrentPath(t *testing.T) {
|
||||
}
|
||||
expected := "./Documents/path/to/file"
|
||||
if c.Path != expected {
|
||||
t.Fatalf("expected path to be %q, got %q", expected, c.Path)
|
||||
t.Fatalf("expected path to be \"%q\", got \"%q\"", expected, c.Path)
|
||||
}
|
||||
expected = "current"
|
||||
if c.Host != expected {
|
||||
t.Fatalf("expected host to be %q, got %q", expected, c.Host)
|
||||
t.Fatalf("expected host to be \"%q\", got \"%q\"", expected, c.Host)
|
||||
}
|
||||
expected = "wsh"
|
||||
if c.Scheme != expected {
|
||||
t.Fatalf("expected scheme to be %q, got %q", expected, c.Scheme)
|
||||
t.Fatalf("expected scheme to be \"%q\", got \"%q\"", expected, c.Scheme)
|
||||
}
|
||||
expected = "wsh://current/./Documents/path/to/file"
|
||||
if c.GetFullURI() != expected {
|
||||
t.Fatalf("expected full URI to be %q, got %q", expected, c.GetFullURI())
|
||||
t.Fatalf("expected full URI to be \"%q\", got \"%q\"", expected, c.GetFullURI())
|
||||
}
|
||||
|
||||
cstr = "path/to/file"
|
||||
@ -266,19 +266,19 @@ func TestParseURI_WSHCurrentPathWindows(t *testing.T) {
|
||||
}
|
||||
expected := ".\\Documents\\path\\to\\file"
|
||||
if c.Path != expected {
|
||||
t.Fatalf("expected path to be %q, got %q", expected, c.Path)
|
||||
t.Fatalf("expected path to be \"%q\", got \"%q\"", expected, c.Path)
|
||||
}
|
||||
expected = "current"
|
||||
if c.Host != expected {
|
||||
t.Fatalf("expected host to be %q, got %q", expected, c.Host)
|
||||
t.Fatalf("expected host to be \"%q\", got \"%q\"", expected, c.Host)
|
||||
}
|
||||
expected = "wsh"
|
||||
if c.Scheme != expected {
|
||||
t.Fatalf("expected scheme to be %q, got %q", expected, c.Scheme)
|
||||
t.Fatalf("expected scheme to be \"%q\", got \"%q\"", expected, c.Scheme)
|
||||
}
|
||||
expected = "wsh://current/.\\Documents\\path\\to\\file"
|
||||
if c.GetFullURI() != expected {
|
||||
t.Fatalf("expected full URI to be %q, got %q", expected, c.GetFullURI())
|
||||
t.Fatalf("expected full URI to be \"%q\", got \"%q\"", expected, c.GetFullURI())
|
||||
}
|
||||
}
|
||||
|
||||
@ -291,14 +291,14 @@ func TestParseURI_WSHLocalShorthand(t *testing.T) {
|
||||
}
|
||||
expected := "~/path/to/file"
|
||||
if c.Path != expected {
|
||||
t.Fatalf("expected path to be %q, got %q", expected, c.Path)
|
||||
t.Fatalf("expected path to be \"%q\", got \"%q\"", expected, c.Path)
|
||||
}
|
||||
if c.Host != "local" {
|
||||
t.Fatalf("expected host to be empty, got %q", c.Host)
|
||||
t.Fatalf("expected host to be empty, got \"%q\"", c.Host)
|
||||
}
|
||||
expected = "wsh"
|
||||
if c.Scheme != expected {
|
||||
t.Fatalf("expected scheme to be %q, got %q", expected, c.Scheme)
|
||||
t.Fatalf("expected scheme to be \"%q\", got \"%q\"", expected, c.Scheme)
|
||||
}
|
||||
|
||||
cstr = "wsh:///~/path/to/file"
|
||||
@ -308,18 +308,18 @@ func TestParseURI_WSHLocalShorthand(t *testing.T) {
|
||||
}
|
||||
expected = "~/path/to/file"
|
||||
if c.Path != expected {
|
||||
t.Fatalf("expected path to be %q, got %q", expected, c.Path)
|
||||
t.Fatalf("expected path to be \"%q\", got \"%q\"", expected, c.Path)
|
||||
}
|
||||
if c.Host != "local" {
|
||||
t.Fatalf("expected host to be empty, got %q", c.Host)
|
||||
t.Fatalf("expected host to be empty, got \"%q\"", c.Host)
|
||||
}
|
||||
expected = "wsh"
|
||||
if c.Scheme != expected {
|
||||
t.Fatalf("expected scheme to be %q, got %q", expected, c.Scheme)
|
||||
t.Fatalf("expected scheme to be \"%q\", got \"%q\"", expected, c.Scheme)
|
||||
}
|
||||
expected = "wsh://local/~/path/to/file"
|
||||
if c.GetFullURI() != expected {
|
||||
t.Fatalf("expected full URI to be %q, got %q", expected, c.GetFullURI())
|
||||
t.Fatalf("expected full URI to be \"%q\", got \"%q\"", expected, c.GetFullURI())
|
||||
}
|
||||
}
|
||||
|
||||
@ -334,19 +334,19 @@ func TestParseURI_WSHWSL(t *testing.T) {
|
||||
}
|
||||
expected := "/path/to/file"
|
||||
if c.Path != expected {
|
||||
t.Fatalf("expected path to be %q, got %q", expected, c.Path)
|
||||
t.Fatalf("expected path to be \"%q\", got \"%q\"", expected, c.Path)
|
||||
}
|
||||
expected = "wsl://Ubuntu"
|
||||
if c.Host != expected {
|
||||
t.Fatalf("expected host to be %q, got %q", expected, c.Host)
|
||||
t.Fatalf("expected host to be \"%q\", got \"%q\"", expected, c.Host)
|
||||
}
|
||||
expected = "wsh"
|
||||
if c.Scheme != expected {
|
||||
t.Fatalf("expected scheme to be %q, got %q", expected, c.Scheme)
|
||||
t.Fatalf("expected scheme to be \"%q\", got \"%q\"", expected, c.Scheme)
|
||||
}
|
||||
expected = "wsh://wsl://Ubuntu/path/to/file"
|
||||
if expected != c.GetFullURI() {
|
||||
t.Fatalf("expected full URI to be %q, got %q", expected, c.GetFullURI())
|
||||
t.Fatalf("expected full URI to be \"%q\", got \"%q\"", expected, c.GetFullURI())
|
||||
}
|
||||
}
|
||||
t.Log("Testing with scheme")
|
||||
@ -368,19 +368,19 @@ func TestParseUri_LocalWindowsAbsPath(t *testing.T) {
|
||||
}
|
||||
expected := "C:\\path\\to\\file"
|
||||
if c.Path != expected {
|
||||
t.Fatalf("expected path to be %q, got %q", expected, c.Path)
|
||||
t.Fatalf("expected path to be \"%q\", got \"%q\"", expected, c.Path)
|
||||
}
|
||||
expected = "local"
|
||||
if c.Host != expected {
|
||||
t.Fatalf("expected host to be %q, got %q", expected, c.Host)
|
||||
t.Fatalf("expected host to be \"%q\", got \"%q\"", expected, c.Host)
|
||||
}
|
||||
expected = "wsh"
|
||||
if c.Scheme != expected {
|
||||
t.Fatalf("expected scheme to be %q, got %q", expected, c.Scheme)
|
||||
t.Fatalf("expected scheme to be \"%q\", got \"%q\"", expected, c.Scheme)
|
||||
}
|
||||
expected = "wsh://local/C:\\path\\to\\file"
|
||||
if c.GetFullURI() != expected {
|
||||
t.Fatalf("expected full URI to be %q, got %q", expected, c.GetFullURI())
|
||||
t.Fatalf("expected full URI to be \"%q\", got \"%q\"", expected, c.GetFullURI())
|
||||
}
|
||||
}
|
||||
|
||||
@ -399,19 +399,19 @@ func TestParseURI_LocalWindowsRelativeShorthand(t *testing.T) {
|
||||
}
|
||||
expected := "~\\path\\to\\file"
|
||||
if c.Path != expected {
|
||||
t.Fatalf("expected path to be %q, got %q", expected, c.Path)
|
||||
t.Fatalf("expected path to be \"%q\", got \"%q\"", expected, c.Path)
|
||||
}
|
||||
expected = "local"
|
||||
if c.Host != expected {
|
||||
t.Fatalf("expected host to be %q, got %q", expected, c.Host)
|
||||
t.Fatalf("expected host to be \"%q\", got \"%q\"", expected, c.Host)
|
||||
}
|
||||
expected = "wsh"
|
||||
if c.Scheme != expected {
|
||||
t.Fatalf("expected scheme to be %q, got %q", expected, c.Scheme)
|
||||
t.Fatalf("expected scheme to be \"%q\", got \"%q\"", expected, c.Scheme)
|
||||
}
|
||||
expected = "wsh://local/~\\path\\to\\file"
|
||||
if c.GetFullURI() != expected {
|
||||
t.Fatalf("expected full URI to be %q, got %q", expected, c.GetFullURI())
|
||||
t.Fatalf("expected full URI to be \"%q\", got \"%q\"", expected, c.GetFullURI())
|
||||
}
|
||||
}
|
||||
|
||||
@ -424,22 +424,60 @@ func TestParseURI_BasicS3(t *testing.T) {
|
||||
}
|
||||
expected := "path/to/file"
|
||||
if c.Path != expected {
|
||||
t.Fatalf("expected path to be %q, got %q", expected, c.Path)
|
||||
t.Fatalf("expected path to be \"%q\", got \"%q\"", expected, c.Path)
|
||||
}
|
||||
expected = "bucket"
|
||||
if c.Host != expected {
|
||||
t.Fatalf("expected host to be %q, got %q", expected, c.Host)
|
||||
t.Fatalf("expected host to be \"%q\", got \"%q\"", expected, c.Host)
|
||||
}
|
||||
expected = "bucket/path/to/file"
|
||||
pathWithHost := c.GetPathWithHost()
|
||||
if pathWithHost != expected {
|
||||
t.Fatalf("expected path with host to be %q, got %q", expected, pathWithHost)
|
||||
t.Fatalf("expected path with host to be \"%q\", got \"%q\"", expected, pathWithHost)
|
||||
}
|
||||
expected = "s3"
|
||||
if c.GetType() != expected {
|
||||
t.Fatalf("expected conn type to be %q, got %q", expected, c.GetType())
|
||||
t.Fatalf("expected conn type to be \"%q\", got \"%q\"", expected, c.GetType())
|
||||
}
|
||||
if len(c.GetSchemeParts()) != 2 {
|
||||
t.Fatalf("expected scheme parts to be 2, got %d", len(c.GetSchemeParts()))
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseURI_S3BucketOnly(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
testUri := func(cstr string, pathExpected string, pathWithHostExpected string) {
|
||||
c, err := connparse.ParseURI(cstr)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to parse URI: %v", err)
|
||||
}
|
||||
if c.Path != pathExpected {
|
||||
t.Fatalf("expected path to be \"%q\", got \"%q\"", pathExpected, c.Path)
|
||||
}
|
||||
expected := "bucket"
|
||||
if c.Host != expected {
|
||||
t.Fatalf("expected host to be \"%q\", got \"%q\"", expected, c.Host)
|
||||
}
|
||||
pathWithHost := c.GetPathWithHost()
|
||||
if pathWithHost != pathWithHostExpected {
|
||||
t.Fatalf("expected path with host to be \"%q\", got \"%q\"", expected, pathWithHost)
|
||||
}
|
||||
expected = "s3"
|
||||
if c.GetType() != expected {
|
||||
t.Fatalf("expected conn type to be \"%q\", got \"%q\"", expected, c.GetType())
|
||||
}
|
||||
if len(c.GetSchemeParts()) != 2 {
|
||||
t.Fatalf("expected scheme parts to be 2, got %d", len(c.GetSchemeParts()))
|
||||
}
|
||||
fullUri := c.GetFullURI()
|
||||
if fullUri != cstr {
|
||||
t.Fatalf("expected full URI to be \"%q\", got \"%q\"", cstr, fullUri)
|
||||
}
|
||||
}
|
||||
|
||||
t.Log("Testing with no trailing slash")
|
||||
testUri("profile:s3://bucket", "", "bucket")
|
||||
t.Log("Testing with trailing slash")
|
||||
testUri("profile:s3://bucket/", "/", "bucket/")
|
||||
}
|
||||
|
@ -5,8 +5,10 @@ import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/wavetermdev/waveterm/pkg/remote/awsconn"
|
||||
"github.com/wavetermdev/waveterm/pkg/remote/connparse"
|
||||
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/fstype"
|
||||
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/s3fs"
|
||||
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/wavefs"
|
||||
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/wshfs"
|
||||
"github.com/wavetermdev/waveterm/pkg/util/iochan/iochantypes"
|
||||
@ -28,12 +30,12 @@ func CreateFileShareClient(ctx context.Context, connection string) (fstype.FileS
|
||||
}
|
||||
conntype := conn.GetType()
|
||||
if conntype == connparse.ConnectionTypeS3 {
|
||||
// config, err := awsconn.GetConfig(ctx, connection)
|
||||
// if err != nil {
|
||||
// log.Printf("error getting aws config: %v", err)
|
||||
// return nil, nil
|
||||
// }
|
||||
return nil, nil
|
||||
config, err := awsconn.GetConfig(ctx, connection)
|
||||
if err != nil {
|
||||
log.Printf("error getting aws config: %v", err)
|
||||
return nil, nil
|
||||
}
|
||||
return s3fs.NewS3Client(config), conn
|
||||
} else if conntype == connparse.ConnectionTypeWave {
|
||||
return wavefs.NewWaveClient(), conn
|
||||
} else if conntype == connparse.ConnectionTypeWsh {
|
||||
@ -45,6 +47,7 @@ func CreateFileShareClient(ctx context.Context, connection string) (fstype.FileS
|
||||
}
|
||||
|
||||
func Read(ctx context.Context, data wshrpc.FileData) (*wshrpc.FileData, error) {
|
||||
log.Printf("Read: %v", data.Info.Path)
|
||||
client, conn := CreateFileShareClient(ctx, data.Info.Path)
|
||||
if conn == nil || client == nil {
|
||||
return nil, fmt.Errorf(ErrorParsingConnection, data.Info.Path)
|
||||
@ -53,6 +56,7 @@ func Read(ctx context.Context, data wshrpc.FileData) (*wshrpc.FileData, error) {
|
||||
}
|
||||
|
||||
func ReadStream(ctx context.Context, data wshrpc.FileData) <-chan wshrpc.RespOrErrorUnion[wshrpc.FileData] {
|
||||
log.Printf("ReadStream: %v", data.Info.Path)
|
||||
client, conn := CreateFileShareClient(ctx, data.Info.Path)
|
||||
if conn == nil || client == nil {
|
||||
return wshutil.SendErrCh[wshrpc.FileData](fmt.Errorf(ErrorParsingConnection, data.Info.Path))
|
||||
@ -61,6 +65,7 @@ func ReadStream(ctx context.Context, data wshrpc.FileData) <-chan wshrpc.RespOrE
|
||||
}
|
||||
|
||||
func ReadTarStream(ctx context.Context, data wshrpc.CommandRemoteStreamTarData) <-chan wshrpc.RespOrErrorUnion[iochantypes.Packet] {
|
||||
log.Printf("ReadTarStream: %v", data.Path)
|
||||
client, conn := CreateFileShareClient(ctx, data.Path)
|
||||
if conn == nil || client == nil {
|
||||
return wshutil.SendErrCh[iochantypes.Packet](fmt.Errorf(ErrorParsingConnection, data.Path))
|
||||
@ -69,6 +74,7 @@ func ReadTarStream(ctx context.Context, data wshrpc.CommandRemoteStreamTarData)
|
||||
}
|
||||
|
||||
func ListEntries(ctx context.Context, path string, opts *wshrpc.FileListOpts) ([]*wshrpc.FileInfo, error) {
|
||||
log.Printf("ListEntries: %v", path)
|
||||
client, conn := CreateFileShareClient(ctx, path)
|
||||
if conn == nil || client == nil {
|
||||
return nil, fmt.Errorf(ErrorParsingConnection, path)
|
||||
@ -77,6 +83,7 @@ func ListEntries(ctx context.Context, path string, opts *wshrpc.FileListOpts) ([
|
||||
}
|
||||
|
||||
func ListEntriesStream(ctx context.Context, path string, opts *wshrpc.FileListOpts) <-chan wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData] {
|
||||
log.Printf("ListEntriesStream: %v", path)
|
||||
client, conn := CreateFileShareClient(ctx, path)
|
||||
if conn == nil || client == nil {
|
||||
return wshutil.SendErrCh[wshrpc.CommandRemoteListEntriesRtnData](fmt.Errorf(ErrorParsingConnection, path))
|
||||
@ -85,6 +92,7 @@ func ListEntriesStream(ctx context.Context, path string, opts *wshrpc.FileListOp
|
||||
}
|
||||
|
||||
func Stat(ctx context.Context, path string) (*wshrpc.FileInfo, error) {
|
||||
log.Printf("Stat: %v", path)
|
||||
client, conn := CreateFileShareClient(ctx, path)
|
||||
if conn == nil || client == nil {
|
||||
return nil, fmt.Errorf(ErrorParsingConnection, path)
|
||||
@ -93,6 +101,7 @@ func Stat(ctx context.Context, path string) (*wshrpc.FileInfo, error) {
|
||||
}
|
||||
|
||||
func PutFile(ctx context.Context, data wshrpc.FileData) error {
|
||||
log.Printf("PutFile: %v", data.Info.Path)
|
||||
client, conn := CreateFileShareClient(ctx, data.Info.Path)
|
||||
if conn == nil || client == nil {
|
||||
return fmt.Errorf(ErrorParsingConnection, data.Info.Path)
|
||||
@ -101,6 +110,7 @@ func PutFile(ctx context.Context, data wshrpc.FileData) error {
|
||||
}
|
||||
|
||||
func Mkdir(ctx context.Context, path string) error {
|
||||
log.Printf("Mkdir: %v", path)
|
||||
client, conn := CreateFileShareClient(ctx, path)
|
||||
if conn == nil || client == nil {
|
||||
return fmt.Errorf(ErrorParsingConnection, path)
|
||||
@ -109,6 +119,11 @@ func Mkdir(ctx context.Context, path string) error {
|
||||
}
|
||||
|
||||
func Move(ctx context.Context, data wshrpc.CommandFileCopyData) error {
|
||||
opts := data.Opts
|
||||
if opts == nil {
|
||||
opts = &wshrpc.FileCopyOpts{}
|
||||
}
|
||||
log.Printf("Move: srcuri: %v, desturi: %v, opts: %v", data.SrcUri, data.DestUri, opts)
|
||||
srcClient, srcConn := CreateFileShareClient(ctx, data.SrcUri)
|
||||
if srcConn == nil || srcClient == nil {
|
||||
return fmt.Errorf("error creating fileshare client, could not parse source connection %s", data.SrcUri)
|
||||
@ -118,17 +133,23 @@ func Move(ctx context.Context, data wshrpc.CommandFileCopyData) error {
|
||||
return fmt.Errorf("error creating fileshare client, could not parse destination connection %s", data.DestUri)
|
||||
}
|
||||
if srcConn.Host != destConn.Host {
|
||||
err := destClient.CopyRemote(ctx, srcConn, destConn, srcClient, data.Opts)
|
||||
isDir, err := destClient.CopyRemote(ctx, srcConn, destConn, srcClient, opts)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot copy %q to %q: %w", data.SrcUri, data.DestUri, err)
|
||||
}
|
||||
return srcClient.Delete(ctx, srcConn, data.Opts.Recursive)
|
||||
return srcClient.Delete(ctx, srcConn, opts.Recursive && isDir)
|
||||
} else {
|
||||
return srcClient.MoveInternal(ctx, srcConn, destConn, data.Opts)
|
||||
return srcClient.MoveInternal(ctx, srcConn, destConn, opts)
|
||||
}
|
||||
}
|
||||
|
||||
func Copy(ctx context.Context, data wshrpc.CommandFileCopyData) error {
|
||||
opts := data.Opts
|
||||
if opts == nil {
|
||||
opts = &wshrpc.FileCopyOpts{}
|
||||
}
|
||||
opts.Recursive = true
|
||||
log.Printf("Copy: srcuri: %v, desturi: %v, opts: %v", data.SrcUri, data.DestUri, opts)
|
||||
srcClient, srcConn := CreateFileShareClient(ctx, data.SrcUri)
|
||||
if srcConn == nil || srcClient == nil {
|
||||
return fmt.Errorf("error creating fileshare client, could not parse source connection %s", data.SrcUri)
|
||||
@ -138,13 +159,16 @@ func Copy(ctx context.Context, data wshrpc.CommandFileCopyData) error {
|
||||
return fmt.Errorf("error creating fileshare client, could not parse destination connection %s", data.DestUri)
|
||||
}
|
||||
if srcConn.Host != destConn.Host {
|
||||
return destClient.CopyRemote(ctx, srcConn, destConn, srcClient, data.Opts)
|
||||
_, err := destClient.CopyRemote(ctx, srcConn, destConn, srcClient, opts)
|
||||
return err
|
||||
} else {
|
||||
return srcClient.CopyInternal(ctx, srcConn, destConn, data.Opts)
|
||||
_, err := srcClient.CopyInternal(ctx, srcConn, destConn, opts)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
func Delete(ctx context.Context, data wshrpc.CommandDeleteFileData) error {
|
||||
log.Printf("Delete: %v", data)
|
||||
client, conn := CreateFileShareClient(ctx, data.Path)
|
||||
if conn == nil || client == nil {
|
||||
return fmt.Errorf(ErrorParsingConnection, data.Path)
|
||||
@ -152,18 +176,29 @@ func Delete(ctx context.Context, data wshrpc.CommandDeleteFileData) error {
|
||||
return client.Delete(ctx, conn, data.Recursive)
|
||||
}
|
||||
|
||||
func Join(ctx context.Context, path string, parts ...string) (string, error) {
|
||||
func Join(ctx context.Context, path string, parts ...string) (*wshrpc.FileInfo, error) {
|
||||
log.Printf("Join: %v", path)
|
||||
client, conn := CreateFileShareClient(ctx, path)
|
||||
if conn == nil || client == nil {
|
||||
return "", fmt.Errorf(ErrorParsingConnection, path)
|
||||
return nil, fmt.Errorf(ErrorParsingConnection, path)
|
||||
}
|
||||
return client.Join(ctx, conn, parts...)
|
||||
}
|
||||
|
||||
func Append(ctx context.Context, data wshrpc.FileData) error {
|
||||
log.Printf("Append: %v", data.Info.Path)
|
||||
client, conn := CreateFileShareClient(ctx, data.Info.Path)
|
||||
if conn == nil || client == nil {
|
||||
return fmt.Errorf(ErrorParsingConnection, data.Info.Path)
|
||||
}
|
||||
return client.AppendFile(ctx, conn, data)
|
||||
}
|
||||
|
||||
func GetCapability(ctx context.Context, path string) (wshrpc.FileShareCapability, error) {
|
||||
log.Printf("GetCapability: %v", path)
|
||||
client, conn := CreateFileShareClient(ctx, path)
|
||||
if conn == nil || client == nil {
|
||||
return wshrpc.FileShareCapability{}, fmt.Errorf(ErrorParsingConnection, path)
|
||||
}
|
||||
return client.GetCapability(), nil
|
||||
}
|
||||
|
37
pkg/remote/fileshare/fspath/fspath.go
Normal file
37
pkg/remote/fileshare/fspath/fspath.go
Normal file
@ -0,0 +1,37 @@
|
||||
package fspath
|
||||
|
||||
import (
|
||||
pathpkg "path"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
// Separator is the path separator
|
||||
Separator = "/"
|
||||
)
|
||||
|
||||
func Dir(path string) string {
|
||||
return pathpkg.Dir(ToSlash(path))
|
||||
}
|
||||
|
||||
func Base(path string) string {
|
||||
return pathpkg.Base(ToSlash(path))
|
||||
}
|
||||
|
||||
func Join(elem ...string) string {
|
||||
joined := pathpkg.Join(elem...)
|
||||
return ToSlash(joined)
|
||||
}
|
||||
|
||||
// FirstLevelDir returns the first level directory of a path and a boolean indicating if the path has more than one level.
|
||||
func FirstLevelDir(path string) (string, bool) {
|
||||
if strings.Count(path, Separator) > 0 {
|
||||
path = strings.SplitN(path, Separator, 2)[0]
|
||||
return path, true
|
||||
}
|
||||
return path, false
|
||||
}
|
||||
|
||||
func ToSlash(path string) string {
|
||||
return strings.ReplaceAll(path, "\\", Separator)
|
||||
}
|
@ -5,12 +5,23 @@ package fstype
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/wavetermdev/waveterm/pkg/remote/connparse"
|
||||
"github.com/wavetermdev/waveterm/pkg/util/iochan/iochantypes"
|
||||
"github.com/wavetermdev/waveterm/pkg/wshrpc"
|
||||
)
|
||||
|
||||
const (
|
||||
DefaultTimeout = 30 * time.Second
|
||||
FileMode os.FileMode = 0644
|
||||
DirMode os.FileMode = 0755 | os.ModeDir
|
||||
RecursiveRequiredError = "recursive flag must be set for directory operations"
|
||||
MergeRequiredError = "directory already exists at %q, set overwrite flag to delete the existing contents or set merge flag to merge the contents"
|
||||
OverwriteRequiredError = "file already exists at %q, set overwrite flag to delete the existing file"
|
||||
)
|
||||
|
||||
type FileShareClient interface {
|
||||
// Stat returns the file info at the given parsed connection path
|
||||
Stat(ctx context.Context, conn *connparse.Connection) (*wshrpc.FileInfo, error)
|
||||
@ -32,14 +43,16 @@ type FileShareClient interface {
|
||||
Mkdir(ctx context.Context, conn *connparse.Connection) error
|
||||
// Move moves the file within the same connection
|
||||
MoveInternal(ctx context.Context, srcConn, destConn *connparse.Connection, opts *wshrpc.FileCopyOpts) error
|
||||
// Copy copies the file within the same connection
|
||||
CopyInternal(ctx context.Context, srcConn, destConn *connparse.Connection, opts *wshrpc.FileCopyOpts) error
|
||||
// CopyRemote copies the file between different connections
|
||||
CopyRemote(ctx context.Context, srcConn, destConn *connparse.Connection, srcClient FileShareClient, opts *wshrpc.FileCopyOpts) error
|
||||
// Copy copies the file within the same connection. Returns whether the copy source was a directory
|
||||
CopyInternal(ctx context.Context, srcConn, destConn *connparse.Connection, opts *wshrpc.FileCopyOpts) (bool, error)
|
||||
// CopyRemote copies the file between different connections. Returns whether the copy source was a directory
|
||||
CopyRemote(ctx context.Context, srcConn, destConn *connparse.Connection, srcClient FileShareClient, opts *wshrpc.FileCopyOpts) (bool, error)
|
||||
// Delete deletes the entry at the given path
|
||||
Delete(ctx context.Context, conn *connparse.Connection, recursive bool) error
|
||||
// Join joins the given parts to the connection path
|
||||
Join(ctx context.Context, conn *connparse.Connection, parts ...string) (string, error)
|
||||
Join(ctx context.Context, conn *connparse.Connection, parts ...string) (*wshrpc.FileInfo, error)
|
||||
// GetConnectionType returns the type of connection for the fileshare
|
||||
GetConnectionType() string
|
||||
// GetCapability returns the capability of the fileshare
|
||||
GetCapability() wshrpc.FileShareCapability
|
||||
}
|
||||
|
316
pkg/remote/fileshare/fsutil/fsutil.go
Normal file
316
pkg/remote/fileshare/fsutil/fsutil.go
Normal file
@ -0,0 +1,316 @@
|
||||
package fsutil
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"log"
|
||||
"strings"
|
||||
|
||||
"github.com/wavetermdev/waveterm/pkg/remote/connparse"
|
||||
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/fspath"
|
||||
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/fstype"
|
||||
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/pathtree"
|
||||
"github.com/wavetermdev/waveterm/pkg/util/tarcopy"
|
||||
"github.com/wavetermdev/waveterm/pkg/util/utilfn"
|
||||
"github.com/wavetermdev/waveterm/pkg/wshrpc"
|
||||
)
|
||||
|
||||
func GetParentPath(conn *connparse.Connection) string {
|
||||
hostAndPath := conn.GetPathWithHost()
|
||||
return GetParentPathString(hostAndPath)
|
||||
}
|
||||
|
||||
func GetParentPathString(hostAndPath string) string {
|
||||
if hostAndPath == "" || hostAndPath == fspath.Separator {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Remove trailing slash if present
|
||||
if strings.HasSuffix(hostAndPath, fspath.Separator) {
|
||||
hostAndPath = hostAndPath[:len(hostAndPath)-1]
|
||||
}
|
||||
|
||||
lastSlash := strings.LastIndex(hostAndPath, fspath.Separator)
|
||||
if lastSlash <= 0 {
|
||||
return ""
|
||||
}
|
||||
return hostAndPath[:lastSlash+1]
|
||||
}
|
||||
|
||||
func PrefixCopyInternal(ctx context.Context, srcConn, destConn *connparse.Connection, c fstype.FileShareClient, opts *wshrpc.FileCopyOpts, listEntriesPrefix func(ctx context.Context, host string, path string) ([]string, error), copyFunc func(ctx context.Context, host string, path string) error) (bool, error) {
|
||||
log.Printf("PrefixCopyInternal: %v -> %v", srcConn.GetFullURI(), destConn.GetFullURI())
|
||||
srcHasSlash := strings.HasSuffix(srcConn.Path, fspath.Separator)
|
||||
srcPath, destPath, srcInfo, err := DetermineCopyDestPath(ctx, srcConn, destConn, c, c, opts)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
recursive := opts != nil && opts.Recursive
|
||||
if srcInfo.IsDir {
|
||||
if !recursive {
|
||||
return false, fmt.Errorf(fstype.RecursiveRequiredError)
|
||||
}
|
||||
if !srcHasSlash {
|
||||
srcPath += fspath.Separator
|
||||
}
|
||||
destPath += fspath.Separator
|
||||
log.Printf("Copying directory: %v -> %v", srcPath, destPath)
|
||||
entries, err := listEntriesPrefix(ctx, srcConn.Host, srcPath)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("error listing source directory: %w", err)
|
||||
}
|
||||
|
||||
tree := pathtree.NewTree(srcPath, fspath.Separator)
|
||||
for _, entry := range entries {
|
||||
tree.Add(entry)
|
||||
}
|
||||
|
||||
/* tree.Walk will return false, the full path in the source bucket for each item.
|
||||
prefixToRemove specifies how much of that path we want in the destination subtree.
|
||||
If the source path has a trailing slash, we don't want to include the source directory itself in the destination subtree.*/
|
||||
prefixToRemove := srcPath
|
||||
if !srcHasSlash {
|
||||
prefixToRemove = fspath.Dir(srcPath) + fspath.Separator
|
||||
}
|
||||
return true, tree.Walk(func(path string, numChildren int) error {
|
||||
// since this is a prefix filesystem, we only care about leafs
|
||||
if numChildren > 0 {
|
||||
return nil
|
||||
}
|
||||
destFilePath := destPath + strings.TrimPrefix(path, prefixToRemove)
|
||||
return copyFunc(ctx, path, destFilePath)
|
||||
})
|
||||
} else {
|
||||
return false, copyFunc(ctx, srcPath, destPath)
|
||||
}
|
||||
}
|
||||
|
||||
func PrefixCopyRemote(ctx context.Context, srcConn, destConn *connparse.Connection, srcClient, destClient fstype.FileShareClient, destPutFile func(host string, path string, size int64, reader io.Reader) error, opts *wshrpc.FileCopyOpts) (bool, error) {
|
||||
// prefix to be used if the destination is a directory. The destPath returned in the following call only applies if the destination is not a directory.
|
||||
destPathPrefix, err := CleanPathPrefix(destConn.Path)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("error cleaning destination path: %w", err)
|
||||
}
|
||||
destPathPrefix += fspath.Separator
|
||||
|
||||
_, destPath, srcInfo, err := DetermineCopyDestPath(ctx, srcConn, destConn, srcClient, destClient, opts)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
log.Printf("Copying: %v -> %v", srcConn.GetFullURI(), destConn.GetFullURI())
|
||||
readCtx, cancel := context.WithCancelCause(ctx)
|
||||
defer cancel(nil)
|
||||
ioch := srcClient.ReadTarStream(readCtx, srcConn, opts)
|
||||
err = tarcopy.TarCopyDest(readCtx, cancel, ioch, func(next *tar.Header, reader *tar.Reader, singleFile bool) error {
|
||||
if next.Typeflag == tar.TypeDir {
|
||||
return nil
|
||||
}
|
||||
if singleFile && srcInfo.IsDir {
|
||||
return fmt.Errorf("protocol error: source is a directory, but only a single file is being copied")
|
||||
}
|
||||
fileName, err := CleanPathPrefix(fspath.Join(destPathPrefix, next.Name))
|
||||
if singleFile {
|
||||
fileName = destPath
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("error cleaning path: %w", err)
|
||||
}
|
||||
log.Printf("CopyRemote: writing file: %s; size: %d\n", fileName, next.Size)
|
||||
return destPutFile(destConn.Host, fileName, next.Size, reader)
|
||||
})
|
||||
if err != nil {
|
||||
cancel(err)
|
||||
return false, err
|
||||
}
|
||||
return srcInfo.IsDir, nil
|
||||
}
|
||||
|
||||
func DetermineCopyDestPath(ctx context.Context, srcConn, destConn *connparse.Connection, srcClient, destClient fstype.FileShareClient, opts *wshrpc.FileCopyOpts) (srcPath, destPath string, srcInfo *wshrpc.FileInfo, err error) {
|
||||
merge := opts != nil && opts.Merge
|
||||
overwrite := opts != nil && opts.Overwrite
|
||||
recursive := opts != nil && opts.Recursive
|
||||
if overwrite && merge {
|
||||
return "", "", nil, fmt.Errorf("cannot specify both overwrite and merge")
|
||||
}
|
||||
|
||||
srcHasSlash := strings.HasSuffix(srcConn.Path, fspath.Separator)
|
||||
srcPath = srcConn.Path
|
||||
destHasSlash := strings.HasSuffix(destConn.Path, fspath.Separator)
|
||||
destPath, err = CleanPathPrefix(destConn.Path)
|
||||
if err != nil {
|
||||
return "", "", nil, fmt.Errorf("error cleaning destination path: %w", err)
|
||||
}
|
||||
|
||||
srcInfo, err = srcClient.Stat(ctx, srcConn)
|
||||
if err != nil {
|
||||
return "", "", nil, fmt.Errorf("error getting source file info: %w", err)
|
||||
} else if srcInfo.NotFound {
|
||||
return "", "", nil, fmt.Errorf("source file not found: %w", err)
|
||||
}
|
||||
destInfo, err := destClient.Stat(ctx, destConn)
|
||||
destExists := err == nil && !destInfo.NotFound
|
||||
if err != nil && !errors.Is(err, fs.ErrNotExist) {
|
||||
return "", "", nil, fmt.Errorf("error getting destination file info: %w", err)
|
||||
}
|
||||
originalDestPath := destPath
|
||||
if !srcHasSlash {
|
||||
if (destExists && destInfo.IsDir) || (!destExists && !destHasSlash && srcInfo.IsDir) {
|
||||
destPath = fspath.Join(destPath, fspath.Base(srcConn.Path))
|
||||
}
|
||||
}
|
||||
destConn.Path = destPath
|
||||
if originalDestPath != destPath {
|
||||
destInfo, err = destClient.Stat(ctx, destConn)
|
||||
destExists = err == nil && !destInfo.NotFound
|
||||
if err != nil && !errors.Is(err, fs.ErrNotExist) {
|
||||
return "", "", nil, fmt.Errorf("error getting destination file info: %w", err)
|
||||
}
|
||||
}
|
||||
if destExists {
|
||||
if overwrite {
|
||||
log.Printf("Deleting existing file: %s\n", destConn.GetFullURI())
|
||||
err = destClient.Delete(ctx, destConn, destInfo.IsDir && recursive)
|
||||
if err != nil {
|
||||
return "", "", nil, fmt.Errorf("error deleting conflicting destination file: %w", err)
|
||||
}
|
||||
} else if destInfo.IsDir && srcInfo.IsDir {
|
||||
if !merge {
|
||||
return "", "", nil, fmt.Errorf(fstype.MergeRequiredError, destConn.GetFullURI())
|
||||
}
|
||||
} else {
|
||||
return "", "", nil, fmt.Errorf(fstype.OverwriteRequiredError, destConn.GetFullURI())
|
||||
}
|
||||
}
|
||||
return srcPath, destPath, srcInfo, nil
|
||||
}
|
||||
|
||||
// CleanPathPrefix corrects paths for prefix filesystems (i.e. ones that don't have directories)
|
||||
func CleanPathPrefix(path string) (string, error) {
|
||||
if path == "" {
|
||||
return "", nil
|
||||
}
|
||||
if strings.HasPrefix(path, fspath.Separator) {
|
||||
path = path[1:]
|
||||
}
|
||||
if strings.HasPrefix(path, "~") || strings.HasPrefix(path, ".") || strings.HasPrefix(path, "..") {
|
||||
return "", fmt.Errorf("path cannot start with ~, ., or ..")
|
||||
}
|
||||
var newParts []string
|
||||
for _, part := range strings.Split(path, fspath.Separator) {
|
||||
if part == ".." {
|
||||
if len(newParts) > 0 {
|
||||
newParts = newParts[:len(newParts)-1]
|
||||
}
|
||||
} else if part != "." {
|
||||
newParts = append(newParts, part)
|
||||
}
|
||||
}
|
||||
return fspath.Join(newParts...), nil
|
||||
}
|
||||
|
||||
func ReadFileStream(ctx context.Context, readCh <-chan wshrpc.RespOrErrorUnion[wshrpc.FileData], fileInfoCallback func(finfo wshrpc.FileInfo), dirCallback func(entries []*wshrpc.FileInfo) error, fileCallback func(data io.Reader) error) error {
|
||||
var fileData *wshrpc.FileData
|
||||
firstPk := true
|
||||
isDir := false
|
||||
drain := true
|
||||
defer func() {
|
||||
if drain {
|
||||
utilfn.DrainChannelSafe(readCh, "ReadFileStream")
|
||||
}
|
||||
}()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return fmt.Errorf("context cancelled: %v", context.Cause(ctx))
|
||||
case respUnion, ok := <-readCh:
|
||||
if !ok {
|
||||
drain = false
|
||||
return nil
|
||||
}
|
||||
if respUnion.Error != nil {
|
||||
return respUnion.Error
|
||||
}
|
||||
resp := respUnion.Response
|
||||
if firstPk {
|
||||
firstPk = false
|
||||
// first packet has the fileinfo
|
||||
if resp.Info == nil {
|
||||
return fmt.Errorf("stream file protocol error, first pk fileinfo is empty")
|
||||
}
|
||||
fileData = &resp
|
||||
if fileData.Info.IsDir {
|
||||
isDir = true
|
||||
}
|
||||
fileInfoCallback(*fileData.Info)
|
||||
continue
|
||||
}
|
||||
if isDir {
|
||||
if len(resp.Entries) == 0 {
|
||||
continue
|
||||
}
|
||||
if resp.Data64 != "" {
|
||||
return fmt.Errorf("stream file protocol error, directory entry has data")
|
||||
}
|
||||
if err := dirCallback(resp.Entries); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if resp.Data64 == "" {
|
||||
continue
|
||||
}
|
||||
decoder := base64.NewDecoder(base64.StdEncoding, bytes.NewReader([]byte(resp.Data64)))
|
||||
if err := fileCallback(decoder); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func ReadStreamToFileData(ctx context.Context, readCh <-chan wshrpc.RespOrErrorUnion[wshrpc.FileData]) (*wshrpc.FileData, error) {
|
||||
var fileData *wshrpc.FileData
|
||||
var dataBuf bytes.Buffer
|
||||
var entries []*wshrpc.FileInfo
|
||||
err := ReadFileStream(ctx, readCh, func(finfo wshrpc.FileInfo) {
|
||||
fileData = &wshrpc.FileData{
|
||||
Info: &finfo,
|
||||
}
|
||||
}, func(fileEntries []*wshrpc.FileInfo) error {
|
||||
entries = append(entries, fileEntries...)
|
||||
return nil
|
||||
}, func(data io.Reader) error {
|
||||
if _, err := io.Copy(&dataBuf, data); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if fileData == nil {
|
||||
return nil, fmt.Errorf("stream file protocol error, no file info")
|
||||
}
|
||||
if !fileData.Info.IsDir {
|
||||
fileData.Data64 = base64.StdEncoding.EncodeToString(dataBuf.Bytes())
|
||||
} else {
|
||||
fileData.Entries = entries
|
||||
}
|
||||
return fileData, nil
|
||||
}
|
||||
|
||||
func ReadFileStreamToWriter(ctx context.Context, readCh <-chan wshrpc.RespOrErrorUnion[wshrpc.FileData], writer io.Writer) error {
|
||||
return ReadFileStream(ctx, readCh, func(finfo wshrpc.FileInfo) {
|
||||
}, func(entries []*wshrpc.FileInfo) error {
|
||||
return nil
|
||||
}, func(data io.Reader) error {
|
||||
_, err := io.Copy(writer, data)
|
||||
return err
|
||||
})
|
||||
}
|
127
pkg/remote/fileshare/pathtree/pathtree.go
Normal file
127
pkg/remote/fileshare/pathtree/pathtree.go
Normal file
@ -0,0 +1,127 @@
|
||||
package pathtree
|
||||
|
||||
import (
|
||||
"log"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type WalkFunc func(path string, numChildren int) error
|
||||
|
||||
type Tree struct {
|
||||
Root *Node
|
||||
RootPath string
|
||||
nodes map[string]*Node
|
||||
delimiter string
|
||||
}
|
||||
|
||||
type Node struct {
|
||||
Children map[string]*Node
|
||||
}
|
||||
|
||||
func (n *Node) Walk(curPath string, walkFunc WalkFunc, delimiter string) error {
|
||||
if err := walkFunc(curPath, len(n.Children)); err != nil {
|
||||
return err
|
||||
}
|
||||
for name, child := range n.Children {
|
||||
if err := child.Walk(curPath+delimiter+name, walkFunc, delimiter); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func NewTree(path string, delimiter string) *Tree {
|
||||
if len(delimiter) > 1 {
|
||||
log.Printf("pathtree.NewTree: Warning: multi-character delimiter '%s' may cause unexpected behavior", delimiter)
|
||||
}
|
||||
if path != "" && !strings.HasSuffix(path, delimiter) {
|
||||
path += delimiter
|
||||
}
|
||||
return &Tree{
|
||||
Root: &Node{
|
||||
Children: make(map[string]*Node),
|
||||
},
|
||||
nodes: make(map[string]*Node),
|
||||
RootPath: path,
|
||||
delimiter: delimiter,
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Tree) Add(path string) {
|
||||
// Validate input
|
||||
if path == "" {
|
||||
return
|
||||
}
|
||||
var relativePath string
|
||||
if t.RootPath == "" {
|
||||
relativePath = path
|
||||
} else {
|
||||
relativePath = strings.TrimPrefix(path, t.RootPath)
|
||||
|
||||
// If the path is not a child of the root path, ignore it
|
||||
if relativePath == path {
|
||||
return
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// If the path is already in the tree, ignore it
|
||||
if t.nodes[relativePath] != nil {
|
||||
return
|
||||
}
|
||||
|
||||
components := strings.Split(relativePath, t.delimiter)
|
||||
// Validate path components
|
||||
for _, component := range components {
|
||||
if component == "" || component == "." || component == ".." {
|
||||
log.Printf("pathtree.Add: invalid path component: %s", component)
|
||||
return // Skip invalid paths
|
||||
}
|
||||
}
|
||||
|
||||
// Quick check to see if the parent path is already in the tree, in which case we can skip the loop
|
||||
if parent := t.tryAddToExistingParent(components); parent {
|
||||
return
|
||||
}
|
||||
|
||||
t.addNewPath(components)
|
||||
}
|
||||
|
||||
func (t *Tree) tryAddToExistingParent(components []string) bool {
|
||||
if len(components) <= 1 {
|
||||
return false
|
||||
}
|
||||
parentPath := strings.Join(components[:len(components)-1], t.delimiter)
|
||||
if t.nodes[parentPath] == nil {
|
||||
return false
|
||||
}
|
||||
lastPathComponent := components[len(components)-1]
|
||||
t.nodes[parentPath].Children[lastPathComponent] = &Node{
|
||||
Children: make(map[string]*Node),
|
||||
}
|
||||
t.nodes[strings.Join(components, t.delimiter)] = t.nodes[parentPath].Children[lastPathComponent]
|
||||
return true
|
||||
}
|
||||
|
||||
func (t *Tree) addNewPath(components []string) {
|
||||
currentNode := t.Root
|
||||
for i, component := range components {
|
||||
if _, ok := currentNode.Children[component]; !ok {
|
||||
currentNode.Children[component] = &Node{
|
||||
Children: make(map[string]*Node),
|
||||
}
|
||||
curPath := strings.Join(components[:i+1], t.delimiter)
|
||||
t.nodes[curPath] = currentNode.Children[component]
|
||||
}
|
||||
currentNode = currentNode.Children[component]
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Tree) Walk(walkFunc WalkFunc) error {
|
||||
for key, child := range t.Root.Children {
|
||||
if err := child.Walk(t.RootPath+key, walkFunc, t.delimiter); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
112
pkg/remote/fileshare/pathtree/pathtree_test.go
Normal file
112
pkg/remote/fileshare/pathtree/pathtree_test.go
Normal file
@ -0,0 +1,112 @@
|
||||
package pathtree_test
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"log"
|
||||
"testing"
|
||||
|
||||
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/pathtree"
|
||||
)
|
||||
|
||||
func TestAdd(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tree := initializeTree()
|
||||
|
||||
// Check that the tree has the expected structure
|
||||
if len(tree.Root.Children) != 3 {
|
||||
t.Errorf("expected 3 children, got %d", len(tree.Root.Children))
|
||||
}
|
||||
|
||||
if len(tree.Root.Children["a"].Children) != 3 {
|
||||
t.Errorf("expected 3 children, got %d", len(tree.Root.Children["a"].Children))
|
||||
}
|
||||
|
||||
if len(tree.Root.Children["b"].Children) != 1 {
|
||||
t.Errorf("expected 1 child, got %d", len(tree.Root.Children["b"].Children))
|
||||
}
|
||||
|
||||
if len(tree.Root.Children["b"].Children["g"].Children) != 1 {
|
||||
t.Errorf("expected 1 child, got %d", len(tree.Root.Children["b"].Children["g"].Children))
|
||||
}
|
||||
|
||||
if len(tree.Root.Children["b"].Children["g"].Children["h"].Children) != 0 {
|
||||
t.Errorf("expected 0 children, got %d", len(tree.Root.Children["b"].Children["g"].Children["h"].Children))
|
||||
}
|
||||
|
||||
if len(tree.Root.Children["c"].Children) != 0 {
|
||||
t.Errorf("expected 0 children, got %d", len(tree.Root.Children["c"].Children))
|
||||
}
|
||||
|
||||
// Check that adding the same path again does not change the tree
|
||||
tree.Add("root/a/d")
|
||||
if len(tree.Root.Children["a"].Children) != 3 {
|
||||
t.Errorf("expected 3 children, got %d", len(tree.Root.Children["a"].Children))
|
||||
}
|
||||
|
||||
// Check that adding a path that is not a child of the root path does not change the tree
|
||||
tree.Add("etc/passwd")
|
||||
if len(tree.Root.Children) != 3 {
|
||||
t.Errorf("expected 3 children, got %d", len(tree.Root.Children))
|
||||
}
|
||||
}
|
||||
|
||||
func TestWalk(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tree := initializeTree()
|
||||
|
||||
// Check that the tree traverses all nodes and identifies leaf nodes correctly
|
||||
pathMap := make(map[string]int)
|
||||
err := tree.Walk(func(path string, numChildren int) error {
|
||||
pathMap[path] = numChildren
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
expectedPathMap := map[string]int{
|
||||
"root/a": 3,
|
||||
"root/a/d": 0,
|
||||
"root/a/e": 0,
|
||||
"root/a/f": 0,
|
||||
"root/b": 1,
|
||||
"root/b/g": 1,
|
||||
"root/b/g/h": 0,
|
||||
"root/c": 0,
|
||||
}
|
||||
|
||||
log.Printf("pathMap: %v", pathMap)
|
||||
|
||||
for path, numChildren := range expectedPathMap {
|
||||
if pathMap[path] != numChildren {
|
||||
t.Errorf("expected %d children for path %s, got %d", numChildren, path, pathMap[path])
|
||||
}
|
||||
}
|
||||
|
||||
expectedError := errors.New("test error")
|
||||
|
||||
// Check that the walk function returns an error if it is returned by the walk function
|
||||
err = tree.Walk(func(path string, numChildren int) error {
|
||||
return expectedError
|
||||
})
|
||||
if err != expectedError {
|
||||
t.Errorf("expected error %v, got %v", expectedError, err)
|
||||
}
|
||||
}
|
||||
|
||||
func initializeTree() *pathtree.Tree {
|
||||
tree := pathtree.NewTree("root/", "/")
|
||||
tree.Add("root/a")
|
||||
tree.Add("root/b")
|
||||
tree.Add("root/c")
|
||||
tree.Add("root/a/d")
|
||||
tree.Add("root/a/e")
|
||||
tree.Add("root/a/f")
|
||||
tree.Add("root/b/g")
|
||||
tree.Add("root/b/g/h")
|
||||
log.Printf("tree: %v", tree)
|
||||
return tree
|
||||
}
|
@ -4,16 +4,31 @@
|
||||
package s3fs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/aws"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3/types"
|
||||
"github.com/aws/smithy-go"
|
||||
"github.com/wavetermdev/waveterm/pkg/remote/awsconn"
|
||||
"github.com/wavetermdev/waveterm/pkg/remote/connparse"
|
||||
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/fspath"
|
||||
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/fstype"
|
||||
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/fsutil"
|
||||
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/pathtree"
|
||||
"github.com/wavetermdev/waveterm/pkg/util/fileutil"
|
||||
"github.com/wavetermdev/waveterm/pkg/util/iochan/iochantypes"
|
||||
"github.com/wavetermdev/waveterm/pkg/util/tarcopy"
|
||||
"github.com/wavetermdev/waveterm/pkg/util/utilfn"
|
||||
"github.com/wavetermdev/waveterm/pkg/wshrpc"
|
||||
"github.com/wavetermdev/waveterm/pkg/wshutil"
|
||||
)
|
||||
@ -31,94 +46,767 @@ func NewS3Client(config *aws.Config) *S3Client {
|
||||
}
|
||||
|
||||
func (c S3Client) Read(ctx context.Context, conn *connparse.Connection, data wshrpc.FileData) (*wshrpc.FileData, error) {
|
||||
return nil, errors.ErrUnsupported
|
||||
rtnCh := c.ReadStream(ctx, conn, data)
|
||||
return fsutil.ReadStreamToFileData(ctx, rtnCh)
|
||||
}
|
||||
|
||||
func (c S3Client) ReadStream(ctx context.Context, conn *connparse.Connection, data wshrpc.FileData) <-chan wshrpc.RespOrErrorUnion[wshrpc.FileData] {
|
||||
return wshutil.SendErrCh[wshrpc.FileData](errors.ErrUnsupported)
|
||||
bucket := conn.Host
|
||||
objectKey := conn.Path
|
||||
rtn := make(chan wshrpc.RespOrErrorUnion[wshrpc.FileData], 16)
|
||||
go func() {
|
||||
defer close(rtn)
|
||||
finfo, err := c.Stat(ctx, conn)
|
||||
if err != nil {
|
||||
rtn <- wshutil.RespErr[wshrpc.FileData](err)
|
||||
return
|
||||
}
|
||||
rtn <- wshrpc.RespOrErrorUnion[wshrpc.FileData]{Response: wshrpc.FileData{Info: finfo}}
|
||||
if finfo.NotFound {
|
||||
rtn <- wshrpc.RespOrErrorUnion[wshrpc.FileData]{Response: wshrpc.FileData{Entries: []*wshrpc.FileInfo{
|
||||
{
|
||||
Path: finfo.Dir,
|
||||
Dir: fspath.Dir(finfo.Dir),
|
||||
Name: "..",
|
||||
IsDir: true,
|
||||
Size: 0,
|
||||
ModTime: time.Now().Unix(),
|
||||
MimeType: "directory",
|
||||
},
|
||||
}}}
|
||||
return
|
||||
}
|
||||
if finfo.IsDir {
|
||||
listEntriesCh := c.ListEntriesStream(ctx, conn, nil)
|
||||
defer func() {
|
||||
utilfn.DrainChannelSafe(listEntriesCh, "s3fs.ReadStream")
|
||||
}()
|
||||
for respUnion := range listEntriesCh {
|
||||
if respUnion.Error != nil {
|
||||
rtn <- wshutil.RespErr[wshrpc.FileData](respUnion.Error)
|
||||
return
|
||||
}
|
||||
resp := respUnion.Response
|
||||
if len(resp.FileInfo) > 0 {
|
||||
rtn <- wshrpc.RespOrErrorUnion[wshrpc.FileData]{Response: wshrpc.FileData{Entries: resp.FileInfo}}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
var result *s3.GetObjectOutput
|
||||
var err error
|
||||
if data.At != nil {
|
||||
log.Printf("reading %v with offset %d and size %d", conn.GetFullURI(), data.At.Offset, data.At.Size)
|
||||
result, err = c.client.GetObject(ctx, &s3.GetObjectInput{
|
||||
Bucket: aws.String(bucket),
|
||||
Key: aws.String(objectKey),
|
||||
Range: aws.String(fmt.Sprintf("bytes=%d-%d", data.At.Offset, data.At.Offset+int64(data.At.Size)-1)),
|
||||
})
|
||||
} else {
|
||||
result, err = c.client.GetObject(ctx, &s3.GetObjectInput{
|
||||
Bucket: aws.String(bucket),
|
||||
Key: aws.String(objectKey),
|
||||
})
|
||||
}
|
||||
if err != nil {
|
||||
log.Printf("error getting object %v:%v: %v", bucket, objectKey, err)
|
||||
var noKey *types.NoSuchKey
|
||||
if errors.As(err, &noKey) {
|
||||
err = noKey
|
||||
}
|
||||
rtn <- wshutil.RespErr[wshrpc.FileData](err)
|
||||
return
|
||||
}
|
||||
size := int64(0)
|
||||
if result.ContentLength != nil {
|
||||
size = *result.ContentLength
|
||||
}
|
||||
finfo := &wshrpc.FileInfo{
|
||||
Name: objectKey,
|
||||
IsDir: false,
|
||||
Size: size,
|
||||
ModTime: result.LastModified.UnixMilli(),
|
||||
Path: conn.GetFullURI(),
|
||||
Dir: fsutil.GetParentPath(conn),
|
||||
}
|
||||
fileutil.AddMimeTypeToFileInfo(finfo.Path, finfo)
|
||||
rtn <- wshrpc.RespOrErrorUnion[wshrpc.FileData]{Response: wshrpc.FileData{Info: finfo}}
|
||||
if size == 0 {
|
||||
log.Printf("no data to read")
|
||||
return
|
||||
}
|
||||
defer utilfn.GracefulClose(result.Body, "s3fs", conn.GetFullURI())
|
||||
bytesRemaining := size
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
rtn <- wshutil.RespErr[wshrpc.FileData](context.Cause(ctx))
|
||||
return
|
||||
default:
|
||||
buf := make([]byte, min(bytesRemaining, wshrpc.FileChunkSize))
|
||||
n, err := result.Body.Read(buf)
|
||||
if err != nil && !errors.Is(err, io.EOF) {
|
||||
rtn <- wshutil.RespErr[wshrpc.FileData](err)
|
||||
return
|
||||
}
|
||||
if n == 0 {
|
||||
break
|
||||
}
|
||||
bytesRemaining -= int64(n)
|
||||
rtn <- wshrpc.RespOrErrorUnion[wshrpc.FileData]{Response: wshrpc.FileData{Data64: base64.StdEncoding.EncodeToString(buf[:n])}}
|
||||
if bytesRemaining == 0 || errors.Is(err, io.EOF) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
return rtn
|
||||
}
|
||||
|
||||
func (c S3Client) ReadTarStream(ctx context.Context, conn *connparse.Connection, opts *wshrpc.FileCopyOpts) <-chan wshrpc.RespOrErrorUnion[iochantypes.Packet] {
|
||||
return wshutil.SendErrCh[iochantypes.Packet](errors.ErrUnsupported)
|
||||
}
|
||||
recursive := opts != nil && opts.Recursive
|
||||
bucket := conn.Host
|
||||
if bucket == "" || bucket == "/" {
|
||||
return wshutil.SendErrCh[iochantypes.Packet](fmt.Errorf("bucket must be specified"))
|
||||
}
|
||||
|
||||
func (c S3Client) ListEntriesStream(ctx context.Context, conn *connparse.Connection, opts *wshrpc.FileListOpts) <-chan wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData] {
|
||||
ch := make(chan wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData], 16)
|
||||
go func() {
|
||||
defer close(ch)
|
||||
list, err := c.ListEntries(ctx, conn, opts)
|
||||
if err != nil {
|
||||
ch <- wshutil.RespErr[wshrpc.CommandRemoteListEntriesRtnData](err)
|
||||
return
|
||||
}
|
||||
if list == nil {
|
||||
ch <- wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData]{Response: wshrpc.CommandRemoteListEntriesRtnData{}}
|
||||
return
|
||||
}
|
||||
for i := 0; i < len(list); i += wshrpc.DirChunkSize {
|
||||
ch <- wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData]{Response: wshrpc.CommandRemoteListEntriesRtnData{FileInfo: list[i:min(i+wshrpc.DirChunkSize, len(list))]}}
|
||||
// whether the operation is on the whole bucket
|
||||
wholeBucket := conn.Path == "" || conn.Path == fspath.Separator
|
||||
|
||||
// get the object if it's a single file operation
|
||||
var singleFileResult *s3.GetObjectOutput
|
||||
// this ensures we don't leak the object if we error out before copying it
|
||||
closeSingleFileResult := true
|
||||
defer func() {
|
||||
// in case we error out before the object gets copied, make sure to close it
|
||||
if singleFileResult != nil && closeSingleFileResult {
|
||||
utilfn.GracefulClose(singleFileResult.Body, "s3fs", conn.Path)
|
||||
}
|
||||
}()
|
||||
return ch
|
||||
var err error
|
||||
if !wholeBucket {
|
||||
singleFileResult, err = c.client.GetObject(ctx, &s3.GetObjectInput{
|
||||
Bucket: aws.String(bucket),
|
||||
Key: aws.String(conn.Path), // does not care if the path has a prefixed slash
|
||||
})
|
||||
if err != nil {
|
||||
// if the object doesn't exist, we can assume the prefix is a directory and continue
|
||||
var noKey *types.NoSuchKey
|
||||
var notFound *types.NotFound
|
||||
if !errors.As(err, &noKey) && !errors.As(err, ¬Found) {
|
||||
return wshutil.SendErrCh[iochantypes.Packet](err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// whether the operation is on a single file
|
||||
singleFile := singleFileResult != nil
|
||||
|
||||
if !singleFile && !recursive {
|
||||
return wshutil.SendErrCh[iochantypes.Packet](fmt.Errorf(fstype.RecursiveRequiredError))
|
||||
}
|
||||
|
||||
// whether to include the directory itself in the tar
|
||||
includeDir := (wholeBucket && conn.Path == "") || (singleFileResult == nil && conn.Path != "" && !strings.HasSuffix(conn.Path, fspath.Separator))
|
||||
|
||||
timeout := fstype.DefaultTimeout
|
||||
if opts.Timeout > 0 {
|
||||
timeout = time.Duration(opts.Timeout) * time.Millisecond
|
||||
}
|
||||
readerCtx, cancel := context.WithTimeout(context.Background(), timeout)
|
||||
|
||||
// the prefix that should be removed from the tar paths
|
||||
tarPathPrefix := conn.Path
|
||||
|
||||
if wholeBucket {
|
||||
// we treat the bucket name as the root directory. If we're not including the directory itself, we need to remove the bucket name from the tar paths
|
||||
if includeDir {
|
||||
tarPathPrefix = ""
|
||||
} else {
|
||||
tarPathPrefix = bucket
|
||||
}
|
||||
} else if singleFile || includeDir {
|
||||
// if we're including the directory itself, we need to remove the last part of the path
|
||||
tarPathPrefix = fsutil.GetParentPathString(tarPathPrefix)
|
||||
}
|
||||
|
||||
rtn, writeHeader, fileWriter, tarClose := tarcopy.TarCopySrc(readerCtx, tarPathPrefix)
|
||||
go func() {
|
||||
defer func() {
|
||||
tarClose()
|
||||
cancel()
|
||||
}()
|
||||
|
||||
// below we get the objects concurrently so we need to store the results in a map
|
||||
objMap := make(map[string]*s3.GetObjectOutput)
|
||||
// close the objects when we're done
|
||||
defer func() {
|
||||
for key, obj := range objMap {
|
||||
utilfn.GracefulClose(obj.Body, "s3fs", key)
|
||||
}
|
||||
}()
|
||||
|
||||
// tree to keep track of the paths we've added and insert fake directories for subpaths
|
||||
tree := pathtree.NewTree(tarPathPrefix, "/")
|
||||
|
||||
if singleFile {
|
||||
objMap[conn.Path] = singleFileResult
|
||||
tree.Add(conn.Path)
|
||||
} else {
|
||||
// list the objects in the bucket and add them to a tree that we can then walk to write the tar entries
|
||||
var input *s3.ListObjectsV2Input
|
||||
if wholeBucket {
|
||||
// get all the objects in the bucket
|
||||
input = &s3.ListObjectsV2Input{
|
||||
Bucket: aws.String(bucket),
|
||||
}
|
||||
} else {
|
||||
objectPrefix := conn.Path
|
||||
if !strings.HasSuffix(objectPrefix, fspath.Separator) {
|
||||
objectPrefix = objectPrefix + fspath.Separator
|
||||
}
|
||||
input = &s3.ListObjectsV2Input{
|
||||
Bucket: aws.String(bucket),
|
||||
Prefix: aws.String(objectPrefix),
|
||||
}
|
||||
}
|
||||
|
||||
errs := make([]error, 0)
|
||||
// mutex to protect the tree and objMap since we're fetching objects concurrently
|
||||
treeMapMutex := sync.Mutex{}
|
||||
// wait group to await the finished fetches
|
||||
wg := sync.WaitGroup{}
|
||||
getObjectAndFileInfo := func(obj *types.Object) {
|
||||
defer wg.Done()
|
||||
result, err := c.client.GetObject(ctx, &s3.GetObjectInput{
|
||||
Bucket: aws.String(bucket),
|
||||
Key: obj.Key,
|
||||
})
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
return
|
||||
}
|
||||
path := *obj.Key
|
||||
if wholeBucket {
|
||||
path = fspath.Join(bucket, path)
|
||||
}
|
||||
treeMapMutex.Lock()
|
||||
defer treeMapMutex.Unlock()
|
||||
objMap[path] = result
|
||||
tree.Add(path)
|
||||
}
|
||||
|
||||
if err := c.listFilesPrefix(ctx, input, func(obj *types.Object) (bool, error) {
|
||||
wg.Add(1)
|
||||
go getObjectAndFileInfo(obj)
|
||||
return true, nil
|
||||
}); err != nil {
|
||||
rtn <- wshutil.RespErr[iochantypes.Packet](err)
|
||||
return
|
||||
}
|
||||
wg.Wait()
|
||||
if len(errs) > 0 {
|
||||
rtn <- wshutil.RespErr[iochantypes.Packet](errors.Join(errs...))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Walk the tree and write the tar entries
|
||||
if err := tree.Walk(func(path string, numChildren int) error {
|
||||
mapEntry, isFile := objMap[path]
|
||||
|
||||
// default vals assume entry is dir, since mapEntry might not exist
|
||||
modTime := int64(time.Now().Unix())
|
||||
mode := fstype.DirMode
|
||||
size := int64(numChildren)
|
||||
|
||||
if isFile {
|
||||
mode = fstype.FileMode
|
||||
size = *mapEntry.ContentLength
|
||||
if mapEntry.LastModified != nil {
|
||||
modTime = mapEntry.LastModified.UnixMilli()
|
||||
}
|
||||
}
|
||||
|
||||
finfo := &wshrpc.FileInfo{
|
||||
Name: path,
|
||||
IsDir: !isFile,
|
||||
Size: size,
|
||||
ModTime: modTime,
|
||||
Mode: mode,
|
||||
}
|
||||
if err := writeHeader(fileutil.ToFsFileInfo(finfo), path, singleFile); err != nil {
|
||||
return err
|
||||
}
|
||||
if isFile {
|
||||
if n, err := io.Copy(fileWriter, mapEntry.Body); err != nil {
|
||||
return err
|
||||
} else if n != size {
|
||||
return fmt.Errorf("error copying %v; expected to read %d bytes, but read %d", path, size, n)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
log.Printf("error walking tree: %v", err)
|
||||
rtn <- wshutil.RespErr[iochantypes.Packet](err)
|
||||
return
|
||||
}
|
||||
}()
|
||||
// we've handed singleFileResult off to the tar writer, so we don't want to close it
|
||||
closeSingleFileResult = false
|
||||
return rtn
|
||||
}
|
||||
|
||||
func (c S3Client) ListEntries(ctx context.Context, conn *connparse.Connection, opts *wshrpc.FileListOpts) ([]*wshrpc.FileInfo, error) {
|
||||
if conn.Path == "" || conn.Path == "/" {
|
||||
var entries []*wshrpc.FileInfo
|
||||
rtnCh := c.ListEntriesStream(ctx, conn, opts)
|
||||
for respUnion := range rtnCh {
|
||||
if respUnion.Error != nil {
|
||||
return nil, respUnion.Error
|
||||
}
|
||||
resp := respUnion.Response
|
||||
entries = append(entries, resp.FileInfo...)
|
||||
}
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
func (c S3Client) ListEntriesStream(ctx context.Context, conn *connparse.Connection, opts *wshrpc.FileListOpts) <-chan wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData] {
|
||||
bucket := conn.Host
|
||||
objectKeyPrefix := conn.Path
|
||||
if objectKeyPrefix != "" && !strings.HasSuffix(objectKeyPrefix, fspath.Separator) {
|
||||
objectKeyPrefix = objectKeyPrefix + "/"
|
||||
}
|
||||
numToFetch := wshrpc.MaxDirSize
|
||||
if opts != nil && opts.Limit > 0 {
|
||||
numToFetch = min(opts.Limit, wshrpc.MaxDirSize)
|
||||
}
|
||||
numFetched := 0
|
||||
if bucket == "" || bucket == fspath.Separator {
|
||||
buckets, err := awsconn.ListBuckets(ctx, c.client)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return wshutil.SendErrCh[wshrpc.CommandRemoteListEntriesRtnData](err)
|
||||
}
|
||||
var entries []*wshrpc.FileInfo
|
||||
for _, bucket := range buckets {
|
||||
log.Printf("bucket: %v", *bucket.Name)
|
||||
if numFetched >= numToFetch {
|
||||
break
|
||||
}
|
||||
if bucket.Name != nil {
|
||||
entries = append(entries, &wshrpc.FileInfo{
|
||||
Path: *bucket.Name,
|
||||
IsDir: true,
|
||||
Path: *bucket.Name,
|
||||
Name: *bucket.Name,
|
||||
Dir: fspath.Separator,
|
||||
ModTime: bucket.CreationDate.UnixMilli(),
|
||||
IsDir: true,
|
||||
MimeType: "directory",
|
||||
})
|
||||
numFetched++
|
||||
}
|
||||
}
|
||||
return entries, nil
|
||||
rtn := make(chan wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData], 1)
|
||||
defer close(rtn)
|
||||
rtn <- wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData]{Response: wshrpc.CommandRemoteListEntriesRtnData{FileInfo: entries}}
|
||||
return rtn
|
||||
} else {
|
||||
rtn := make(chan wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData], 16)
|
||||
// keep track of "directories" that have been used to avoid duplicates between pages
|
||||
prevUsedDirKeys := make(map[string]any)
|
||||
go func() {
|
||||
defer close(rtn)
|
||||
entryMap := make(map[string]*wshrpc.FileInfo)
|
||||
if err := c.listFilesPrefix(ctx, &s3.ListObjectsV2Input{
|
||||
Bucket: aws.String(bucket),
|
||||
Prefix: aws.String(objectKeyPrefix),
|
||||
}, func(obj *types.Object) (bool, error) {
|
||||
if numFetched >= numToFetch {
|
||||
return false, nil
|
||||
}
|
||||
lastModTime := int64(0)
|
||||
if obj.LastModified != nil {
|
||||
lastModTime = obj.LastModified.UnixMilli()
|
||||
}
|
||||
// get the first level directory name or file name
|
||||
name, isDir := fspath.FirstLevelDir(strings.TrimPrefix(*obj.Key, objectKeyPrefix))
|
||||
path := fspath.Join(conn.GetPathWithHost(), name)
|
||||
if isDir {
|
||||
if entryMap[name] == nil {
|
||||
if _, ok := prevUsedDirKeys[name]; !ok {
|
||||
entryMap[name] = &wshrpc.FileInfo{
|
||||
Path: path,
|
||||
Name: name,
|
||||
IsDir: true,
|
||||
Dir: objectKeyPrefix,
|
||||
ModTime: lastModTime,
|
||||
Size: 0,
|
||||
}
|
||||
fileutil.AddMimeTypeToFileInfo(path, entryMap[name])
|
||||
|
||||
prevUsedDirKeys[name] = struct{}{}
|
||||
numFetched++
|
||||
}
|
||||
} else if entryMap[name].ModTime < lastModTime {
|
||||
entryMap[name].ModTime = lastModTime
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
size := int64(0)
|
||||
if obj.Size != nil {
|
||||
size = *obj.Size
|
||||
}
|
||||
entryMap[name] = &wshrpc.FileInfo{
|
||||
Name: name,
|
||||
IsDir: false,
|
||||
Dir: objectKeyPrefix,
|
||||
Path: path,
|
||||
ModTime: lastModTime,
|
||||
Size: size,
|
||||
}
|
||||
fileutil.AddMimeTypeToFileInfo(path, entryMap[name])
|
||||
numFetched++
|
||||
return true, nil
|
||||
}); err != nil {
|
||||
rtn <- wshutil.RespErr[wshrpc.CommandRemoteListEntriesRtnData](err)
|
||||
return
|
||||
}
|
||||
entries := make([]*wshrpc.FileInfo, 0, wshrpc.DirChunkSize)
|
||||
for _, entry := range entryMap {
|
||||
entries = append(entries, entry)
|
||||
if len(entries) == wshrpc.DirChunkSize {
|
||||
rtn <- wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData]{Response: wshrpc.CommandRemoteListEntriesRtnData{FileInfo: entries}}
|
||||
entries = make([]*wshrpc.FileInfo, 0, wshrpc.DirChunkSize)
|
||||
}
|
||||
}
|
||||
if len(entries) > 0 {
|
||||
rtn <- wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData]{Response: wshrpc.CommandRemoteListEntriesRtnData{FileInfo: entries}}
|
||||
}
|
||||
}()
|
||||
return rtn
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (c S3Client) Stat(ctx context.Context, conn *connparse.Connection) (*wshrpc.FileInfo, error) {
|
||||
return nil, errors.ErrUnsupported
|
||||
bucketName := conn.Host
|
||||
objectKey := conn.Path
|
||||
if bucketName == "" || bucketName == fspath.Separator {
|
||||
// root, refers to list all buckets
|
||||
return &wshrpc.FileInfo{
|
||||
Name: fspath.Separator,
|
||||
IsDir: true,
|
||||
Size: 0,
|
||||
ModTime: 0,
|
||||
Path: fspath.Separator,
|
||||
Dir: fspath.Separator,
|
||||
MimeType: "directory",
|
||||
}, nil
|
||||
}
|
||||
if objectKey == "" || objectKey == fspath.Separator {
|
||||
_, err := c.client.HeadBucket(ctx, &s3.HeadBucketInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
})
|
||||
exists := true
|
||||
if err != nil {
|
||||
var apiError smithy.APIError
|
||||
if errors.As(err, &apiError) {
|
||||
switch apiError.(type) {
|
||||
case *types.NotFound:
|
||||
exists = false
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if exists {
|
||||
return &wshrpc.FileInfo{
|
||||
Name: bucketName,
|
||||
Path: bucketName,
|
||||
Dir: fspath.Separator,
|
||||
IsDir: true,
|
||||
Size: 0,
|
||||
ModTime: 0,
|
||||
MimeType: "directory",
|
||||
}, nil
|
||||
} else {
|
||||
return &wshrpc.FileInfo{
|
||||
Name: bucketName,
|
||||
Path: bucketName,
|
||||
Dir: fspath.Separator,
|
||||
NotFound: true,
|
||||
IsDir: true,
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
result, err := c.client.GetObjectAttributes(ctx, &s3.GetObjectAttributesInput{
|
||||
Bucket: aws.String(bucketName),
|
||||
Key: aws.String(objectKey),
|
||||
ObjectAttributes: []types.ObjectAttributes{
|
||||
types.ObjectAttributesObjectSize,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
var noKey *types.NoSuchKey
|
||||
var notFound *types.NotFound
|
||||
if errors.As(err, &noKey) || errors.As(err, ¬Found) {
|
||||
// try to list a single object to see if the prefix exists
|
||||
if !strings.HasSuffix(objectKey, fspath.Separator) {
|
||||
objectKey += fspath.Separator
|
||||
}
|
||||
entries, err := c.client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{
|
||||
Bucket: aws.String(bucketName),
|
||||
Prefix: aws.String(objectKey),
|
||||
MaxKeys: aws.Int32(1),
|
||||
})
|
||||
if err == nil {
|
||||
if entries.Contents != nil {
|
||||
return &wshrpc.FileInfo{
|
||||
Name: objectKey,
|
||||
Path: conn.GetPathWithHost(),
|
||||
Dir: fsutil.GetParentPath(conn),
|
||||
IsDir: true,
|
||||
Size: 0,
|
||||
Mode: fstype.DirMode,
|
||||
MimeType: "directory",
|
||||
}, nil
|
||||
}
|
||||
} else if !errors.As(err, &noKey) && !errors.As(err, ¬Found) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &wshrpc.FileInfo{
|
||||
Name: objectKey,
|
||||
Path: conn.GetPathWithHost(),
|
||||
Dir: fsutil.GetParentPath(conn),
|
||||
IsDir: true,
|
||||
NotFound: true,
|
||||
}, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
size := int64(0)
|
||||
if result.ObjectSize != nil {
|
||||
size = *result.ObjectSize
|
||||
}
|
||||
lastModified := int64(0)
|
||||
if result.LastModified != nil {
|
||||
lastModified = result.LastModified.UnixMilli()
|
||||
}
|
||||
rtn := &wshrpc.FileInfo{
|
||||
Name: objectKey,
|
||||
Path: conn.GetPathWithHost(),
|
||||
Dir: fsutil.GetParentPath(conn),
|
||||
IsDir: false,
|
||||
Size: size,
|
||||
ModTime: lastModified,
|
||||
}
|
||||
fileutil.AddMimeTypeToFileInfo(rtn.Path, rtn)
|
||||
return rtn, nil
|
||||
}
|
||||
|
||||
func (c S3Client) PutFile(ctx context.Context, conn *connparse.Connection, data wshrpc.FileData) error {
|
||||
return errors.ErrUnsupported
|
||||
if data.At != nil {
|
||||
return errors.Join(errors.ErrUnsupported, fmt.Errorf("file data offset and size not supported"))
|
||||
}
|
||||
bucket := conn.Host
|
||||
objectKey := conn.Path
|
||||
if bucket == "" || bucket == "/" || objectKey == "" || objectKey == "/" {
|
||||
return errors.Join(errors.ErrUnsupported, fmt.Errorf("bucket and object key must be specified"))
|
||||
}
|
||||
contentMaxLength := base64.StdEncoding.DecodedLen(len(data.Data64))
|
||||
var decodedBody []byte
|
||||
var contentLength int
|
||||
var err error
|
||||
if contentMaxLength > 0 {
|
||||
decodedBody = make([]byte, contentMaxLength)
|
||||
contentLength, err = base64.StdEncoding.Decode(decodedBody, []byte(data.Data64))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
decodedBody = []byte("\n")
|
||||
contentLength = 1
|
||||
}
|
||||
bodyReaderSeeker := bytes.NewReader(decodedBody[:contentLength])
|
||||
_, err = c.client.PutObject(ctx, &s3.PutObjectInput{
|
||||
Bucket: aws.String(bucket),
|
||||
Key: aws.String(objectKey),
|
||||
Body: bodyReaderSeeker,
|
||||
ContentLength: aws.Int64(int64(contentLength)),
|
||||
})
|
||||
if err != nil {
|
||||
log.Printf("PutFile: error putting object %v:%v: %v", bucket, objectKey, err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (c S3Client) AppendFile(ctx context.Context, conn *connparse.Connection, data wshrpc.FileData) error {
|
||||
return errors.ErrUnsupported
|
||||
return errors.Join(errors.ErrUnsupported, fmt.Errorf("append file not supported"))
|
||||
}
|
||||
|
||||
func (c S3Client) Mkdir(ctx context.Context, conn *connparse.Connection) error {
|
||||
return errors.ErrUnsupported
|
||||
return errors.Join(errors.ErrUnsupported, fmt.Errorf("mkdir not supported"))
|
||||
}
|
||||
|
||||
func (c S3Client) MoveInternal(ctx context.Context, srcConn, destConn *connparse.Connection, opts *wshrpc.FileCopyOpts) error {
|
||||
return errors.ErrUnsupported
|
||||
isDir, err := c.CopyInternal(ctx, srcConn, destConn, opts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
recursive := opts != nil && opts.Recursive
|
||||
return c.Delete(ctx, srcConn, recursive && isDir)
|
||||
}
|
||||
|
||||
func (c S3Client) CopyRemote(ctx context.Context, srcConn, destConn *connparse.Connection, srcClient fstype.FileShareClient, opts *wshrpc.FileCopyOpts) error {
|
||||
return errors.ErrUnsupported
|
||||
func (c S3Client) CopyRemote(ctx context.Context, srcConn, destConn *connparse.Connection, srcClient fstype.FileShareClient, opts *wshrpc.FileCopyOpts) (bool, error) {
|
||||
if srcConn.Scheme == connparse.ConnectionTypeS3 && destConn.Scheme == connparse.ConnectionTypeS3 {
|
||||
return c.CopyInternal(ctx, srcConn, destConn, opts)
|
||||
}
|
||||
destBucket := destConn.Host
|
||||
if destBucket == "" || destBucket == fspath.Separator {
|
||||
return false, fmt.Errorf("destination bucket must be specified")
|
||||
}
|
||||
return fsutil.PrefixCopyRemote(ctx, srcConn, destConn, srcClient, c, func(bucket, path string, size int64, reader io.Reader) error {
|
||||
_, err := c.client.PutObject(ctx, &s3.PutObjectInput{
|
||||
Bucket: aws.String(bucket),
|
||||
Key: aws.String(path),
|
||||
Body: reader,
|
||||
ContentLength: aws.Int64(size),
|
||||
})
|
||||
return err
|
||||
}, opts)
|
||||
}
|
||||
|
||||
func (c S3Client) CopyInternal(ctx context.Context, srcConn, destConn *connparse.Connection, opts *wshrpc.FileCopyOpts) error {
|
||||
return errors.ErrUnsupported
|
||||
func (c S3Client) CopyInternal(ctx context.Context, srcConn, destConn *connparse.Connection, opts *wshrpc.FileCopyOpts) (bool, error) {
|
||||
srcBucket := srcConn.Host
|
||||
destBucket := destConn.Host
|
||||
if srcBucket == "" || srcBucket == fspath.Separator || destBucket == "" || destBucket == fspath.Separator {
|
||||
return false, fmt.Errorf("source and destination bucket must be specified")
|
||||
}
|
||||
return fsutil.PrefixCopyInternal(ctx, srcConn, destConn, c, opts, func(ctx context.Context, bucket, prefix string) ([]string, error) {
|
||||
var entries []string
|
||||
err := c.listFilesPrefix(ctx, &s3.ListObjectsV2Input{
|
||||
Bucket: aws.String(bucket),
|
||||
Prefix: aws.String(prefix),
|
||||
}, func(obj *types.Object) (bool, error) {
|
||||
entries = append(entries, *obj.Key)
|
||||
return true, nil
|
||||
})
|
||||
return entries, err
|
||||
}, func(ctx context.Context, srcPath, destPath string) error {
|
||||
_, err := c.client.CopyObject(ctx, &s3.CopyObjectInput{
|
||||
Bucket: aws.String(destBucket),
|
||||
Key: aws.String(destPath),
|
||||
CopySource: aws.String(fspath.Join(srcBucket, srcPath)),
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("error copying %v:%v to %v:%v: %w", srcBucket, srcPath, destBucket, destPath, err)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func (c S3Client) Delete(ctx context.Context, conn *connparse.Connection, recursive bool) error {
|
||||
return errors.ErrUnsupported
|
||||
bucket := conn.Host
|
||||
objectKey := conn.Path
|
||||
if bucket == "" || bucket == fspath.Separator {
|
||||
return errors.Join(errors.ErrUnsupported, fmt.Errorf("bucket must be specified"))
|
||||
}
|
||||
if objectKey == "" || objectKey == fspath.Separator {
|
||||
return errors.Join(errors.ErrUnsupported, fmt.Errorf("object key must be specified"))
|
||||
}
|
||||
var err error
|
||||
if recursive {
|
||||
log.Printf("Deleting objects with prefix %v:%v", bucket, objectKey)
|
||||
if !strings.HasSuffix(objectKey, fspath.Separator) {
|
||||
objectKey = objectKey + fspath.Separator
|
||||
}
|
||||
objects := make([]types.ObjectIdentifier, 0)
|
||||
err = c.listFilesPrefix(ctx, &s3.ListObjectsV2Input{
|
||||
Bucket: aws.String(bucket),
|
||||
Prefix: aws.String(objectKey),
|
||||
}, func(obj *types.Object) (bool, error) {
|
||||
objects = append(objects, types.ObjectIdentifier{Key: obj.Key})
|
||||
return true, nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(objects) == 0 {
|
||||
return nil
|
||||
}
|
||||
_, err = c.client.DeleteObjects(ctx, &s3.DeleteObjectsInput{
|
||||
Bucket: aws.String(bucket),
|
||||
Delete: &types.Delete{
|
||||
Objects: objects,
|
||||
},
|
||||
})
|
||||
} else {
|
||||
log.Printf("Deleting object %v:%v", bucket, objectKey)
|
||||
_, err = c.client.DeleteObject(ctx, &s3.DeleteObjectInput{
|
||||
Bucket: aws.String(bucket),
|
||||
Key: aws.String(objectKey),
|
||||
})
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// verify the object was deleted
|
||||
finfo, err := c.Stat(ctx, conn)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !finfo.NotFound {
|
||||
if finfo.IsDir {
|
||||
return fmt.Errorf(fstype.RecursiveRequiredError)
|
||||
}
|
||||
return fmt.Errorf("object was not successfully deleted %v:%v", bucket, objectKey)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c S3Client) Join(ctx context.Context, conn *connparse.Connection, parts ...string) (string, error) {
|
||||
return "", errors.ErrUnsupported
|
||||
func (c S3Client) listFilesPrefix(ctx context.Context, input *s3.ListObjectsV2Input, fileCallback func(*types.Object) (bool, error)) error {
|
||||
var err error
|
||||
var output *s3.ListObjectsV2Output
|
||||
objectPaginator := s3.NewListObjectsV2Paginator(c.client, input)
|
||||
for objectPaginator.HasMorePages() {
|
||||
output, err = objectPaginator.NextPage(ctx)
|
||||
if err != nil {
|
||||
var noBucket *types.NoSuchBucket
|
||||
if !awsconn.CheckAccessDeniedErr(&err) && errors.As(err, &noBucket) {
|
||||
err = noBucket
|
||||
}
|
||||
return err
|
||||
} else {
|
||||
for _, obj := range output.Contents {
|
||||
if cont, err := fileCallback(&obj); err != nil {
|
||||
return err
|
||||
} else if !cont {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c S3Client) Join(ctx context.Context, conn *connparse.Connection, parts ...string) (*wshrpc.FileInfo, error) {
|
||||
var joinParts []string
|
||||
if conn.Path == "" || conn.Path == fspath.Separator {
|
||||
joinParts = parts
|
||||
} else {
|
||||
joinParts = append([]string{conn.Path}, parts...)
|
||||
}
|
||||
|
||||
conn.Path = fspath.Join(joinParts...)
|
||||
return c.Stat(ctx, conn)
|
||||
}
|
||||
|
||||
func (c S3Client) GetConnectionType() string {
|
||||
return connparse.ConnectionTypeS3
|
||||
}
|
||||
|
||||
func (c S3Client) GetCapability() wshrpc.FileShareCapability {
|
||||
return wshrpc.FileShareCapability{
|
||||
CanAppend: false,
|
||||
CanMkdir: false,
|
||||
}
|
||||
}
|
||||
|
@ -4,7 +4,6 @@
|
||||
package wavefs
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
@ -12,13 +11,16 @@ import (
|
||||
"io"
|
||||
"io/fs"
|
||||
"log"
|
||||
"path"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/wavetermdev/waveterm/pkg/filestore"
|
||||
"github.com/wavetermdev/waveterm/pkg/remote/connparse"
|
||||
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/fspath"
|
||||
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/fstype"
|
||||
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/fsutil"
|
||||
"github.com/wavetermdev/waveterm/pkg/util/fileutil"
|
||||
"github.com/wavetermdev/waveterm/pkg/util/iochan/iochantypes"
|
||||
"github.com/wavetermdev/waveterm/pkg/util/tarcopy"
|
||||
@ -30,7 +32,7 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
DefaultTimeout = 30 * time.Second
|
||||
DirMode os.FileMode = 0755 | os.ModeDir
|
||||
)
|
||||
|
||||
type WaveClient struct{}
|
||||
@ -54,7 +56,7 @@ func (c WaveClient) ReadStream(ctx context.Context, conn *connparse.Connection,
|
||||
if !rtnData.Info.IsDir {
|
||||
for i := 0; i < dataLen; i += wshrpc.FileChunkSize {
|
||||
if ctx.Err() != nil {
|
||||
ch <- wshutil.RespErr[wshrpc.FileData](ctx.Err())
|
||||
ch <- wshutil.RespErr[wshrpc.FileData](context.Cause(ctx))
|
||||
return
|
||||
}
|
||||
dataEnd := min(i+wshrpc.FileChunkSize, dataLen)
|
||||
@ -63,7 +65,7 @@ func (c WaveClient) ReadStream(ctx context.Context, conn *connparse.Connection,
|
||||
} else {
|
||||
for i := 0; i < len(rtnData.Entries); i += wshrpc.DirChunkSize {
|
||||
if ctx.Err() != nil {
|
||||
ch <- wshutil.RespErr[wshrpc.FileData](ctx.Err())
|
||||
ch <- wshutil.RespErr[wshrpc.FileData](context.Cause(ctx))
|
||||
return
|
||||
}
|
||||
ch <- wshrpc.RespOrErrorUnion[wshrpc.FileData]{Response: wshrpc.FileData{Entries: rtnData.Entries[i:min(i+wshrpc.DirChunkSize, len(rtnData.Entries))], Info: rtnData.Info}}
|
||||
@ -103,20 +105,57 @@ func (c WaveClient) Read(ctx context.Context, conn *connparse.Connection, data w
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error listing blockfiles: %w", err)
|
||||
}
|
||||
if len(list) == 0 {
|
||||
return &wshrpc.FileData{
|
||||
Info: &wshrpc.FileInfo{
|
||||
Name: fspath.Base(fileName),
|
||||
Path: fileName,
|
||||
Dir: fspath.Dir(fileName),
|
||||
NotFound: true,
|
||||
IsDir: true,
|
||||
}}, nil
|
||||
}
|
||||
return &wshrpc.FileData{Info: data.Info, Entries: list}, nil
|
||||
}
|
||||
|
||||
func (c WaveClient) ReadTarStream(ctx context.Context, conn *connparse.Connection, opts *wshrpc.FileCopyOpts) <-chan wshrpc.RespOrErrorUnion[iochantypes.Packet] {
|
||||
log.Printf("ReadTarStream: conn: %v, opts: %v\n", conn, opts)
|
||||
list, err := c.ListEntries(ctx, conn, nil)
|
||||
path := conn.Path
|
||||
srcHasSlash := strings.HasSuffix(path, "/")
|
||||
cleanedPath, err := cleanPath(path)
|
||||
if err != nil {
|
||||
return wshutil.SendErrCh[iochantypes.Packet](fmt.Errorf("error listing blockfiles: %w", err))
|
||||
return wshutil.SendErrCh[iochantypes.Packet](fmt.Errorf("error cleaning path: %w", err))
|
||||
}
|
||||
|
||||
finfo, err := c.Stat(ctx, conn)
|
||||
exists := err == nil && !finfo.NotFound
|
||||
if err != nil {
|
||||
return wshutil.SendErrCh[iochantypes.Packet](fmt.Errorf("error getting file info: %w", err))
|
||||
}
|
||||
if !exists {
|
||||
return wshutil.SendErrCh[iochantypes.Packet](fmt.Errorf("file not found: %s", conn.GetFullURI()))
|
||||
}
|
||||
singleFile := finfo != nil && !finfo.IsDir
|
||||
var pathPrefix string
|
||||
if !singleFile && srcHasSlash {
|
||||
pathPrefix = cleanedPath
|
||||
} else {
|
||||
pathPrefix = filepath.Dir(cleanedPath)
|
||||
}
|
||||
|
||||
pathPrefix := getPathPrefix(conn)
|
||||
schemeAndHost := conn.GetSchemeAndHost() + "/"
|
||||
|
||||
timeout := DefaultTimeout
|
||||
var entries []*wshrpc.FileInfo
|
||||
if singleFile {
|
||||
entries = []*wshrpc.FileInfo{finfo}
|
||||
} else {
|
||||
entries, err = c.ListEntries(ctx, conn, nil)
|
||||
if err != nil {
|
||||
return wshutil.SendErrCh[iochantypes.Packet](fmt.Errorf("error listing blockfiles: %w", err))
|
||||
}
|
||||
}
|
||||
|
||||
timeout := fstype.DefaultTimeout
|
||||
if opts.Timeout > 0 {
|
||||
timeout = time.Duration(opts.Timeout) * time.Millisecond
|
||||
}
|
||||
@ -128,14 +167,14 @@ func (c WaveClient) ReadTarStream(ctx context.Context, conn *connparse.Connectio
|
||||
tarClose()
|
||||
cancel()
|
||||
}()
|
||||
for _, file := range list {
|
||||
for _, file := range entries {
|
||||
if readerCtx.Err() != nil {
|
||||
rtn <- wshutil.RespErr[iochantypes.Packet](readerCtx.Err())
|
||||
rtn <- wshutil.RespErr[iochantypes.Packet](context.Cause(readerCtx))
|
||||
return
|
||||
}
|
||||
file.Mode = 0644
|
||||
|
||||
if err = writeHeader(fileutil.ToFsFileInfo(file), file.Path); err != nil {
|
||||
if err = writeHeader(fileutil.ToFsFileInfo(file), file.Path, singleFile); err != nil {
|
||||
rtn <- wshutil.RespErr[iochantypes.Packet](fmt.Errorf("error writing tar header: %w", err))
|
||||
return
|
||||
}
|
||||
@ -191,50 +230,37 @@ func (c WaveClient) ListEntries(ctx context.Context, conn *connparse.Connection,
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error cleaning path: %w", err)
|
||||
}
|
||||
fileListOrig, err := filestore.WFS.ListFiles(ctx, zoneId)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error listing blockfiles: %w", err)
|
||||
}
|
||||
prefix += fspath.Separator
|
||||
var fileList []*wshrpc.FileInfo
|
||||
for _, wf := range fileListOrig {
|
||||
dirMap := make(map[string]*wshrpc.FileInfo)
|
||||
if err := listFilesPrefix(ctx, zoneId, prefix, func(wf *filestore.WaveFile) error {
|
||||
if !opts.All {
|
||||
name, isDir := fspath.FirstLevelDir(strings.TrimPrefix(wf.Name, prefix))
|
||||
if isDir {
|
||||
path := fspath.Join(conn.GetPathWithHost(), name)
|
||||
if _, ok := dirMap[path]; ok {
|
||||
if dirMap[path].ModTime < wf.ModTs {
|
||||
dirMap[path].ModTime = wf.ModTs
|
||||
}
|
||||
return nil
|
||||
}
|
||||
dirMap[path] = &wshrpc.FileInfo{
|
||||
Path: path,
|
||||
Name: name,
|
||||
Dir: fspath.Dir(path),
|
||||
Size: 0,
|
||||
IsDir: true,
|
||||
SupportsMkdir: false,
|
||||
Mode: DirMode,
|
||||
}
|
||||
fileList = append(fileList, dirMap[path])
|
||||
return nil
|
||||
}
|
||||
}
|
||||
fileList = append(fileList, wavefileutil.WaveFileToFileInfo(wf))
|
||||
}
|
||||
if prefix != "" {
|
||||
var filteredList []*wshrpc.FileInfo
|
||||
for _, file := range fileList {
|
||||
if strings.HasPrefix(file.Name, prefix) {
|
||||
filteredList = append(filteredList, file)
|
||||
}
|
||||
}
|
||||
fileList = filteredList
|
||||
}
|
||||
if !opts.All {
|
||||
var filteredList []*wshrpc.FileInfo
|
||||
dirMap := make(map[string]any) // the value is max modtime
|
||||
for _, file := range fileList {
|
||||
// if there is an extra "/" after the prefix, don't include it
|
||||
// first strip the prefix
|
||||
relPath := strings.TrimPrefix(file.Name, prefix)
|
||||
// then check if there is a "/" after the prefix
|
||||
if strings.Contains(relPath, "/") {
|
||||
dirPath := strings.Split(relPath, "/")[0]
|
||||
dirMap[dirPath] = struct{}{}
|
||||
continue
|
||||
}
|
||||
filteredList = append(filteredList, file)
|
||||
}
|
||||
for dir := range dirMap {
|
||||
dirName := prefix + dir + "/"
|
||||
filteredList = append(filteredList, &wshrpc.FileInfo{
|
||||
Path: fmt.Sprintf(wavefileutil.WaveFilePathPattern, zoneId, dirName),
|
||||
Name: dirName,
|
||||
Dir: dirName,
|
||||
Size: 0,
|
||||
IsDir: true,
|
||||
SupportsMkdir: false,
|
||||
})
|
||||
}
|
||||
fileList = filteredList
|
||||
return nil
|
||||
}); err != nil {
|
||||
return nil, fmt.Errorf("error listing entries: %w", err)
|
||||
}
|
||||
if opts.Offset > 0 {
|
||||
if opts.Offset >= len(fileList) {
|
||||
@ -256,14 +282,34 @@ func (c WaveClient) Stat(ctx context.Context, conn *connparse.Connection) (*wshr
|
||||
if zoneId == "" {
|
||||
return nil, fmt.Errorf("zoneid not found in connection")
|
||||
}
|
||||
fileName, err := cleanPath(conn.Path)
|
||||
fileName, err := fsutil.CleanPathPrefix(conn.Path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error cleaning path: %w", err)
|
||||
}
|
||||
fileInfo, err := filestore.WFS.Stat(ctx, zoneId, fileName)
|
||||
if err != nil {
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, fmt.Errorf("NOTFOUND: %w", err)
|
||||
// attempt to list the directory
|
||||
entries, err := c.ListEntries(ctx, conn, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error listing entries: %w", err)
|
||||
}
|
||||
if len(entries) > 0 {
|
||||
return &wshrpc.FileInfo{
|
||||
Path: conn.GetPathWithHost(),
|
||||
Name: fileName,
|
||||
Dir: fsutil.GetParentPathString(fileName),
|
||||
Size: 0,
|
||||
IsDir: true,
|
||||
Mode: DirMode,
|
||||
}, nil
|
||||
} else {
|
||||
return &wshrpc.FileInfo{
|
||||
Path: conn.GetPathWithHost(),
|
||||
Name: fileName,
|
||||
Dir: fsutil.GetParentPathString(fileName),
|
||||
NotFound: true}, nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("error getting file info: %w", err)
|
||||
}
|
||||
@ -283,8 +329,7 @@ func (c WaveClient) PutFile(ctx context.Context, conn *connparse.Connection, dat
|
||||
if err != nil {
|
||||
return fmt.Errorf("error cleaning path: %w", err)
|
||||
}
|
||||
_, err = filestore.WFS.Stat(ctx, zoneId, fileName)
|
||||
if err != nil {
|
||||
if _, err := filestore.WFS.Stat(ctx, zoneId, fileName); err != nil {
|
||||
if !errors.Is(err, fs.ErrNotExist) {
|
||||
return fmt.Errorf("error getting blockfile info: %w", err)
|
||||
}
|
||||
@ -298,25 +343,20 @@ func (c WaveClient) PutFile(ctx context.Context, conn *connparse.Connection, dat
|
||||
meta = *data.Info.Meta
|
||||
}
|
||||
}
|
||||
err := filestore.WFS.MakeFile(ctx, zoneId, fileName, meta, opts)
|
||||
if err != nil {
|
||||
if err := filestore.WFS.MakeFile(ctx, zoneId, fileName, meta, opts); err != nil {
|
||||
return fmt.Errorf("error making blockfile: %w", err)
|
||||
}
|
||||
}
|
||||
if data.At != nil && data.At.Offset >= 0 {
|
||||
err = filestore.WFS.WriteAt(ctx, zoneId, fileName, data.At.Offset, dataBuf)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
if err := filestore.WFS.WriteAt(ctx, zoneId, fileName, data.At.Offset, dataBuf); errors.Is(err, fs.ErrNotExist) {
|
||||
return fmt.Errorf("NOTFOUND: %w", err)
|
||||
}
|
||||
if err != nil {
|
||||
} else if err != nil {
|
||||
return fmt.Errorf("error writing to blockfile: %w", err)
|
||||
}
|
||||
} else {
|
||||
err = filestore.WFS.WriteFile(ctx, zoneId, fileName, dataBuf)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
if err := filestore.WFS.WriteFile(ctx, zoneId, fileName, dataBuf); errors.Is(err, fs.ErrNotExist) {
|
||||
return fmt.Errorf("NOTFOUND: %w", err)
|
||||
}
|
||||
if err != nil {
|
||||
} else if err != nil {
|
||||
return fmt.Errorf("error writing to blockfile: %w", err)
|
||||
}
|
||||
}
|
||||
@ -360,8 +400,7 @@ func (c WaveClient) AppendFile(ctx context.Context, conn *connparse.Connection,
|
||||
meta = *data.Info.Meta
|
||||
}
|
||||
}
|
||||
err := filestore.WFS.MakeFile(ctx, zoneId, fileName, meta, opts)
|
||||
if err != nil {
|
||||
if err := filestore.WFS.MakeFile(ctx, zoneId, fileName, meta, opts); err != nil {
|
||||
return fmt.Errorf("error making blockfile: %w", err)
|
||||
}
|
||||
}
|
||||
@ -393,93 +432,78 @@ func (c WaveClient) MoveInternal(ctx context.Context, srcConn, destConn *connpar
|
||||
if srcConn.Host != destConn.Host {
|
||||
return fmt.Errorf("move internal, src and dest hosts do not match")
|
||||
}
|
||||
err := c.CopyInternal(ctx, srcConn, destConn, opts)
|
||||
isDir, err := c.CopyInternal(ctx, srcConn, destConn, opts)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error copying blockfile: %w", err)
|
||||
}
|
||||
err = c.Delete(ctx, srcConn, opts.Recursive)
|
||||
if err != nil {
|
||||
recursive := opts != nil && opts.Recursive && isDir
|
||||
if err := c.Delete(ctx, srcConn, recursive); err != nil {
|
||||
return fmt.Errorf("error deleting blockfile: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c WaveClient) CopyInternal(ctx context.Context, srcConn, destConn *connparse.Connection, opts *wshrpc.FileCopyOpts) error {
|
||||
if srcConn.Host == destConn.Host {
|
||||
host := srcConn.Host
|
||||
srcFileName, err := cleanPath(srcConn.Path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error cleaning source path: %w", err)
|
||||
func (c WaveClient) CopyInternal(ctx context.Context, srcConn, destConn *connparse.Connection, opts *wshrpc.FileCopyOpts) (bool, error) {
|
||||
return fsutil.PrefixCopyInternal(ctx, srcConn, destConn, c, opts, func(ctx context.Context, zoneId, prefix string) ([]string, error) {
|
||||
entryList := make([]string, 0)
|
||||
if err := listFilesPrefix(ctx, zoneId, prefix, func(wf *filestore.WaveFile) error {
|
||||
entryList = append(entryList, wf.Name)
|
||||
return nil
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
destFileName, err := cleanPath(destConn.Path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error cleaning destination path: %w", err)
|
||||
}
|
||||
err = filestore.WFS.MakeFile(ctx, host, destFileName, wshrpc.FileMeta{}, wshrpc.FileOpts{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("error making source blockfile: %w", err)
|
||||
}
|
||||
_, dataBuf, err := filestore.WFS.ReadFile(ctx, host, srcFileName)
|
||||
return entryList, nil
|
||||
}, func(ctx context.Context, srcPath, destPath string) error {
|
||||
srcHost := srcConn.Host
|
||||
srcFileName := strings.TrimPrefix(srcPath, srcHost+fspath.Separator)
|
||||
destHost := destConn.Host
|
||||
destFileName := strings.TrimPrefix(destPath, destHost+fspath.Separator)
|
||||
_, dataBuf, err := filestore.WFS.ReadFile(ctx, srcHost, srcFileName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error reading source blockfile: %w", err)
|
||||
}
|
||||
err = filestore.WFS.WriteFile(ctx, host, destFileName, dataBuf)
|
||||
if err != nil {
|
||||
if err := filestore.WFS.WriteFile(ctx, destHost, destFileName, dataBuf); err != nil {
|
||||
return fmt.Errorf("error writing to destination blockfile: %w", err)
|
||||
}
|
||||
wps.Broker.Publish(wps.WaveEvent{
|
||||
Event: wps.Event_BlockFile,
|
||||
Scopes: []string{waveobj.MakeORef(waveobj.OType_Block, host).String()},
|
||||
Scopes: []string{waveobj.MakeORef(waveobj.OType_Block, destHost).String()},
|
||||
Data: &wps.WSFileEventData{
|
||||
ZoneId: host,
|
||||
ZoneId: destHost,
|
||||
FileName: destFileName,
|
||||
FileOp: wps.FileOp_Invalidate,
|
||||
},
|
||||
})
|
||||
return nil
|
||||
} else {
|
||||
return fmt.Errorf("copy between different hosts not supported")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func (c WaveClient) CopyRemote(ctx context.Context, srcConn, destConn *connparse.Connection, srcClient fstype.FileShareClient, opts *wshrpc.FileCopyOpts) error {
|
||||
func (c WaveClient) CopyRemote(ctx context.Context, srcConn, destConn *connparse.Connection, srcClient fstype.FileShareClient, opts *wshrpc.FileCopyOpts) (bool, error) {
|
||||
if srcConn.Scheme == connparse.ConnectionTypeWave && destConn.Scheme == connparse.ConnectionTypeWave {
|
||||
return c.CopyInternal(ctx, srcConn, destConn, opts)
|
||||
}
|
||||
zoneId := destConn.Host
|
||||
if zoneId == "" {
|
||||
return fmt.Errorf("zoneid not found in connection")
|
||||
return false, fmt.Errorf("zoneid not found in connection")
|
||||
}
|
||||
destPrefix := getPathPrefix(destConn)
|
||||
destPrefix = strings.TrimPrefix(destPrefix, destConn.GetSchemeAndHost()+"/")
|
||||
log.Printf("CopyRemote: srcConn: %v, destConn: %v, destPrefix: %s\n", srcConn, destConn, destPrefix)
|
||||
readCtx, cancel := context.WithCancelCause(ctx)
|
||||
ioch := srcClient.ReadTarStream(readCtx, srcConn, opts)
|
||||
err := tarcopy.TarCopyDest(readCtx, cancel, ioch, func(next *tar.Header, reader *tar.Reader) error {
|
||||
if next.Typeflag == tar.TypeDir {
|
||||
return nil
|
||||
}
|
||||
fileName, err := cleanPath(path.Join(destPrefix, next.Name))
|
||||
if err != nil {
|
||||
return fmt.Errorf("error cleaning path: %w", err)
|
||||
}
|
||||
_, err = filestore.WFS.Stat(ctx, zoneId, fileName)
|
||||
if err != nil {
|
||||
if !errors.Is(err, fs.ErrNotExist) {
|
||||
return fmt.Errorf("error getting blockfile info: %w", err)
|
||||
}
|
||||
err := filestore.WFS.MakeFile(ctx, zoneId, fileName, nil, wshrpc.FileOpts{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("error making blockfile: %w", err)
|
||||
}
|
||||
}
|
||||
log.Printf("CopyRemote: writing file: %s; size: %d\n", fileName, next.Size)
|
||||
dataBuf := make([]byte, next.Size)
|
||||
_, err = reader.Read(dataBuf)
|
||||
if err != nil {
|
||||
return fsutil.PrefixCopyRemote(ctx, srcConn, destConn, srcClient, c, func(zoneId, path string, size int64, reader io.Reader) error {
|
||||
dataBuf := make([]byte, size)
|
||||
if _, err := reader.Read(dataBuf); err != nil {
|
||||
if !errors.Is(err, io.EOF) {
|
||||
return fmt.Errorf("error reading tar data: %w", err)
|
||||
}
|
||||
}
|
||||
err = filestore.WFS.WriteFile(ctx, zoneId, fileName, dataBuf)
|
||||
if err != nil {
|
||||
if _, err := filestore.WFS.Stat(ctx, zoneId, path); err != nil {
|
||||
if !errors.Is(err, fs.ErrNotExist) {
|
||||
return fmt.Errorf("error getting blockfile info: %w", err)
|
||||
} else {
|
||||
if err := filestore.WFS.MakeFile(ctx, zoneId, path, wshrpc.FileMeta{}, wshrpc.FileOpts{}); err != nil {
|
||||
return fmt.Errorf("error making blockfile: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := filestore.WFS.WriteFile(ctx, zoneId, path, dataBuf); err != nil {
|
||||
return fmt.Errorf("error writing to blockfile: %w", err)
|
||||
}
|
||||
wps.Broker.Publish(wps.WaveEvent{
|
||||
@ -487,16 +511,12 @@ func (c WaveClient) CopyRemote(ctx context.Context, srcConn, destConn *connparse
|
||||
Scopes: []string{waveobj.MakeORef(waveobj.OType_Block, zoneId).String()},
|
||||
Data: &wps.WSFileEventData{
|
||||
ZoneId: zoneId,
|
||||
FileName: fileName,
|
||||
FileName: path,
|
||||
FileOp: wps.FileOp_Invalidate,
|
||||
},
|
||||
})
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("error copying tar stream: %w", err)
|
||||
}
|
||||
return nil
|
||||
}, opts)
|
||||
}
|
||||
|
||||
func (c WaveClient) Delete(ctx context.Context, conn *connparse.Connection, recursive bool) error {
|
||||
@ -504,22 +524,40 @@ func (c WaveClient) Delete(ctx context.Context, conn *connparse.Connection, recu
|
||||
if zoneId == "" {
|
||||
return fmt.Errorf("zoneid not found in connection")
|
||||
}
|
||||
schemeAndHost := conn.GetSchemeAndHost() + "/"
|
||||
prefix := conn.Path
|
||||
|
||||
entries, err := c.ListEntries(ctx, conn, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error listing blockfiles: %w", err)
|
||||
finfo, err := c.Stat(ctx, conn)
|
||||
exists := err == nil && !finfo.NotFound
|
||||
if err != nil && !errors.Is(err, fs.ErrNotExist) {
|
||||
return fmt.Errorf("error getting file info: %w", err)
|
||||
}
|
||||
if len(entries) > 0 {
|
||||
if !exists {
|
||||
return nil
|
||||
}
|
||||
|
||||
pathsToDelete := make([]string, 0)
|
||||
|
||||
if finfo.IsDir {
|
||||
if !recursive {
|
||||
return fmt.Errorf("more than one entry, use recursive flag to delete")
|
||||
return fmt.Errorf("%v is not empty, use recursive flag to delete", prefix)
|
||||
}
|
||||
if !strings.HasSuffix(prefix, fspath.Separator) {
|
||||
prefix += fspath.Separator
|
||||
}
|
||||
if err := listFilesPrefix(ctx, zoneId, prefix, func(wf *filestore.WaveFile) error {
|
||||
pathsToDelete = append(pathsToDelete, wf.Name)
|
||||
return nil
|
||||
}); err != nil {
|
||||
return fmt.Errorf("error listing blockfiles: %w", err)
|
||||
}
|
||||
} else {
|
||||
pathsToDelete = append(pathsToDelete, prefix)
|
||||
}
|
||||
if len(pathsToDelete) > 0 {
|
||||
errs := make([]error, 0)
|
||||
for _, entry := range entries {
|
||||
fileName := strings.TrimPrefix(entry.Path, schemeAndHost)
|
||||
err = filestore.WFS.DeleteFile(ctx, zoneId, fileName)
|
||||
if err != nil {
|
||||
errs = append(errs, fmt.Errorf("error deleting blockfile %s/%s: %w", zoneId, fileName, err))
|
||||
for _, entry := range pathsToDelete {
|
||||
if err := filestore.WFS.DeleteFile(ctx, zoneId, entry); err != nil {
|
||||
errs = append(errs, fmt.Errorf("error deleting blockfile %s/%s: %w", zoneId, entry, err))
|
||||
continue
|
||||
}
|
||||
wps.Broker.Publish(wps.WaveEvent{
|
||||
@ -527,7 +565,7 @@ func (c WaveClient) Delete(ctx context.Context, conn *connparse.Connection, recu
|
||||
Scopes: []string{waveobj.MakeORef(waveobj.OType_Block, zoneId).String()},
|
||||
Data: &wps.WSFileEventData{
|
||||
ZoneId: zoneId,
|
||||
FileName: fileName,
|
||||
FileName: entry,
|
||||
FileOp: wps.FileOp_Delete,
|
||||
},
|
||||
})
|
||||
@ -539,27 +577,51 @@ func (c WaveClient) Delete(ctx context.Context, conn *connparse.Connection, recu
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c WaveClient) Join(ctx context.Context, conn *connparse.Connection, parts ...string) (string, error) {
|
||||
newPath := path.Join(append([]string{conn.Path}, parts...)...)
|
||||
func listFilesPrefix(ctx context.Context, zoneId, prefix string, entryCallback func(*filestore.WaveFile) error) error {
|
||||
if zoneId == "" {
|
||||
return fmt.Errorf("zoneid not found in connection")
|
||||
}
|
||||
fileListOrig, err := filestore.WFS.ListFiles(ctx, zoneId)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error listing blockfiles: %w", err)
|
||||
}
|
||||
for _, wf := range fileListOrig {
|
||||
if prefix == "" || strings.HasPrefix(wf.Name, prefix) {
|
||||
entryCallback(wf)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c WaveClient) Join(ctx context.Context, conn *connparse.Connection, parts ...string) (*wshrpc.FileInfo, error) {
|
||||
newPath := fspath.Join(append([]string{conn.Path}, parts...)...)
|
||||
newPath, err := cleanPath(newPath)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error cleaning path: %w", err)
|
||||
return nil, fmt.Errorf("error cleaning path: %w", err)
|
||||
}
|
||||
conn.Path = newPath
|
||||
return c.Stat(ctx, conn)
|
||||
}
|
||||
|
||||
func (c WaveClient) GetCapability() wshrpc.FileShareCapability {
|
||||
return wshrpc.FileShareCapability{
|
||||
CanAppend: true,
|
||||
CanMkdir: false,
|
||||
}
|
||||
return newPath, nil
|
||||
}
|
||||
|
||||
func cleanPath(path string) (string, error) {
|
||||
if path == "" {
|
||||
return "", fmt.Errorf("path is empty")
|
||||
if path == "" || path == fspath.Separator {
|
||||
return "", nil
|
||||
}
|
||||
if strings.HasPrefix(path, "/") {
|
||||
if strings.HasPrefix(path, fspath.Separator) {
|
||||
path = path[1:]
|
||||
}
|
||||
if strings.HasPrefix(path, "~") || strings.HasPrefix(path, ".") || strings.HasPrefix(path, "..") {
|
||||
return "", fmt.Errorf("wavefile path cannot start with ~, ., or ..")
|
||||
}
|
||||
var newParts []string
|
||||
for _, part := range strings.Split(path, "/") {
|
||||
for _, part := range strings.Split(path, fspath.Separator) {
|
||||
if part == ".." {
|
||||
if len(newParts) > 0 {
|
||||
newParts = newParts[:len(newParts)-1]
|
||||
@ -568,19 +630,9 @@ func cleanPath(path string) (string, error) {
|
||||
newParts = append(newParts, part)
|
||||
}
|
||||
}
|
||||
return strings.Join(newParts, "/"), nil
|
||||
return fspath.Join(newParts...), nil
|
||||
}
|
||||
|
||||
func (c WaveClient) GetConnectionType() string {
|
||||
return connparse.ConnectionTypeWave
|
||||
}
|
||||
|
||||
func getPathPrefix(conn *connparse.Connection) string {
|
||||
fullUri := conn.GetFullURI()
|
||||
pathPrefix := fullUri
|
||||
lastSlash := strings.LastIndex(fullUri, "/")
|
||||
if lastSlash > 10 && lastSlash < len(fullUri)-1 {
|
||||
pathPrefix = fullUri[:lastSlash+1]
|
||||
}
|
||||
return pathPrefix
|
||||
}
|
||||
|
@ -4,24 +4,18 @@
|
||||
package wshfs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/wavetermdev/waveterm/pkg/remote/connparse"
|
||||
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/fstype"
|
||||
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/fsutil"
|
||||
"github.com/wavetermdev/waveterm/pkg/util/iochan/iochantypes"
|
||||
"github.com/wavetermdev/waveterm/pkg/wshrpc"
|
||||
"github.com/wavetermdev/waveterm/pkg/wshrpc/wshclient"
|
||||
"github.com/wavetermdev/waveterm/pkg/wshutil"
|
||||
)
|
||||
|
||||
const (
|
||||
ThirtySeconds = 30 * 1000
|
||||
)
|
||||
|
||||
// This needs to be set by whoever initializes the client, either main-server or wshcmd-connserver
|
||||
var RpcClient *wshutil.WshRpc
|
||||
|
||||
@ -35,47 +29,7 @@ func NewWshClient() *WshClient {
|
||||
|
||||
func (c WshClient) Read(ctx context.Context, conn *connparse.Connection, data wshrpc.FileData) (*wshrpc.FileData, error) {
|
||||
rtnCh := c.ReadStream(ctx, conn, data)
|
||||
var fileData *wshrpc.FileData
|
||||
firstPk := true
|
||||
isDir := false
|
||||
var fileBuf bytes.Buffer
|
||||
for respUnion := range rtnCh {
|
||||
if respUnion.Error != nil {
|
||||
return nil, respUnion.Error
|
||||
}
|
||||
resp := respUnion.Response
|
||||
if firstPk {
|
||||
firstPk = false
|
||||
// first packet has the fileinfo
|
||||
if resp.Info == nil {
|
||||
return nil, fmt.Errorf("stream file protocol error, first pk fileinfo is empty")
|
||||
}
|
||||
fileData = &resp
|
||||
if fileData.Info.IsDir {
|
||||
isDir = true
|
||||
}
|
||||
continue
|
||||
}
|
||||
if isDir {
|
||||
if len(resp.Entries) == 0 {
|
||||
continue
|
||||
}
|
||||
fileData.Entries = append(fileData.Entries, resp.Entries...)
|
||||
} else {
|
||||
if resp.Data64 == "" {
|
||||
continue
|
||||
}
|
||||
decoder := base64.NewDecoder(base64.StdEncoding, bytes.NewReader([]byte(resp.Data64)))
|
||||
_, err := io.Copy(&fileBuf, decoder)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("stream file, failed to decode base64 data: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
if !isDir {
|
||||
fileData.Data64 = base64.StdEncoding.EncodeToString(fileBuf.Bytes())
|
||||
}
|
||||
return fileData, nil
|
||||
return fsutil.ReadStreamToFileData(ctx, rtnCh)
|
||||
}
|
||||
|
||||
func (c WshClient) ReadStream(ctx context.Context, conn *connparse.Connection, data wshrpc.FileData) <-chan wshrpc.RespOrErrorUnion[wshrpc.FileData] {
|
||||
@ -90,7 +44,7 @@ func (c WshClient) ReadStream(ctx context.Context, conn *connparse.Connection, d
|
||||
func (c WshClient) ReadTarStream(ctx context.Context, conn *connparse.Connection, opts *wshrpc.FileCopyOpts) <-chan wshrpc.RespOrErrorUnion[iochantypes.Packet] {
|
||||
timeout := opts.Timeout
|
||||
if timeout == 0 {
|
||||
timeout = ThirtySeconds
|
||||
timeout = fstype.DefaultTimeout.Milliseconds()
|
||||
}
|
||||
return wshclient.RemoteTarStreamCommand(RpcClient, wshrpc.CommandRemoteStreamTarData{Path: conn.Path, Opts: opts}, &wshrpc.RpcOpts{Route: wshutil.MakeConnectionRouteId(conn.Host), Timeout: timeout})
|
||||
}
|
||||
@ -155,38 +109,38 @@ func (c WshClient) MoveInternal(ctx context.Context, srcConn, destConn *connpars
|
||||
}
|
||||
timeout := opts.Timeout
|
||||
if timeout == 0 {
|
||||
timeout = ThirtySeconds
|
||||
timeout = fstype.DefaultTimeout.Milliseconds()
|
||||
}
|
||||
return wshclient.RemoteFileMoveCommand(RpcClient, wshrpc.CommandRemoteFileCopyData{SrcUri: srcConn.GetFullURI(), DestUri: destConn.GetFullURI(), Opts: opts}, &wshrpc.RpcOpts{Route: wshutil.MakeConnectionRouteId(destConn.Host), Timeout: timeout})
|
||||
return wshclient.RemoteFileMoveCommand(RpcClient, wshrpc.CommandFileCopyData{SrcUri: srcConn.GetFullURI(), DestUri: destConn.GetFullURI(), Opts: opts}, &wshrpc.RpcOpts{Route: wshutil.MakeConnectionRouteId(destConn.Host), Timeout: timeout})
|
||||
}
|
||||
|
||||
func (c WshClient) CopyRemote(ctx context.Context, srcConn, destConn *connparse.Connection, _ fstype.FileShareClient, opts *wshrpc.FileCopyOpts) error {
|
||||
func (c WshClient) CopyRemote(ctx context.Context, srcConn, destConn *connparse.Connection, _ fstype.FileShareClient, opts *wshrpc.FileCopyOpts) (bool, error) {
|
||||
return c.CopyInternal(ctx, srcConn, destConn, opts)
|
||||
}
|
||||
|
||||
func (c WshClient) CopyInternal(ctx context.Context, srcConn, destConn *connparse.Connection, opts *wshrpc.FileCopyOpts) error {
|
||||
func (c WshClient) CopyInternal(ctx context.Context, srcConn, destConn *connparse.Connection, opts *wshrpc.FileCopyOpts) (bool, error) {
|
||||
if opts == nil {
|
||||
opts = &wshrpc.FileCopyOpts{}
|
||||
}
|
||||
timeout := opts.Timeout
|
||||
if timeout == 0 {
|
||||
timeout = ThirtySeconds
|
||||
timeout = fstype.DefaultTimeout.Milliseconds()
|
||||
}
|
||||
return wshclient.RemoteFileCopyCommand(RpcClient, wshrpc.CommandRemoteFileCopyData{SrcUri: srcConn.GetFullURI(), DestUri: destConn.GetFullURI(), Opts: opts}, &wshrpc.RpcOpts{Route: wshutil.MakeConnectionRouteId(destConn.Host), Timeout: timeout})
|
||||
return wshclient.RemoteFileCopyCommand(RpcClient, wshrpc.CommandFileCopyData{SrcUri: srcConn.GetFullURI(), DestUri: destConn.GetFullURI(), Opts: opts}, &wshrpc.RpcOpts{Route: wshutil.MakeConnectionRouteId(destConn.Host), Timeout: timeout})
|
||||
}
|
||||
|
||||
func (c WshClient) Delete(ctx context.Context, conn *connparse.Connection, recursive bool) error {
|
||||
return wshclient.RemoteFileDeleteCommand(RpcClient, wshrpc.CommandDeleteFileData{Path: conn.Path, Recursive: recursive}, &wshrpc.RpcOpts{Route: wshutil.MakeConnectionRouteId(conn.Host)})
|
||||
}
|
||||
|
||||
func (c WshClient) Join(ctx context.Context, conn *connparse.Connection, parts ...string) (string, error) {
|
||||
finfo, err := wshclient.RemoteFileJoinCommand(RpcClient, append([]string{conn.Path}, parts...), &wshrpc.RpcOpts{Route: wshutil.MakeConnectionRouteId(conn.Host)})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return finfo.Path, nil
|
||||
func (c WshClient) Join(ctx context.Context, conn *connparse.Connection, parts ...string) (*wshrpc.FileInfo, error) {
|
||||
return wshclient.RemoteFileJoinCommand(RpcClient, append([]string{conn.Path}, parts...), &wshrpc.RpcOpts{Route: wshutil.MakeConnectionRouteId(conn.Host)})
|
||||
}
|
||||
|
||||
func (c WshClient) GetConnectionType() string {
|
||||
return connparse.ConnectionTypeWsh
|
||||
}
|
||||
|
||||
func (c WshClient) GetCapability() wshrpc.FileShareCapability {
|
||||
return wshrpc.FileShareCapability{CanAppend: true, CanMkdir: true}
|
||||
}
|
||||
|
@ -18,6 +18,8 @@ import (
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"maps"
|
||||
|
||||
"github.com/creack/pty"
|
||||
"github.com/wavetermdev/waveterm/pkg/blocklogger"
|
||||
"github.com/wavetermdev/waveterm/pkg/panichandler"
|
||||
@ -633,11 +635,10 @@ func tryGetPamEnvVars() map[string]string {
|
||||
if err != nil {
|
||||
log.Printf("error parsing %s: %v", userEnvironmentPath, err)
|
||||
}
|
||||
for k, v := range envVars2 {
|
||||
envVars[k] = v
|
||||
}
|
||||
for k, v := range envVars3 {
|
||||
envVars[k] = v
|
||||
maps.Copy(envVars, envVars2)
|
||||
maps.Copy(envVars, envVars3)
|
||||
if runtime_dir, ok := envVars["XDG_RUNTIME_DIR"]; !ok || runtime_dir == "" {
|
||||
envVars["XDG_RUNTIME_DIR"] = "/run/user/" + fmt.Sprint(os.Getuid())
|
||||
}
|
||||
return envVars
|
||||
}
|
||||
|
@ -4,61 +4,231 @@
|
||||
package suggestion
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"context"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/wavetermdev/waveterm/pkg/util/utilfn"
|
||||
"github.com/wavetermdev/waveterm/pkg/wshrpc"
|
||||
"github.com/wavetermdev/waveterm/pkg/wshrpc/wshclient"
|
||||
"golang.org/x/sync/singleflight"
|
||||
)
|
||||
|
||||
const ListDirChanSize = 50
|
||||
|
||||
// cache settings
|
||||
const (
|
||||
maxCacheEntries = 20
|
||||
cacheTTL = 60 * time.Second
|
||||
)
|
||||
|
||||
type cacheEntry struct {
|
||||
key string
|
||||
value []DirEntryResult
|
||||
expiration time.Time
|
||||
lruElement *list.Element
|
||||
}
|
||||
|
||||
var (
|
||||
cache = make(map[string]*cacheEntry)
|
||||
cacheLRU = list.New()
|
||||
cacheMu sync.Mutex
|
||||
|
||||
// group ensures only one listing per key is executed concurrently.
|
||||
group singleflight.Group
|
||||
)
|
||||
|
||||
func init() {
|
||||
go func() {
|
||||
ticker := time.NewTicker(60 * time.Second)
|
||||
defer ticker.Stop()
|
||||
for range ticker.C {
|
||||
cleanCache()
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func cleanCache() {
|
||||
cacheMu.Lock()
|
||||
defer cacheMu.Unlock()
|
||||
now := time.Now()
|
||||
for key, entry := range cache {
|
||||
if now.After(entry.expiration) {
|
||||
cacheLRU.Remove(entry.lruElement)
|
||||
delete(cache, key)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func getCache(key string) ([]DirEntryResult, bool) {
|
||||
cacheMu.Lock()
|
||||
defer cacheMu.Unlock()
|
||||
entry, ok := cache[key]
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
if time.Now().After(entry.expiration) {
|
||||
// expired
|
||||
cacheLRU.Remove(entry.lruElement)
|
||||
delete(cache, key)
|
||||
return nil, false
|
||||
}
|
||||
// update LRU order
|
||||
cacheLRU.MoveToFront(entry.lruElement)
|
||||
return entry.value, true
|
||||
}
|
||||
|
||||
func setCache(key string, value []DirEntryResult) {
|
||||
cacheMu.Lock()
|
||||
defer cacheMu.Unlock()
|
||||
// if already exists, update it
|
||||
if entry, ok := cache[key]; ok {
|
||||
entry.value = value
|
||||
entry.expiration = time.Now().Add(cacheTTL)
|
||||
cacheLRU.MoveToFront(entry.lruElement)
|
||||
return
|
||||
}
|
||||
// evict if at capacity
|
||||
if cacheLRU.Len() >= maxCacheEntries {
|
||||
oldest := cacheLRU.Back()
|
||||
if oldest != nil {
|
||||
oldestKey := oldest.Value.(string)
|
||||
if oldEntry, ok := cache[oldestKey]; ok {
|
||||
cacheLRU.Remove(oldEntry.lruElement)
|
||||
delete(cache, oldestKey)
|
||||
}
|
||||
}
|
||||
}
|
||||
// add new entry
|
||||
elem := cacheLRU.PushFront(key)
|
||||
cache[key] = &cacheEntry{
|
||||
key: key,
|
||||
value: value,
|
||||
expiration: time.Now().Add(cacheTTL),
|
||||
lruElement: elem,
|
||||
}
|
||||
}
|
||||
|
||||
// cacheDispose clears all cache entries for the provided widgetId.
|
||||
func cacheDispose(widgetId string) {
|
||||
cacheMu.Lock()
|
||||
defer cacheMu.Unlock()
|
||||
prefix := widgetId + "|"
|
||||
for key, entry := range cache {
|
||||
if strings.HasPrefix(key, prefix) {
|
||||
cacheLRU.Remove(entry.lruElement)
|
||||
delete(cache, key)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type DirEntryResult struct {
|
||||
Entry fs.DirEntry
|
||||
Err error
|
||||
}
|
||||
|
||||
func listDirectory(ctx context.Context, dir string, maxFiles int) (<-chan DirEntryResult, error) {
|
||||
// Open the directory outside the goroutine for early error reporting.
|
||||
f, err := os.Open(dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
func listS3Directory(ctx context.Context, widgetId string, conn string, dir string, maxFiles int) (<-chan DirEntryResult, error) {
|
||||
if !strings.HasPrefix(conn, "aws:") {
|
||||
return nil, fmt.Errorf("invalid S3 connection: %s", conn)
|
||||
}
|
||||
key := widgetId + "|" + dir
|
||||
if cached, ok := getCache(key); ok {
|
||||
ch := make(chan DirEntryResult, ListDirChanSize)
|
||||
go func() {
|
||||
defer close(ch)
|
||||
for _, r := range cached {
|
||||
select {
|
||||
case ch <- r:
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
return ch, nil
|
||||
}
|
||||
|
||||
// Ensure we have a directory.
|
||||
fi, err := f.Stat()
|
||||
// Ensure only one operation populates the cache for this key.
|
||||
value, err, _ := group.Do(key, func() (interface{}, error) {
|
||||
path := conn + ":s3://" + dir
|
||||
entries, err := wshclient.FileListCommand(wshclient.GetBareRpcClient(), wshrpc.FileListData{Path: path, Opts: &wshrpc.FileListOpts{Limit: maxFiles}}, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var results []DirEntryResult
|
||||
for _, entry := range entries {
|
||||
mockEntry := &MockDirEntry{
|
||||
NameStr: entry.Name,
|
||||
IsDirVal: entry.IsDir,
|
||||
FileMode: entry.Mode,
|
||||
}
|
||||
results = append(results, DirEntryResult{Entry: mockEntry})
|
||||
}
|
||||
return results, nil
|
||||
})
|
||||
if err != nil {
|
||||
f.Close()
|
||||
return nil, err
|
||||
}
|
||||
if !fi.IsDir() {
|
||||
f.Close()
|
||||
return nil, fmt.Errorf("%s is not a directory", dir)
|
||||
}
|
||||
results := value.([]DirEntryResult)
|
||||
setCache(key, results)
|
||||
|
||||
ch := make(chan DirEntryResult, ListDirChanSize)
|
||||
go func() {
|
||||
defer close(ch)
|
||||
// Make sure to close the directory when done.
|
||||
defer f.Close()
|
||||
|
||||
// Read up to maxFiles entries.
|
||||
entries, err := f.ReadDir(maxFiles)
|
||||
if err != nil {
|
||||
utilfn.SendWithCtxCheck(ctx, ch, DirEntryResult{Err: err})
|
||||
return
|
||||
}
|
||||
|
||||
// Send each entry over the channel.
|
||||
for _, entry := range entries {
|
||||
ok := utilfn.SendWithCtxCheck(ctx, ch, DirEntryResult{Entry: entry})
|
||||
if !ok {
|
||||
for _, r := range results {
|
||||
select {
|
||||
case ch <- r:
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
return ch, nil
|
||||
}
|
||||
|
||||
func listDirectory(ctx context.Context, widgetId string, dir string, maxFiles int) (<-chan DirEntryResult, error) {
|
||||
key := widgetId + "|" + dir
|
||||
if cached, ok := getCache(key); ok {
|
||||
ch := make(chan DirEntryResult, ListDirChanSize)
|
||||
go func() {
|
||||
defer close(ch)
|
||||
for _, r := range cached {
|
||||
select {
|
||||
case ch <- r:
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
return ch, nil
|
||||
}
|
||||
|
||||
// Use singleflight to ensure only one listing operation occurs per key.
|
||||
value, err, _ := group.Do(key, func() (interface{}, error) {
|
||||
f, err := os.Open(dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
fi, err := f.Stat()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !fi.IsDir() {
|
||||
return nil, fmt.Errorf("%s is not a directory", dir)
|
||||
}
|
||||
entries, err := f.ReadDir(maxFiles)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var results []DirEntryResult
|
||||
for _, entry := range entries {
|
||||
results = append(results, DirEntryResult{Entry: entry})
|
||||
}
|
||||
// Add parent directory (“..”) entry if not at the filesystem root.
|
||||
if filepath.Dir(dir) != dir {
|
||||
mockDir := &MockDirEntry{
|
||||
@ -66,7 +236,25 @@ func listDirectory(ctx context.Context, dir string, maxFiles int) (<-chan DirEnt
|
||||
IsDirVal: true,
|
||||
FileMode: fs.ModeDir | 0755,
|
||||
}
|
||||
utilfn.SendWithCtxCheck(ctx, ch, DirEntryResult{Entry: mockDir})
|
||||
results = append(results, DirEntryResult{Entry: mockDir})
|
||||
}
|
||||
return results, nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
results := value.([]DirEntryResult)
|
||||
setCache(key, results)
|
||||
|
||||
ch := make(chan DirEntryResult, ListDirChanSize)
|
||||
go func() {
|
||||
defer close(ch)
|
||||
for _, r := range results {
|
||||
select {
|
||||
case ch <- r:
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
return ch, nil
|
||||
|
@ -132,6 +132,10 @@ func resolveFileQuery(cwd string, query string) (string, string, string, error)
|
||||
return cwd, "", query, nil
|
||||
}
|
||||
|
||||
func DisposeSuggestions(ctx context.Context, widgetId string) {
|
||||
cacheDispose(widgetId)
|
||||
}
|
||||
|
||||
func FetchSuggestions(ctx context.Context, data wshrpc.FetchSuggestionsData) (*wshrpc.FetchSuggestionsResponse, error) {
|
||||
if data.SuggestionType == "file" {
|
||||
return fetchFileSuggestions(ctx, data)
|
||||
@ -353,7 +357,7 @@ func (h *scoredEntryHeap) Pop() interface{} {
|
||||
return x
|
||||
}
|
||||
|
||||
func fetchFileSuggestions(_ context.Context, data wshrpc.FetchSuggestionsData) (*wshrpc.FetchSuggestionsResponse, error) {
|
||||
func fetchFileSuggestions(ctx context.Context, data wshrpc.FetchSuggestionsData) (*wshrpc.FetchSuggestionsResponse, error) {
|
||||
// Only support file suggestions.
|
||||
if data.SuggestionType != "file" {
|
||||
return nil, fmt.Errorf("unsupported suggestion type: %q", data.SuggestionType)
|
||||
@ -366,12 +370,20 @@ func fetchFileSuggestions(_ context.Context, data wshrpc.FetchSuggestionsData) (
|
||||
}
|
||||
|
||||
// Use a cancellable context for directory listing.
|
||||
listingCtx, cancelFn := context.WithCancel(context.Background())
|
||||
listingCtx, cancelFn := context.WithCancel(ctx)
|
||||
defer cancelFn()
|
||||
|
||||
entriesCh, err := listDirectory(listingCtx, baseDir, 1000)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error listing directory: %w", err)
|
||||
var entriesCh <-chan DirEntryResult
|
||||
if strings.HasPrefix(data.FileConnection, "aws:") {
|
||||
entriesCh, err = listS3Directory(listingCtx, data.WidgetId, data.FileConnection, baseDir, 1000)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error listing S3 directory: %w", err)
|
||||
}
|
||||
} else {
|
||||
entriesCh, err = listDirectory(listingCtx, data.WidgetId, baseDir, 1000)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error listing directory: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
const maxEntries = MaxSuggestions // top-k entries
|
||||
|
@ -19,6 +19,7 @@ import (
|
||||
)
|
||||
|
||||
func FixPath(path string) (string, error) {
|
||||
origPath := path
|
||||
var err error
|
||||
if strings.HasPrefix(path, "~") {
|
||||
path = filepath.Join(wavebase.GetHomeDir(), path[1:])
|
||||
@ -28,6 +29,9 @@ func FixPath(path string) (string, error) {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
if strings.HasSuffix(origPath, "/") && !strings.HasSuffix(path, "/") {
|
||||
path += "/"
|
||||
}
|
||||
return path, nil
|
||||
}
|
||||
|
||||
@ -61,7 +65,6 @@ func WinSymlinkDir(path string, bits os.FileMode) bool {
|
||||
// does not return "application/octet-stream" as this is considered a detection failure
|
||||
// can pass an existing fileInfo to avoid re-statting the file
|
||||
// falls back to text/plain for 0 byte files
|
||||
|
||||
func DetectMimeType(path string, fileInfo fs.FileInfo, extended bool) string {
|
||||
if fileInfo == nil {
|
||||
statRtn, err := os.Stat(path)
|
||||
@ -140,6 +143,15 @@ func DetectMimeTypeWithDirEnt(path string, dirEnt fs.DirEntry) string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func AddMimeTypeToFileInfo(path string, fileInfo *wshrpc.FileInfo) {
|
||||
if fileInfo == nil {
|
||||
return
|
||||
}
|
||||
if fileInfo.MimeType == "" {
|
||||
fileInfo.MimeType = DetectMimeType(path, ToFsFileInfo(fileInfo), false)
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
systemBinDirs = []string{
|
||||
"/bin/",
|
||||
|
@ -11,8 +11,10 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
|
||||
"github.com/wavetermdev/waveterm/pkg/util/iochan/iochantypes"
|
||||
"github.com/wavetermdev/waveterm/pkg/util/utilfn"
|
||||
"github.com/wavetermdev/waveterm/pkg/wshrpc"
|
||||
"github.com/wavetermdev/waveterm/pkg/wshutil"
|
||||
)
|
||||
@ -22,6 +24,7 @@ func ReaderChan(ctx context.Context, r io.Reader, chunkSize int64, callback func
|
||||
ch := make(chan wshrpc.RespOrErrorUnion[iochantypes.Packet], 32)
|
||||
go func() {
|
||||
defer func() {
|
||||
log.Printf("Closing ReaderChan\n")
|
||||
close(ch)
|
||||
callback()
|
||||
}()
|
||||
@ -60,7 +63,7 @@ func WriterChan(ctx context.Context, w io.Writer, ch <-chan wshrpc.RespOrErrorUn
|
||||
go func() {
|
||||
defer func() {
|
||||
if ctx.Err() != nil {
|
||||
drainChannel(ch)
|
||||
utilfn.DrainChannelSafe(ch, "WriterChan")
|
||||
}
|
||||
callback()
|
||||
}()
|
||||
@ -97,10 +100,3 @@ func WriterChan(ctx context.Context, w io.Writer, ch <-chan wshrpc.RespOrErrorUn
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func drainChannel(ch <-chan wshrpc.RespOrErrorUnion[iochantypes.Packet]) {
|
||||
go func() {
|
||||
for range ch {
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
@ -244,11 +244,10 @@ func UpdateCmdEnv(cmd *exec.Cmd, envVars map[string]string) {
|
||||
envKey := GetEnvStrKey(envStr)
|
||||
newEnvVal, ok := envVars[envKey]
|
||||
if ok {
|
||||
if newEnvVal == "" {
|
||||
continue
|
||||
}
|
||||
newEnv = append(newEnv, envKey+"="+newEnvVal)
|
||||
found[envKey] = true
|
||||
if newEnvVal != "" {
|
||||
newEnv = append(newEnv, envKey+"="+newEnvVal)
|
||||
}
|
||||
} else {
|
||||
newEnv = append(newEnv, envStr)
|
||||
}
|
||||
|
@ -14,78 +14,96 @@ import (
|
||||
"log"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/wavetermdev/waveterm/pkg/util/iochan"
|
||||
"github.com/wavetermdev/waveterm/pkg/util/iochan/iochantypes"
|
||||
"github.com/wavetermdev/waveterm/pkg/util/utilfn"
|
||||
"github.com/wavetermdev/waveterm/pkg/wshrpc"
|
||||
)
|
||||
|
||||
const (
|
||||
maxRetries = 5
|
||||
retryDelay = 10 * time.Millisecond
|
||||
tarCopySrcName = "TarCopySrc"
|
||||
tarCopyDestName = "TarCopyDest"
|
||||
pipeReaderName = "pipe reader"
|
||||
pipeWriterName = "pipe writer"
|
||||
tarWriterName = "tar writer"
|
||||
|
||||
// custom flag to indicate that the source is a single file
|
||||
SingleFile = "singlefile"
|
||||
)
|
||||
|
||||
// TarCopySrc creates a tar stream writer and returns a channel to send the tar stream to.
|
||||
// writeHeader is a function that writes the tar header for the file.
|
||||
// writeHeader is a function that writes the tar header for the file. If only a single file is being written, the singleFile flag should be set to true.
|
||||
// writer is the tar writer to write the file data to.
|
||||
// close is a function that closes the tar writer and internal pipe writer.
|
||||
func TarCopySrc(ctx context.Context, pathPrefix string) (outputChan chan wshrpc.RespOrErrorUnion[iochantypes.Packet], writeHeader func(fi fs.FileInfo, file string) error, writer io.Writer, close func()) {
|
||||
func TarCopySrc(ctx context.Context, pathPrefix string) (outputChan chan wshrpc.RespOrErrorUnion[iochantypes.Packet], writeHeader func(fi fs.FileInfo, file string, singleFile bool) error, writer io.Writer, close func()) {
|
||||
pipeReader, pipeWriter := io.Pipe()
|
||||
tarWriter := tar.NewWriter(pipeWriter)
|
||||
rtnChan := iochan.ReaderChan(ctx, pipeReader, wshrpc.FileChunkSize, func() {
|
||||
gracefulClose(pipeReader, tarCopySrcName, pipeReaderName)
|
||||
log.Printf("Closing pipe reader\n")
|
||||
utilfn.GracefulClose(pipeReader, tarCopySrcName, pipeReaderName)
|
||||
})
|
||||
|
||||
return rtnChan, func(fi fs.FileInfo, file string) error {
|
||||
singleFileFlagSet := false
|
||||
|
||||
return rtnChan, func(fi fs.FileInfo, path string, singleFile bool) error {
|
||||
// generate tar header
|
||||
header, err := tar.FileInfoHeader(fi, file)
|
||||
header, err := tar.FileInfoHeader(fi, path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
header.Name = filepath.Clean(strings.TrimPrefix(file, pathPrefix))
|
||||
if err := validatePath(header.Name); err != nil {
|
||||
if singleFile {
|
||||
if singleFileFlagSet {
|
||||
return errors.New("attempting to write multiple files to a single file tar stream")
|
||||
}
|
||||
|
||||
header.PAXRecords = map[string]string{SingleFile: "true"}
|
||||
singleFileFlagSet = true
|
||||
}
|
||||
|
||||
path, err = fixPath(path, pathPrefix)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// skip if path is empty, which means the file is the root directory
|
||||
if path == "" {
|
||||
return nil
|
||||
}
|
||||
header.Name = path
|
||||
|
||||
// write header
|
||||
if err := tarWriter.WriteHeader(header); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}, tarWriter, func() {
|
||||
gracefulClose(tarWriter, tarCopySrcName, tarWriterName)
|
||||
gracefulClose(pipeWriter, tarCopySrcName, pipeWriterName)
|
||||
log.Printf("Closing tar writer\n")
|
||||
utilfn.GracefulClose(tarWriter, tarCopySrcName, tarWriterName)
|
||||
utilfn.GracefulClose(pipeWriter, tarCopySrcName, pipeWriterName)
|
||||
}
|
||||
}
|
||||
|
||||
func validatePath(path string) error {
|
||||
func fixPath(path, prefix string) (string, error) {
|
||||
path = strings.TrimPrefix(strings.TrimPrefix(filepath.Clean(strings.TrimPrefix(path, prefix)), "/"), "\\")
|
||||
if strings.Contains(path, "..") {
|
||||
return fmt.Errorf("invalid tar path containing directory traversal: %s", path)
|
||||
return "", fmt.Errorf("invalid tar path containing directory traversal: %s", path)
|
||||
}
|
||||
if strings.HasPrefix(path, "/") {
|
||||
return fmt.Errorf("invalid tar path starting with /: %s", path)
|
||||
}
|
||||
return nil
|
||||
return path, nil
|
||||
}
|
||||
|
||||
// TarCopyDest reads a tar stream from a channel and writes the files to the destination.
|
||||
// readNext is a function that is called for each file in the tar stream to read the file data. It should return an error if the file cannot be read.
|
||||
// readNext is a function that is called for each file in the tar stream to read the file data. If only a single file is being written from the tar src, the singleFile flag will be set in this callback. It should return an error if the file cannot be read.
|
||||
// The function returns an error if the tar stream cannot be read.
|
||||
func TarCopyDest(ctx context.Context, cancel context.CancelCauseFunc, ch <-chan wshrpc.RespOrErrorUnion[iochantypes.Packet], readNext func(next *tar.Header, reader *tar.Reader) error) error {
|
||||
func TarCopyDest(ctx context.Context, cancel context.CancelCauseFunc, ch <-chan wshrpc.RespOrErrorUnion[iochantypes.Packet], readNext func(next *tar.Header, reader *tar.Reader, singleFile bool) error) error {
|
||||
pipeReader, pipeWriter := io.Pipe()
|
||||
iochan.WriterChan(ctx, pipeWriter, ch, func() {
|
||||
gracefulClose(pipeWriter, tarCopyDestName, pipeWriterName)
|
||||
utilfn.GracefulClose(pipeWriter, tarCopyDestName, pipeWriterName)
|
||||
}, cancel)
|
||||
tarReader := tar.NewReader(pipeReader)
|
||||
defer func() {
|
||||
if !gracefulClose(pipeReader, tarCopyDestName, pipeReaderName) {
|
||||
if !utilfn.GracefulClose(pipeReader, tarCopyDestName, pipeReaderName) {
|
||||
// If the pipe reader cannot be closed, cancel the context. This should kill the writer goroutine.
|
||||
cancel(nil)
|
||||
}
|
||||
@ -110,27 +128,15 @@ func TarCopyDest(ctx context.Context, cancel context.CancelCauseFunc, ch <-chan
|
||||
return err
|
||||
}
|
||||
}
|
||||
err = readNext(next, tarReader)
|
||||
|
||||
// Check for directory traversal
|
||||
if strings.Contains(next.Name, "..") {
|
||||
return fmt.Errorf("invalid tar path containing directory traversal: %s", next.Name)
|
||||
}
|
||||
err = readNext(next, tarReader, next.PAXRecords != nil && next.PAXRecords[SingleFile] == "true")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func gracefulClose(closer io.Closer, debugName string, closerName string) bool {
|
||||
closed := false
|
||||
for retries := 0; retries < maxRetries; retries++ {
|
||||
if err := closer.Close(); err != nil {
|
||||
log.Printf("%s: error closing %s: %v, trying again in %dms\n", debugName, closerName, err, retryDelay.Milliseconds())
|
||||
time.Sleep(retryDelay)
|
||||
continue
|
||||
}
|
||||
closed = true
|
||||
break
|
||||
}
|
||||
if !closed {
|
||||
log.Printf("%s: unable to close %s after %d retries\n", debugName, closerName, maxRetries)
|
||||
}
|
||||
return closed
|
||||
}
|
||||
|
@ -15,6 +15,7 @@ import (
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"io"
|
||||
"log"
|
||||
"math"
|
||||
mathrand "math/rand"
|
||||
"os"
|
||||
@ -1032,3 +1033,44 @@ func SendWithCtxCheck[T any](ctx context.Context, ch chan<- T, val T) bool {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
maxRetries = 5
|
||||
retryDelay = 10 * time.Millisecond
|
||||
)
|
||||
|
||||
func GracefulClose(closer io.Closer, debugName, closerName string) bool {
|
||||
closed := false
|
||||
for retries := 0; retries < maxRetries; retries++ {
|
||||
if err := closer.Close(); err != nil {
|
||||
log.Printf("%s: error closing %s: %v, trying again in %dms\n", debugName, closerName, err, retryDelay.Milliseconds())
|
||||
time.Sleep(retryDelay)
|
||||
continue
|
||||
}
|
||||
closed = true
|
||||
break
|
||||
}
|
||||
if !closed {
|
||||
log.Printf("%s: unable to close %s after %d retries\n", debugName, closerName, maxRetries)
|
||||
}
|
||||
return closed
|
||||
}
|
||||
|
||||
// DrainChannelSafe will drain a channel until it is empty or until a timeout is reached.
|
||||
// WARNING: This function will panic if the channel is not drained within the timeout.
|
||||
func DrainChannelSafe[T any](ch <-chan T, debugName string) {
|
||||
drainTimeoutCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
go func() {
|
||||
defer cancel()
|
||||
for {
|
||||
select {
|
||||
case <-drainTimeoutCtx.Done():
|
||||
panic(debugName + ": timeout draining channel")
|
||||
case _, ok := <-ch:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
@ -4,6 +4,8 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/wavetermdev/waveterm/pkg/filestore"
|
||||
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/fsutil"
|
||||
"github.com/wavetermdev/waveterm/pkg/util/fileutil"
|
||||
"github.com/wavetermdev/waveterm/pkg/wshrpc"
|
||||
)
|
||||
|
||||
@ -13,14 +15,17 @@ const (
|
||||
|
||||
func WaveFileToFileInfo(wf *filestore.WaveFile) *wshrpc.FileInfo {
|
||||
path := fmt.Sprintf(WaveFilePathPattern, wf.ZoneId, wf.Name)
|
||||
return &wshrpc.FileInfo{
|
||||
rtn := &wshrpc.FileInfo{
|
||||
Path: path,
|
||||
Dir: fsutil.GetParentPathString(path),
|
||||
Name: wf.Name,
|
||||
Opts: &wf.Opts,
|
||||
Size: wf.Size,
|
||||
Meta: &wf.Meta,
|
||||
SupportsMkdir: false,
|
||||
}
|
||||
fileutil.AddMimeTypeToFileInfo(path, rtn)
|
||||
return rtn
|
||||
}
|
||||
|
||||
func WaveFileListToFileInfoList(wfList []*filestore.WaveFile) []*wshrpc.FileInfo {
|
||||
|
@ -25,8 +25,10 @@ import (
|
||||
"github.com/wavetermdev/waveterm/pkg/docsite"
|
||||
"github.com/wavetermdev/waveterm/pkg/filestore"
|
||||
"github.com/wavetermdev/waveterm/pkg/panichandler"
|
||||
"github.com/wavetermdev/waveterm/pkg/remote/fileshare"
|
||||
"github.com/wavetermdev/waveterm/pkg/schema"
|
||||
"github.com/wavetermdev/waveterm/pkg/service"
|
||||
"github.com/wavetermdev/waveterm/pkg/util/utilfn"
|
||||
"github.com/wavetermdev/waveterm/pkg/wavebase"
|
||||
"github.com/wavetermdev/waveterm/pkg/wshrpc"
|
||||
"github.com/wavetermdev/waveterm/pkg/wshrpc/wshclient"
|
||||
@ -250,6 +252,10 @@ func handleRemoteStreamFile(w http.ResponseWriter, req *http.Request, conn strin
|
||||
route := wshutil.MakeConnectionRouteId(conn)
|
||||
rpcOpts := &wshrpc.RpcOpts{Route: route, Timeout: 60 * 1000}
|
||||
rtnCh := wshclient.RemoteStreamFileCommand(client, streamFileData, rpcOpts)
|
||||
return handleRemoteStreamFileFromCh(w, req, path, rtnCh, rpcOpts.StreamCancelFn, no404)
|
||||
}
|
||||
|
||||
func handleRemoteStreamFileFromCh(w http.ResponseWriter, req *http.Request, path string, rtnCh <-chan wshrpc.RespOrErrorUnion[wshrpc.FileData], streamCancelFn func(), no404 bool) error {
|
||||
firstPk := true
|
||||
var fileInfo *wshrpc.FileInfo
|
||||
loopDone := false
|
||||
@ -258,16 +264,15 @@ func handleRemoteStreamFile(w http.ResponseWriter, req *http.Request, conn strin
|
||||
return
|
||||
}
|
||||
// if loop didn't finish naturally clear it out
|
||||
go func() {
|
||||
for range rtnCh {
|
||||
}
|
||||
}()
|
||||
utilfn.DrainChannelSafe(rtnCh, "handleRemoteStreamFile")
|
||||
}()
|
||||
ctx := req.Context()
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
rpcOpts.StreamCancelFn()
|
||||
if streamCancelFn != nil {
|
||||
streamCancelFn()
|
||||
}
|
||||
return ctx.Err()
|
||||
case respUnion, ok := <-rtnCh:
|
||||
if !ok {
|
||||
@ -313,6 +318,16 @@ func handleRemoteStreamFile(w http.ResponseWriter, req *http.Request, conn strin
|
||||
}
|
||||
}
|
||||
|
||||
func handleStreamLocalFile(w http.ResponseWriter, r *http.Request) {
|
||||
path := r.URL.Query().Get("path")
|
||||
if path == "" {
|
||||
http.Error(w, "path is required", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
no404 := r.URL.Query().Get("no404")
|
||||
handleLocalStreamFile(w, r, path, no404 != "")
|
||||
}
|
||||
|
||||
func handleStreamFile(w http.ResponseWriter, r *http.Request) {
|
||||
conn := r.URL.Query().Get("connection")
|
||||
if conn == "" {
|
||||
@ -324,14 +339,16 @@ func handleStreamFile(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
no404 := r.URL.Query().Get("no404")
|
||||
if conn == wshrpc.LocalConnName {
|
||||
handleLocalStreamFile(w, r, path, no404 != "")
|
||||
} else {
|
||||
err := handleRemoteStreamFile(w, r, conn, path, no404 != "")
|
||||
if err != nil {
|
||||
log.Printf("error streaming remote file %q %q: %v\n", conn, path, err)
|
||||
http.Error(w, fmt.Sprintf("error streaming file: %v", err), http.StatusInternalServerError)
|
||||
}
|
||||
data := wshrpc.FileData{
|
||||
Info: &wshrpc.FileInfo{
|
||||
Path: path,
|
||||
},
|
||||
}
|
||||
rtnCh := fileshare.ReadStream(r.Context(), data)
|
||||
err := handleRemoteStreamFileFromCh(w, r, path, rtnCh, nil, no404 != "")
|
||||
if err != nil {
|
||||
log.Printf("error streaming file %q %q: %v\n", conn, path, err)
|
||||
http.Error(w, fmt.Sprintf("error streaming file: %v", err), http.StatusInternalServerError)
|
||||
}
|
||||
}
|
||||
|
||||
@ -425,7 +442,9 @@ const schemaPrefix = "/schema/"
|
||||
// blocking
|
||||
func RunWebServer(listener net.Listener) {
|
||||
gr := mux.NewRouter()
|
||||
gr.HandleFunc("/wave/stream-local-file", WebFnWrap(WebFnOpts{AllowCaching: true}, handleStreamLocalFile))
|
||||
gr.HandleFunc("/wave/stream-file", WebFnWrap(WebFnOpts{AllowCaching: true}, handleStreamFile))
|
||||
gr.PathPrefix("/wave/stream-file/").HandlerFunc(WebFnWrap(WebFnOpts{AllowCaching: true}, handleStreamFile))
|
||||
gr.HandleFunc("/wave/file", WebFnWrap(WebFnOpts{AllowCaching: false}, handleWaveFile))
|
||||
gr.HandleFunc("/wave/service", WebFnWrap(WebFnOpts{JsonErrors: true}, handleService))
|
||||
gr.HandleFunc("/vdom/{uuid}/{path:.*}", WebFnWrap(WebFnOpts{AllowCaching: true}, handleVDom))
|
||||
|
@ -70,6 +70,12 @@ func ConnListCommand(w *wshutil.WshRpc, opts *wshrpc.RpcOpts) ([]string, error)
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// command "connlistaws", wshserver.ConnListAWSCommand
|
||||
func ConnListAWSCommand(w *wshutil.WshRpc, opts *wshrpc.RpcOpts) ([]string, error) {
|
||||
resp, err := sendRpcRequestCallHelper[[]string](w, "connlistaws", nil, opts)
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// command "connreinstallwsh", wshserver.ConnReinstallWshCommand
|
||||
func ConnReinstallWshCommand(w *wshutil.WshRpc, data wshrpc.ConnExtData, opts *wshrpc.RpcOpts) error {
|
||||
_, err := sendRpcRequestCallHelper[any](w, "connreinstallwsh", data, opts)
|
||||
@ -154,6 +160,12 @@ func DisposeCommand(w *wshutil.WshRpc, data wshrpc.CommandDisposeData, opts *wsh
|
||||
return err
|
||||
}
|
||||
|
||||
// command "disposesuggestions", wshserver.DisposeSuggestionsCommand
|
||||
func DisposeSuggestionsCommand(w *wshutil.WshRpc, data string, opts *wshrpc.RpcOpts) error {
|
||||
_, err := sendRpcRequestCallHelper[any](w, "disposesuggestions", data, opts)
|
||||
return err
|
||||
}
|
||||
|
||||
// command "eventpublish", wshserver.EventPublishCommand
|
||||
func EventPublishCommand(w *wshutil.WshRpc, data wps.WaveEvent, opts *wshrpc.RpcOpts) error {
|
||||
_, err := sendRpcRequestCallHelper[any](w, "eventpublish", data, opts)
|
||||
@ -232,6 +244,12 @@ func FileInfoCommand(w *wshutil.WshRpc, data wshrpc.FileData, opts *wshrpc.RpcOp
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// command "filejoin", wshserver.FileJoinCommand
|
||||
func FileJoinCommand(w *wshutil.WshRpc, data []string, opts *wshrpc.RpcOpts) (*wshrpc.FileInfo, error) {
|
||||
resp, err := sendRpcRequestCallHelper[*wshrpc.FileInfo](w, "filejoin", data, opts)
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// command "filelist", wshserver.FileListCommand
|
||||
func FileListCommand(w *wshutil.WshRpc, data wshrpc.FileListData, opts *wshrpc.RpcOpts) ([]*wshrpc.FileInfo, error) {
|
||||
resp, err := sendRpcRequestCallHelper[[]*wshrpc.FileInfo](w, "filelist", data, opts)
|
||||
@ -261,6 +279,17 @@ func FileReadCommand(w *wshutil.WshRpc, data wshrpc.FileData, opts *wshrpc.RpcOp
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// command "filereadstream", wshserver.FileReadStreamCommand
|
||||
func FileReadStreamCommand(w *wshutil.WshRpc, data wshrpc.FileData, opts *wshrpc.RpcOpts) chan wshrpc.RespOrErrorUnion[wshrpc.FileData] {
|
||||
return sendRpcRequestResponseStreamHelper[wshrpc.FileData](w, "filereadstream", data, opts)
|
||||
}
|
||||
|
||||
// command "filesharecapability", wshserver.FileShareCapabilityCommand
|
||||
func FileShareCapabilityCommand(w *wshutil.WshRpc, data string, opts *wshrpc.RpcOpts) (wshrpc.FileShareCapability, error) {
|
||||
resp, err := sendRpcRequestCallHelper[wshrpc.FileShareCapability](w, "filesharecapability", data, opts)
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// command "filestreamtar", wshserver.FileStreamTarCommand
|
||||
func FileStreamTarCommand(w *wshutil.WshRpc, data wshrpc.CommandRemoteStreamTarData, opts *wshrpc.RpcOpts) chan wshrpc.RespOrErrorUnion[iochantypes.Packet] {
|
||||
return sendRpcRequestResponseStreamHelper[iochantypes.Packet](w, "filestreamtar", data, opts)
|
||||
@ -290,6 +319,12 @@ func GetMetaCommand(w *wshutil.WshRpc, data wshrpc.CommandGetMetaData, opts *wsh
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// command "gettab", wshserver.GetTabCommand
|
||||
func GetTabCommand(w *wshutil.WshRpc, data string, opts *wshrpc.RpcOpts) (*waveobj.Tab, error) {
|
||||
resp, err := sendRpcRequestCallHelper[*waveobj.Tab](w, "gettab", data, opts)
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// command "getupdatechannel", wshserver.GetUpdateChannelCommand
|
||||
func GetUpdateChannelCommand(w *wshutil.WshRpc, opts *wshrpc.RpcOpts) (string, error) {
|
||||
resp, err := sendRpcRequestCallHelper[string](w, "getupdatechannel", nil, opts)
|
||||
@ -327,9 +362,9 @@ func RecordTEventCommand(w *wshutil.WshRpc, data telemetrydata.TEvent, opts *wsh
|
||||
}
|
||||
|
||||
// command "remotefilecopy", wshserver.RemoteFileCopyCommand
|
||||
func RemoteFileCopyCommand(w *wshutil.WshRpc, data wshrpc.CommandRemoteFileCopyData, opts *wshrpc.RpcOpts) error {
|
||||
_, err := sendRpcRequestCallHelper[any](w, "remotefilecopy", data, opts)
|
||||
return err
|
||||
func RemoteFileCopyCommand(w *wshutil.WshRpc, data wshrpc.CommandFileCopyData, opts *wshrpc.RpcOpts) (bool, error) {
|
||||
resp, err := sendRpcRequestCallHelper[bool](w, "remotefilecopy", data, opts)
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// command "remotefiledelete", wshserver.RemoteFileDeleteCommand
|
||||
@ -351,7 +386,7 @@ func RemoteFileJoinCommand(w *wshutil.WshRpc, data []string, opts *wshrpc.RpcOpt
|
||||
}
|
||||
|
||||
// command "remotefilemove", wshserver.RemoteFileMoveCommand
|
||||
func RemoteFileMoveCommand(w *wshutil.WshRpc, data wshrpc.CommandRemoteFileCopyData, opts *wshrpc.RpcOpts) error {
|
||||
func RemoteFileMoveCommand(w *wshutil.WshRpc, data wshrpc.CommandFileCopyData, opts *wshrpc.RpcOpts) error {
|
||||
_, err := sendRpcRequestCallHelper[any](w, "remotefilemove", data, opts)
|
||||
return err
|
||||
}
|
||||
|
@ -18,6 +18,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/wavetermdev/waveterm/pkg/remote/connparse"
|
||||
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/fstype"
|
||||
"github.com/wavetermdev/waveterm/pkg/remote/fileshare/wshfs"
|
||||
"github.com/wavetermdev/waveterm/pkg/suggestion"
|
||||
"github.com/wavetermdev/waveterm/pkg/util/fileutil"
|
||||
@ -30,10 +31,6 @@ import (
|
||||
"github.com/wavetermdev/waveterm/pkg/wshutil"
|
||||
)
|
||||
|
||||
const (
|
||||
DefaultTimeout = 30 * time.Second
|
||||
)
|
||||
|
||||
type ServerImpl struct {
|
||||
LogWriter io.Writer
|
||||
}
|
||||
@ -111,13 +108,6 @@ func (impl *ServerImpl) remoteStreamFileDir(ctx context.Context, path string, by
|
||||
}
|
||||
}
|
||||
var fileInfoArr []*wshrpc.FileInfo
|
||||
parent := filepath.Dir(path)
|
||||
parentFileInfo, err := impl.fileInfoInternal(parent, false)
|
||||
if err == nil && parent != path {
|
||||
parentFileInfo.Name = ".."
|
||||
parentFileInfo.Size = -1
|
||||
fileInfoArr = append(fileInfoArr, parentFileInfo)
|
||||
}
|
||||
for _, innerFileEntry := range innerFilesEntries {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
@ -129,13 +119,11 @@ func (impl *ServerImpl) remoteStreamFileDir(ctx context.Context, path string, by
|
||||
innerFileInfo := statToFileInfo(filepath.Join(path, innerFileInfoInt.Name()), innerFileInfoInt, false)
|
||||
fileInfoArr = append(fileInfoArr, innerFileInfo)
|
||||
if len(fileInfoArr) >= wshrpc.DirChunkSize {
|
||||
logPrintfDev("sending %d entries\n", len(fileInfoArr))
|
||||
dataCallback(fileInfoArr, nil, byteRange)
|
||||
fileInfoArr = nil
|
||||
}
|
||||
}
|
||||
if len(fileInfoArr) > 0 {
|
||||
logPrintfDev("sending %d entries\n", len(fileInfoArr))
|
||||
dataCallback(fileInfoArr, nil, byteRange)
|
||||
}
|
||||
return nil
|
||||
@ -146,7 +134,7 @@ func (impl *ServerImpl) remoteStreamFileRegular(ctx context.Context, path string
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot open file %q: %w", path, err)
|
||||
}
|
||||
defer fd.Close()
|
||||
defer utilfn.GracefulClose(fd, "remoteStreamFileRegular", path)
|
||||
var filePos int64
|
||||
if !byteRange.All && byteRange.Start > 0 {
|
||||
_, err := fd.Seek(byteRange.Start, io.SeekStart)
|
||||
@ -240,8 +228,8 @@ func (impl *ServerImpl) RemoteTarStreamCommand(ctx context.Context, data wshrpc.
|
||||
if opts == nil {
|
||||
opts = &wshrpc.FileCopyOpts{}
|
||||
}
|
||||
recursive := opts.Recursive
|
||||
logPrintfDev("RemoteTarStreamCommand: path=%s\n", path)
|
||||
log.Printf("RemoteTarStreamCommand: path=%s\n", path)
|
||||
srcHasSlash := strings.HasSuffix(path, "/")
|
||||
path, err := wavebase.ExpandHomeDir(path)
|
||||
if err != nil {
|
||||
return wshutil.SendErrCh[iochantypes.Packet](fmt.Errorf("cannot expand path %q: %w", path, err))
|
||||
@ -253,18 +241,14 @@ func (impl *ServerImpl) RemoteTarStreamCommand(ctx context.Context, data wshrpc.
|
||||
}
|
||||
|
||||
var pathPrefix string
|
||||
if finfo.IsDir() && strings.HasSuffix(cleanedPath, "/") {
|
||||
singleFile := !finfo.IsDir()
|
||||
if !singleFile && srcHasSlash {
|
||||
pathPrefix = cleanedPath
|
||||
} else {
|
||||
pathPrefix = filepath.Dir(cleanedPath) + "/"
|
||||
}
|
||||
if finfo.IsDir() {
|
||||
if !recursive {
|
||||
return wshutil.SendErrCh[iochantypes.Packet](fmt.Errorf("cannot create tar stream for %q: %w", path, errors.New("directory copy requires recursive option")))
|
||||
}
|
||||
pathPrefix = filepath.Dir(cleanedPath)
|
||||
}
|
||||
|
||||
timeout := DefaultTimeout
|
||||
timeout := fstype.DefaultTimeout
|
||||
if opts.Timeout > 0 {
|
||||
timeout = time.Duration(opts.Timeout) * time.Millisecond
|
||||
}
|
||||
@ -283,7 +267,7 @@ func (impl *ServerImpl) RemoteTarStreamCommand(ctx context.Context, data wshrpc.
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = writeHeader(info, path); err != nil {
|
||||
if err = writeHeader(info, path, singleFile); err != nil {
|
||||
return err
|
||||
}
|
||||
// if not a dir, write file content
|
||||
@ -292,6 +276,7 @@ func (impl *ServerImpl) RemoteTarStreamCommand(ctx context.Context, data wshrpc.
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer utilfn.GracefulClose(data, "RemoteTarStreamCommand", path)
|
||||
if _, err := io.Copy(fileWriter, data); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -300,10 +285,10 @@ func (impl *ServerImpl) RemoteTarStreamCommand(ctx context.Context, data wshrpc.
|
||||
}
|
||||
log.Printf("RemoteTarStreamCommand: starting\n")
|
||||
err = nil
|
||||
if finfo.IsDir() {
|
||||
err = filepath.Walk(path, walkFunc)
|
||||
if singleFile {
|
||||
err = walkFunc(cleanedPath, finfo, nil)
|
||||
} else {
|
||||
err = walkFunc(path, finfo, nil)
|
||||
err = filepath.Walk(cleanedPath, walkFunc)
|
||||
}
|
||||
if err != nil {
|
||||
rtn <- wshutil.RespErr[iochantypes.Packet](err)
|
||||
@ -314,7 +299,7 @@ func (impl *ServerImpl) RemoteTarStreamCommand(ctx context.Context, data wshrpc.
|
||||
return rtn
|
||||
}
|
||||
|
||||
func (impl *ServerImpl) RemoteFileCopyCommand(ctx context.Context, data wshrpc.CommandRemoteFileCopyData) error {
|
||||
func (impl *ServerImpl) RemoteFileCopyCommand(ctx context.Context, data wshrpc.CommandFileCopyData) (bool, error) {
|
||||
log.Printf("RemoteFileCopyCommand: src=%s, dest=%s\n", data.SrcUri, data.DestUri)
|
||||
opts := data.Opts
|
||||
if opts == nil {
|
||||
@ -324,40 +309,49 @@ func (impl *ServerImpl) RemoteFileCopyCommand(ctx context.Context, data wshrpc.C
|
||||
srcUri := data.SrcUri
|
||||
merge := opts.Merge
|
||||
overwrite := opts.Overwrite
|
||||
if overwrite && merge {
|
||||
return false, fmt.Errorf("cannot specify both overwrite and merge")
|
||||
}
|
||||
|
||||
destConn, err := connparse.ParseURIAndReplaceCurrentHost(ctx, destUri)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot parse destination URI %q: %w", srcUri, err)
|
||||
return false, fmt.Errorf("cannot parse destination URI %q: %w", destUri, err)
|
||||
}
|
||||
destPathCleaned := filepath.Clean(wavebase.ExpandHomeDirSafe(destConn.Path))
|
||||
destinfo, err := os.Stat(destPathCleaned)
|
||||
if err == nil {
|
||||
if !destinfo.IsDir() {
|
||||
if !overwrite {
|
||||
return fmt.Errorf("destination %q already exists, use overwrite option", destPathCleaned)
|
||||
} else {
|
||||
err := os.Remove(destPathCleaned)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot remove file %q: %w", destPathCleaned, err)
|
||||
}
|
||||
if err != nil {
|
||||
if !errors.Is(err, fs.ErrNotExist) {
|
||||
return false, fmt.Errorf("cannot stat destination %q: %w", destPathCleaned, err)
|
||||
}
|
||||
}
|
||||
|
||||
destExists := destinfo != nil
|
||||
destIsDir := destExists && destinfo.IsDir()
|
||||
destHasSlash := strings.HasSuffix(destUri, "/")
|
||||
|
||||
if destExists && !destIsDir {
|
||||
if !overwrite {
|
||||
return false, fmt.Errorf(fstype.OverwriteRequiredError, destPathCleaned)
|
||||
} else {
|
||||
err := os.Remove(destPathCleaned)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("cannot remove file %q: %w", destPathCleaned, err)
|
||||
}
|
||||
}
|
||||
} else if !errors.Is(err, fs.ErrNotExist) {
|
||||
return fmt.Errorf("cannot stat destination %q: %w", destPathCleaned, err)
|
||||
}
|
||||
srcConn, err := connparse.ParseURIAndReplaceCurrentHost(ctx, srcUri)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot parse source URI %q: %w", srcUri, err)
|
||||
return false, fmt.Errorf("cannot parse source URI %q: %w", srcUri, err)
|
||||
}
|
||||
|
||||
copyFileFunc := func(path string, finfo fs.FileInfo, srcFile io.Reader) (int64, error) {
|
||||
destinfo, err = os.Stat(path)
|
||||
nextinfo, err := os.Stat(path)
|
||||
if err != nil && !errors.Is(err, fs.ErrNotExist) {
|
||||
return 0, fmt.Errorf("cannot stat file %q: %w", path, err)
|
||||
}
|
||||
|
||||
if destinfo != nil {
|
||||
if destinfo.IsDir() {
|
||||
if nextinfo != nil {
|
||||
if nextinfo.IsDir() {
|
||||
if !finfo.IsDir() {
|
||||
// try to create file in directory
|
||||
path = filepath.Join(path, filepath.Base(finfo.Name()))
|
||||
@ -366,28 +360,24 @@ func (impl *ServerImpl) RemoteFileCopyCommand(ctx context.Context, data wshrpc.C
|
||||
return 0, fmt.Errorf("cannot stat file %q: %w", path, err)
|
||||
}
|
||||
if newdestinfo != nil && !overwrite {
|
||||
return 0, fmt.Errorf("cannot create file %q, file exists at path, overwrite not specified", path)
|
||||
return 0, fmt.Errorf(fstype.OverwriteRequiredError, path)
|
||||
}
|
||||
} else if !merge && !overwrite {
|
||||
return 0, fmt.Errorf("cannot create directory %q, directory exists at path, neither overwrite nor merge specified", path)
|
||||
} else if overwrite {
|
||||
err := os.RemoveAll(path)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("cannot remove directory %q: %w", path, err)
|
||||
}
|
||||
} else if !merge {
|
||||
return 0, fmt.Errorf(fstype.MergeRequiredError, path)
|
||||
}
|
||||
} else {
|
||||
if finfo.IsDir() {
|
||||
if !overwrite {
|
||||
return 0, fmt.Errorf("cannot create file %q, directory exists at path, overwrite not specified", path)
|
||||
} else {
|
||||
err := os.RemoveAll(path)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("cannot remove directory %q: %w", path, err)
|
||||
}
|
||||
if !overwrite {
|
||||
return 0, fmt.Errorf(fstype.OverwriteRequiredError, path)
|
||||
} else if finfo.IsDir() {
|
||||
err := os.RemoveAll(path)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("cannot remove directory %q: %w", path, err)
|
||||
}
|
||||
} else if !overwrite {
|
||||
return 0, fmt.Errorf("cannot create file %q, file exists at path, overwrite not specified", path)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -397,6 +387,7 @@ func (impl *ServerImpl) RemoteFileCopyCommand(ctx context.Context, data wshrpc.C
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("cannot create directory %q: %w", path, err)
|
||||
}
|
||||
return 0, nil
|
||||
} else {
|
||||
err := os.MkdirAll(filepath.Dir(path), 0755)
|
||||
if err != nil {
|
||||
@ -408,7 +399,7 @@ func (impl *ServerImpl) RemoteFileCopyCommand(ctx context.Context, data wshrpc.C
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("cannot create new file %q: %w", path, err)
|
||||
}
|
||||
defer file.Close()
|
||||
defer utilfn.GracefulClose(file, "RemoteFileCopyCommand", path)
|
||||
_, err = io.Copy(file, srcFile)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("cannot write file %q: %w", path, err)
|
||||
@ -417,48 +408,62 @@ func (impl *ServerImpl) RemoteFileCopyCommand(ctx context.Context, data wshrpc.C
|
||||
return finfo.Size(), nil
|
||||
}
|
||||
|
||||
srcIsDir := false
|
||||
if srcConn.Host == destConn.Host {
|
||||
srcPathCleaned := filepath.Clean(wavebase.ExpandHomeDirSafe(srcConn.Path))
|
||||
|
||||
srcFileStat, err := os.Stat(srcPathCleaned)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot stat file %q: %w", srcPathCleaned, err)
|
||||
return false, fmt.Errorf("cannot stat file %q: %w", srcPathCleaned, err)
|
||||
}
|
||||
|
||||
if srcFileStat.IsDir() {
|
||||
srcIsDir = true
|
||||
var srcPathPrefix string
|
||||
if destIsDir {
|
||||
srcPathPrefix = filepath.Dir(srcPathCleaned)
|
||||
} else {
|
||||
srcPathPrefix = srcPathCleaned
|
||||
}
|
||||
err = filepath.Walk(srcPathCleaned, func(path string, info fs.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
srcFilePath := path
|
||||
destFilePath := filepath.Join(destPathCleaned, strings.TrimPrefix(path, srcPathCleaned))
|
||||
destFilePath := filepath.Join(destPathCleaned, strings.TrimPrefix(path, srcPathPrefix))
|
||||
var file *os.File
|
||||
if !info.IsDir() {
|
||||
file, err = os.Open(srcFilePath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot open file %q: %w", srcFilePath, err)
|
||||
}
|
||||
defer file.Close()
|
||||
defer utilfn.GracefulClose(file, "RemoteFileCopyCommand", srcFilePath)
|
||||
}
|
||||
_, err = copyFileFunc(destFilePath, info, file)
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot copy %q to %q: %w", srcUri, destUri, err)
|
||||
return false, fmt.Errorf("cannot copy %q to %q: %w", srcUri, destUri, err)
|
||||
}
|
||||
} else {
|
||||
file, err := os.Open(srcPathCleaned)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot open file %q: %w", srcPathCleaned, err)
|
||||
return false, fmt.Errorf("cannot open file %q: %w", srcPathCleaned, err)
|
||||
}
|
||||
defer file.Close()
|
||||
_, err = copyFileFunc(destPathCleaned, srcFileStat, file)
|
||||
defer utilfn.GracefulClose(file, "RemoteFileCopyCommand", srcPathCleaned)
|
||||
var destFilePath string
|
||||
if destHasSlash {
|
||||
destFilePath = filepath.Join(destPathCleaned, filepath.Base(srcPathCleaned))
|
||||
} else {
|
||||
destFilePath = destPathCleaned
|
||||
}
|
||||
_, err = copyFileFunc(destFilePath, srcFileStat, file)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot copy %q to %q: %w", srcUri, destUri, err)
|
||||
return false, fmt.Errorf("cannot copy %q to %q: %w", srcUri, destUri, err)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
timeout := DefaultTimeout
|
||||
timeout := fstype.DefaultTimeout
|
||||
if opts.Timeout > 0 {
|
||||
timeout = time.Duration(opts.Timeout) * time.Millisecond
|
||||
}
|
||||
@ -470,16 +475,17 @@ func (impl *ServerImpl) RemoteFileCopyCommand(ctx context.Context, data wshrpc.C
|
||||
numFiles := 0
|
||||
numSkipped := 0
|
||||
totalBytes := int64(0)
|
||||
err := tarcopy.TarCopyDest(readCtx, cancel, ioch, func(next *tar.Header, reader *tar.Reader) error {
|
||||
// Check for directory traversal
|
||||
if strings.Contains(next.Name, "..") {
|
||||
log.Printf("skipping file with unsafe path: %q\n", next.Name)
|
||||
numSkipped++
|
||||
return nil
|
||||
}
|
||||
|
||||
err := tarcopy.TarCopyDest(readCtx, cancel, ioch, func(next *tar.Header, reader *tar.Reader, singleFile bool) error {
|
||||
numFiles++
|
||||
nextpath := filepath.Join(destPathCleaned, next.Name)
|
||||
srcIsDir = !singleFile
|
||||
if singleFile && !destHasSlash {
|
||||
// custom flag to indicate that the source is a single file, not a directory the contents of a directory
|
||||
nextpath = destPathCleaned
|
||||
}
|
||||
finfo := next.FileInfo()
|
||||
n, err := copyFileFunc(filepath.Join(destPathCleaned, next.Name), finfo, reader)
|
||||
n, err := copyFileFunc(nextpath, finfo, reader)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot copy file %q: %w", next.Name, err)
|
||||
}
|
||||
@ -487,7 +493,7 @@ func (impl *ServerImpl) RemoteFileCopyCommand(ctx context.Context, data wshrpc.C
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot copy %q to %q: %w", srcUri, destUri, err)
|
||||
return false, fmt.Errorf("cannot copy %q to %q: %w", srcUri, destUri, err)
|
||||
}
|
||||
totalTime := time.Since(copyStart).Seconds()
|
||||
totalMegaBytes := float64(totalBytes) / 1024 / 1024
|
||||
@ -497,7 +503,7 @@ func (impl *ServerImpl) RemoteFileCopyCommand(ctx context.Context, data wshrpc.C
|
||||
}
|
||||
log.Printf("RemoteFileCopyCommand: done; %d files copied in %.3fs, total of %.4f MB, %.2f MB/s, %d files skipped\n", numFiles, totalTime, totalMegaBytes, rate, numSkipped)
|
||||
}
|
||||
return nil
|
||||
return srcIsDir, nil
|
||||
}
|
||||
|
||||
func (impl *ServerImpl) RemoteListEntriesCommand(ctx context.Context, data wshrpc.CommandRemoteListEntriesData) chan wshrpc.RespOrErrorUnion[wshrpc.CommandRemoteListEntriesRtnData] {
|
||||
@ -572,7 +578,7 @@ func statToFileInfo(fullPath string, finfo fs.FileInfo, extended bool) *wshrpc.F
|
||||
mimeType := fileutil.DetectMimeType(fullPath, finfo, extended)
|
||||
rtn := &wshrpc.FileInfo{
|
||||
Path: wavebase.ReplaceHomeDir(fullPath),
|
||||
Dir: computeDirPart(fullPath, finfo.IsDir()),
|
||||
Dir: computeDirPart(fullPath),
|
||||
Name: finfo.Name(),
|
||||
Size: finfo.Size(),
|
||||
Mode: finfo.Mode(),
|
||||
@ -602,7 +608,7 @@ func checkIsReadOnly(path string, fileInfo fs.FileInfo, exists bool) bool {
|
||||
if err != nil {
|
||||
return true
|
||||
}
|
||||
fd.Close()
|
||||
utilfn.GracefulClose(fd, "checkIsReadOnly", tmpFileName)
|
||||
os.Remove(tmpFileName)
|
||||
return false
|
||||
}
|
||||
@ -611,20 +617,16 @@ func checkIsReadOnly(path string, fileInfo fs.FileInfo, exists bool) bool {
|
||||
if err != nil {
|
||||
return true
|
||||
}
|
||||
file.Close()
|
||||
utilfn.GracefulClose(file, "checkIsReadOnly", path)
|
||||
return false
|
||||
}
|
||||
|
||||
func computeDirPart(path string, isDir bool) string {
|
||||
func computeDirPart(path string) string {
|
||||
path = filepath.Clean(wavebase.ExpandHomeDirSafe(path))
|
||||
path = filepath.ToSlash(path)
|
||||
if path == "/" {
|
||||
return "/"
|
||||
}
|
||||
path = strings.TrimSuffix(path, "/")
|
||||
if isDir {
|
||||
return path
|
||||
}
|
||||
return filepath.Dir(path)
|
||||
}
|
||||
|
||||
@ -634,7 +636,7 @@ func (*ServerImpl) fileInfoInternal(path string, extended bool) (*wshrpc.FileInf
|
||||
if os.IsNotExist(err) {
|
||||
return &wshrpc.FileInfo{
|
||||
Path: wavebase.ReplaceHomeDir(path),
|
||||
Dir: computeDirPart(path, false),
|
||||
Dir: computeDirPart(path),
|
||||
NotFound: true,
|
||||
ReadOnly: checkIsReadOnly(cleanedPath, finfo, false),
|
||||
SupportsMkdir: true,
|
||||
@ -689,12 +691,12 @@ func (impl *ServerImpl) RemoteFileTouchCommand(ctx context.Context, path string)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (impl *ServerImpl) RemoteFileMoveCommand(ctx context.Context, data wshrpc.CommandRemoteFileCopyData) error {
|
||||
logPrintfDev("RemoteFileCopyCommand: src=%s, dest=%s\n", data.SrcUri, data.DestUri)
|
||||
func (impl *ServerImpl) RemoteFileMoveCommand(ctx context.Context, data wshrpc.CommandFileCopyData) error {
|
||||
opts := data.Opts
|
||||
destUri := data.DestUri
|
||||
srcUri := data.SrcUri
|
||||
overwrite := opts != nil && opts.Overwrite
|
||||
recursive := opts != nil && opts.Recursive
|
||||
|
||||
destConn, err := connparse.ParseURIAndReplaceCurrentHost(ctx, destUri)
|
||||
if err != nil {
|
||||
@ -722,7 +724,14 @@ func (impl *ServerImpl) RemoteFileMoveCommand(ctx context.Context, data wshrpc.C
|
||||
}
|
||||
if srcConn.Host == destConn.Host {
|
||||
srcPathCleaned := filepath.Clean(wavebase.ExpandHomeDirSafe(srcConn.Path))
|
||||
err := os.Rename(srcPathCleaned, destPathCleaned)
|
||||
finfo, err := os.Stat(srcPathCleaned)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot stat file %q: %w", srcPathCleaned, err)
|
||||
}
|
||||
if finfo.IsDir() && !recursive {
|
||||
return fmt.Errorf(fstype.RecursiveRequiredError)
|
||||
}
|
||||
err = os.Rename(srcPathCleaned, destPathCleaned)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot move file %q to %q: %w", srcPathCleaned, destPathCleaned, err)
|
||||
}
|
||||
@ -799,7 +808,7 @@ func (*ServerImpl) RemoteWriteFileCommand(ctx context.Context, data wshrpc.FileD
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot open file %q: %w", path, err)
|
||||
}
|
||||
defer file.Close()
|
||||
defer utilfn.GracefulClose(file, "RemoteWriteFileCommand", path)
|
||||
if atOffset > 0 && !append {
|
||||
n, err = file.WriteAt(dataBytes[:n], atOffset)
|
||||
} else {
|
||||
@ -808,7 +817,6 @@ func (*ServerImpl) RemoteWriteFileCommand(ctx context.Context, data wshrpc.FileD
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot write to file %q: %w", path, err)
|
||||
}
|
||||
logPrintfDev("wrote %d bytes to file %q at offset %d\n", n, path, atOffset)
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -824,7 +832,7 @@ func (*ServerImpl) RemoteFileDeleteCommand(ctx context.Context, data wshrpc.Comm
|
||||
finfo, _ := os.Stat(cleanedPath)
|
||||
if finfo != nil && finfo.IsDir() {
|
||||
if !data.Recursive {
|
||||
return fmt.Errorf("cannot delete directory %q, recursive option not specified", data.Path)
|
||||
return fmt.Errorf(fstype.RecursiveRequiredError)
|
||||
}
|
||||
err = os.RemoveAll(cleanedPath)
|
||||
if err != nil {
|
||||
@ -849,8 +857,7 @@ func (*ServerImpl) FetchSuggestionsCommand(ctx context.Context, data wshrpc.Fetc
|
||||
return suggestion.FetchSuggestions(ctx, data)
|
||||
}
|
||||
|
||||
func logPrintfDev(format string, args ...interface{}) {
|
||||
if wavebase.IsDevMode() {
|
||||
log.Printf(format, args...)
|
||||
}
|
||||
func (*ServerImpl) DisposeSuggestionsCommand(ctx context.Context, widgetId string) error {
|
||||
suggestion.DisposeSuggestions(ctx, widgetId)
|
||||
return nil
|
||||
}
|
||||
|
@ -51,31 +51,36 @@ const (
|
||||
|
||||
// TODO generate these constants from the interface
|
||||
const (
|
||||
Command_Authenticate = "authenticate" // special
|
||||
Command_AuthenticateToken = "authenticatetoken" // special
|
||||
Command_Dispose = "dispose" // special (disposes of the route, for multiproxy only)
|
||||
Command_RouteAnnounce = "routeannounce" // special (for routing)
|
||||
Command_RouteUnannounce = "routeunannounce" // special (for routing)
|
||||
Command_Message = "message"
|
||||
Command_GetMeta = "getmeta"
|
||||
Command_SetMeta = "setmeta"
|
||||
Command_SetView = "setview"
|
||||
Command_ControllerInput = "controllerinput"
|
||||
Command_ControllerRestart = "controllerrestart"
|
||||
Command_ControllerStop = "controllerstop"
|
||||
Command_ControllerResync = "controllerresync"
|
||||
Command_FileAppend = "fileappend"
|
||||
Command_FileAppendIJson = "fileappendijson"
|
||||
Command_Mkdir = "mkdir"
|
||||
Command_ResolveIds = "resolveids"
|
||||
Command_BlockInfo = "blockinfo"
|
||||
Command_CreateBlock = "createblock"
|
||||
Command_DeleteBlock = "deleteblock"
|
||||
Command_FileWrite = "filewrite"
|
||||
Command_FileRead = "fileread"
|
||||
Command_FileMove = "filemove"
|
||||
Command_FileCopy = "filecopy"
|
||||
Command_FileStreamTar = "filestreamtar"
|
||||
Command_Authenticate = "authenticate" // special
|
||||
Command_AuthenticateToken = "authenticatetoken" // special
|
||||
Command_Dispose = "dispose" // special (disposes of the route, for multiproxy only)
|
||||
Command_RouteAnnounce = "routeannounce" // special (for routing)
|
||||
Command_RouteUnannounce = "routeunannounce" // special (for routing)
|
||||
Command_Message = "message"
|
||||
Command_GetMeta = "getmeta"
|
||||
Command_SetMeta = "setmeta"
|
||||
Command_SetView = "setview"
|
||||
Command_ControllerInput = "controllerinput"
|
||||
Command_ControllerRestart = "controllerrestart"
|
||||
Command_ControllerStop = "controllerstop"
|
||||
Command_ControllerResync = "controllerresync"
|
||||
Command_Mkdir = "mkdir"
|
||||
Command_ResolveIds = "resolveids"
|
||||
Command_BlockInfo = "blockinfo"
|
||||
Command_CreateBlock = "createblock"
|
||||
Command_DeleteBlock = "deleteblock"
|
||||
|
||||
Command_FileWrite = "filewrite"
|
||||
Command_FileRead = "fileread"
|
||||
Command_FileReadStream = "filereadstream"
|
||||
Command_FileMove = "filemove"
|
||||
Command_FileCopy = "filecopy"
|
||||
Command_FileStreamTar = "filestreamtar"
|
||||
Command_FileAppend = "fileappend"
|
||||
Command_FileAppendIJson = "fileappendijson"
|
||||
Command_FileJoin = "filejoin"
|
||||
Command_FileShareCapability = "filesharecapability"
|
||||
|
||||
Command_EventPublish = "eventpublish"
|
||||
Command_EventRecv = "eventrecv"
|
||||
Command_EventSub = "eventsub"
|
||||
@ -113,6 +118,7 @@ const (
|
||||
Command_ConnConnect = "connconnect"
|
||||
Command_ConnDisconnect = "conndisconnect"
|
||||
Command_ConnList = "connlist"
|
||||
Command_ConnListAWS = "connlistaws"
|
||||
Command_WslList = "wsllist"
|
||||
Command_WslDefaultDistro = "wsldefaultdistro"
|
||||
Command_DismissWshFail = "dismisswshfail"
|
||||
@ -160,6 +166,7 @@ type WshRpcInterface interface {
|
||||
DeleteBlockCommand(ctx context.Context, data CommandDeleteBlockData) error
|
||||
DeleteSubBlockCommand(ctx context.Context, data CommandDeleteBlockData) error
|
||||
WaitForRouteCommand(ctx context.Context, data CommandWaitForRouteData) (bool, error)
|
||||
|
||||
FileMkdirCommand(ctx context.Context, data FileData) error
|
||||
FileCreateCommand(ctx context.Context, data FileData) error
|
||||
FileDeleteCommand(ctx context.Context, data CommandDeleteFileData) error
|
||||
@ -167,12 +174,16 @@ type WshRpcInterface interface {
|
||||
FileAppendIJsonCommand(ctx context.Context, data CommandAppendIJsonData) error
|
||||
FileWriteCommand(ctx context.Context, data FileData) error
|
||||
FileReadCommand(ctx context.Context, data FileData) (*FileData, error)
|
||||
FileReadStreamCommand(ctx context.Context, data FileData) <-chan RespOrErrorUnion[FileData]
|
||||
FileStreamTarCommand(ctx context.Context, data CommandRemoteStreamTarData) <-chan RespOrErrorUnion[iochantypes.Packet]
|
||||
FileMoveCommand(ctx context.Context, data CommandFileCopyData) error
|
||||
FileCopyCommand(ctx context.Context, data CommandFileCopyData) error
|
||||
FileInfoCommand(ctx context.Context, data FileData) (*FileInfo, error)
|
||||
FileListCommand(ctx context.Context, data FileListData) ([]*FileInfo, error)
|
||||
FileJoinCommand(ctx context.Context, paths []string) (*FileInfo, error)
|
||||
FileListStreamCommand(ctx context.Context, data FileListData) <-chan RespOrErrorUnion[CommandRemoteListEntriesRtnData]
|
||||
|
||||
FileShareCapabilityCommand(ctx context.Context, path string) (FileShareCapability, error)
|
||||
EventPublishCommand(ctx context.Context, data wps.WaveEvent) error
|
||||
EventSubCommand(ctx context.Context, data wps.SubscriptionRequest) error
|
||||
EventUnsubCommand(ctx context.Context, data string) error
|
||||
@ -195,6 +206,8 @@ type WshRpcInterface interface {
|
||||
PathCommand(ctx context.Context, data PathCommandData) (string, error)
|
||||
SendTelemetryCommand(ctx context.Context) error
|
||||
FetchSuggestionsCommand(ctx context.Context, data FetchSuggestionsData) (*FetchSuggestionsResponse, error)
|
||||
DisposeSuggestionsCommand(ctx context.Context, widgetId string) error
|
||||
GetTabCommand(ctx context.Context, tabId string) (*waveobj.Tab, error)
|
||||
|
||||
// connection functions
|
||||
ConnStatusCommand(ctx context.Context) ([]ConnStatus, error)
|
||||
@ -204,6 +217,7 @@ type WshRpcInterface interface {
|
||||
ConnConnectCommand(ctx context.Context, connRequest ConnRequest) error
|
||||
ConnDisconnectCommand(ctx context.Context, connName string) error
|
||||
ConnListCommand(ctx context.Context) ([]string, error)
|
||||
ConnListAWSCommand(ctx context.Context) ([]string, error)
|
||||
WslListCommand(ctx context.Context) ([]string, error)
|
||||
WslDefaultDistroCommand(ctx context.Context) (string, error)
|
||||
DismissWshFailCommand(ctx context.Context, connName string) error
|
||||
@ -215,11 +229,11 @@ type WshRpcInterface interface {
|
||||
// remotes
|
||||
RemoteStreamFileCommand(ctx context.Context, data CommandRemoteStreamFileData) chan RespOrErrorUnion[FileData]
|
||||
RemoteTarStreamCommand(ctx context.Context, data CommandRemoteStreamTarData) <-chan RespOrErrorUnion[iochantypes.Packet]
|
||||
RemoteFileCopyCommand(ctx context.Context, data CommandRemoteFileCopyData) error
|
||||
RemoteFileCopyCommand(ctx context.Context, data CommandFileCopyData) (bool, error)
|
||||
RemoteListEntriesCommand(ctx context.Context, data CommandRemoteListEntriesData) chan RespOrErrorUnion[CommandRemoteListEntriesRtnData]
|
||||
RemoteFileInfoCommand(ctx context.Context, path string) (*FileInfo, error)
|
||||
RemoteFileTouchCommand(ctx context.Context, path string) error
|
||||
RemoteFileMoveCommand(ctx context.Context, data CommandRemoteFileCopyData) error
|
||||
RemoteFileMoveCommand(ctx context.Context, data CommandFileCopyData) error
|
||||
RemoteFileDeleteCommand(ctx context.Context, data CommandDeleteFileData) error
|
||||
RemoteWriteFileCommand(ctx context.Context, data FileData) error
|
||||
RemoteFileJoinCommand(ctx context.Context, paths []string) (*FileInfo, error)
|
||||
@ -527,12 +541,6 @@ type CommandFileCopyData struct {
|
||||
Opts *FileCopyOpts `json:"opts,omitempty"`
|
||||
}
|
||||
|
||||
type CommandRemoteFileCopyData struct {
|
||||
SrcUri string `json:"srcuri"`
|
||||
DestUri string `json:"desturi"`
|
||||
Opts *FileCopyOpts `json:"opts,omitempty"`
|
||||
}
|
||||
|
||||
type CommandRemoteStreamTarData struct {
|
||||
Path string `json:"path"`
|
||||
Opts *FileCopyOpts `json:"opts,omitempty"`
|
||||
@ -540,7 +548,7 @@ type CommandRemoteStreamTarData struct {
|
||||
|
||||
type FileCopyOpts struct {
|
||||
Overwrite bool `json:"overwrite,omitempty"`
|
||||
Recursive bool `json:"recursive,omitempty"`
|
||||
Recursive bool `json:"recursive,omitempty"` // only used for move, always true for copy
|
||||
Merge bool `json:"merge,omitempty"`
|
||||
Timeout int64 `json:"timeout,omitempty"`
|
||||
}
|
||||
@ -766,6 +774,14 @@ type SuggestionType struct {
|
||||
UrlUrl string `json:"url:url,omitempty"`
|
||||
}
|
||||
|
||||
// FileShareCapability represents the capabilities of a file share
|
||||
type FileShareCapability struct {
|
||||
// CanAppend indicates whether the file share supports appending to files
|
||||
CanAppend bool `json:"canappend"`
|
||||
// CanMkdir indicates whether the file share supports creating directories
|
||||
CanMkdir bool `json:"canmkdir"`
|
||||
}
|
||||
|
||||
type JsonCompare struct {
|
||||
Original waveobj.MetaMapType `json:"original"`
|
||||
Modified waveobj.MetaMapType `json:"modified"`
|
||||
|
@ -24,6 +24,7 @@ import (
|
||||
"github.com/wavetermdev/waveterm/pkg/genconn"
|
||||
"github.com/wavetermdev/waveterm/pkg/panichandler"
|
||||
"github.com/wavetermdev/waveterm/pkg/remote"
|
||||
"github.com/wavetermdev/waveterm/pkg/remote/awsconn"
|
||||
"github.com/wavetermdev/waveterm/pkg/remote/conncontroller"
|
||||
"github.com/wavetermdev/waveterm/pkg/remote/fileshare"
|
||||
"github.com/wavetermdev/waveterm/pkg/suggestion"
|
||||
@ -31,6 +32,7 @@ import (
|
||||
"github.com/wavetermdev/waveterm/pkg/telemetry/telemetrydata"
|
||||
"github.com/wavetermdev/waveterm/pkg/util/envutil"
|
||||
"github.com/wavetermdev/waveterm/pkg/util/iochan/iochantypes"
|
||||
"github.com/wavetermdev/waveterm/pkg/util/iterfn"
|
||||
"github.com/wavetermdev/waveterm/pkg/util/shellutil"
|
||||
"github.com/wavetermdev/waveterm/pkg/util/utilfn"
|
||||
"github.com/wavetermdev/waveterm/pkg/util/wavefileutil"
|
||||
@ -384,6 +386,10 @@ func (ws *WshServer) FileReadCommand(ctx context.Context, data wshrpc.FileData)
|
||||
return fileshare.Read(ctx, data)
|
||||
}
|
||||
|
||||
func (ws *WshServer) FileReadStreamCommand(ctx context.Context, data wshrpc.FileData) <-chan wshrpc.RespOrErrorUnion[wshrpc.FileData] {
|
||||
return fileshare.ReadStream(ctx, data)
|
||||
}
|
||||
|
||||
func (ws *WshServer) FileCopyCommand(ctx context.Context, data wshrpc.CommandFileCopyData) error {
|
||||
return fileshare.Copy(ctx, data)
|
||||
}
|
||||
@ -425,6 +431,20 @@ func (ws *WshServer) FileAppendIJsonCommand(ctx context.Context, data wshrpc.Com
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ws *WshServer) FileJoinCommand(ctx context.Context, paths []string) (*wshrpc.FileInfo, error) {
|
||||
if len(paths) < 2 {
|
||||
if len(paths) == 0 {
|
||||
return nil, fmt.Errorf("no paths provided")
|
||||
}
|
||||
return fileshare.Stat(ctx, paths[0])
|
||||
}
|
||||
return fileshare.Join(ctx, paths[0], paths[1:]...)
|
||||
}
|
||||
|
||||
func (ws *WshServer) FileShareCapabilityCommand(ctx context.Context, path string) (wshrpc.FileShareCapability, error) {
|
||||
return fileshare.GetCapability(ctx, path)
|
||||
}
|
||||
|
||||
func (ws *WshServer) DeleteSubBlockCommand(ctx context.Context, data wshrpc.CommandDeleteBlockData) error {
|
||||
err := wcore.DeleteBlock(ctx, data.BlockId, false)
|
||||
if err != nil {
|
||||
@ -551,6 +571,15 @@ func termCtxWithLogBlockId(ctx context.Context, logBlockId string) context.Conte
|
||||
}
|
||||
|
||||
func (ws *WshServer) ConnEnsureCommand(ctx context.Context, data wshrpc.ConnExtData) error {
|
||||
// TODO: if we add proper wsh connections via aws, we'll need to handle that here
|
||||
if strings.HasPrefix(data.ConnName, "aws:") {
|
||||
profiles := awsconn.ParseProfiles()
|
||||
for profile := range profiles {
|
||||
if strings.HasPrefix(data.ConnName, profile) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
ctx = genconn.ContextWithConnData(ctx, data.LogBlockId)
|
||||
ctx = termCtxWithLogBlockId(ctx, data.LogBlockId)
|
||||
if strings.HasPrefix(data.ConnName, "wsl://") {
|
||||
@ -561,6 +590,10 @@ func (ws *WshServer) ConnEnsureCommand(ctx context.Context, data wshrpc.ConnExtD
|
||||
}
|
||||
|
||||
func (ws *WshServer) ConnDisconnectCommand(ctx context.Context, connName string) error {
|
||||
// TODO: if we add proper wsh connections via aws, we'll need to handle that here
|
||||
if strings.HasPrefix(connName, "aws:") {
|
||||
return nil
|
||||
}
|
||||
if strings.HasPrefix(connName, "wsl://") {
|
||||
distroName := strings.TrimPrefix(connName, "wsl://")
|
||||
conn := wslconn.GetWslConn(distroName)
|
||||
@ -581,6 +614,10 @@ func (ws *WshServer) ConnDisconnectCommand(ctx context.Context, connName string)
|
||||
}
|
||||
|
||||
func (ws *WshServer) ConnConnectCommand(ctx context.Context, connRequest wshrpc.ConnRequest) error {
|
||||
// TODO: if we add proper wsh connections via aws, we'll need to handle that here
|
||||
if strings.HasPrefix(connRequest.Host, "aws:") {
|
||||
return nil
|
||||
}
|
||||
ctx = genconn.ContextWithConnData(ctx, connRequest.LogBlockId)
|
||||
ctx = termCtxWithLogBlockId(ctx, connRequest.LogBlockId)
|
||||
connName := connRequest.Host
|
||||
@ -604,6 +641,10 @@ func (ws *WshServer) ConnConnectCommand(ctx context.Context, connRequest wshrpc.
|
||||
}
|
||||
|
||||
func (ws *WshServer) ConnReinstallWshCommand(ctx context.Context, data wshrpc.ConnExtData) error {
|
||||
// TODO: if we add proper wsh connections via aws, we'll need to handle that here
|
||||
if strings.HasPrefix(data.ConnName, "aws:") {
|
||||
return nil
|
||||
}
|
||||
ctx = genconn.ContextWithConnData(ctx, data.LogBlockId)
|
||||
ctx = termCtxWithLogBlockId(ctx, data.LogBlockId)
|
||||
connName := data.ConnName
|
||||
@ -673,6 +714,11 @@ func (ws *WshServer) ConnListCommand(ctx context.Context) ([]string, error) {
|
||||
return conncontroller.GetConnectionsList()
|
||||
}
|
||||
|
||||
func (ws *WshServer) ConnListAWSCommand(ctx context.Context) ([]string, error) {
|
||||
profilesMap := awsconn.ParseProfiles()
|
||||
return iterfn.MapKeysToSorted(profilesMap), nil
|
||||
}
|
||||
|
||||
func (ws *WshServer) WslListCommand(ctx context.Context) ([]string, error) {
|
||||
distros, err := wsl.RegisteredDistros(ctx)
|
||||
if err != nil {
|
||||
@ -915,3 +961,16 @@ func (ws *WshServer) PathCommand(ctx context.Context, data wshrpc.PathCommandDat
|
||||
func (ws *WshServer) FetchSuggestionsCommand(ctx context.Context, data wshrpc.FetchSuggestionsData) (*wshrpc.FetchSuggestionsResponse, error) {
|
||||
return suggestion.FetchSuggestions(ctx, data)
|
||||
}
|
||||
|
||||
func (ws *WshServer) DisposeSuggestionsCommand(ctx context.Context, widgetId string) error {
|
||||
suggestion.DisposeSuggestions(ctx, widgetId)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ws *WshServer) GetTabCommand(ctx context.Context, tabId string) (*waveobj.Tab, error) {
|
||||
tab, err := wstore.DBGet[*waveobj.Tab](ctx, tabId)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error getting tab: %w", err)
|
||||
}
|
||||
return tab, nil
|
||||
}
|
||||
|
@ -208,7 +208,7 @@ func (router *WshRouter) sendRoutedMessage(msgBytes []byte, routeId string) bool
|
||||
localRouteId := router.getAnnouncedRoute(routeId)
|
||||
rpc := router.GetRpc(localRouteId)
|
||||
if rpc == nil {
|
||||
log.Printf("[router] no rpc for local route id %q\n", localRouteId)
|
||||
log.Printf("[router] no rpc for route id %q\n", routeId)
|
||||
return false
|
||||
}
|
||||
rpc.SendRpcMessage(msgBytes)
|
||||
|
@ -730,9 +730,7 @@ func (w *WshRpc) setServerDone() {
|
||||
defer w.Lock.Unlock()
|
||||
w.ServerDone = true
|
||||
close(w.CtxDoneCh)
|
||||
for range w.CtxDoneCh {
|
||||
// drain channel
|
||||
}
|
||||
utilfn.DrainChannelSafe(w.InputCh, "wshrpc.setServerDone")
|
||||
}
|
||||
|
||||
func (w *WshRpc) retrySendTimeout(resId string) {
|
||||
|
@ -25,10 +25,7 @@ func AdaptOutputChToStream(outputCh chan []byte, output io.Writer) error {
|
||||
drain := false
|
||||
defer func() {
|
||||
if drain {
|
||||
go func() {
|
||||
for range outputCh {
|
||||
}
|
||||
}()
|
||||
utilfn.DrainChannelSafe(outputCh, "AdaptOutputChToStream")
|
||||
}
|
||||
}()
|
||||
for msg := range outputCh {
|
||||
|
@ -7,9 +7,9 @@ cd "$HOME/testcp"
|
||||
touch foo.txt
|
||||
|
||||
# this is different from cp behavior
|
||||
wsh file copy foo.txt baz/ >/dev/null 2>&1 && echo "command should have failed" && exit 1
|
||||
wsh file copy foo.txt baz/
|
||||
|
||||
if [ -f baz/foo.txt ]; then
|
||||
echo "baz/foo.txt should not exist"
|
||||
if [ ! -f baz/foo.txt ]; then
|
||||
echo "baz/foo.txt does not exist"
|
||||
exit 1
|
||||
fi
|
||||
|
19
tests/copytests/cases/test048.sh
Executable file
19
tests/copytests/cases/test048.sh
Executable file
@ -0,0 +1,19 @@
|
||||
# copy the current directory into an existing directory
|
||||
# ensure the copy succeeds and the output exists
|
||||
|
||||
set -e
|
||||
cd "$HOME/testcp"
|
||||
mkdir foo
|
||||
touch foo/bar.txt
|
||||
mkdir baz
|
||||
cd foo
|
||||
|
||||
|
||||
wsh file copy . ../baz
|
||||
cd ..
|
||||
|
||||
|
||||
if [ ! -f baz/bar.txt ]; then
|
||||
echo "baz/bar.txt does not exist"
|
||||
exit 1
|
||||
fi
|
@ -1,11 +1,10 @@
|
||||
# copy the current directory into an existing directory
|
||||
# copy the current directory into a non-existing directory
|
||||
# ensure the copy succeeds and the output exists
|
||||
|
||||
set -e
|
||||
cd "$HOME/testcp"
|
||||
mkdir foo
|
||||
touch foo/bar.txt
|
||||
mkdir baz
|
||||
cd foo
|
||||
|
||||
wsh file copy . ../baz
|
||||
|
Loading…
Reference in New Issue
Block a user