checkpoint

This commit is contained in:
sawka 2024-02-13 08:43:51 -05:00
parent e589d9c4fa
commit eab8e3be02
3 changed files with 80 additions and 6 deletions

View File

@ -477,9 +477,7 @@ func GetConnectUpdate(ctx context.Context) (*ConnectUpdate, error) {
}
query := `SELECT * FROM screen ORDER BY archived, screenidx, archivedts`
screens := dbutil.SelectMapsGen[*ScreenType](tx, query)
for _, screen := range screens {
update.Screens = append(update.Screens, screen)
}
update.Screens = append(update.Screens, screens...)
query = `SELECT * FROM remote_instance`
riArr := dbutil.SelectMapsGen[*RemoteInstance](tx, query)
for _, ri := range riArr {
@ -674,7 +672,7 @@ func fmtUniqueName(name string, defaultFmtStr string, startIdx int, strs []strin
} else {
fmtStr = defaultFmtStr
}
if strings.Index(fmtStr, "%d") == -1 {
if !strings.Contains(fmtStr, "%d") {
panic("invalid fmtStr: " + fmtStr)
}
for {
@ -1159,7 +1157,7 @@ func ArchiveScreen(ctx context.Context, sessionId string, screenId string) (Upda
return fmt.Errorf("cannot close screen (not found)")
}
if isWebShare(tx, screenId) {
return fmt.Errorf("cannot archive screen while web-sharing. stop web-sharing before trying to archive.")
return fmt.Errorf("cannot archive screen while web-sharing. stop web-sharing before trying to archive")
}
query = `SELECT archived FROM screen WHERE sessionid = ? AND screenid = ?`
closeVal := tx.GetBool(query, sessionId, screenId)

View File

@ -13,17 +13,93 @@ import (
"os"
"path"
"sync"
"sync/atomic"
"time"
"github.com/google/uuid"
"github.com/jmoiron/sqlx"
"github.com/wavetermdev/waveterm/waveshell/pkg/cirfile"
"github.com/wavetermdev/waveterm/waveshell/pkg/shexec"
"github.com/wavetermdev/waveterm/wavesrv/pkg/scbase"
)
// MaxDBFileSize is the size threshold (in bytes) used by TryConvertPtyFile;
// pty files larger than this are not converted to DB-backed storage.
// NOTE(review): 10 KiB looks small for a "max DB file size" — confirm intended.
const MaxDBFileSize = 10 * 1024

var screenDirLock = &sync.Mutex{}
var screenDirCache = make(map[string]string) // locked with screenDirLock

// globalDBFileCache is the process-wide cache of per-screen DB handles,
// consumed via the DBFileGetter adapter below.
var globalDBFileCache = makeDBFileCache()
// dbFileCacheEntry holds one cached DB handle keyed by screen id.
type dbFileCacheEntry struct {
	DBLock *sync.Mutex // held for the duration of a checkout (locked in GetDB, unlocked in ReleaseDB)
	DB     *sqlx.DB    // cached handle; NOTE(review): never assigned in the visible code (checkpoint commit) — confirm where it is opened
	InUse  atomic.Bool // mirrors DBLock ownership: true while checked out, false after release
}
// DBFileCache maps a screen id to its cached DB handle entry.
// Lock guards the Cache map itself; each entry's DBLock serializes
// use of that entry's handle.
type DBFileCache struct {
	Lock  *sync.Mutex
	Cache map[string]*dbFileCacheEntry
}
// makeDBFileCache constructs an empty DBFileCache ready for use.
func makeDBFileCache() *DBFileCache {
	cache := &DBFileCache{
		Lock:  &sync.Mutex{},
		Cache: map[string]*dbFileCacheEntry{},
	}
	return cache
}
// GetDB checks out the cached *sqlx.DB for screenId, locking the entry's
// DBLock so the handle is held exclusively until ReleaseDB is called.
//
// NOTE(review): on a cache miss this only ensures the screen dir exists and
// then returns (nil, nil) — opening the DB file appears unimplemented in this
// checkpoint commit, so callers currently receive a nil DB with a nil error.
// NOTE(review): entry.DBLock.Lock() is invoked while c.Lock is held, so one
// long-held entry blocks GetDB for every screen — confirm this is intended.
func (c *DBFileCache) GetDB(screenId string) (*sqlx.DB, error) {
	c.Lock.Lock()
	defer c.Lock.Unlock()
	entry := c.Cache[screenId]
	if entry != nil {
		entry.DBLock.Lock()
		entry.InUse.Store(true)
		return entry.DB, nil
	}
	_, err := EnsureScreenDir(screenId)
	if err != nil {
		return nil, err
	}
	return nil, nil
}
// ReleaseDB returns a handle checked out via GetDB, clearing InUse and
// unlocking the entry so other callers can acquire it.
//
// Fixes two races in the original:
//  1. c.Cache was read without holding c.Lock, racing with the locked
//     map accesses in GetDB.
//  2. DBLock was unlocked before InUse.Store(false), so a new checkout's
//     InUse.Store(true) could be clobbered back to false by this goroutine.
func (c *DBFileCache) ReleaseDB(screenId string, db *sqlx.DB) {
	c.Lock.Lock()
	entry := c.Cache[screenId]
	c.Lock.Unlock()
	if entry == nil {
		// this shouldn't happen (error)
		log.Printf("[db] error missing cache entry for dbfile %s", screenId)
		return
	}
	// clear InUse before releasing the lock so the next checkout's
	// InUse.Store(true) cannot be overwritten by this release
	entry.InUse.Store(false)
	entry.DBLock.Unlock()
}
// DBFileGetter fulfills the txwrap DBGetter interface, resolving a
// screen-scoped DB handle from globalDBFileCache.
type DBFileGetter struct {
	ScreenId string
}
// GetDB checks a DB handle for this getter's screen out of the global cache.
// ctx is unused; it exists to satisfy the txwrap DBGetter interface.
func (g DBFileGetter) GetDB(ctx context.Context) (*sqlx.DB, error) {
	db, err := globalDBFileCache.GetDB(g.ScreenId)
	return db, err
}
// ReleaseDB returns a handle obtained from GetDB back to the global cache.
func (g DBFileGetter) ReleaseDB(db *sqlx.DB) {
	cache := globalDBFileCache
	cache.ReleaseDB(g.ScreenId, db)
}
// TryConvertPtyFile checks whether the pty file for (screenId, lineId) is
// small enough to be converted to DB-backed storage.
//
// NOTE(review): currently a stub — both non-error paths return nil and no
// conversion is performed (checkpoint commit). The size check only
// distinguishes files above MaxDBFileSize, which are skipped.
func TryConvertPtyFile(ctx context.Context, screenId string, lineId string) error {
	stat, err := StatCmdPtyFile(ctx, screenId, lineId)
	if err != nil {
		return fmt.Errorf("convert ptyfile, cannot stat: %w", err)
	}
	if stat.DataSize > MaxDBFileSize {
		// too large to store in the DB; leave as a pty file
		return nil
	}
	return nil
}
func CreateCmdPtyFile(ctx context.Context, screenId string, lineId string, maxSize int64) error {
ptyOutFileName, err := PtyOutFile(screenId, lineId)
if err != nil {

View File

@ -142,7 +142,7 @@ func RunMigration13() error {
}
err := os.RemoveAll(scbase.GetSessionsDir())
if err != nil {
return fmt.Errorf("cannot remove old sessions dir %s: %w\n", scbase.GetSessionsDir(), err)
return fmt.Errorf("cannot remove old sessions dir %s: %w", scbase.GetSessionsDir(), err)
}
txErr = WithTx(ctx, func(tx *TxWrap) error {
query := `UPDATE client SET cmdstoretype = 'screen'`