mirror of https://github.com/wavetermdev/waveterm.git (synced 2024-12-21 16:38:23 +01:00)
progress, working on flush
This commit is contained in:
parent b5f7ff699c
commit a10f0d99ea
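
Summary, as read from the diff below: DataCacheEntry gains a Flushing flag next to Dirty and is now built through makeDataCacheEntry; writeToPart clones a part via clonePart when that part is currently being flushed and returns the (possibly new) entry so writeAt can store it back into DataEntries; new getDirtyDataEntries/flushEntry methods collect dirty parts under the store lock, mark them Flushing, and persist them with dbWriteCacheEntry, which now clears both flags; a stopFlush atomic is added (set in the test setup), and the dbDeleteBlock helper and the FlushTime field are removed.

The sketch below is illustrative and not part of this commit; the flushAll name and its loop are assumptions. It only shows how a periodic flusher could drive the new flushEntry while honoring stopFlush:

// Illustrative sketch (not from this commit): a possible driver for the new
// flush path, assuming the BlockStore, CacheEntry, flushEntry, and stopFlush
// definitions from the diff below (package blockstore).
func (s *BlockStore) flushAll(ctx context.Context) error {
	if stopFlush.Load() {
		// tests set stopFlush in initDb to disable flushing
		return nil
	}
	// snapshot the cache entries under the lock, then flush without holding it
	s.Lock.Lock()
	entries := make([]*CacheEntry, 0, len(s.Cache))
	for _, entry := range s.Cache {
		entries = append(entries, entry)
	}
	s.Lock.Unlock()
	for _, entry := range entries {
		if err := s.flushEntry(ctx, entry); err != nil {
			return err
		}
	}
	return nil
}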
@@ -11,6 +11,7 @@ import (
 	"context"
 	"fmt"
 	"sync"
+	"sync/atomic"
 	"time"
 )
 
@@ -19,11 +20,11 @@ const DefaultFlushTime = 5 * time.Second
 const NoPartIdx = -1
 
 var partDataSize int64 = DefaultPartDataSize // overridden in tests
+var stopFlush = &atomic.Bool{}
 
 var GBS *BlockStore = &BlockStore{
 	Lock: &sync.Mutex{},
 	Cache: make(map[cacheKey]*CacheEntry),
-	FlushTime: DefaultFlushTime,
 }
 
 type FileOptsType struct {
@@ -5,10 +5,10 @@ package blockstore
 
 import (
 	"bytes"
+	"context"
 	"fmt"
 	"sync"
 	"sync/atomic"
-	"time"
 )
 
 type cacheKey struct {
@@ -17,9 +17,10 @@ type cacheKey struct {
 }
 
 type DataCacheEntry struct {
 	Dirty *atomic.Bool
+	Flushing *atomic.Bool
 	PartIdx int
 	Data []byte // capacity is always BlockDataPartSize
 }
 
 type FileCacheEntry struct {
@@ -70,7 +71,15 @@ func (s *BlockStore) dump() string {
 		buf.WriteString("\n")
 	}
 	return buf.String()
+}
 
+func makeDataCacheEntry(partIdx int) *DataCacheEntry {
+	return &DataCacheEntry{
+		Dirty: &atomic.Bool{},
+		Flushing: &atomic.Bool{},
+		PartIdx: partIdx,
+		Data: make([]byte, 0, partDataSize),
+	}
 }
 
 // for testing
@@ -92,16 +101,24 @@ func (e *CacheEntry) ensurePart(partIdx int, create bool) *DataCacheEntry {
 		e.DataEntries = append(e.DataEntries, nil)
 	}
 	if create && e.DataEntries[partIdx] == nil {
-		e.DataEntries[partIdx] = &DataCacheEntry{
-			PartIdx: partIdx,
-			Data: make([]byte, 0, partDataSize),
-			Dirty: &atomic.Bool{},
-		}
+		e.DataEntries[partIdx] = makeDataCacheEntry(partIdx)
 	}
 	return e.DataEntries[partIdx]
 }
 
-func (dce *DataCacheEntry) writeToPart(offset int64, data []byte) int64 {
+func (dce *DataCacheEntry) clonePart() *DataCacheEntry {
+	rtn := makeDataCacheEntry(dce.PartIdx)
+	copy(rtn.Data, dce.Data)
+	if dce.Dirty.Load() {
+		rtn.Dirty.Store(true)
+	}
+	return rtn
+}
+
+func (dce *DataCacheEntry) writeToPart(offset int64, data []byte) (int64, *DataCacheEntry) {
+	if dce.Flushing.Load() {
+		dce = dce.clonePart()
+	}
 	leftInPart := partDataSize - offset
 	toWrite := int64(len(data))
 	if toWrite > leftInPart {
@@ -112,7 +129,7 @@ func (dce *DataCacheEntry) writeToPart(offset int64, data []byte) int64 {
 	}
 	copy(dce.Data[offset:], data[:toWrite])
 	dce.Dirty.Store(true)
-	return toWrite
+	return toWrite, dce
 }
 
 func (entry *CacheEntry) writeAt(offset int64, data []byte) {
@@ -124,16 +141,16 @@ func (entry *CacheEntry) writeAt(offset int64, data []byte) {
 		}
 		partOffset := offset % partDataSize
 		partData := entry.ensurePart(partIdx, true)
-		nw := partData.writeToPart(partOffset, data)
+		nw, newDce := partData.writeToPart(partOffset, data)
+		entry.DataEntries[partIdx] = newDce
 		data = data[nw:]
 		offset += nw
 	}
 }
 
 type BlockStore struct {
 	Lock *sync.Mutex
 	Cache map[cacheKey]*CacheEntry
-	FlushTime time.Duration
 }
 
 func (s *BlockStore) withLock(blockId string, name string, shouldCreate bool, f func(*CacheEntry)) {
@@ -236,3 +253,40 @@ func (e *CacheEntry) copyOrCreateFileEntry(dbFile *BlockFile) *FileCacheEntry {
 		File: *e.FileEntry.File.DeepCopy(),
 	}
 }
+
+// also sets Flushing to true
+func (s *BlockStore) getDirtyDataEntries(entry *CacheEntry) (*FileCacheEntry, []*DataCacheEntry) {
+	s.Lock.Lock()
+	defer s.Lock.Unlock()
+	if entry.Deleted || entry.FileEntry == nil {
+		return nil, nil
+	}
+	var dirtyData []*DataCacheEntry
+	for _, dce := range entry.DataEntries {
+		if dce != nil && dce.Dirty.Load() {
+			dirtyData = append(dirtyData, dce)
+		}
+	}
+	if !entry.FileEntry.Dirty.Load() && len(dirtyData) == 0 {
+		return nil, nil
+	}
+	for _, data := range dirtyData {
+		data.Flushing.Store(true)
+	}
+	return entry.FileEntry, dirtyData
+}
+
+// clean is true if the block was clean (nothing to write)
+// returns (clean, error)
+func (s *BlockStore) flushEntry(ctx context.Context, entry *CacheEntry) error {
+	fileEntry, dirtyData := s.getDirtyDataEntries(entry)
+	if fileEntry == nil && len(dirtyData) == 0 {
+		s.tryDeleteCacheEntry(entry.BlockId, entry.Name)
+		return nil
+	}
+	err := dbWriteCacheEntry(ctx, fileEntry, dirtyData)
+	if err != nil {
+		return err
+	}
+	return nil
+}
@@ -36,16 +36,6 @@ func dbGetBlockFileNames(ctx context.Context, blockId string) ([]string, error)
 	})
 }
 
-func dbDeleteBlock(ctx context.Context, blockId string) error {
-	return WithTx(ctx, func(tx *TxWrap) error {
-		query := "DELETE FROM db_block_file WHERE blockid = ?"
-		tx.Exec(query, blockId)
-		query = "DELETE FROM db_block_data WHERE blockid = ?"
-		tx.Exec(query, blockId)
-		return nil
-	})
-}
-
 func dbGetBlockFile(ctx context.Context, blockId string, name string) (*BlockFile, error) {
 	return WithTxRtn(ctx, func(tx *TxWrap) (*BlockFile, error) {
 		query := "SELECT * FROM db_block_file WHERE blockid = ? AND name = ?"
@@ -112,6 +102,7 @@ func dbWriteCacheEntry(ctx context.Context, fileEntry *FileCacheEntry, dataEntri
 	for _, dataEntry := range dataEntries {
 		if dataEntry != nil {
 			dataEntry.Dirty.Store(false)
+			dataEntry.Flushing.Store(false)
 		}
 	}
 }
@@ -16,6 +16,7 @@ func initDb(t *testing.T) {
 	t.Logf("initializing db for %q", t.Name())
 	useTestingDb = true
 	partDataSize = 50
+	stopFlush.Store(true)
 	err := InitBlockstore()
 	if err != nil {
 		t.Fatalf("error initializing blockstore: %v", err)
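
A note on the copy-on-write behavior introduced above (illustrative, not part of the commit): once getDirtyDataEntries has marked a part as Flushing, a later write that touches that part goes through clonePart, so the flusher keeps a stable snapshot while the new bytes land on the replacement entry, and dbWriteCacheEntry then clears Dirty and Flushing on the snapshot it wrote. A minimal sketch of that sequence, assuming the blockstore types and functions shown in the diff above (the exampleCloneOnFlush name is hypothetical):

// Illustrative sequence (not from this commit), assuming the blockstore
// package as patched above.
func exampleCloneOnFlush(ctx context.Context, s *BlockStore, entry *CacheEntry) error {
	// Collect the dirty parts; this also sets Flushing on each of them.
	fileEntry, dirtyData := s.getDirtyDataEntries(entry)
	if fileEntry == nil && len(dirtyData) == 0 {
		return nil // nothing dirty to write
	}
	// A write that hits a Flushing part is redirected onto a clone by
	// writeToPart, and writeAt stores the clone back into DataEntries, so
	// the entries collected in dirtyData stay stable during the flush.
	entry.writeAt(0, []byte("written while the flush is in flight"))
	// Persist the snapshot; dbWriteCacheEntry clears Dirty and Flushing on
	// the parts it wrote.
	return dbWriteCacheEntry(ctx, fileEntry, dirtyData)
}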