mirror of
https://github.com/anyproto/anytype-heart.git
synced 2025-06-10 18:10:49 +09:00
GO-3788 Merge main
This commit is contained in:
commit
e3e43f5123
67 changed files with 6818 additions and 4778 deletions
|
@ -13,6 +13,7 @@ issues:
|
|||
- pb
|
||||
exclude-files:
|
||||
- '.*_test.go'
|
||||
- 'mock*'
|
||||
- 'testMock/*'
|
||||
- 'clientlibrary/service/service.pb.go'
|
||||
|
||||
|
|
|
@ -192,6 +192,9 @@ packages:
|
|||
interfaces:
|
||||
PeerStatusChecker:
|
||||
SyncDetailsUpdater:
|
||||
github.com/anyproto/anytype-heart/core/syncstatus/nodestatus:
|
||||
interfaces:
|
||||
NodeStatus:
|
||||
github.com/anyproto/anytype-heart/core/syncstatus/objectsyncstatus:
|
||||
interfaces:
|
||||
Updater:
|
||||
|
@ -210,4 +213,6 @@ packages:
|
|||
github.com/anyproto/anytype-heart/core/syncstatus/spacesyncstatus:
|
||||
interfaces:
|
||||
SpaceIdGetter:
|
||||
NodeUsage:
|
||||
NetworkConfig:
|
||||
Updater:
|
|
@ -81,6 +81,7 @@ import (
|
|||
"github.com/anyproto/anytype-heart/core/syncstatus/detailsupdater"
|
||||
"github.com/anyproto/anytype-heart/core/syncstatus/nodestatus"
|
||||
"github.com/anyproto/anytype-heart/core/syncstatus/spacesyncstatus"
|
||||
"github.com/anyproto/anytype-heart/core/syncstatus/syncsubscriptions"
|
||||
"github.com/anyproto/anytype-heart/core/wallet"
|
||||
"github.com/anyproto/anytype-heart/metrics"
|
||||
"github.com/anyproto/anytype-heart/pkg/lib/core"
|
||||
|
@ -263,7 +264,7 @@ func Bootstrap(a *app.App, components ...app.Component) {
|
|||
Register(treemanager.New()).
|
||||
Register(block.New()).
|
||||
Register(indexer.New()).
|
||||
Register(detailsupdater.NewUpdater()).
|
||||
Register(detailsupdater.New()).
|
||||
Register(session.NewHookRunner()).
|
||||
Register(spacesyncstatus.NewSpaceSyncStatus()).
|
||||
Register(nodestatus.NewNodeStatus()).
|
||||
|
@ -277,6 +278,7 @@ func Bootstrap(a *app.App, components ...app.Component) {
|
|||
Register(debug.New()).
|
||||
Register(collection.New()).
|
||||
Register(subscription.New()).
|
||||
Register(syncsubscriptions.New()).
|
||||
Register(builtinobjects.New()).
|
||||
Register(bookmark.New()).
|
||||
Register(importer.New()).
|
||||
|
|
|
@ -71,6 +71,7 @@ func (s *Service) AccountSelect(ctx context.Context, req *pb.RpcAccountSelectReq
|
|||
if err := s.stop(); err != nil {
|
||||
return nil, errors.Join(ErrFailedToStopApplication, err)
|
||||
}
|
||||
metrics.Service.SetWorkingDir(req.RootPath, req.Id)
|
||||
|
||||
return s.start(ctx, req.Id, req.RootPath, req.DisableLocalNetworkSync, req.PreferYamuxTransport, req.NetworkMode, req.NetworkCustomConfigFilePath)
|
||||
}
|
||||
|
|
|
@ -10,6 +10,7 @@ import (
|
|||
"github.com/anyproto/anytype-heart/core/block/editor/converter"
|
||||
"github.com/anyproto/anytype-heart/core/block/editor/smartblock"
|
||||
"github.com/anyproto/anytype-heart/core/block/editor/state"
|
||||
"github.com/anyproto/anytype-heart/core/block/editor/table"
|
||||
"github.com/anyproto/anytype-heart/core/block/editor/template"
|
||||
"github.com/anyproto/anytype-heart/core/block/restriction"
|
||||
"github.com/anyproto/anytype-heart/core/block/simple"
|
||||
|
@ -265,6 +266,11 @@ func (bs *basic) Move(srcState, destState *state.State, targetBlockId string, po
|
|||
}
|
||||
}
|
||||
|
||||
targetBlockId, position, err = table.CheckTableBlocksMove(srcState, targetBlockId, position, blockIds)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var replacementCandidate simple.Block
|
||||
for _, id := range blockIds {
|
||||
if b := srcState.Pick(id); b != nil {
|
||||
|
|
|
@ -2,6 +2,7 @@ package basic
|
|||
|
||||
import (
|
||||
"errors"
|
||||
"math/rand"
|
||||
"testing"
|
||||
|
||||
"github.com/gogo/protobuf/types"
|
||||
|
@ -12,6 +13,7 @@ import (
|
|||
"github.com/anyproto/anytype-heart/core/block/editor/converter"
|
||||
"github.com/anyproto/anytype-heart/core/block/editor/smartblock"
|
||||
"github.com/anyproto/anytype-heart/core/block/editor/smartblock/smarttest"
|
||||
"github.com/anyproto/anytype-heart/core/block/editor/table"
|
||||
"github.com/anyproto/anytype-heart/core/block/editor/template"
|
||||
"github.com/anyproto/anytype-heart/core/block/restriction"
|
||||
"github.com/anyproto/anytype-heart/core/block/simple"
|
||||
|
@ -357,6 +359,152 @@ func TestBasic_Move(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
func TestBasic_MoveTableBlocks(t *testing.T) {
|
||||
getSB := func() *smarttest.SmartTest {
|
||||
sb := smarttest.New("test")
|
||||
sb.AddBlock(simple.New(&model.Block{Id: "test", ChildrenIds: []string{"upper", "table", "block"}})).
|
||||
AddBlock(simple.New(&model.Block{Id: "table", ChildrenIds: []string{"columns", "rows"}, Content: &model.BlockContentOfTable{Table: &model.BlockContentTable{}}})).
|
||||
AddBlock(simple.New(&model.Block{Id: "columns", ChildrenIds: []string{"column"}, Content: &model.BlockContentOfLayout{Layout: &model.BlockContentLayout{Style: model.BlockContentLayout_TableColumns}}})).
|
||||
AddBlock(simple.New(&model.Block{Id: "column", ChildrenIds: []string{}, Content: &model.BlockContentOfTableColumn{TableColumn: &model.BlockContentTableColumn{}}})).
|
||||
AddBlock(simple.New(&model.Block{Id: "rows", ChildrenIds: []string{"row", "row2"}, Content: &model.BlockContentOfLayout{Layout: &model.BlockContentLayout{Style: model.BlockContentLayout_TableRows}}})).
|
||||
AddBlock(simple.New(&model.Block{Id: "row", ChildrenIds: []string{"column-row"}, Content: &model.BlockContentOfTableRow{TableRow: &model.BlockContentTableRow{IsHeader: false}}})).
|
||||
AddBlock(simple.New(&model.Block{Id: "row2", ChildrenIds: []string{}, Content: &model.BlockContentOfTableRow{TableRow: &model.BlockContentTableRow{IsHeader: false}}})).
|
||||
AddBlock(simple.New(&model.Block{Id: "column-row", ChildrenIds: []string{}})).
|
||||
AddBlock(simple.New(&model.Block{Id: "block", ChildrenIds: []string{}})).
|
||||
AddBlock(simple.New(&model.Block{Id: "upper", ChildrenIds: []string{}}))
|
||||
return sb
|
||||
}
|
||||
|
||||
for _, block := range []string{"columns", "rows", "column", "row", "column-row"} {
|
||||
t.Run("moving non-root table block '"+block+"' leads to error", func(t *testing.T) {
|
||||
// given
|
||||
sb := getSB()
|
||||
b := NewBasic(sb, nil, converter.NewLayoutConverter())
|
||||
st := sb.NewState()
|
||||
|
||||
// when
|
||||
err := b.Move(st, st, "block", model.Block_Bottom, []string{block})
|
||||
|
||||
// then
|
||||
assert.Error(t, err)
|
||||
assert.True(t, errors.Is(err, table.ErrCannotMoveTableBlocks))
|
||||
})
|
||||
}
|
||||
|
||||
t.Run("no error on moving root table block", func(t *testing.T) {
|
||||
// given
|
||||
sb := getSB()
|
||||
b := NewBasic(sb, nil, converter.NewLayoutConverter())
|
||||
st := sb.NewState()
|
||||
|
||||
// when
|
||||
err := b.Move(st, st, "block", model.Block_Bottom, []string{"table"})
|
||||
|
||||
// then
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, []string{"upper", "block", "table"}, st.Pick("test").Model().ChildrenIds)
|
||||
})
|
||||
|
||||
t.Run("no error on moving one row between another", func(t *testing.T) {
|
||||
// given
|
||||
sb := getSB()
|
||||
b := NewBasic(sb, nil, converter.NewLayoutConverter())
|
||||
st := sb.NewState()
|
||||
|
||||
// when
|
||||
err := b.Move(st, st, "row2", model.Block_Bottom, []string{"row"})
|
||||
|
||||
// then
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, []string{"row2", "row"}, st.Pick("rows").Model().ChildrenIds)
|
||||
})
|
||||
|
||||
t.Run("moving rows with incorrect position leads to error", func(t *testing.T) {
|
||||
// given
|
||||
sb := getSB()
|
||||
b := NewBasic(sb, nil, converter.NewLayoutConverter())
|
||||
st := sb.NewState()
|
||||
|
||||
// when
|
||||
err := b.Move(st, st, "row2", model.Block_Left, []string{"row"})
|
||||
|
||||
// then
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("moving rows and some other blocks between another leads to error", func(t *testing.T) {
|
||||
// given
|
||||
sb := getSB()
|
||||
b := NewBasic(sb, nil, converter.NewLayoutConverter())
|
||||
st := sb.NewState()
|
||||
|
||||
// when
|
||||
err := b.Move(st, st, "row2", model.Block_Top, []string{"row", "rows"})
|
||||
|
||||
// then
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("moving the row between itself leads to error", func(t *testing.T) {
|
||||
// given
|
||||
sb := getSB()
|
||||
b := NewBasic(sb, nil, converter.NewLayoutConverter())
|
||||
st := sb.NewState()
|
||||
|
||||
// when
|
||||
err := b.Move(st, st, "row2", model.Block_Bottom, []string{"row2"})
|
||||
|
||||
// then
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("moving table block from invalid table leads to error", func(t *testing.T) {
|
||||
// given
|
||||
sb := getSB()
|
||||
b := NewBasic(sb, nil, converter.NewLayoutConverter())
|
||||
st := sb.NewState()
|
||||
st.Unlink("columns")
|
||||
|
||||
// when
|
||||
err := b.Move(st, st, "block", model.Block_Bottom, []string{"column-row"})
|
||||
|
||||
// then
|
||||
assert.Error(t, err)
|
||||
assert.True(t, errors.Is(err, table.ErrCannotMoveTableBlocks))
|
||||
})
|
||||
|
||||
for _, block := range []string{"columns", "rows", "column", "row", "column-row"} {
|
||||
t.Run("moving a block to '"+block+"' block leads to moving it under the table", func(t *testing.T) {
|
||||
// given
|
||||
sb := getSB()
|
||||
b := NewBasic(sb, nil, converter.NewLayoutConverter())
|
||||
st := sb.NewState()
|
||||
|
||||
// when
|
||||
err := b.Move(st, st, block, model.BlockPosition(rand.Intn(len(model.BlockPosition_name))), []string{"upper"})
|
||||
|
||||
// then
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, []string{"table", "upper", "block"}, st.Pick("test").Model().ChildrenIds)
|
||||
})
|
||||
}
|
||||
|
||||
t.Run("moving a block to the invalid table leads to moving it under the table", func(t *testing.T) {
|
||||
// given
|
||||
sb := getSB()
|
||||
b := NewBasic(sb, nil, converter.NewLayoutConverter())
|
||||
st := sb.NewState()
|
||||
st.Unlink("columns")
|
||||
|
||||
// when
|
||||
err := b.Move(st, st, "rows", model.BlockPosition(rand.Intn(6)), []string{"upper"})
|
||||
|
||||
// then
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, []string{"table", "upper", "block"}, st.Pick("test").Model().ChildrenIds)
|
||||
})
|
||||
}
|
||||
|
||||
func TestBasic_MoveToAnotherObject(t *testing.T) {
|
||||
t.Run("basic", func(t *testing.T) {
|
||||
sb1 := smarttest.New("test1")
|
||||
|
|
|
@ -2,7 +2,6 @@ package basic
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/globalsign/mgo/bson"
|
||||
|
@ -119,31 +118,44 @@ func insertBlocksToState(
|
|||
}
|
||||
|
||||
func (bs *basic) changeToBlockWithLink(newState *state.State, blockToReplace simple.Block, objectID string, linkBlock *model.Block) (string, error) {
|
||||
if linkBlock == nil {
|
||||
linkBlock = &model.Block{
|
||||
Content: &model.BlockContentOfLink{
|
||||
Link: &model.BlockContentLink{
|
||||
TargetBlockId: objectID,
|
||||
Style: model.BlockContentLink_Page,
|
||||
},
|
||||
},
|
||||
}
|
||||
} else {
|
||||
link := linkBlock.GetLink()
|
||||
if link == nil {
|
||||
return "", errors.New("linkBlock content is not a link")
|
||||
} else {
|
||||
link.TargetBlockId = objectID
|
||||
}
|
||||
}
|
||||
linkBlockCopy := pbtypes.CopyBlock(linkBlock)
|
||||
return bs.CreateBlock(newState, pb.RpcBlockCreateRequest{
|
||||
TargetId: blockToReplace.Model().Id,
|
||||
Block: linkBlockCopy,
|
||||
Block: buildBlock(linkBlock, objectID),
|
||||
Position: model.Block_Replace,
|
||||
})
|
||||
}
|
||||
|
||||
func buildBlock(b *model.Block, targetID string) (result *model.Block) {
|
||||
fallback := &model.Block{
|
||||
Content: &model.BlockContentOfLink{
|
||||
Link: &model.BlockContentLink{
|
||||
TargetBlockId: targetID,
|
||||
Style: model.BlockContentLink_Page,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
if b == nil {
|
||||
return fallback
|
||||
}
|
||||
result = pbtypes.CopyBlock(b)
|
||||
|
||||
switch v := result.Content.(type) {
|
||||
case *model.BlockContentOfLink:
|
||||
v.Link.TargetBlockId = targetID
|
||||
case *model.BlockContentOfBookmark:
|
||||
v.Bookmark.TargetObjectId = targetID
|
||||
case *model.BlockContentOfFile:
|
||||
v.File.TargetObjectId = targetID
|
||||
case *model.BlockContentOfDataview:
|
||||
v.Dataview.TargetObjectId = targetID
|
||||
default:
|
||||
result = fallback
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func removeBlocks(state *state.State, descendants []simple.Block) {
|
||||
for _, b := range descendants {
|
||||
state.Unlink(b.Model().Id)
|
||||
|
|
|
@ -394,6 +394,84 @@ func TestExtractObjects(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
func TestBuildBlock(t *testing.T) {
|
||||
const target = "target"
|
||||
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
input, output *model.Block
|
||||
}{
|
||||
{
|
||||
name: "nil",
|
||||
input: nil,
|
||||
output: &model.Block{Content: &model.BlockContentOfLink{Link: &model.BlockContentLink{
|
||||
TargetBlockId: target,
|
||||
Style: model.BlockContentLink_Page,
|
||||
}}},
|
||||
},
|
||||
{
|
||||
name: "link",
|
||||
input: &model.Block{Content: &model.BlockContentOfLink{Link: &model.BlockContentLink{
|
||||
Style: model.BlockContentLink_Dashboard,
|
||||
CardStyle: model.BlockContentLink_Card,
|
||||
}}},
|
||||
output: &model.Block{Content: &model.BlockContentOfLink{Link: &model.BlockContentLink{
|
||||
TargetBlockId: target,
|
||||
Style: model.BlockContentLink_Dashboard,
|
||||
CardStyle: model.BlockContentLink_Card,
|
||||
}}},
|
||||
},
|
||||
{
|
||||
name: "bookmark",
|
||||
input: &model.Block{Content: &model.BlockContentOfBookmark{Bookmark: &model.BlockContentBookmark{
|
||||
Type: model.LinkPreview_Image,
|
||||
State: model.BlockContentBookmark_Fetching,
|
||||
}}},
|
||||
output: &model.Block{Content: &model.BlockContentOfBookmark{Bookmark: &model.BlockContentBookmark{
|
||||
TargetObjectId: target,
|
||||
Type: model.LinkPreview_Image,
|
||||
State: model.BlockContentBookmark_Fetching,
|
||||
}}},
|
||||
},
|
||||
{
|
||||
name: "file",
|
||||
input: &model.Block{Content: &model.BlockContentOfFile{File: &model.BlockContentFile{
|
||||
Type: model.BlockContentFile_Image,
|
||||
}}},
|
||||
output: &model.Block{Content: &model.BlockContentOfFile{File: &model.BlockContentFile{
|
||||
TargetObjectId: target,
|
||||
Type: model.BlockContentFile_Image,
|
||||
}}},
|
||||
},
|
||||
{
|
||||
name: "dataview",
|
||||
input: &model.Block{Content: &model.BlockContentOfDataview{Dataview: &model.BlockContentDataview{
|
||||
IsCollection: true,
|
||||
Source: []string{"ot-note"},
|
||||
}}},
|
||||
output: &model.Block{Content: &model.BlockContentOfDataview{Dataview: &model.BlockContentDataview{
|
||||
TargetObjectId: target,
|
||||
IsCollection: true,
|
||||
Source: []string{"ot-note"},
|
||||
}}},
|
||||
},
|
||||
{
|
||||
name: "other",
|
||||
input: &model.Block{Content: &model.BlockContentOfTableRow{TableRow: &model.BlockContentTableRow{
|
||||
IsHeader: true,
|
||||
}}},
|
||||
output: &model.Block{Content: &model.BlockContentOfLink{Link: &model.BlockContentLink{
|
||||
TargetBlockId: target,
|
||||
Style: model.BlockContentLink_Page,
|
||||
}}},
|
||||
},
|
||||
} {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
assert.Equal(t, tc.output, buildBlock(tc.input, target))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
type fixture struct {
|
||||
t *testing.T
|
||||
ctrl *gomock.Controller
|
||||
|
|
783
core/block/editor/table/editor.go
Normal file
783
core/block/editor/table/editor.go
Normal file
|
@ -0,0 +1,783 @@
|
|||
package table
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"github.com/globalsign/mgo/bson"
|
||||
|
||||
"github.com/anyproto/anytype-heart/core/block/editor/smartblock"
|
||||
"github.com/anyproto/anytype-heart/core/block/editor/state"
|
||||
"github.com/anyproto/anytype-heart/core/block/simple"
|
||||
"github.com/anyproto/anytype-heart/core/block/simple/text"
|
||||
"github.com/anyproto/anytype-heart/core/block/source"
|
||||
"github.com/anyproto/anytype-heart/pb"
|
||||
"github.com/anyproto/anytype-heart/pkg/lib/pb/model"
|
||||
)
|
||||
|
||||
// nolint:revive,interfacebloat
|
||||
type TableEditor interface {
|
||||
TableCreate(s *state.State, req pb.RpcBlockTableCreateRequest) (string, error)
|
||||
CellCreate(s *state.State, rowID string, colID string, b *model.Block) (string, error)
|
||||
|
||||
RowCreate(s *state.State, req pb.RpcBlockTableRowCreateRequest) (string, error)
|
||||
RowDelete(s *state.State, req pb.RpcBlockTableRowDeleteRequest) error
|
||||
RowDuplicate(s *state.State, req pb.RpcBlockTableRowDuplicateRequest) (newRowID string, err error)
|
||||
// RowMove is done via BlockListMoveToExistingObject
|
||||
RowListFill(s *state.State, req pb.RpcBlockTableRowListFillRequest) error
|
||||
RowListClean(s *state.State, req pb.RpcBlockTableRowListCleanRequest) error
|
||||
RowSetHeader(s *state.State, req pb.RpcBlockTableRowSetHeaderRequest) error
|
||||
|
||||
ColumnCreate(s *state.State, req pb.RpcBlockTableColumnCreateRequest) (string, error)
|
||||
ColumnDelete(s *state.State, req pb.RpcBlockTableColumnDeleteRequest) error
|
||||
ColumnDuplicate(s *state.State, req pb.RpcBlockTableColumnDuplicateRequest) (id string, err error)
|
||||
ColumnMove(s *state.State, req pb.RpcBlockTableColumnMoveRequest) error
|
||||
ColumnListFill(s *state.State, req pb.RpcBlockTableColumnListFillRequest) error
|
||||
|
||||
Expand(s *state.State, req pb.RpcBlockTableExpandRequest) error
|
||||
Sort(s *state.State, req pb.RpcBlockTableSortRequest) error
|
||||
|
||||
cleanupTables(_ smartblock.ApplyInfo) error
|
||||
cloneColumnStyles(s *state.State, srcColID string, targetColID string) error
|
||||
}
|
||||
|
||||
type editor struct {
|
||||
sb smartblock.SmartBlock
|
||||
|
||||
generateRowID func() string
|
||||
generateColID func() string
|
||||
}
|
||||
|
||||
var _ TableEditor = &editor{}
|
||||
|
||||
func NewEditor(sb smartblock.SmartBlock) TableEditor {
|
||||
genID := func() string {
|
||||
return bson.NewObjectId().Hex()
|
||||
}
|
||||
|
||||
t := editor{
|
||||
sb: sb,
|
||||
generateRowID: genID,
|
||||
generateColID: genID,
|
||||
}
|
||||
if sb != nil {
|
||||
sb.AddHook(t.cleanupTables, smartblock.HookOnBlockClose)
|
||||
}
|
||||
return &t
|
||||
}
|
||||
|
||||
func (t *editor) TableCreate(s *state.State, req pb.RpcBlockTableCreateRequest) (string, error) {
|
||||
if t.sb != nil {
|
||||
if err := t.sb.Restrictions().Object.Check(model.Restrictions_Blocks); err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
|
||||
tableBlock := simple.New(&model.Block{
|
||||
Content: &model.BlockContentOfTable{
|
||||
Table: &model.BlockContentTable{},
|
||||
},
|
||||
})
|
||||
if !s.Add(tableBlock) {
|
||||
return "", fmt.Errorf("add table block")
|
||||
}
|
||||
|
||||
if err := s.InsertTo(req.TargetId, req.Position, tableBlock.Model().Id); err != nil {
|
||||
return "", fmt.Errorf("insert block: %w", err)
|
||||
}
|
||||
|
||||
columnIds := make([]string, 0, req.Columns)
|
||||
for i := uint32(0); i < req.Columns; i++ {
|
||||
id, err := t.addColumnHeader(s)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
columnIds = append(columnIds, id)
|
||||
}
|
||||
columnsLayout := simple.New(&model.Block{
|
||||
ChildrenIds: columnIds,
|
||||
Content: &model.BlockContentOfLayout{
|
||||
Layout: &model.BlockContentLayout{
|
||||
Style: model.BlockContentLayout_TableColumns,
|
||||
},
|
||||
},
|
||||
})
|
||||
if !s.Add(columnsLayout) {
|
||||
return "", fmt.Errorf("add columns block")
|
||||
}
|
||||
|
||||
rowIDs := make([]string, 0, req.Rows)
|
||||
for i := uint32(0); i < req.Rows; i++ {
|
||||
id, err := t.addRow(s)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
rowIDs = append(rowIDs, id)
|
||||
}
|
||||
|
||||
rowsLayout := simple.New(&model.Block{
|
||||
ChildrenIds: rowIDs,
|
||||
Content: &model.BlockContentOfLayout{
|
||||
Layout: &model.BlockContentLayout{
|
||||
Style: model.BlockContentLayout_TableRows,
|
||||
},
|
||||
},
|
||||
})
|
||||
if !s.Add(rowsLayout) {
|
||||
return "", fmt.Errorf("add rows block")
|
||||
}
|
||||
|
||||
tableBlock.Model().ChildrenIds = []string{columnsLayout.Model().Id, rowsLayout.Model().Id}
|
||||
|
||||
if !req.WithHeaderRow {
|
||||
return tableBlock.Model().Id, nil
|
||||
}
|
||||
|
||||
if len(rowIDs) == 0 {
|
||||
return "", fmt.Errorf("no rows to make header row")
|
||||
}
|
||||
headerID := rowIDs[0]
|
||||
|
||||
if err := t.RowSetHeader(s, pb.RpcBlockTableRowSetHeaderRequest{
|
||||
TargetId: headerID,
|
||||
IsHeader: true,
|
||||
}); err != nil {
|
||||
return "", fmt.Errorf("row set header: %w", err)
|
||||
}
|
||||
|
||||
if err := t.RowListFill(s, pb.RpcBlockTableRowListFillRequest{
|
||||
BlockIds: []string{headerID},
|
||||
}); err != nil {
|
||||
return "", fmt.Errorf("fill header row: %w", err)
|
||||
}
|
||||
|
||||
row, err := getRow(s, headerID)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("get header row: %w", err)
|
||||
}
|
||||
|
||||
for _, cellID := range row.Model().ChildrenIds {
|
||||
cell := s.Get(cellID)
|
||||
if cell == nil {
|
||||
return "", fmt.Errorf("get header cell id %s", cellID)
|
||||
}
|
||||
|
||||
cell.Model().BackgroundColor = "grey"
|
||||
}
|
||||
|
||||
return tableBlock.Model().Id, nil
|
||||
}
|
||||
|
||||
func (t *editor) CellCreate(s *state.State, rowID string, colID string, b *model.Block) (string, error) {
|
||||
tb, err := NewTable(s, rowID)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("initialize table state: %w", err)
|
||||
}
|
||||
|
||||
row, err := getRow(s, rowID)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("get row: %w", err)
|
||||
}
|
||||
if _, err = pickColumn(s, colID); err != nil {
|
||||
return "", fmt.Errorf("pick column: %w", err)
|
||||
}
|
||||
|
||||
cellID, err := addCell(s, rowID, colID)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("add cell: %w", err)
|
||||
}
|
||||
cell := s.Get(cellID)
|
||||
cell.Model().Content = b.Content
|
||||
if err := s.InsertTo(rowID, model.Block_Inner, cellID); err != nil {
|
||||
return "", fmt.Errorf("insert to: %w", err)
|
||||
}
|
||||
|
||||
colIdx := tb.MakeColumnIndex()
|
||||
normalizeRow(nil, colIdx, row)
|
||||
|
||||
return cellID, nil
|
||||
}
|
||||
|
||||
func (t *editor) RowCreate(s *state.State, req pb.RpcBlockTableRowCreateRequest) (string, error) {
|
||||
switch req.Position {
|
||||
case model.Block_Top, model.Block_Bottom:
|
||||
case model.Block_Inner:
|
||||
tb, err := NewTable(s, req.TargetId)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("initialize table state: %w", err)
|
||||
}
|
||||
req.TargetId = tb.Rows().Id
|
||||
default:
|
||||
return "", fmt.Errorf("position is not supported")
|
||||
}
|
||||
|
||||
rowID, err := t.addRow(s)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if err := s.InsertTo(req.TargetId, req.Position, rowID); err != nil {
|
||||
return "", fmt.Errorf("insert row: %w", err)
|
||||
}
|
||||
return rowID, nil
|
||||
}
|
||||
|
||||
func (t *editor) RowDelete(s *state.State, req pb.RpcBlockTableRowDeleteRequest) error {
|
||||
_, err := pickRow(s, req.TargetId)
|
||||
if err != nil {
|
||||
return fmt.Errorf("pick target row: %w", err)
|
||||
}
|
||||
|
||||
if !s.Unlink(req.TargetId) {
|
||||
return fmt.Errorf("unlink row block")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *editor) RowDuplicate(s *state.State, req pb.RpcBlockTableRowDuplicateRequest) (newRowID string, err error) {
|
||||
if req.Position != model.Block_Top && req.Position != model.Block_Bottom {
|
||||
return "", fmt.Errorf("position %s is not supported", model.BlockPosition_name[int32(req.Position)])
|
||||
}
|
||||
srcRow, err := pickRow(s, req.BlockId)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("pick source row: %w", err)
|
||||
}
|
||||
|
||||
if _, err = pickRow(s, req.TargetId); err != nil {
|
||||
return "", fmt.Errorf("pick target row: %w", err)
|
||||
}
|
||||
|
||||
newRow := srcRow.Copy()
|
||||
newRow.Model().Id = t.generateRowID()
|
||||
if !s.Add(newRow) {
|
||||
return "", fmt.Errorf("add new row %s", newRow.Model().Id)
|
||||
}
|
||||
if err = s.InsertTo(req.TargetId, req.Position, newRow.Model().Id); err != nil {
|
||||
return "", fmt.Errorf("insert column: %w", err)
|
||||
}
|
||||
|
||||
for i, srcID := range newRow.Model().ChildrenIds {
|
||||
cell := s.Pick(srcID)
|
||||
if cell == nil {
|
||||
return "", fmt.Errorf("cell %s is not found", srcID)
|
||||
}
|
||||
_, colID, err := ParseCellID(srcID)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("parse cell id %s: %w", srcID, err)
|
||||
}
|
||||
|
||||
newCell := cell.Copy()
|
||||
newCell.Model().Id = MakeCellID(newRow.Model().Id, colID)
|
||||
if !s.Add(newCell) {
|
||||
return "", fmt.Errorf("add new cell %s", newCell.Model().Id)
|
||||
}
|
||||
newRow.Model().ChildrenIds[i] = newCell.Model().Id
|
||||
}
|
||||
|
||||
return newRow.Model().Id, nil
|
||||
}
|
||||
|
||||
func (t *editor) RowListFill(s *state.State, req pb.RpcBlockTableRowListFillRequest) error {
|
||||
if len(req.BlockIds) == 0 {
|
||||
return fmt.Errorf("empty row list")
|
||||
}
|
||||
|
||||
tb, err := NewTable(s, req.BlockIds[0])
|
||||
if err != nil {
|
||||
return fmt.Errorf("init table: %w", err)
|
||||
}
|
||||
|
||||
columns := tb.ColumnIDs()
|
||||
|
||||
for _, rowID := range req.BlockIds {
|
||||
row, err := getRow(s, rowID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("get row %s: %w", rowID, err)
|
||||
}
|
||||
|
||||
newIds := make([]string, 0, len(columns))
|
||||
for _, colID := range columns {
|
||||
id := MakeCellID(rowID, colID)
|
||||
newIds = append(newIds, id)
|
||||
|
||||
if !s.Exists(id) {
|
||||
_, err := addCell(s, rowID, colID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("add cell %s: %w", id, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
row.Model().ChildrenIds = newIds
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *editor) RowListClean(s *state.State, req pb.RpcBlockTableRowListCleanRequest) error {
|
||||
if len(req.BlockIds) == 0 {
|
||||
return fmt.Errorf("empty row list")
|
||||
}
|
||||
|
||||
for _, rowID := range req.BlockIds {
|
||||
row, err := pickRow(s, rowID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("pick row: %w", err)
|
||||
}
|
||||
|
||||
for _, cellID := range row.Model().ChildrenIds {
|
||||
cell := s.Pick(cellID)
|
||||
if v, ok := cell.(text.Block); ok && v.IsEmpty() {
|
||||
s.Unlink(cellID)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *editor) RowSetHeader(s *state.State, req pb.RpcBlockTableRowSetHeaderRequest) error {
|
||||
tb, err := NewTable(s, req.TargetId)
|
||||
if err != nil {
|
||||
return fmt.Errorf("init table: %w", err)
|
||||
}
|
||||
|
||||
row, err := getRow(s, req.TargetId)
|
||||
if err != nil {
|
||||
return fmt.Errorf("get target row: %w", err)
|
||||
}
|
||||
|
||||
if row.Model().GetTableRow().IsHeader != req.IsHeader {
|
||||
row.Model().GetTableRow().IsHeader = req.IsHeader
|
||||
|
||||
err = normalizeRows(s, tb)
|
||||
if err != nil {
|
||||
return fmt.Errorf("normalize rows: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *editor) ColumnCreate(s *state.State, req pb.RpcBlockTableColumnCreateRequest) (string, error) {
|
||||
switch req.Position {
|
||||
case model.Block_Left:
|
||||
req.Position = model.Block_Top
|
||||
if _, err := pickColumn(s, req.TargetId); err != nil {
|
||||
return "", fmt.Errorf("pick column: %w", err)
|
||||
}
|
||||
case model.Block_Right:
|
||||
req.Position = model.Block_Bottom
|
||||
if _, err := pickColumn(s, req.TargetId); err != nil {
|
||||
return "", fmt.Errorf("pick column: %w", err)
|
||||
}
|
||||
case model.Block_Inner:
|
||||
tb, err := NewTable(s, req.TargetId)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("initialize table state: %w", err)
|
||||
}
|
||||
req.TargetId = tb.Columns().Id
|
||||
default:
|
||||
return "", fmt.Errorf("position is not supported")
|
||||
}
|
||||
|
||||
colID, err := t.addColumnHeader(s)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if err = s.InsertTo(req.TargetId, req.Position, colID); err != nil {
|
||||
return "", fmt.Errorf("insert column header: %w", err)
|
||||
}
|
||||
|
||||
return colID, t.cloneColumnStyles(s, req.TargetId, colID)
|
||||
}
|
||||
|
||||
func (t *editor) ColumnDelete(s *state.State, req pb.RpcBlockTableColumnDeleteRequest) error {
|
||||
_, err := pickColumn(s, req.TargetId)
|
||||
if err != nil {
|
||||
return fmt.Errorf("pick target column: %w", err)
|
||||
}
|
||||
|
||||
tb, err := NewTable(s, req.TargetId)
|
||||
if err != nil {
|
||||
return fmt.Errorf("initialize table state: %w", err)
|
||||
}
|
||||
|
||||
for _, rowID := range tb.RowIDs() {
|
||||
row, err := pickRow(s, rowID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("pick row %s: %w", rowID, err)
|
||||
}
|
||||
|
||||
for _, cellID := range row.Model().ChildrenIds {
|
||||
_, colID, err := ParseCellID(cellID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("parse cell id %s: %w", cellID, err)
|
||||
}
|
||||
|
||||
if colID == req.TargetId {
|
||||
if !s.Unlink(cellID) {
|
||||
return fmt.Errorf("unlink cell %s", cellID)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if !s.Unlink(req.TargetId) {
|
||||
return fmt.Errorf("unlink column header")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *editor) ColumnDuplicate(s *state.State, req pb.RpcBlockTableColumnDuplicateRequest) (id string, err error) {
|
||||
switch req.Position {
|
||||
case model.Block_Left:
|
||||
req.Position = model.Block_Top
|
||||
case model.Block_Right:
|
||||
req.Position = model.Block_Bottom
|
||||
default:
|
||||
return "", fmt.Errorf("position is not supported")
|
||||
}
|
||||
|
||||
srcCol, err := pickColumn(s, req.BlockId)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("pick source column: %w", err)
|
||||
}
|
||||
|
||||
_, err = pickColumn(s, req.TargetId)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("pick target column: %w", err)
|
||||
}
|
||||
|
||||
tb, err := NewTable(s, req.TargetId)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("init table block: %w", err)
|
||||
}
|
||||
|
||||
newCol := srcCol.Copy()
|
||||
newCol.Model().Id = t.generateColID()
|
||||
if !s.Add(newCol) {
|
||||
return "", fmt.Errorf("add column block")
|
||||
}
|
||||
if err = s.InsertTo(req.TargetId, req.Position, newCol.Model().Id); err != nil {
|
||||
return "", fmt.Errorf("insert column: %w", err)
|
||||
}
|
||||
|
||||
colIdx := tb.MakeColumnIndex()
|
||||
|
||||
for _, rowID := range tb.RowIDs() {
|
||||
row, err := getRow(s, rowID)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("get row %s: %w", rowID, err)
|
||||
}
|
||||
|
||||
var cellID string
|
||||
for _, id := range row.Model().ChildrenIds {
|
||||
_, colID, err := ParseCellID(id)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("parse cell %s in row %s: %w", cellID, rowID, err)
|
||||
}
|
||||
if colID == req.BlockId {
|
||||
cellID = id
|
||||
break
|
||||
}
|
||||
}
|
||||
if cellID == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
cell := s.Pick(cellID)
|
||||
if cell == nil {
|
||||
return "", fmt.Errorf("cell %s is not found", cellID)
|
||||
}
|
||||
cell = cell.Copy()
|
||||
cell.Model().Id = MakeCellID(rowID, newCol.Model().Id)
|
||||
|
||||
if !s.Add(cell) {
|
||||
return "", fmt.Errorf("add cell block")
|
||||
}
|
||||
|
||||
row.Model().ChildrenIds = append(row.Model().ChildrenIds, cell.Model().Id)
|
||||
normalizeRow(nil, colIdx, row)
|
||||
}
|
||||
|
||||
return newCol.Model().Id, nil
|
||||
}
|
||||
|
||||
func (t *editor) ColumnMove(s *state.State, req pb.RpcBlockTableColumnMoveRequest) error {
|
||||
switch req.Position {
|
||||
case model.Block_Left:
|
||||
req.Position = model.Block_Top
|
||||
case model.Block_Right:
|
||||
req.Position = model.Block_Bottom
|
||||
default:
|
||||
return fmt.Errorf("position is not supported")
|
||||
}
|
||||
_, err := pickColumn(s, req.TargetId)
|
||||
if err != nil {
|
||||
return fmt.Errorf("get target column: %w", err)
|
||||
}
|
||||
_, err = pickColumn(s, req.DropTargetId)
|
||||
if err != nil {
|
||||
return fmt.Errorf("get drop target column: %w", err)
|
||||
}
|
||||
|
||||
tb, err := NewTable(s, req.TargetId)
|
||||
if err != nil {
|
||||
return fmt.Errorf("init table block: %w", err)
|
||||
}
|
||||
|
||||
if !s.Unlink(req.TargetId) {
|
||||
return fmt.Errorf("unlink target column")
|
||||
}
|
||||
if err = s.InsertTo(req.DropTargetId, req.Position, req.TargetId); err != nil {
|
||||
return fmt.Errorf("insert column: %w", err)
|
||||
}
|
||||
|
||||
colIdx := tb.MakeColumnIndex()
|
||||
|
||||
for _, id := range tb.RowIDs() {
|
||||
row, err := getRow(s, id)
|
||||
if err != nil {
|
||||
return fmt.Errorf("get row %s: %w", id, err)
|
||||
}
|
||||
normalizeRow(nil, colIdx, row)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *editor) ColumnListFill(s *state.State, req pb.RpcBlockTableColumnListFillRequest) error {
|
||||
if len(req.BlockIds) == 0 {
|
||||
return fmt.Errorf("empty row list")
|
||||
}
|
||||
|
||||
tb, err := NewTable(s, req.BlockIds[0])
|
||||
if err != nil {
|
||||
return fmt.Errorf("init table: %w", err)
|
||||
}
|
||||
|
||||
rows := tb.RowIDs()
|
||||
|
||||
for _, colID := range req.BlockIds {
|
||||
for _, rowID := range rows {
|
||||
id := MakeCellID(rowID, colID)
|
||||
if s.Exists(id) {
|
||||
continue
|
||||
}
|
||||
_, err := addCell(s, rowID, colID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("add cell %s: %w", id, err)
|
||||
}
|
||||
|
||||
row, err := getRow(s, rowID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("get row %s: %w", rowID, err)
|
||||
}
|
||||
|
||||
row.Model().ChildrenIds = append(row.Model().ChildrenIds, id)
|
||||
}
|
||||
}
|
||||
|
||||
colIdx := tb.MakeColumnIndex()
|
||||
for _, rowID := range rows {
|
||||
row, err := getRow(s, rowID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("get row %s: %w", rowID, err)
|
||||
}
|
||||
normalizeRow(nil, colIdx, row)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *editor) Expand(s *state.State, req pb.RpcBlockTableExpandRequest) error {
|
||||
tb, err := NewTable(s, req.TargetId)
|
||||
if err != nil {
|
||||
return fmt.Errorf("init table block: %w", err)
|
||||
}
|
||||
|
||||
for i := uint32(0); i < req.Columns; i++ {
|
||||
_, err := t.ColumnCreate(s, pb.RpcBlockTableColumnCreateRequest{
|
||||
TargetId: req.TargetId,
|
||||
Position: model.Block_Inner,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("create column: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
for i := uint32(0); i < req.Rows; i++ {
|
||||
rows := tb.Rows()
|
||||
_, err := t.RowCreate(s, pb.RpcBlockTableRowCreateRequest{
|
||||
TargetId: rows.ChildrenIds[len(rows.ChildrenIds)-1],
|
||||
Position: model.Block_Bottom,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("create row: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Sort reorders the table's rows by the text content of their cells in the
// column req.ColumnId. Header rows are kept at the top in their original
// order and excluded from sorting. sort.Stable is used, so rows with equal
// cell text keep their relative order.
func (t *editor) Sort(s *state.State, req pb.RpcBlockTableSortRequest) error {
	_, err := pickColumn(s, req.ColumnId)
	if err != nil {
		return fmt.Errorf("pick column: %w", err)
	}

	tb, err := NewTable(s, req.ColumnId)
	if err != nil {
		return fmt.Errorf("init table block: %w", err)
	}

	rows := s.Get(tb.Rows().Id)
	// values is sized for all rows, but only the first len(rowIDs) slots are
	// filled, because header rows are skipped below.
	sorter := tableSorter{
		rowIDs: make([]string, 0, len(rows.Model().ChildrenIds)),
		values: make([]string, len(rows.Model().ChildrenIds)),
	}

	var headers []string

	// i indexes sorter.values in lockstep with sorter.rowIDs: it is
	// incremented only for non-header rows.
	var i int
	for _, rowID := range rows.Model().ChildrenIds {
		row, err := pickRow(s, rowID)
		if err != nil {
			return fmt.Errorf("pick row %s: %w", rowID, err)
		}
		if row.Model().GetTableRow().GetIsHeader() {
			headers = append(headers, rowID)
			continue
		}

		sorter.rowIDs = append(sorter.rowIDs, rowID)
		for _, cellID := range row.Model().ChildrenIds {
			_, colID, err := ParseCellID(cellID)
			if err != nil {
				return fmt.Errorf("parse cell id %s: %w", cellID, err)
			}
			if colID == req.ColumnId {
				cell := s.Pick(cellID)
				if cell == nil {
					return fmt.Errorf("cell %s is not found", cellID)
				}
				// Rows without a cell in this column keep the zero value "".
				sorter.values[i] = cell.Model().GetText().GetText()
			}
		}
		i++
	}

	if req.Type == model.BlockContentDataviewSort_Asc {
		sort.Stable(sorter)
	} else {
		sort.Stable(sort.Reverse(sorter))
	}

	// Headers first, then the sorted body rows.
	// nolint:gocritic
	rows.Model().ChildrenIds = append(headers, sorter.rowIDs...)

	return nil
}
|
||||
|
||||
// cleanupTables walks every table in the smartblock, removes empty text
// cells via RowListClean, and applies the resulting state. Per-table
// failures are logged and skipped so one broken table does not block
// cleanup of the others. NOTE(review): presumably registered as a
// smartblock hook (the older Editor wired it via AddHook with
// HookOnBlockClose) — confirm for this type.
func (t *editor) cleanupTables(_ smartblock.ApplyInfo) error {
	if t.sb == nil {
		return fmt.Errorf("nil smartblock")
	}
	s := t.sb.NewState()

	err := s.Iterate(func(b simple.Block) bool {
		// Only table root blocks are of interest.
		if b.Model().GetTable() == nil {
			return true
		}

		tb, err := NewTable(s, b.Model().Id)
		if err != nil {
			log.Errorf("cleanup: init table %s: %s", b.Model().Id, err)
			return true
		}
		err = t.RowListClean(s, pb.RpcBlockTableRowListCleanRequest{
			BlockIds: tb.RowIDs(),
		})
		if err != nil {
			log.Errorf("cleanup table %s: %s", b.Model().Id, err)
			return true
		}
		// Always continue iterating, even after a per-table error.
		return true
	})
	if err != nil {
		log.Errorf("cleanup iterate: %s", err)
	}

	if err = t.sb.Apply(s, smartblock.KeepInternalFlags); err != nil {
		// Read-only sources cannot be modified; treat that as a no-op.
		if errors.Is(err, source.ErrReadOnly) {
			return nil
		}
		log.Errorf("cleanup apply: %s", err)
	}
	return nil
}
|
||||
|
||||
// cloneColumnStyles copies per-cell styling (background color only) from
// column srcColID to column targetColID. For each row whose source cell
// has a non-empty background color, the target cell is created if missing,
// given the same color, and the row is re-normalized.
func (t *editor) cloneColumnStyles(s *state.State, srcColID, targetColID string) error {
	tb, err := NewTable(s, srcColID)
	if err != nil {
		return fmt.Errorf("init table block: %w", err)
	}
	colIdx := tb.MakeColumnIndex()

	for _, rowID := range tb.RowIDs() {
		row, err := pickRow(s, rowID)
		if err != nil {
			return fmt.Errorf("pick row: %w", err)
		}

		// Find this row's cell in the source column, if any.
		var protoBlock simple.Block
		for _, cellID := range row.Model().ChildrenIds {
			_, colID, err := ParseCellID(cellID)
			if err != nil {
				return fmt.Errorf("parse cell id: %w", err)
			}

			if colID == srcColID {
				protoBlock = s.Pick(cellID)
			}
		}

		if protoBlock != nil && protoBlock.Model().BackgroundColor != "" {
			targetCellID := MakeCellID(rowID, targetColID)

			if !s.Exists(targetCellID) {
				_, err := addCell(s, rowID, targetColID)
				if err != nil {
					return fmt.Errorf("add cell: %w", err)
				}
			}
			cell := s.Get(targetCellID)
			cell.Model().BackgroundColor = protoBlock.Model().BackgroundColor

			// Re-fetch the row via Get (mutable) before changing its children.
			row = s.Get(row.Model().Id)
			row.Model().ChildrenIds = append(row.Model().ChildrenIds, targetCellID)
			normalizeRow(nil, colIdx, row)
		}
	}

	return nil
}
|
||||
|
||||
func (t *editor) addColumnHeader(s *state.State) (string, error) {
|
||||
b := simple.New(&model.Block{
|
||||
Id: t.generateColID(),
|
||||
Content: &model.BlockContentOfTableColumn{
|
||||
TableColumn: &model.BlockContentTableColumn{},
|
||||
},
|
||||
})
|
||||
if !s.Add(b) {
|
||||
return "", fmt.Errorf("add column block")
|
||||
}
|
||||
return b.Model().Id, nil
|
||||
}
|
||||
|
||||
func (t *editor) addRow(s *state.State) (string, error) {
|
||||
row := makeRow(t.generateRowID())
|
||||
if !s.Add(row) {
|
||||
return "", fmt.Errorf("add row block")
|
||||
}
|
||||
return row.Model().Id, nil
|
||||
}
|
2128
core/block/editor/table/editor_test.go
Normal file
2128
core/block/editor/table/editor_test.go
Normal file
File diff suppressed because it is too large
Load diff
|
@ -2,751 +2,20 @@ package table
|
|||
|
||||
import (
	"errors"
	"fmt"
	"sort"
	"strings"

	"github.com/globalsign/mgo/bson"
	"github.com/samber/lo"

	"github.com/anyproto/anytype-heart/core/block/editor/smartblock"
	"github.com/anyproto/anytype-heart/core/block/editor/state"
	"github.com/anyproto/anytype-heart/core/block/simple"
	"github.com/anyproto/anytype-heart/core/block/simple/table"
	"github.com/anyproto/anytype-heart/core/block/simple/text"
	"github.com/anyproto/anytype-heart/core/block/source"
	"github.com/anyproto/anytype-heart/pb"
	"github.com/anyproto/anytype-heart/pkg/lib/logging"
	"github.com/anyproto/anytype-heart/pkg/lib/pb/model"
)
|
||||
|
||||
var log = logging.Logger("anytype-simple-tables")
|
||||
|
||||
// nolint:revive,interfacebloat
|
||||
type TableEditor interface {
|
||||
TableCreate(s *state.State, req pb.RpcBlockTableCreateRequest) (string, error)
|
||||
RowCreate(s *state.State, req pb.RpcBlockTableRowCreateRequest) (string, error)
|
||||
RowDelete(s *state.State, req pb.RpcBlockTableRowDeleteRequest) error
|
||||
ColumnDelete(s *state.State, req pb.RpcBlockTableColumnDeleteRequest) error
|
||||
ColumnMove(s *state.State, req pb.RpcBlockTableColumnMoveRequest) error
|
||||
RowDuplicate(s *state.State, req pb.RpcBlockTableRowDuplicateRequest) (newRowID string, err error)
|
||||
RowListFill(s *state.State, req pb.RpcBlockTableRowListFillRequest) error
|
||||
RowListClean(s *state.State, req pb.RpcBlockTableRowListCleanRequest) error
|
||||
RowSetHeader(s *state.State, req pb.RpcBlockTableRowSetHeaderRequest) error
|
||||
ColumnListFill(s *state.State, req pb.RpcBlockTableColumnListFillRequest) error
|
||||
cleanupTables(_ smartblock.ApplyInfo) error
|
||||
ColumnCreate(s *state.State, req pb.RpcBlockTableColumnCreateRequest) (string, error)
|
||||
cloneColumnStyles(s *state.State, srcColID string, targetColID string) error
|
||||
ColumnDuplicate(s *state.State, req pb.RpcBlockTableColumnDuplicateRequest) (id string, err error)
|
||||
Expand(s *state.State, req pb.RpcBlockTableExpandRequest) error
|
||||
Sort(s *state.State, req pb.RpcBlockTableSortRequest) error
|
||||
CellCreate(s *state.State, rowID string, colID string, b *model.Block) (string, error)
|
||||
}
|
||||
|
||||
type Editor struct {
|
||||
sb smartblock.SmartBlock
|
||||
|
||||
generateRowID func() string
|
||||
generateColID func() string
|
||||
}
|
||||
|
||||
var _ TableEditor = &Editor{}
|
||||
|
||||
func NewEditor(sb smartblock.SmartBlock) *Editor {
|
||||
genID := func() string {
|
||||
return bson.NewObjectId().Hex()
|
||||
}
|
||||
|
||||
t := Editor{
|
||||
sb: sb,
|
||||
generateRowID: genID,
|
||||
generateColID: genID,
|
||||
}
|
||||
if sb != nil {
|
||||
sb.AddHook(t.cleanupTables, smartblock.HookOnBlockClose)
|
||||
}
|
||||
return &t
|
||||
}
|
||||
|
||||
func (t *Editor) TableCreate(s *state.State, req pb.RpcBlockTableCreateRequest) (string, error) {
|
||||
if t.sb != nil {
|
||||
if err := t.sb.Restrictions().Object.Check(model.Restrictions_Blocks); err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
|
||||
tableBlock := simple.New(&model.Block{
|
||||
Content: &model.BlockContentOfTable{
|
||||
Table: &model.BlockContentTable{},
|
||||
},
|
||||
})
|
||||
if !s.Add(tableBlock) {
|
||||
return "", fmt.Errorf("add table block")
|
||||
}
|
||||
|
||||
if err := s.InsertTo(req.TargetId, req.Position, tableBlock.Model().Id); err != nil {
|
||||
return "", fmt.Errorf("insert block: %w", err)
|
||||
}
|
||||
|
||||
columnIds := make([]string, 0, req.Columns)
|
||||
for i := uint32(0); i < req.Columns; i++ {
|
||||
id, err := t.addColumnHeader(s)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
columnIds = append(columnIds, id)
|
||||
}
|
||||
columnsLayout := simple.New(&model.Block{
|
||||
ChildrenIds: columnIds,
|
||||
Content: &model.BlockContentOfLayout{
|
||||
Layout: &model.BlockContentLayout{
|
||||
Style: model.BlockContentLayout_TableColumns,
|
||||
},
|
||||
},
|
||||
})
|
||||
if !s.Add(columnsLayout) {
|
||||
return "", fmt.Errorf("add columns block")
|
||||
}
|
||||
|
||||
rowIDs := make([]string, 0, req.Rows)
|
||||
for i := uint32(0); i < req.Rows; i++ {
|
||||
id, err := t.addRow(s)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
rowIDs = append(rowIDs, id)
|
||||
}
|
||||
|
||||
rowsLayout := simple.New(&model.Block{
|
||||
ChildrenIds: rowIDs,
|
||||
Content: &model.BlockContentOfLayout{
|
||||
Layout: &model.BlockContentLayout{
|
||||
Style: model.BlockContentLayout_TableRows,
|
||||
},
|
||||
},
|
||||
})
|
||||
if !s.Add(rowsLayout) {
|
||||
return "", fmt.Errorf("add rows block")
|
||||
}
|
||||
|
||||
tableBlock.Model().ChildrenIds = []string{columnsLayout.Model().Id, rowsLayout.Model().Id}
|
||||
|
||||
if req.WithHeaderRow {
|
||||
headerID := rowIDs[0]
|
||||
|
||||
if err := t.RowSetHeader(s, pb.RpcBlockTableRowSetHeaderRequest{
|
||||
TargetId: headerID,
|
||||
IsHeader: true,
|
||||
}); err != nil {
|
||||
return "", fmt.Errorf("row set header: %w", err)
|
||||
}
|
||||
|
||||
if err := t.RowListFill(s, pb.RpcBlockTableRowListFillRequest{
|
||||
BlockIds: []string{headerID},
|
||||
}); err != nil {
|
||||
return "", fmt.Errorf("fill header row: %w", err)
|
||||
}
|
||||
|
||||
row, err := getRow(s, headerID)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("get header row: %w", err)
|
||||
}
|
||||
|
||||
for _, cellID := range row.Model().ChildrenIds {
|
||||
cell := s.Get(cellID)
|
||||
if cell == nil {
|
||||
return "", fmt.Errorf("get header cell id %s", cellID)
|
||||
}
|
||||
|
||||
cell.Model().BackgroundColor = "grey"
|
||||
}
|
||||
}
|
||||
|
||||
return tableBlock.Model().Id, nil
|
||||
}
|
||||
|
||||
func (t *Editor) RowCreate(s *state.State, req pb.RpcBlockTableRowCreateRequest) (string, error) {
|
||||
switch req.Position {
|
||||
case model.Block_Top, model.Block_Bottom:
|
||||
case model.Block_Inner:
|
||||
tb, err := NewTable(s, req.TargetId)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("initialize table state: %w", err)
|
||||
}
|
||||
req.TargetId = tb.Rows().Id
|
||||
default:
|
||||
return "", fmt.Errorf("position is not supported")
|
||||
}
|
||||
|
||||
rowID, err := t.addRow(s)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if err := s.InsertTo(req.TargetId, req.Position, rowID); err != nil {
|
||||
return "", fmt.Errorf("insert row: %w", err)
|
||||
}
|
||||
return rowID, nil
|
||||
}
|
||||
|
||||
func (t *Editor) RowDelete(s *state.State, req pb.RpcBlockTableRowDeleteRequest) error {
|
||||
_, err := pickRow(s, req.TargetId)
|
||||
if err != nil {
|
||||
return fmt.Errorf("pick target row: %w", err)
|
||||
}
|
||||
|
||||
if !s.Unlink(req.TargetId) {
|
||||
return fmt.Errorf("unlink row block")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *Editor) ColumnDelete(s *state.State, req pb.RpcBlockTableColumnDeleteRequest) error {
|
||||
_, err := pickColumn(s, req.TargetId)
|
||||
if err != nil {
|
||||
return fmt.Errorf("pick target column: %w", err)
|
||||
}
|
||||
|
||||
tb, err := NewTable(s, req.TargetId)
|
||||
if err != nil {
|
||||
return fmt.Errorf("initialize table state: %w", err)
|
||||
}
|
||||
|
||||
for _, rowID := range tb.RowIDs() {
|
||||
row, err := pickRow(s, rowID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("pick row %s: %w", rowID, err)
|
||||
}
|
||||
|
||||
for _, cellID := range row.Model().ChildrenIds {
|
||||
_, colID, err := ParseCellID(cellID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("parse cell id %s: %w", cellID, err)
|
||||
}
|
||||
|
||||
if colID == req.TargetId {
|
||||
if !s.Unlink(cellID) {
|
||||
return fmt.Errorf("unlink cell %s", cellID)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if !s.Unlink(req.TargetId) {
|
||||
return fmt.Errorf("unlink column header")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *Editor) ColumnMove(s *state.State, req pb.RpcBlockTableColumnMoveRequest) error {
|
||||
switch req.Position {
|
||||
case model.Block_Left:
|
||||
req.Position = model.Block_Top
|
||||
case model.Block_Right:
|
||||
req.Position = model.Block_Bottom
|
||||
default:
|
||||
return fmt.Errorf("position is not supported")
|
||||
}
|
||||
_, err := pickColumn(s, req.TargetId)
|
||||
if err != nil {
|
||||
return fmt.Errorf("get target column: %w", err)
|
||||
}
|
||||
_, err = pickColumn(s, req.DropTargetId)
|
||||
if err != nil {
|
||||
return fmt.Errorf("get drop target column: %w", err)
|
||||
}
|
||||
|
||||
tb, err := NewTable(s, req.TargetId)
|
||||
if err != nil {
|
||||
return fmt.Errorf("init table block: %w", err)
|
||||
}
|
||||
|
||||
if !s.Unlink(req.TargetId) {
|
||||
return fmt.Errorf("unlink target column")
|
||||
}
|
||||
if err = s.InsertTo(req.DropTargetId, req.Position, req.TargetId); err != nil {
|
||||
return fmt.Errorf("insert column: %w", err)
|
||||
}
|
||||
|
||||
colIdx := tb.MakeColumnIndex()
|
||||
|
||||
for _, id := range tb.RowIDs() {
|
||||
row, err := getRow(s, id)
|
||||
if err != nil {
|
||||
return fmt.Errorf("get row %s: %w", id, err)
|
||||
}
|
||||
normalizeRow(nil, colIdx, row)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *Editor) RowDuplicate(s *state.State, req pb.RpcBlockTableRowDuplicateRequest) (newRowID string, err error) {
|
||||
srcRow, err := pickRow(s, req.BlockId)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("pick source row: %w", err)
|
||||
}
|
||||
|
||||
newRow := srcRow.Copy()
|
||||
newRow.Model().Id = t.generateRowID()
|
||||
if !s.Add(newRow) {
|
||||
return "", fmt.Errorf("add new row %s", newRow.Model().Id)
|
||||
}
|
||||
if err = s.InsertTo(req.TargetId, req.Position, newRow.Model().Id); err != nil {
|
||||
return "", fmt.Errorf("insert column: %w", err)
|
||||
}
|
||||
|
||||
for i, srcID := range newRow.Model().ChildrenIds {
|
||||
cell := s.Pick(srcID)
|
||||
if cell == nil {
|
||||
return "", fmt.Errorf("cell %s is not found", srcID)
|
||||
}
|
||||
_, colID, err := ParseCellID(srcID)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("parse cell id %s: %w", srcID, err)
|
||||
}
|
||||
|
||||
newCell := cell.Copy()
|
||||
newCell.Model().Id = MakeCellID(newRow.Model().Id, colID)
|
||||
if !s.Add(newCell) {
|
||||
return "", fmt.Errorf("add new cell %s", newCell.Model().Id)
|
||||
}
|
||||
newRow.Model().ChildrenIds[i] = newCell.Model().Id
|
||||
}
|
||||
|
||||
return newRow.Model().Id, nil
|
||||
}
|
||||
|
||||
func (t *Editor) RowListFill(s *state.State, req pb.RpcBlockTableRowListFillRequest) error {
|
||||
if len(req.BlockIds) == 0 {
|
||||
return fmt.Errorf("empty row list")
|
||||
}
|
||||
|
||||
tb, err := NewTable(s, req.BlockIds[0])
|
||||
if err != nil {
|
||||
return fmt.Errorf("init table: %w", err)
|
||||
}
|
||||
|
||||
columns := tb.ColumnIDs()
|
||||
|
||||
for _, rowID := range req.BlockIds {
|
||||
row, err := getRow(s, rowID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("get row %s: %w", rowID, err)
|
||||
}
|
||||
|
||||
newIds := make([]string, 0, len(columns))
|
||||
for _, colID := range columns {
|
||||
id := MakeCellID(rowID, colID)
|
||||
newIds = append(newIds, id)
|
||||
|
||||
if !s.Exists(id) {
|
||||
_, err := addCell(s, rowID, colID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("add cell %s: %w", id, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
row.Model().ChildrenIds = newIds
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *Editor) RowListClean(s *state.State, req pb.RpcBlockTableRowListCleanRequest) error {
|
||||
if len(req.BlockIds) == 0 {
|
||||
return fmt.Errorf("empty row list")
|
||||
}
|
||||
|
||||
for _, rowID := range req.BlockIds {
|
||||
row, err := pickRow(s, rowID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("pick row: %w", err)
|
||||
}
|
||||
|
||||
for _, cellID := range row.Model().ChildrenIds {
|
||||
cell := s.Pick(cellID)
|
||||
if v, ok := cell.(text.Block); ok && v.IsEmpty() {
|
||||
s.Unlink(cellID)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *Editor) RowSetHeader(s *state.State, req pb.RpcBlockTableRowSetHeaderRequest) error {
|
||||
tb, err := NewTable(s, req.TargetId)
|
||||
if err != nil {
|
||||
return fmt.Errorf("init table: %w", err)
|
||||
}
|
||||
|
||||
row, err := getRow(s, req.TargetId)
|
||||
if err != nil {
|
||||
return fmt.Errorf("get target row: %w", err)
|
||||
}
|
||||
|
||||
if row.Model().GetTableRow().IsHeader != req.IsHeader {
|
||||
row.Model().GetTableRow().IsHeader = req.IsHeader
|
||||
|
||||
err = normalizeRows(s, tb)
|
||||
if err != nil {
|
||||
return fmt.Errorf("normalize rows: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *Editor) ColumnListFill(s *state.State, req pb.RpcBlockTableColumnListFillRequest) error {
|
||||
if len(req.BlockIds) == 0 {
|
||||
return fmt.Errorf("empty row list")
|
||||
}
|
||||
|
||||
tb, err := NewTable(s, req.BlockIds[0])
|
||||
if err != nil {
|
||||
return fmt.Errorf("init table: %w", err)
|
||||
}
|
||||
|
||||
rows := tb.RowIDs()
|
||||
|
||||
for _, colID := range req.BlockIds {
|
||||
for _, rowID := range rows {
|
||||
id := MakeCellID(rowID, colID)
|
||||
if s.Exists(id) {
|
||||
continue
|
||||
}
|
||||
_, err := addCell(s, rowID, colID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("add cell %s: %w", id, err)
|
||||
}
|
||||
|
||||
row, err := getRow(s, rowID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("get row %s: %w", rowID, err)
|
||||
}
|
||||
|
||||
row.Model().ChildrenIds = append(row.Model().ChildrenIds, id)
|
||||
}
|
||||
}
|
||||
|
||||
colIdx := tb.MakeColumnIndex()
|
||||
for _, rowID := range rows {
|
||||
row, err := getRow(s, rowID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("get row %s: %w", rowID, err)
|
||||
}
|
||||
normalizeRow(nil, colIdx, row)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *Editor) cleanupTables(_ smartblock.ApplyInfo) error {
|
||||
if t.sb == nil {
|
||||
return fmt.Errorf("nil smartblock")
|
||||
}
|
||||
s := t.sb.NewState()
|
||||
|
||||
err := s.Iterate(func(b simple.Block) bool {
|
||||
if b.Model().GetTable() == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
tb, err := NewTable(s, b.Model().Id)
|
||||
if err != nil {
|
||||
log.Errorf("cleanup: init table %s: %s", b.Model().Id, err)
|
||||
return true
|
||||
}
|
||||
err = t.RowListClean(s, pb.RpcBlockTableRowListCleanRequest{
|
||||
BlockIds: tb.RowIDs(),
|
||||
})
|
||||
if err != nil {
|
||||
log.Errorf("cleanup table %s: %s", b.Model().Id, err)
|
||||
return true
|
||||
}
|
||||
return true
|
||||
})
|
||||
if err != nil {
|
||||
log.Errorf("cleanup iterate: %s", err)
|
||||
}
|
||||
|
||||
if err = t.sb.Apply(s, smartblock.KeepInternalFlags); err != nil {
|
||||
if err == source.ErrReadOnly {
|
||||
return nil
|
||||
}
|
||||
log.Errorf("cleanup apply: %s", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *Editor) ColumnCreate(s *state.State, req pb.RpcBlockTableColumnCreateRequest) (string, error) {
|
||||
switch req.Position {
|
||||
case model.Block_Left:
|
||||
req.Position = model.Block_Top
|
||||
if _, err := pickColumn(s, req.TargetId); err != nil {
|
||||
return "", fmt.Errorf("pick column: %w", err)
|
||||
}
|
||||
case model.Block_Right:
|
||||
req.Position = model.Block_Bottom
|
||||
if _, err := pickColumn(s, req.TargetId); err != nil {
|
||||
return "", fmt.Errorf("pick column: %w", err)
|
||||
}
|
||||
case model.Block_Inner:
|
||||
tb, err := NewTable(s, req.TargetId)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("initialize table state: %w", err)
|
||||
}
|
||||
req.TargetId = tb.Columns().Id
|
||||
default:
|
||||
return "", fmt.Errorf("position is not supported")
|
||||
}
|
||||
|
||||
colID, err := t.addColumnHeader(s)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if err = s.InsertTo(req.TargetId, req.Position, colID); err != nil {
|
||||
return "", fmt.Errorf("insert column header: %w", err)
|
||||
}
|
||||
|
||||
return colID, t.cloneColumnStyles(s, req.TargetId, colID)
|
||||
}
|
||||
|
||||
func (t *Editor) cloneColumnStyles(s *state.State, srcColID, targetColID string) error {
|
||||
tb, err := NewTable(s, srcColID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("init table block: %w", err)
|
||||
}
|
||||
colIdx := tb.MakeColumnIndex()
|
||||
|
||||
for _, rowID := range tb.RowIDs() {
|
||||
row, err := pickRow(s, rowID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("pick row: %w", err)
|
||||
}
|
||||
|
||||
var protoBlock simple.Block
|
||||
for _, cellID := range row.Model().ChildrenIds {
|
||||
_, colID, err := ParseCellID(cellID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("parse cell id: %w", err)
|
||||
}
|
||||
|
||||
if colID == srcColID {
|
||||
protoBlock = s.Pick(cellID)
|
||||
}
|
||||
}
|
||||
|
||||
if protoBlock != nil && protoBlock.Model().BackgroundColor != "" {
|
||||
targetCellID := MakeCellID(rowID, targetColID)
|
||||
|
||||
if !s.Exists(targetCellID) {
|
||||
_, err := addCell(s, rowID, targetColID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("add cell: %w", err)
|
||||
}
|
||||
}
|
||||
cell := s.Get(targetCellID)
|
||||
cell.Model().BackgroundColor = protoBlock.Model().BackgroundColor
|
||||
|
||||
row = s.Get(row.Model().Id)
|
||||
row.Model().ChildrenIds = append(row.Model().ChildrenIds, targetCellID)
|
||||
normalizeRow(nil, colIdx, row)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *Editor) ColumnDuplicate(s *state.State, req pb.RpcBlockTableColumnDuplicateRequest) (id string, err error) {
|
||||
switch req.Position {
|
||||
case model.Block_Left:
|
||||
req.Position = model.Block_Top
|
||||
case model.Block_Right:
|
||||
req.Position = model.Block_Bottom
|
||||
default:
|
||||
return "", fmt.Errorf("position is not supported")
|
||||
}
|
||||
|
||||
srcCol, err := pickColumn(s, req.BlockId)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("pick source column: %w", err)
|
||||
}
|
||||
|
||||
_, err = pickColumn(s, req.TargetId)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("pick target column: %w", err)
|
||||
}
|
||||
|
||||
tb, err := NewTable(s, req.TargetId)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("init table block: %w", err)
|
||||
}
|
||||
|
||||
newCol := srcCol.Copy()
|
||||
newCol.Model().Id = t.generateColID()
|
||||
if !s.Add(newCol) {
|
||||
return "", fmt.Errorf("add column block")
|
||||
}
|
||||
if err = s.InsertTo(req.TargetId, req.Position, newCol.Model().Id); err != nil {
|
||||
return "", fmt.Errorf("insert column: %w", err)
|
||||
}
|
||||
|
||||
colIdx := tb.MakeColumnIndex()
|
||||
|
||||
for _, rowID := range tb.RowIDs() {
|
||||
row, err := getRow(s, rowID)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("get row %s: %w", rowID, err)
|
||||
}
|
||||
|
||||
var cellID string
|
||||
for _, id := range row.Model().ChildrenIds {
|
||||
_, colID, err := ParseCellID(id)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("parse cell %s in row %s: %w", cellID, rowID, err)
|
||||
}
|
||||
if colID == req.BlockId {
|
||||
cellID = id
|
||||
break
|
||||
}
|
||||
}
|
||||
if cellID == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
cell := s.Pick(cellID)
|
||||
if cell == nil {
|
||||
return "", fmt.Errorf("cell %s is not found", cellID)
|
||||
}
|
||||
cell = cell.Copy()
|
||||
cell.Model().Id = MakeCellID(rowID, newCol.Model().Id)
|
||||
|
||||
if !s.Add(cell) {
|
||||
return "", fmt.Errorf("add cell block")
|
||||
}
|
||||
|
||||
row.Model().ChildrenIds = append(row.Model().ChildrenIds, cell.Model().Id)
|
||||
normalizeRow(nil, colIdx, row)
|
||||
}
|
||||
|
||||
return newCol.Model().Id, nil
|
||||
}
|
||||
|
||||
func (t *Editor) Expand(s *state.State, req pb.RpcBlockTableExpandRequest) error {
|
||||
tb, err := NewTable(s, req.TargetId)
|
||||
if err != nil {
|
||||
return fmt.Errorf("init table block: %w", err)
|
||||
}
|
||||
|
||||
for i := uint32(0); i < req.Columns; i++ {
|
||||
_, err := t.ColumnCreate(s, pb.RpcBlockTableColumnCreateRequest{
|
||||
TargetId: req.TargetId,
|
||||
Position: model.Block_Inner,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("create column: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
for i := uint32(0); i < req.Rows; i++ {
|
||||
rows := tb.Rows()
|
||||
_, err := t.RowCreate(s, pb.RpcBlockTableRowCreateRequest{
|
||||
TargetId: rows.ChildrenIds[len(rows.ChildrenIds)-1],
|
||||
Position: model.Block_Bottom,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("create row: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *Editor) Sort(s *state.State, req pb.RpcBlockTableSortRequest) error {
|
||||
_, err := pickColumn(s, req.ColumnId)
|
||||
if err != nil {
|
||||
return fmt.Errorf("pick column: %w", err)
|
||||
}
|
||||
|
||||
tb, err := NewTable(s, req.ColumnId)
|
||||
if err != nil {
|
||||
return fmt.Errorf("init table block: %w", err)
|
||||
}
|
||||
|
||||
rows := s.Get(tb.Rows().Id)
|
||||
sorter := tableSorter{
|
||||
rowIDs: make([]string, 0, len(rows.Model().ChildrenIds)),
|
||||
values: make([]string, len(rows.Model().ChildrenIds)),
|
||||
}
|
||||
|
||||
var headers []string
|
||||
|
||||
var i int
|
||||
for _, rowID := range rows.Model().ChildrenIds {
|
||||
row, err := pickRow(s, rowID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("pick row %s: %w", rowID, err)
|
||||
}
|
||||
if row.Model().GetTableRow().GetIsHeader() {
|
||||
headers = append(headers, rowID)
|
||||
continue
|
||||
}
|
||||
|
||||
sorter.rowIDs = append(sorter.rowIDs, rowID)
|
||||
for _, cellID := range row.Model().ChildrenIds {
|
||||
_, colID, err := ParseCellID(cellID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("parse cell id %s: %w", cellID, err)
|
||||
}
|
||||
if colID == req.ColumnId {
|
||||
cell := s.Pick(cellID)
|
||||
if cell == nil {
|
||||
return fmt.Errorf("cell %s is not found", cellID)
|
||||
}
|
||||
sorter.values[i] = cell.Model().GetText().GetText()
|
||||
}
|
||||
}
|
||||
i++
|
||||
}
|
||||
|
||||
if req.Type == model.BlockContentDataviewSort_Asc {
|
||||
sort.Stable(sorter)
|
||||
} else {
|
||||
sort.Stable(sort.Reverse(sorter))
|
||||
}
|
||||
|
||||
// nolint:gocritic
|
||||
rows.Model().ChildrenIds = append(headers, sorter.rowIDs...)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *Editor) CellCreate(s *state.State, rowID string, colID string, b *model.Block) (string, error) {
|
||||
tb, err := NewTable(s, rowID)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("initialize table state: %w", err)
|
||||
}
|
||||
|
||||
row, err := getRow(s, rowID)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("get row: %w", err)
|
||||
}
|
||||
if _, err = pickColumn(s, colID); err != nil {
|
||||
return "", fmt.Errorf("pick column: %w", err)
|
||||
}
|
||||
|
||||
cellID, err := addCell(s, rowID, colID)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("add cell: %w", err)
|
||||
}
|
||||
cell := s.Get(cellID)
|
||||
cell.Model().Content = b.Content
|
||||
if err := s.InsertTo(rowID, model.Block_Inner, cellID); err != nil {
|
||||
return "", fmt.Errorf("insert to: %w", err)
|
||||
}
|
||||
|
||||
colIdx := tb.MakeColumnIndex()
|
||||
normalizeRow(nil, colIdx, row)
|
||||
|
||||
return cellID, nil
|
||||
}
|
||||
var ErrCannotMoveTableBlocks = fmt.Errorf("can not move table blocks")
|
||||
|
||||
type tableSorter struct {
|
||||
rowIDs []string
|
||||
|
@ -766,27 +35,6 @@ func (t tableSorter) Swap(i, j int) {
|
|||
t.rowIDs[i], t.rowIDs[j] = t.rowIDs[j], t.rowIDs[i]
|
||||
}
|
||||
|
||||
func (t *Editor) addColumnHeader(s *state.State) (string, error) {
|
||||
b := simple.New(&model.Block{
|
||||
Id: t.generateColID(),
|
||||
Content: &model.BlockContentOfTableColumn{
|
||||
TableColumn: &model.BlockContentTableColumn{},
|
||||
},
|
||||
})
|
||||
if !s.Add(b) {
|
||||
return "", fmt.Errorf("add column block")
|
||||
}
|
||||
return b.Model().Id, nil
|
||||
}
|
||||
|
||||
func (t *Editor) addRow(s *state.State) (string, error) {
|
||||
row := makeRow(t.generateRowID())
|
||||
if !s.Add(row) {
|
||||
return "", fmt.Errorf("add row block")
|
||||
}
|
||||
return row.Model().Id, nil
|
||||
}
|
||||
|
||||
func makeRow(id string) simple.Block {
|
||||
return simple.New(&model.Block{
|
||||
Id: id,
|
||||
|
@ -868,14 +116,7 @@ func NewTable(s *state.State, id string) (*Table, error) {
|
|||
s: s,
|
||||
}
|
||||
|
||||
next := s.Pick(id)
|
||||
for next != nil {
|
||||
if next.Model().GetTable() != nil {
|
||||
tb.block = next
|
||||
break
|
||||
}
|
||||
next = s.PickParentOf(next.Model().Id)
|
||||
}
|
||||
tb.block = PickTableRootBlock(s, id)
|
||||
if tb.block == nil {
|
||||
return nil, fmt.Errorf("root table block is not found")
|
||||
}
|
||||
|
@ -901,6 +142,19 @@ func NewTable(s *state.State, id string) (*Table, error) {
|
|||
return &tb, nil
|
||||
}
|
||||
|
||||
// PickTableRootBlock iterates over parents of block. Returns nil in case root table block is not found
|
||||
func PickTableRootBlock(s *state.State, id string) (block simple.Block) {
|
||||
next := s.Pick(id)
|
||||
for next != nil {
|
||||
if next.Model().GetTable() != nil {
|
||||
block = next
|
||||
break
|
||||
}
|
||||
next = s.PickParentOf(next.Model().Id)
|
||||
}
|
||||
return block
|
||||
}
|
||||
|
||||
// destructureDivs removes child dividers from block
|
||||
func destructureDivs(s *state.State, blockID string) {
|
||||
parent := s.Pick(blockID)
|
||||
|
@ -1006,3 +260,32 @@ func (tb Table) Iterate(f func(b simple.Block, pos CellPosition) bool) error {
|
|||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CheckTableBlocksMove checks if Insert operation is allowed in case table blocks are affected
|
||||
func CheckTableBlocksMove(st *state.State, target string, pos model.BlockPosition, blockIds []string) (string, model.BlockPosition, error) {
|
||||
if t, err := NewTable(st, target); err == nil && t != nil {
|
||||
// we allow moving rows between each other
|
||||
if lo.Every(t.RowIDs(), append(blockIds, target)) {
|
||||
if pos == model.Block_Bottom || pos == model.Block_Top {
|
||||
return target, pos, nil
|
||||
}
|
||||
return "", 0, fmt.Errorf("failed to move rows: position should be Top or Bottom, got %s", model.BlockPosition_name[int32(pos)])
|
||||
}
|
||||
}
|
||||
|
||||
for _, id := range blockIds {
|
||||
t := PickTableRootBlock(st, id)
|
||||
if t != nil && t.Model().Id != id {
|
||||
// we should not move table blocks except table root block
|
||||
return "", 0, ErrCannotMoveTableBlocks
|
||||
}
|
||||
}
|
||||
|
||||
t := PickTableRootBlock(st, target)
|
||||
if t != nil && t.Model().Id != target {
|
||||
// if the target is one of table blocks, but not table root, we should insert blocks under the table
|
||||
return t.Model().Id, model.Block_Bottom, nil
|
||||
}
|
||||
|
||||
return target, pos, nil
|
||||
}
|
||||
|
|
File diff suppressed because it is too large
Load diff
|
@ -4,7 +4,6 @@ package mock_treesyncer
|
|||
|
||||
import (
|
||||
app "github.com/anyproto/any-sync/app"
|
||||
domain "github.com/anyproto/anytype-heart/core/domain"
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
)
|
||||
|
||||
|
@ -112,38 +111,37 @@ func (_c *MockSyncDetailsUpdater_Name_Call) RunAndReturn(run func() string) *Moc
|
|||
return _c
|
||||
}
|
||||
|
||||
// UpdateDetails provides a mock function with given fields: objectId, status, syncError, spaceId
|
||||
func (_m *MockSyncDetailsUpdater) UpdateDetails(objectId []string, status domain.ObjectSyncStatus, syncError domain.SyncError, spaceId string) {
|
||||
_m.Called(objectId, status, syncError, spaceId)
|
||||
// UpdateSpaceDetails provides a mock function with given fields: existing, missing, spaceId
|
||||
func (_m *MockSyncDetailsUpdater) UpdateSpaceDetails(existing []string, missing []string, spaceId string) {
|
||||
_m.Called(existing, missing, spaceId)
|
||||
}
|
||||
|
||||
// MockSyncDetailsUpdater_UpdateDetails_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateDetails'
|
||||
type MockSyncDetailsUpdater_UpdateDetails_Call struct {
|
||||
// MockSyncDetailsUpdater_UpdateSpaceDetails_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateSpaceDetails'
|
||||
type MockSyncDetailsUpdater_UpdateSpaceDetails_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// UpdateDetails is a helper method to define mock.On call
|
||||
// - objectId []string
|
||||
// - status domain.ObjectSyncStatus
|
||||
// - syncError domain.SyncError
|
||||
// UpdateSpaceDetails is a helper method to define mock.On call
|
||||
// - existing []string
|
||||
// - missing []string
|
||||
// - spaceId string
|
||||
func (_e *MockSyncDetailsUpdater_Expecter) UpdateDetails(objectId interface{}, status interface{}, syncError interface{}, spaceId interface{}) *MockSyncDetailsUpdater_UpdateDetails_Call {
|
||||
return &MockSyncDetailsUpdater_UpdateDetails_Call{Call: _e.mock.On("UpdateDetails", objectId, status, syncError, spaceId)}
|
||||
func (_e *MockSyncDetailsUpdater_Expecter) UpdateSpaceDetails(existing interface{}, missing interface{}, spaceId interface{}) *MockSyncDetailsUpdater_UpdateSpaceDetails_Call {
|
||||
return &MockSyncDetailsUpdater_UpdateSpaceDetails_Call{Call: _e.mock.On("UpdateSpaceDetails", existing, missing, spaceId)}
|
||||
}
|
||||
|
||||
func (_c *MockSyncDetailsUpdater_UpdateDetails_Call) Run(run func(objectId []string, status domain.ObjectSyncStatus, syncError domain.SyncError, spaceId string)) *MockSyncDetailsUpdater_UpdateDetails_Call {
|
||||
func (_c *MockSyncDetailsUpdater_UpdateSpaceDetails_Call) Run(run func(existing []string, missing []string, spaceId string)) *MockSyncDetailsUpdater_UpdateSpaceDetails_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].([]string), args[1].(domain.ObjectSyncStatus), args[2].(domain.SyncError), args[3].(string))
|
||||
run(args[0].([]string), args[1].([]string), args[2].(string))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockSyncDetailsUpdater_UpdateDetails_Call) Return() *MockSyncDetailsUpdater_UpdateDetails_Call {
|
||||
func (_c *MockSyncDetailsUpdater_UpdateSpaceDetails_Call) Return() *MockSyncDetailsUpdater_UpdateSpaceDetails_Call {
|
||||
_c.Call.Return()
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockSyncDetailsUpdater_UpdateDetails_Call) RunAndReturn(run func([]string, domain.ObjectSyncStatus, domain.SyncError, string)) *MockSyncDetailsUpdater_UpdateDetails_Call {
|
||||
func (_c *MockSyncDetailsUpdater_UpdateSpaceDetails_Call) RunAndReturn(run func([]string, []string, string)) *MockSyncDetailsUpdater_UpdateSpaceDetails_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
|
|
@ -15,8 +15,6 @@ import (
|
|||
"github.com/anyproto/any-sync/net/streampool"
|
||||
"github.com/anyproto/any-sync/nodeconf"
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/anyproto/anytype-heart/core/domain"
|
||||
)
|
||||
|
||||
var log = logger.NewNamed(treemanager.CName)
|
||||
|
@ -62,14 +60,9 @@ type SyncedTreeRemover interface {
|
|||
RemoveAllExcept(senderId string, differentRemoteIds []string)
|
||||
}
|
||||
|
||||
type PeerStatusChecker interface {
|
||||
app.Component
|
||||
IsPeerOffline(peerId string) bool
|
||||
}
|
||||
|
||||
type SyncDetailsUpdater interface {
|
||||
app.Component
|
||||
UpdateDetails(objectId []string, status domain.ObjectSyncStatus, syncError domain.SyncError, spaceId string)
|
||||
UpdateSpaceDetails(existing, missing []string, spaceId string)
|
||||
}
|
||||
|
||||
type treeSyncer struct {
|
||||
|
@ -84,7 +77,6 @@ type treeSyncer struct {
|
|||
treeManager treemanager.TreeManager
|
||||
isRunning bool
|
||||
isSyncing bool
|
||||
peerManager PeerStatusChecker
|
||||
nodeConf nodeconf.NodeConf
|
||||
syncedTreeRemover SyncedTreeRemover
|
||||
syncDetailsUpdater SyncDetailsUpdater
|
||||
|
@ -106,7 +98,6 @@ func NewTreeSyncer(spaceId string) treesyncer.TreeSyncer {
|
|||
func (t *treeSyncer) Init(a *app.App) (err error) {
|
||||
t.isSyncing = true
|
||||
t.treeManager = app.MustComponent[treemanager.TreeManager](a)
|
||||
t.peerManager = app.MustComponent[PeerStatusChecker](a)
|
||||
t.nodeConf = app.MustComponent[nodeconf.NodeConf](a)
|
||||
t.syncedTreeRemover = app.MustComponent[SyncedTreeRemover](a)
|
||||
t.syncDetailsUpdater = app.MustComponent[SyncDetailsUpdater](a)
|
||||
|
@ -161,13 +152,11 @@ func (t *treeSyncer) ShouldSync(peerId string) bool {
|
|||
return t.isSyncing
|
||||
}
|
||||
|
||||
func (t *treeSyncer) SyncAll(ctx context.Context, peerId string, existing, missing []string) error {
|
||||
func (t *treeSyncer) SyncAll(ctx context.Context, peerId string, existing, missing []string) (err error) {
|
||||
t.Lock()
|
||||
defer t.Unlock()
|
||||
var err error
|
||||
isResponsible := slices.Contains(t.nodeConf.NodeIds(t.spaceId), peerId)
|
||||
defer t.sendResultEvent(err, isResponsible, peerId, existing)
|
||||
t.sendSyncingEvent(peerId, existing, missing, isResponsible)
|
||||
t.sendSyncEvents(existing, missing, isResponsible)
|
||||
reqExec, exists := t.requestPools[peerId]
|
||||
if !exists {
|
||||
reqExec = newExecutor(t.requests, 0)
|
||||
|
@ -206,31 +195,15 @@ func (t *treeSyncer) SyncAll(ctx context.Context, peerId string, existing, missi
|
|||
return nil
|
||||
}
|
||||
|
||||
func (t *treeSyncer) sendSyncingEvent(peerId string, existing []string, missing []string, nodePeer bool) {
|
||||
func (t *treeSyncer) sendSyncEvents(existing, missing []string, nodePeer bool) {
|
||||
if !nodePeer {
|
||||
return
|
||||
}
|
||||
if t.peerManager.IsPeerOffline(peerId) {
|
||||
t.sendDetailsUpdates(existing, domain.ObjectError, domain.NetworkError)
|
||||
return
|
||||
}
|
||||
if len(existing) != 0 || len(missing) != 0 {
|
||||
t.sendDetailsUpdates(existing, domain.ObjectSyncing, domain.Null)
|
||||
}
|
||||
t.sendDetailsUpdates(existing, missing)
|
||||
}
|
||||
|
||||
func (t *treeSyncer) sendResultEvent(err error, nodePeer bool, peerId string, existing []string) {
|
||||
if nodePeer && !t.peerManager.IsPeerOffline(peerId) {
|
||||
if err != nil {
|
||||
t.sendDetailsUpdates(existing, domain.ObjectError, domain.NetworkError)
|
||||
} else {
|
||||
t.sendDetailsUpdates(existing, domain.ObjectSynced, domain.Null)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (t *treeSyncer) sendDetailsUpdates(existing []string, status domain.ObjectSyncStatus, syncError domain.SyncError) {
|
||||
t.syncDetailsUpdater.UpdateDetails(existing, status, syncError, t.spaceId)
|
||||
func (t *treeSyncer) sendDetailsUpdates(existing, missing []string) {
|
||||
t.syncDetailsUpdater.UpdateSpaceDetails(existing, missing, t.spaceId)
|
||||
}
|
||||
|
||||
func (t *treeSyncer) requestTree(peerId, id string) {
|
||||
|
@ -257,6 +230,7 @@ func (t *treeSyncer) updateTree(peerId, id string) {
|
|||
syncTree, ok := tr.(synctree.SyncTree)
|
||||
if !ok {
|
||||
log.Warn("not a sync tree")
|
||||
return
|
||||
}
|
||||
if err = syncTree.SyncWithPeer(ctx, peerId); err != nil {
|
||||
log.Warn("synctree.SyncWithPeer error", zap.Error(err))
|
||||
|
|
|
@ -16,7 +16,6 @@ import (
|
|||
"go.uber.org/mock/gomock"
|
||||
|
||||
"github.com/anyproto/anytype-heart/core/block/object/treesyncer/mock_treesyncer"
|
||||
"github.com/anyproto/anytype-heart/core/domain"
|
||||
"github.com/anyproto/anytype-heart/tests/testutil"
|
||||
)
|
||||
|
||||
|
@ -26,7 +25,6 @@ type fixture struct {
|
|||
missingMock *mock_objecttree.MockObjectTree
|
||||
existingMock *mock_synctree.MockSyncTree
|
||||
treeManager *mock_treemanager.MockTreeManager
|
||||
checker *mock_treesyncer.MockPeerStatusChecker
|
||||
nodeConf *mock_nodeconf.MockService
|
||||
syncStatus *mock_treesyncer.MockSyncedTreeRemover
|
||||
syncDetailsUpdater *mock_treesyncer.MockSyncDetailsUpdater
|
||||
|
@ -37,8 +35,6 @@ func newFixture(t *testing.T, spaceId string) *fixture {
|
|||
treeManager := mock_treemanager.NewMockTreeManager(ctrl)
|
||||
missingMock := mock_objecttree.NewMockObjectTree(ctrl)
|
||||
existingMock := mock_synctree.NewMockSyncTree(ctrl)
|
||||
checker := mock_treesyncer.NewMockPeerStatusChecker(t)
|
||||
checker.EXPECT().Name().Return("checker").Maybe()
|
||||
nodeConf := mock_nodeconf.NewMockService(ctrl)
|
||||
nodeConf.EXPECT().Name().Return("nodeConf").AnyTimes()
|
||||
syncStatus := mock_treesyncer.NewMockSyncedTreeRemover(t)
|
||||
|
@ -46,7 +42,6 @@ func newFixture(t *testing.T, spaceId string) *fixture {
|
|||
|
||||
a := new(app.App)
|
||||
a.Register(testutil.PrepareMock(context.Background(), a, treeManager)).
|
||||
Register(testutil.PrepareMock(context.Background(), a, checker)).
|
||||
Register(testutil.PrepareMock(context.Background(), a, syncStatus)).
|
||||
Register(testutil.PrepareMock(context.Background(), a, nodeConf)).
|
||||
Register(testutil.PrepareMock(context.Background(), a, syncDetailsUpdater))
|
||||
|
@ -59,7 +54,6 @@ func newFixture(t *testing.T, spaceId string) *fixture {
|
|||
missingMock: missingMock,
|
||||
existingMock: existingMock,
|
||||
treeManager: treeManager,
|
||||
checker: checker,
|
||||
nodeConf: nodeConf,
|
||||
syncStatus: syncStatus,
|
||||
syncDetailsUpdater: syncDetailsUpdater,
|
||||
|
@ -91,6 +85,25 @@ func TestTreeSyncer(t *testing.T) {
|
|||
fx.Close(ctx)
|
||||
})
|
||||
|
||||
t.Run("delayed sync notify sync status", func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
fx := newFixture(t, spaceId)
|
||||
fx.treeManager.EXPECT().GetTree(gomock.Any(), spaceId, existingId).Return(fx.existingMock, nil)
|
||||
fx.existingMock.EXPECT().SyncWithPeer(gomock.Any(), peerId).Return(nil)
|
||||
fx.treeManager.EXPECT().GetTree(gomock.Any(), spaceId, missingId).Return(fx.missingMock, nil)
|
||||
fx.nodeConf.EXPECT().NodeIds(spaceId).Return([]string{peerId})
|
||||
fx.syncDetailsUpdater.EXPECT().UpdateSpaceDetails([]string{existingId}, []string{missingId}, spaceId)
|
||||
fx.syncStatus.EXPECT().RemoveAllExcept(peerId, []string{existingId}).Return()
|
||||
err := fx.SyncAll(context.Background(), peerId, []string{existingId}, []string{missingId})
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, fx.requestPools[peerId])
|
||||
require.NotNil(t, fx.headPools[peerId])
|
||||
|
||||
fx.StartSync()
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
fx.Close(ctx)
|
||||
})
|
||||
|
||||
t.Run("sync after run", func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
fx := newFixture(t, spaceId)
|
||||
|
@ -189,45 +202,5 @@ func TestTreeSyncer(t *testing.T) {
|
|||
require.Equal(t, []string{"before close", "after done"}, events)
|
||||
mutex.Unlock()
|
||||
})
|
||||
t.Run("send offline event", func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
fx := newFixture(t, spaceId)
|
||||
fx.treeManager.EXPECT().GetTree(gomock.Any(), spaceId, existingId).Return(fx.existingMock, nil)
|
||||
fx.existingMock.EXPECT().SyncWithPeer(gomock.Any(), peerId).Return(nil)
|
||||
fx.treeManager.EXPECT().GetTree(gomock.Any(), spaceId, missingId).Return(fx.missingMock, nil)
|
||||
fx.nodeConf.EXPECT().NodeIds(spaceId).Return([]string{peerId})
|
||||
fx.checker.EXPECT().IsPeerOffline(peerId).Return(true)
|
||||
fx.syncStatus.EXPECT().RemoveAllExcept(peerId, []string{existingId}).Return()
|
||||
fx.syncDetailsUpdater.EXPECT().UpdateDetails([]string{"existing"}, domain.ObjectError, domain.NetworkError, "spaceId").Return()
|
||||
|
||||
fx.StartSync()
|
||||
err := fx.SyncAll(context.Background(), peerId, []string{existingId}, []string{missingId})
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, fx.requestPools[peerId])
|
||||
require.NotNil(t, fx.headPools[peerId])
|
||||
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
fx.Close(ctx)
|
||||
})
|
||||
t.Run("send syncing and synced event", func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
fx := newFixture(t, spaceId)
|
||||
fx.treeManager.EXPECT().GetTree(gomock.Any(), spaceId, existingId).Return(fx.existingMock, nil)
|
||||
fx.existingMock.EXPECT().SyncWithPeer(gomock.Any(), peerId).Return(nil)
|
||||
fx.treeManager.EXPECT().GetTree(gomock.Any(), spaceId, missingId).Return(fx.missingMock, nil)
|
||||
fx.nodeConf.EXPECT().NodeIds(spaceId).Return([]string{peerId})
|
||||
fx.checker.EXPECT().IsPeerOffline(peerId).Return(false)
|
||||
fx.syncStatus.EXPECT().RemoveAllExcept(peerId, []string{existingId}).Return()
|
||||
fx.syncDetailsUpdater.EXPECT().UpdateDetails([]string{"existing"}, domain.ObjectSynced, domain.Null, "spaceId").Return()
|
||||
fx.syncDetailsUpdater.EXPECT().UpdateDetails([]string{"existing"}, domain.ObjectSyncing, domain.Null, "spaceId").Return()
|
||||
|
||||
fx.StartSync()
|
||||
err := fx.SyncAll(context.Background(), peerId, []string{existingId}, []string{missingId})
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, fx.requestPools[peerId])
|
||||
require.NotNil(t, fx.headPools[peerId])
|
||||
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
fx.Close(ctx)
|
||||
})
|
||||
}
|
||||
|
|
|
@ -1,53 +1,29 @@
|
|||
package domain
|
||||
|
||||
type SyncType int32
|
||||
|
||||
const (
|
||||
Objects SyncType = 0
|
||||
Files SyncType = 1
|
||||
)
|
||||
|
||||
type SpaceSyncStatus int32
|
||||
|
||||
const (
|
||||
Synced SpaceSyncStatus = 0
|
||||
Syncing SpaceSyncStatus = 1
|
||||
Error SpaceSyncStatus = 2
|
||||
Offline SpaceSyncStatus = 3
|
||||
Unknown SpaceSyncStatus = 4
|
||||
SpaceSyncStatusSynced SpaceSyncStatus = 0
|
||||
SpaceSyncStatusSyncing SpaceSyncStatus = 1
|
||||
SpaceSyncStatusError SpaceSyncStatus = 2
|
||||
SpaceSyncStatusOffline SpaceSyncStatus = 3
|
||||
SpaceSyncStatusUnknown SpaceSyncStatus = 4
|
||||
)
|
||||
|
||||
type ObjectSyncStatus int32
|
||||
|
||||
const (
|
||||
ObjectSynced ObjectSyncStatus = 0
|
||||
ObjectSyncing ObjectSyncStatus = 1
|
||||
ObjectError ObjectSyncStatus = 2
|
||||
ObjectQueued ObjectSyncStatus = 3
|
||||
ObjectSyncStatusSynced ObjectSyncStatus = 0
|
||||
ObjectSyncStatusSyncing ObjectSyncStatus = 1
|
||||
ObjectSyncStatusError ObjectSyncStatus = 2
|
||||
ObjectSyncStatusQueued ObjectSyncStatus = 3
|
||||
)
|
||||
|
||||
type SyncError int32
|
||||
|
||||
const (
|
||||
Null SyncError = 0
|
||||
StorageLimitExceed SyncError = 1
|
||||
IncompatibleVersion SyncError = 2
|
||||
NetworkError SyncError = 3
|
||||
Oversized SyncError = 4
|
||||
SyncErrorNull SyncError = 0
|
||||
SyncErrorIncompatibleVersion SyncError = 2
|
||||
SyncErrorNetworkError SyncError = 3
|
||||
SyncErrorOversized SyncError = 4
|
||||
)
|
||||
|
||||
type SpaceSync struct {
|
||||
SpaceId string
|
||||
Status SpaceSyncStatus
|
||||
SyncError SyncError
|
||||
SyncType SyncType
|
||||
}
|
||||
|
||||
func MakeSyncStatus(spaceId string, status SpaceSyncStatus, syncError SyncError, syncType SyncType) *SpaceSync {
|
||||
return &SpaceSync{
|
||||
SpaceId: spaceId,
|
||||
Status: status,
|
||||
SyncError: syncError,
|
||||
SyncType: syncType,
|
||||
}
|
||||
}
|
||||
|
|
|
@ -304,8 +304,8 @@ func (s *service) makeInitialDetails(fileId domain.FileId, origin objectorigin.O
|
|||
// Use general file layout. It will be changed for proper layout after indexing
|
||||
bundle.RelationKeyLayout.String(): pbtypes.Int64(int64(model.ObjectType_file)),
|
||||
bundle.RelationKeyFileIndexingStatus.String(): pbtypes.Int64(int64(model.FileIndexingStatus_NotIndexed)),
|
||||
bundle.RelationKeySyncStatus.String(): pbtypes.Int64(int64(domain.ObjectQueued)),
|
||||
bundle.RelationKeySyncError.String(): pbtypes.Int64(int64(domain.Null)),
|
||||
bundle.RelationKeySyncStatus.String(): pbtypes.Int64(int64(domain.ObjectSyncStatusQueued)),
|
||||
bundle.RelationKeySyncError.String(): pbtypes.Int64(int64(domain.SyncErrorNull)),
|
||||
bundle.RelationKeyFileBackupStatus.String(): pbtypes.Int64(int64(filesyncstatus.Queued)),
|
||||
},
|
||||
}
|
||||
|
|
|
@ -97,6 +97,9 @@ func (s *fileSync) handleLimitReachedError(err error, it *QueueItem) *errLimitRe
|
|||
func (s *fileSync) uploadingHandler(ctx context.Context, it *QueueItem) (persistentqueue.Action, error) {
|
||||
spaceId, fileId := it.SpaceId, it.FileId
|
||||
err := s.uploadFile(ctx, spaceId, fileId, it.ObjectId)
|
||||
if errors.Is(err, context.Canceled) {
|
||||
return persistentqueue.ActionRetry, nil
|
||||
}
|
||||
if isObjectDeletedError(err) {
|
||||
return persistentqueue.ActionDone, s.DeleteFile(it.ObjectId, it.FullFileId())
|
||||
}
|
||||
|
@ -143,6 +146,9 @@ func (s *fileSync) addToRetryUploadingQueue(it *QueueItem) persistentqueue.Actio
|
|||
func (s *fileSync) retryingHandler(ctx context.Context, it *QueueItem) (persistentqueue.Action, error) {
|
||||
spaceId, fileId := it.SpaceId, it.FileId
|
||||
err := s.uploadFile(ctx, spaceId, fileId, it.ObjectId)
|
||||
if errors.Is(err, context.Canceled) {
|
||||
return persistentqueue.ActionRetry, nil
|
||||
}
|
||||
if isObjectDeletedError(err) {
|
||||
return persistentqueue.ActionDone, s.removeFromUploadingQueues(it.ObjectId)
|
||||
}
|
||||
|
|
|
@ -30,7 +30,7 @@ type service struct {
|
|||
func (s *service) Init(a *app.App) (err error) {
|
||||
s.pool = a.MustComponent(pool.CName).(pool.Pool)
|
||||
s.peerStore = a.MustComponent(peerstore.CName).(peerstore.PeerStore)
|
||||
s.peerStore.AddObserver(func(peerId string, spaceIds []string) {
|
||||
s.peerStore.AddObserver(func(peerId string, _, spaceIds []string, peerRemoved bool) {
|
||||
select {
|
||||
case s.peerUpdateCh <- struct{}{}:
|
||||
default:
|
||||
|
|
|
@ -207,11 +207,11 @@ func (i *indexer) ReindexSpace(space clientspace.Space) (err error) {
|
|||
|
||||
func (i *indexer) addSyncDetails(space clientspace.Space) {
|
||||
typesForSyncRelations := helper.SyncRelationsSmartblockTypes()
|
||||
syncStatus := domain.ObjectSynced
|
||||
syncError := domain.Null
|
||||
syncStatus := domain.ObjectSyncStatusSynced
|
||||
syncError := domain.SyncErrorNull
|
||||
if i.config.IsLocalOnlyMode() {
|
||||
syncStatus = domain.ObjectError
|
||||
syncError = domain.NetworkError
|
||||
syncStatus = domain.ObjectSyncStatusError
|
||||
syncError = domain.SyncErrorNetworkError
|
||||
}
|
||||
ids, err := i.getIdsForTypes(space, typesForSyncRelations...)
|
||||
if err != nil {
|
||||
|
|
|
@ -2,22 +2,28 @@ package peerstatus
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/anyproto/any-sync/app"
|
||||
"github.com/anyproto/any-sync/net/pool"
|
||||
"github.com/samber/lo"
|
||||
|
||||
"github.com/anyproto/anytype-heart/core/event"
|
||||
"github.com/anyproto/anytype-heart/core/session"
|
||||
"github.com/anyproto/anytype-heart/pb"
|
||||
"github.com/anyproto/anytype-heart/pkg/lib/logging"
|
||||
"github.com/anyproto/anytype-heart/space/spacecore/peerstore"
|
||||
)
|
||||
|
||||
const CName = "core.syncstatus.p2p"
|
||||
|
||||
var log = logging.Logger(CName)
|
||||
|
||||
type Status int32
|
||||
|
||||
var ErrClosed = errors.New("component is closing")
|
||||
|
||||
const (
|
||||
Unknown Status = 0
|
||||
Connected Status = 1
|
||||
|
@ -25,6 +31,19 @@ const (
|
|||
NotConnected Status = 3
|
||||
)
|
||||
|
||||
func (s Status) ToPb() pb.EventP2PStatusStatus {
|
||||
switch s {
|
||||
case Connected:
|
||||
return pb.EventP2PStatus_Connected
|
||||
case NotConnected:
|
||||
return pb.EventP2PStatus_NotConnected
|
||||
case NotPossible:
|
||||
return pb.EventP2PStatus_NotPossible
|
||||
}
|
||||
// default status is NotConnected
|
||||
return pb.EventP2PStatus_NotConnected
|
||||
}
|
||||
|
||||
type LocalDiscoveryHook interface {
|
||||
app.Component
|
||||
RegisterP2PNotPossible(hook func())
|
||||
|
@ -33,39 +52,35 @@ type LocalDiscoveryHook interface {
|
|||
|
||||
type PeerToPeerStatus interface {
|
||||
app.ComponentRunnable
|
||||
SendNotPossibleStatus()
|
||||
CheckPeerStatus()
|
||||
ResetNotPossibleStatus()
|
||||
RegisterSpace(spaceId string)
|
||||
UnregisterSpace(spaceId string)
|
||||
}
|
||||
|
||||
type spaceStatus struct {
|
||||
status Status
|
||||
connectionsCount int64
|
||||
}
|
||||
|
||||
type p2pStatus struct {
|
||||
spaceIds map[string]struct{}
|
||||
spaceIds map[string]*spaceStatus
|
||||
eventSender event.Sender
|
||||
contextCancel context.CancelFunc
|
||||
ctx context.Context
|
||||
peerStore peerstore.PeerStore
|
||||
|
||||
sync.Mutex
|
||||
status Status
|
||||
connectionsCount int64
|
||||
|
||||
forceCheckSpace chan struct{}
|
||||
updateStatus chan Status
|
||||
resetNotPossibleStatus chan struct{}
|
||||
finish chan struct{}
|
||||
p2pNotPossible bool // global flag means p2p is not possible because of network
|
||||
workerFinished chan struct{}
|
||||
refreshSpaceId chan string
|
||||
|
||||
peersConnectionPool pool.Pool
|
||||
}
|
||||
|
||||
func New() PeerToPeerStatus {
|
||||
p2pStatusService := &p2pStatus{
|
||||
forceCheckSpace: make(chan struct{}, 1),
|
||||
updateStatus: make(chan Status, 1),
|
||||
resetNotPossibleStatus: make(chan struct{}, 1),
|
||||
finish: make(chan struct{}),
|
||||
spaceIds: make(map[string]struct{}),
|
||||
workerFinished: make(chan struct{}),
|
||||
refreshSpaceId: make(chan string),
|
||||
spaceIds: make(map[string]*spaceStatus),
|
||||
}
|
||||
|
||||
return p2pStatusService
|
||||
|
@ -77,20 +92,35 @@ func (p *p2pStatus) Init(a *app.App) (err error) {
|
|||
p.peersConnectionPool = app.MustComponent[pool.Service](a)
|
||||
localDiscoveryHook := app.MustComponent[LocalDiscoveryHook](a)
|
||||
sessionHookRunner := app.MustComponent[session.HookRunner](a)
|
||||
localDiscoveryHook.RegisterP2PNotPossible(p.SendNotPossibleStatus)
|
||||
localDiscoveryHook.RegisterResetNotPossible(p.ResetNotPossibleStatus)
|
||||
localDiscoveryHook.RegisterP2PNotPossible(p.setNotPossibleStatus)
|
||||
localDiscoveryHook.RegisterResetNotPossible(p.resetNotPossibleStatus)
|
||||
sessionHookRunner.RegisterHook(p.sendStatusForNewSession)
|
||||
p.ctx, p.contextCancel = context.WithCancel(context.Background())
|
||||
p.peerStore.AddObserver(func(peerId string, spaceIdsBefore, spaceIdsAfter []string, peerRemoved bool) {
|
||||
// we need to update status for all spaces that were either added or removed to some local peer
|
||||
// because we start this observer on init we can be sure that the spaceIdsBefore is empty on the first run for peer
|
||||
removed, added := lo.Difference(spaceIdsBefore, spaceIdsAfter)
|
||||
err := p.refreshSpaces(lo.Union(removed, added))
|
||||
if errors.Is(err, ErrClosed) {
|
||||
return
|
||||
} else if err != nil {
|
||||
log.Errorf("refreshSpaces failed: %v", err)
|
||||
}
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *p2pStatus) sendStatusForNewSession(ctx session.Context) error {
|
||||
p.sendStatus(p.status)
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
for spaceId, space := range p.spaceIds {
|
||||
p.sendEvent(ctx.ID(), spaceId, space.status.ToPb(), space.connectionsCount)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *p2pStatus) Run(ctx context.Context) error {
|
||||
p.ctx, p.contextCancel = context.WithCancel(context.Background())
|
||||
go p.checkP2PDevices()
|
||||
go p.worker()
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -98,7 +128,7 @@ func (p *p2pStatus) Close(ctx context.Context) error {
|
|||
if p.contextCancel != nil {
|
||||
p.contextCancel()
|
||||
}
|
||||
<-p.finish
|
||||
<-p.workerFinished
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -106,156 +136,145 @@ func (p *p2pStatus) Name() (name string) {
|
|||
return CName
|
||||
}
|
||||
|
||||
func (p *p2pStatus) CheckPeerStatus() {
|
||||
p.forceCheckSpace <- struct{}{}
|
||||
}
|
||||
|
||||
func (p *p2pStatus) SendNotPossibleStatus() {
|
||||
p.updateStatus <- NotPossible
|
||||
}
|
||||
|
||||
func (p *p2pStatus) ResetNotPossibleStatus() {
|
||||
p.resetNotPossibleStatus <- struct{}{}
|
||||
}
|
||||
|
||||
func (p *p2pStatus) RegisterSpace(spaceId string) {
|
||||
func (p *p2pStatus) setNotPossibleStatus() {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
p.spaceIds[spaceId] = struct{}{}
|
||||
connection := p.connectionsCount
|
||||
|
||||
p.eventSender.Broadcast(&pb.Event{
|
||||
Messages: []*pb.EventMessage{
|
||||
{
|
||||
Value: &pb.EventMessageValueOfP2PStatusUpdate{
|
||||
P2PStatusUpdate: &pb.EventP2PStatusUpdate{
|
||||
SpaceId: spaceId,
|
||||
Status: p.mapStatusToEvent(p.status),
|
||||
DevicesCounter: connection,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
if p.p2pNotPossible {
|
||||
p.Unlock()
|
||||
return
|
||||
}
|
||||
p.p2pNotPossible = true
|
||||
p.Unlock()
|
||||
p.refreshAllSpaces()
|
||||
}
|
||||
|
||||
func (p *p2pStatus) resetNotPossibleStatus() {
|
||||
p.Lock()
|
||||
if !p.p2pNotPossible {
|
||||
p.Unlock()
|
||||
return
|
||||
}
|
||||
p.p2pNotPossible = false
|
||||
p.Unlock()
|
||||
p.refreshAllSpaces()
|
||||
}
|
||||
|
||||
// RegisterSpace registers spaceId to be monitored for p2p status changes
|
||||
// must be called only when p2pStatus is Running
|
||||
func (p *p2pStatus) RegisterSpace(spaceId string) {
|
||||
select {
|
||||
case <-p.ctx.Done():
|
||||
return
|
||||
case p.refreshSpaceId <- spaceId:
|
||||
}
|
||||
}
|
||||
|
||||
// UnregisterSpace unregisters spaceId from monitoring
|
||||
// must be called only when p2pStatus is Running
|
||||
func (p *p2pStatus) UnregisterSpace(spaceId string) {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
delete(p.spaceIds, spaceId)
|
||||
}
|
||||
|
||||
func (p *p2pStatus) checkP2PDevices() {
|
||||
defer close(p.finish)
|
||||
timer := time.NewTicker(10 * time.Second)
|
||||
defer timer.Stop()
|
||||
p.updateSpaceP2PStatus()
|
||||
func (p *p2pStatus) worker() {
|
||||
defer close(p.workerFinished)
|
||||
for {
|
||||
select {
|
||||
case <-p.ctx.Done():
|
||||
return
|
||||
case <-timer.C:
|
||||
p.updateSpaceP2PStatus()
|
||||
case <-p.forceCheckSpace:
|
||||
p.updateSpaceP2PStatus()
|
||||
case newStatus := <-p.updateStatus:
|
||||
p.sendStatus(newStatus)
|
||||
case <-p.resetNotPossibleStatus:
|
||||
p.resetNotPossible()
|
||||
case spaceId := <-p.refreshSpaceId:
|
||||
p.processSpaceStatusUpdate(spaceId)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *p2pStatus) updateSpaceP2PStatus() {
|
||||
func (p *p2pStatus) refreshAllSpaces() {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
connectionCount := p.countOpenConnections()
|
||||
newStatus, event := p.getResultStatus(connectionCount)
|
||||
if newStatus == NotPossible {
|
||||
return
|
||||
var spaceIds = make([]string, 0, len(p.spaceIds))
|
||||
for spaceId := range p.spaceIds {
|
||||
spaceIds = append(spaceIds, spaceId)
|
||||
}
|
||||
if p.status != newStatus || p.connectionsCount != connectionCount {
|
||||
p.sendEvent(event, connectionCount)
|
||||
p.status = newStatus
|
||||
p.connectionsCount = connectionCount
|
||||
p.Unlock()
|
||||
err := p.refreshSpaces(spaceIds)
|
||||
if errors.Is(err, ErrClosed) {
|
||||
return
|
||||
} else if err != nil {
|
||||
log.Errorf("refreshSpaces failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *p2pStatus) getResultStatus(connectionCount int64) (Status, pb.EventP2PStatusStatus) {
|
||||
func (p *p2pStatus) refreshSpaces(spaceIds []string) error {
|
||||
for _, spaceId := range spaceIds {
|
||||
select {
|
||||
case <-p.ctx.Done():
|
||||
return ErrClosed
|
||||
case p.refreshSpaceId <- spaceId:
|
||||
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateSpaceP2PStatus updates status for specific spaceId and sends event if status changed
|
||||
func (p *p2pStatus) processSpaceStatusUpdate(spaceId string) {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
var (
|
||||
newStatus Status
|
||||
event pb.EventP2PStatusStatus
|
||||
currentStatus *spaceStatus
|
||||
ok bool
|
||||
)
|
||||
if p.status == NotPossible && connectionCount == 0 {
|
||||
return NotPossible, pb.EventP2PStatus_NotPossible
|
||||
if currentStatus, ok = p.spaceIds[spaceId]; !ok {
|
||||
currentStatus = &spaceStatus{
|
||||
status: Unknown,
|
||||
connectionsCount: 0,
|
||||
}
|
||||
|
||||
p.spaceIds[spaceId] = currentStatus
|
||||
}
|
||||
connectionCount := p.countOpenConnections(spaceId)
|
||||
newStatus := p.getResultStatus(p.p2pNotPossible, connectionCount)
|
||||
|
||||
if currentStatus.status != newStatus || currentStatus.connectionsCount != connectionCount {
|
||||
p.sendEvent("", spaceId, newStatus.ToPb(), connectionCount)
|
||||
currentStatus.status = newStatus
|
||||
currentStatus.connectionsCount = connectionCount
|
||||
}
|
||||
}
|
||||
|
||||
func (p *p2pStatus) getResultStatus(notPossible bool, connectionCount int64) Status {
|
||||
if notPossible && connectionCount == 0 {
|
||||
return NotPossible
|
||||
}
|
||||
if connectionCount == 0 {
|
||||
event = pb.EventP2PStatus_NotConnected
|
||||
newStatus = NotConnected
|
||||
return NotConnected
|
||||
} else {
|
||||
event = pb.EventP2PStatus_Connected
|
||||
newStatus = Connected
|
||||
return Connected
|
||||
}
|
||||
return newStatus, event
|
||||
}
|
||||
func (p *p2pStatus) countOpenConnections() int64 {
|
||||
var connectionCount int64
|
||||
ctx, cancelFunc := context.WithTimeout(context.Background(), time.Second*20)
|
||||
defer cancelFunc()
|
||||
peerIds := p.peerStore.AllLocalPeers()
|
||||
for _, peerId := range peerIds {
|
||||
_, err := p.peersConnectionPool.Pick(ctx, peerId)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
connectionCount++
|
||||
}
|
||||
return connectionCount
|
||||
}
|
||||
|
||||
func (p *p2pStatus) sendStatus(status Status) {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
pbStatus := p.mapStatusToEvent(status)
|
||||
p.status = status
|
||||
p.sendEvent(pbStatus, p.connectionsCount)
|
||||
func (p *p2pStatus) countOpenConnections(spaceId string) int64 {
|
||||
peerIds := p.peerStore.LocalPeerIds(spaceId)
|
||||
return int64(len(peerIds))
|
||||
}
|
||||
|
||||
func (p *p2pStatus) mapStatusToEvent(status Status) pb.EventP2PStatusStatus {
|
||||
var pbStatus pb.EventP2PStatusStatus
|
||||
switch status {
|
||||
case Connected:
|
||||
pbStatus = pb.EventP2PStatus_Connected
|
||||
case NotConnected:
|
||||
pbStatus = pb.EventP2PStatus_NotConnected
|
||||
case NotPossible:
|
||||
pbStatus = pb.EventP2PStatus_NotPossible
|
||||
}
|
||||
return pbStatus
|
||||
}
|
||||
|
||||
func (p *p2pStatus) sendEvent(status pb.EventP2PStatusStatus, count int64) {
|
||||
for spaceId := range p.spaceIds {
|
||||
p.eventSender.Broadcast(&pb.Event{
|
||||
Messages: []*pb.EventMessage{
|
||||
{
|
||||
Value: &pb.EventMessageValueOfP2PStatusUpdate{
|
||||
P2PStatusUpdate: &pb.EventP2PStatusUpdate{
|
||||
SpaceId: spaceId,
|
||||
Status: status,
|
||||
DevicesCounter: count,
|
||||
},
|
||||
// sendEvent sends event to session with sessionToken or broadcast to all sessions if sessionToken is empty
|
||||
func (p *p2pStatus) sendEvent(sessionToken string, spaceId string, status pb.EventP2PStatusStatus, count int64) {
|
||||
event := &pb.Event{
|
||||
Messages: []*pb.EventMessage{
|
||||
{
|
||||
Value: &pb.EventMessageValueOfP2PStatusUpdate{
|
||||
P2PStatusUpdate: &pb.EventP2PStatusUpdate{
|
||||
SpaceId: spaceId,
|
||||
Status: status,
|
||||
DevicesCounter: count,
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (p *p2pStatus) resetNotPossible() {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
if p.status == NotPossible {
|
||||
p.status = NotConnected
|
||||
if sessionToken != "" {
|
||||
p.eventSender.SendToSession(sessionToken, event)
|
||||
return
|
||||
}
|
||||
p.eventSender.Broadcast(event)
|
||||
}
|
||||
|
|
|
@ -2,6 +2,7 @@ package peerstatus
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
|
@ -22,7 +23,7 @@ import (
|
|||
)
|
||||
|
||||
type fixture struct {
|
||||
PeerToPeerStatus
|
||||
*p2pStatus
|
||||
sender *mock_event.MockSender
|
||||
service *mock_nodeconf.MockService
|
||||
store peerstore.PeerStore
|
||||
|
@ -35,9 +36,6 @@ func TestP2PStatus_Init(t *testing.T) {
|
|||
// given
|
||||
f := newFixture(t, "spaceId", pb.EventP2PStatus_NotConnected, 1)
|
||||
|
||||
// when
|
||||
f.Run(nil)
|
||||
|
||||
// then
|
||||
f.Close(nil)
|
||||
})
|
||||
|
@ -47,7 +45,6 @@ func TestP2pStatus_SendNewStatus(t *testing.T) {
|
|||
t.Run("send NotPossible status", func(t *testing.T) {
|
||||
// given
|
||||
f := newFixture(t, "spaceId", pb.EventP2PStatus_NotConnected, 1)
|
||||
f.Run(nil)
|
||||
|
||||
// when
|
||||
f.sender.EXPECT().Broadcast(&pb.Event{
|
||||
|
@ -63,16 +60,34 @@ func TestP2pStatus_SendNewStatus(t *testing.T) {
|
|||
},
|
||||
},
|
||||
})
|
||||
f.SendNotPossibleStatus()
|
||||
|
||||
f.setNotPossibleStatus()
|
||||
|
||||
// then
|
||||
status := f.PeerToPeerStatus.(*p2pStatus)
|
||||
assert.NotNil(t, status)
|
||||
err := waitForStatus(status, NotPossible)
|
||||
|
||||
err := waitForStatus("spaceId", f.p2pStatus, NotPossible)
|
||||
assert.Nil(t, err)
|
||||
|
||||
f.CheckPeerStatus()
|
||||
err = waitForStatus(status, NotPossible)
|
||||
// when
|
||||
f.sender.EXPECT().Broadcast(&pb.Event{
|
||||
Messages: []*pb.EventMessage{
|
||||
{
|
||||
Value: &pb.EventMessageValueOfP2PStatusUpdate{
|
||||
P2PStatusUpdate: &pb.EventP2PStatusUpdate{
|
||||
SpaceId: "spaceId",
|
||||
Status: pb.EventP2PStatus_NotConnected,
|
||||
DevicesCounter: 0,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
f.resetNotPossibleStatus()
|
||||
|
||||
err = f.refreshSpaces([]string{"spaceId"})
|
||||
assert.Nil(t, err)
|
||||
|
||||
checkStatus(t, "spaceId", f.p2pStatus, NotConnected)
|
||||
|
||||
assert.Nil(t, err)
|
||||
f.Close(nil)
|
||||
|
@ -81,13 +96,10 @@ func TestP2pStatus_SendNewStatus(t *testing.T) {
|
|||
// given
|
||||
f := newFixture(t, "spaceId", pb.EventP2PStatus_NotConnected, 1)
|
||||
|
||||
// when
|
||||
f.Run(nil)
|
||||
|
||||
// then
|
||||
status := f.PeerToPeerStatus.(*p2pStatus)
|
||||
status := f.p2pStatus
|
||||
assert.NotNil(t, status)
|
||||
err := waitForStatus(status, NotConnected)
|
||||
err := waitForStatus("spaceId", status, NotConnected)
|
||||
assert.Nil(t, err)
|
||||
f.Close(nil)
|
||||
})
|
||||
|
@ -104,8 +116,6 @@ func TestP2pStatus_SendPeerUpdate(t *testing.T) {
|
|||
err := f.pool.AddPeer(context.Background(), peer)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// when
|
||||
f.Run(nil)
|
||||
f.sender.EXPECT().Broadcast(&pb.Event{
|
||||
Messages: []*pb.EventMessage{
|
||||
{
|
||||
|
@ -119,19 +129,111 @@ func TestP2pStatus_SendPeerUpdate(t *testing.T) {
|
|||
},
|
||||
},
|
||||
})
|
||||
f.CheckPeerStatus()
|
||||
|
||||
// then
|
||||
f.Close(nil)
|
||||
|
||||
status := f.PeerToPeerStatus.(*p2pStatus)
|
||||
assert.NotNil(t, status)
|
||||
err = waitForStatus(status, Connected)
|
||||
assert.Nil(t, err)
|
||||
checkStatus(t, "spaceId", f.p2pStatus, Connected)
|
||||
// should not create a problem, cause we already closed
|
||||
f.store.RemoveLocalPeer("peerId")
|
||||
|
||||
})
|
||||
t.Run("send NotConnected status, because we have peer were disconnected", func(t *testing.T) {
|
||||
t.Run("send NotConnected status, because we have peer and then were disconnected", func(t *testing.T) {
|
||||
// given
|
||||
f := newFixture(t, "spaceId", pb.EventP2PStatus_Connected, 1)
|
||||
ctrl := gomock.NewController(t)
|
||||
peer := mock_peer.NewMockPeer(ctrl)
|
||||
peer.EXPECT().Id().Return("peerId")
|
||||
f.sender.EXPECT().Broadcast(&pb.Event{
|
||||
Messages: []*pb.EventMessage{
|
||||
{
|
||||
Value: &pb.EventMessageValueOfP2PStatusUpdate{
|
||||
P2PStatusUpdate: &pb.EventP2PStatusUpdate{
|
||||
SpaceId: "spaceId",
|
||||
Status: pb.EventP2PStatus_Connected,
|
||||
DevicesCounter: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
err := f.pool.AddPeer(context.Background(), peer)
|
||||
assert.Nil(t, err)
|
||||
f.store.UpdateLocalPeer("peerId", []string{"spaceId"})
|
||||
checkStatus(t, "spaceId", f.p2pStatus, Connected)
|
||||
|
||||
f.sender.EXPECT().Broadcast(&pb.Event{
|
||||
Messages: []*pb.EventMessage{
|
||||
{
|
||||
Value: &pb.EventMessageValueOfP2PStatusUpdate{
|
||||
P2PStatusUpdate: &pb.EventP2PStatusUpdate{
|
||||
SpaceId: "spaceId",
|
||||
Status: pb.EventP2PStatus_NotConnected,
|
||||
DevicesCounter: 0,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
f.store.RemoveLocalPeer("peerId")
|
||||
|
||||
checkStatus(t, "spaceId", f.p2pStatus, NotConnected)
|
||||
|
||||
// then
|
||||
f.Close(nil)
|
||||
assert.Nil(t, err)
|
||||
})
|
||||
t.Run("connection was not possible, but after a while starts working", func(t *testing.T) {
|
||||
// given
|
||||
f := newFixture(t, "spaceId", pb.EventP2PStatus_NotConnected, 1)
|
||||
|
||||
// when
|
||||
f.sender.EXPECT().Broadcast(&pb.Event{
|
||||
Messages: []*pb.EventMessage{
|
||||
{
|
||||
Value: &pb.EventMessageValueOfP2PStatusUpdate{
|
||||
P2PStatusUpdate: &pb.EventP2PStatusUpdate{
|
||||
SpaceId: "spaceId",
|
||||
Status: pb.EventP2PStatus_NotPossible,
|
||||
DevicesCounter: 0,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
f.setNotPossibleStatus()
|
||||
checkStatus(t, "spaceId", f.p2pStatus, NotPossible)
|
||||
|
||||
f.store.UpdateLocalPeer("peerId", []string{"spaceId"})
|
||||
ctrl := gomock.NewController(t)
|
||||
peer := mock_peer.NewMockPeer(ctrl)
|
||||
peer.EXPECT().Id().Return("peerId")
|
||||
f.sender.EXPECT().Broadcast(&pb.Event{
|
||||
Messages: []*pb.EventMessage{
|
||||
{
|
||||
Value: &pb.EventMessageValueOfP2PStatusUpdate{
|
||||
P2PStatusUpdate: &pb.EventP2PStatusUpdate{
|
||||
SpaceId: "spaceId",
|
||||
Status: pb.EventP2PStatus_Connected,
|
||||
DevicesCounter: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
err := f.pool.AddPeer(context.Background(), peer)
|
||||
assert.Nil(t, err)
|
||||
|
||||
checkStatus(t, "spaceId", f.p2pStatus, Connected)
|
||||
// then
|
||||
f.Close(nil)
|
||||
})
|
||||
t.Run("no peers were connected, but after a while one is connected", func(t *testing.T) {
|
||||
// given
|
||||
f := newFixture(t, "spaceId", pb.EventP2PStatus_NotConnected, 1)
|
||||
|
||||
// when
|
||||
checkStatus(t, "spaceId", f.p2pStatus, NotConnected)
|
||||
|
||||
f.store.UpdateLocalPeer("peerId", []string{"spaceId"})
|
||||
ctrl := gomock.NewController(t)
|
||||
peer := mock_peer.NewMockPeer(ctrl)
|
||||
|
@ -139,8 +241,6 @@ func TestP2pStatus_SendPeerUpdate(t *testing.T) {
|
|||
err := f.pool.AddPeer(context.Background(), peer)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// when
|
||||
f.Run(nil)
|
||||
f.sender.EXPECT().Broadcast(&pb.Event{
|
||||
Messages: []*pb.EventMessage{
|
||||
{
|
||||
|
@ -154,139 +254,16 @@ func TestP2pStatus_SendPeerUpdate(t *testing.T) {
|
|||
},
|
||||
},
|
||||
})
|
||||
err = waitForStatus(f.PeerToPeerStatus.(*p2pStatus), Connected)
|
||||
assert.Nil(t, err)
|
||||
|
||||
f.store.RemoveLocalPeer("peerId")
|
||||
f.sender.EXPECT().Broadcast(&pb.Event{
|
||||
Messages: []*pb.EventMessage{
|
||||
{
|
||||
Value: &pb.EventMessageValueOfP2PStatusUpdate{
|
||||
P2PStatusUpdate: &pb.EventP2PStatusUpdate{
|
||||
SpaceId: "spaceId",
|
||||
Status: pb.EventP2PStatus_NotConnected,
|
||||
DevicesCounter: 0,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
f.CheckPeerStatus()
|
||||
err = waitForStatus(f.PeerToPeerStatus.(*p2pStatus), NotConnected)
|
||||
assert.Nil(t, err)
|
||||
checkStatus(t, "spaceId", f.p2pStatus, Connected)
|
||||
|
||||
// then
|
||||
f.Close(nil)
|
||||
assert.Nil(t, err)
|
||||
|
||||
status := f.PeerToPeerStatus.(*p2pStatus)
|
||||
assert.NotNil(t, status)
|
||||
err = waitForStatus(status, NotConnected)
|
||||
})
|
||||
t.Run("connection was not possible, but after a while starts working", func(t *testing.T) {
|
||||
// given
|
||||
f := newFixture(t, "spaceId", pb.EventP2PStatus_NotConnected, 1)
|
||||
|
||||
// when
|
||||
f.Run(nil)
|
||||
|
||||
f.sender.EXPECT().Broadcast(&pb.Event{
|
||||
Messages: []*pb.EventMessage{
|
||||
{
|
||||
Value: &pb.EventMessageValueOfP2PStatusUpdate{
|
||||
P2PStatusUpdate: &pb.EventP2PStatusUpdate{
|
||||
SpaceId: "spaceId",
|
||||
Status: pb.EventP2PStatus_NotPossible,
|
||||
DevicesCounter: 0,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
f.SendNotPossibleStatus()
|
||||
err := waitForStatus(f.PeerToPeerStatus.(*p2pStatus), NotPossible)
|
||||
assert.Nil(t, err)
|
||||
|
||||
f.store.UpdateLocalPeer("peerId", []string{"spaceId"})
|
||||
ctrl := gomock.NewController(t)
|
||||
peer := mock_peer.NewMockPeer(ctrl)
|
||||
peer.EXPECT().Id().Return("peerId")
|
||||
err = f.pool.AddPeer(context.Background(), peer)
|
||||
assert.Nil(t, err)
|
||||
|
||||
f.sender.EXPECT().Broadcast(&pb.Event{
|
||||
Messages: []*pb.EventMessage{
|
||||
{
|
||||
Value: &pb.EventMessageValueOfP2PStatusUpdate{
|
||||
P2PStatusUpdate: &pb.EventP2PStatusUpdate{
|
||||
SpaceId: "spaceId",
|
||||
Status: pb.EventP2PStatus_Connected,
|
||||
DevicesCounter: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
f.CheckPeerStatus()
|
||||
err = waitForStatus(f.PeerToPeerStatus.(*p2pStatus), Connected)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// then
|
||||
f.Close(nil)
|
||||
assert.Nil(t, err)
|
||||
|
||||
status := f.PeerToPeerStatus.(*p2pStatus)
|
||||
assert.NotNil(t, status)
|
||||
checkStatus(t, status, Connected)
|
||||
})
|
||||
t.Run("no peers were connected, but after a while one is connected", func(t *testing.T) {
|
||||
// given
|
||||
f := newFixture(t, "spaceId", pb.EventP2PStatus_NotConnected, 1)
|
||||
|
||||
// when
|
||||
f.Run(nil)
|
||||
|
||||
err := waitForStatus(f.PeerToPeerStatus.(*p2pStatus), NotConnected)
|
||||
|
||||
f.store.UpdateLocalPeer("peerId", []string{"spaceId"})
|
||||
ctrl := gomock.NewController(t)
|
||||
peer := mock_peer.NewMockPeer(ctrl)
|
||||
peer.EXPECT().Id().Return("peerId")
|
||||
err = f.pool.AddPeer(context.Background(), peer)
|
||||
assert.Nil(t, err)
|
||||
|
||||
f.sender.EXPECT().Broadcast(&pb.Event{
|
||||
Messages: []*pb.EventMessage{
|
||||
{
|
||||
Value: &pb.EventMessageValueOfP2PStatusUpdate{
|
||||
P2PStatusUpdate: &pb.EventP2PStatusUpdate{
|
||||
SpaceId: "spaceId",
|
||||
Status: pb.EventP2PStatus_Connected,
|
||||
DevicesCounter: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
f.CheckPeerStatus()
|
||||
err = waitForStatus(f.PeerToPeerStatus.(*p2pStatus), Connected)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// then
|
||||
f.Close(nil)
|
||||
assert.Nil(t, err)
|
||||
|
||||
status := f.PeerToPeerStatus.(*p2pStatus)
|
||||
assert.NotNil(t, status)
|
||||
checkStatus(t, status, Connected)
|
||||
})
|
||||
t.Run("reset not possible status", func(t *testing.T) {
|
||||
// given
|
||||
f := newFixture(t, "spaceId", pb.EventP2PStatus_NotConnected, 1)
|
||||
|
||||
// when
|
||||
f.Run(nil)
|
||||
|
||||
f.sender.EXPECT().Broadcast(&pb.Event{
|
||||
Messages: []*pb.EventMessage{
|
||||
{
|
||||
|
@ -300,10 +277,13 @@ func TestP2pStatus_SendPeerUpdate(t *testing.T) {
|
|||
},
|
||||
},
|
||||
})
|
||||
f.SendNotPossibleStatus()
|
||||
status := f.PeerToPeerStatus.(*p2pStatus)
|
||||
assert.NotNil(t, status)
|
||||
err := waitForStatus(status, NotPossible)
|
||||
f.setNotPossibleStatus()
|
||||
checkStatus(t, "spaceId", f.p2pStatus, NotPossible)
|
||||
|
||||
// double set should not generate new event
|
||||
f.setNotPossibleStatus()
|
||||
checkStatus(t, "spaceId", f.p2pStatus, NotPossible)
|
||||
|
||||
f.sender.EXPECT().Broadcast(&pb.Event{
|
||||
Messages: []*pb.EventMessage{
|
||||
{
|
||||
|
@ -317,35 +297,72 @@ func TestP2pStatus_SendPeerUpdate(t *testing.T) {
|
|||
},
|
||||
},
|
||||
})
|
||||
f.ResetNotPossibleStatus()
|
||||
err = waitForStatus(status, NotConnected)
|
||||
assert.Nil(t, err)
|
||||
|
||||
f.resetNotPossibleStatus()
|
||||
checkStatus(t, "spaceId", f.p2pStatus, NotConnected)
|
||||
// then
|
||||
f.Close(nil)
|
||||
assert.Nil(t, err)
|
||||
checkStatus(t, status, NotConnected)
|
||||
})
|
||||
t.Run("don't reset not possible status, because status != NotPossible", func(t *testing.T) {
|
||||
// given
|
||||
f := newFixture(t, "spaceId", pb.EventP2PStatus_NotConnected, 1)
|
||||
|
||||
// when
|
||||
f.Run(nil)
|
||||
|
||||
status := f.PeerToPeerStatus.(*p2pStatus)
|
||||
|
||||
err := waitForStatus(status, NotConnected)
|
||||
f.ResetNotPossibleStatus()
|
||||
err = waitForStatus(status, NotConnected)
|
||||
checkStatus(t, "spaceId", f.p2pStatus, NotConnected)
|
||||
|
||||
f.resetNotPossibleStatus()
|
||||
checkStatus(t, "spaceId", f.p2pStatus, NotConnected)
|
||||
// then
|
||||
f.Close(nil)
|
||||
assert.Nil(t, err)
|
||||
checkStatus(t, status, NotConnected)
|
||||
checkStatus(t, "spaceId", f.p2pStatus, NotConnected)
|
||||
})
|
||||
}
|
||||
|
||||
func TestP2pStatus_SendToNewSession(t *testing.T) {
|
||||
t.Run("send event only to new session", func(t *testing.T) {
|
||||
// given
|
||||
f := newFixture(t, "spaceId", pb.EventP2PStatus_Connected, 1)
|
||||
ctrl := gomock.NewController(t)
|
||||
peer := mock_peer.NewMockPeer(ctrl)
|
||||
peer.EXPECT().Id().Return("peerId")
|
||||
f.sender.EXPECT().Broadcast(&pb.Event{
|
||||
Messages: []*pb.EventMessage{
|
||||
{
|
||||
Value: &pb.EventMessageValueOfP2PStatusUpdate{
|
||||
P2PStatusUpdate: &pb.EventP2PStatusUpdate{
|
||||
SpaceId: "spaceId",
|
||||
Status: pb.EventP2PStatus_Connected,
|
||||
DevicesCounter: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
err := f.pool.AddPeer(context.Background(), peer)
|
||||
assert.Nil(t, err)
|
||||
f.store.UpdateLocalPeer("peerId", []string{"spaceId"})
|
||||
checkStatus(t, "spaceId", f.p2pStatus, Connected)
|
||||
|
||||
f.sender.EXPECT().SendToSession("token1", &pb.Event{
|
||||
Messages: []*pb.EventMessage{
|
||||
{
|
||||
Value: &pb.EventMessageValueOfP2PStatusUpdate{
|
||||
P2PStatusUpdate: &pb.EventP2PStatusUpdate{
|
||||
SpaceId: "spaceId",
|
||||
Status: pb.EventP2PStatus_Connected,
|
||||
DevicesCounter: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
err = f.sendStatusForNewSession(session.NewContext(session.WithSession("token1")))
|
||||
assert.Nil(t, err)
|
||||
|
||||
// then
|
||||
f.Close(nil)
|
||||
})
|
||||
}
|
||||
func TestP2pStatus_UnregisterSpace(t *testing.T) {
|
||||
t.Run("success", func(t *testing.T) {
|
||||
// given
|
||||
|
@ -356,7 +373,7 @@ func TestP2pStatus_UnregisterSpace(t *testing.T) {
|
|||
|
||||
// then
|
||||
|
||||
status := f.PeerToPeerStatus.(*p2pStatus)
|
||||
status := f.p2pStatus
|
||||
assert.Len(t, status.spaceIds, 0)
|
||||
})
|
||||
t.Run("delete non existing space", func(t *testing.T) {
|
||||
|
@ -367,7 +384,7 @@ func TestP2pStatus_UnregisterSpace(t *testing.T) {
|
|||
f.UnregisterSpace("spaceId1")
|
||||
|
||||
// then
|
||||
status := f.PeerToPeerStatus.(*p2pStatus)
|
||||
status := f.p2pStatus
|
||||
assert.Len(t, status.spaceIds, 1)
|
||||
})
|
||||
}
|
||||
|
@ -423,40 +440,54 @@ func newFixture(t *testing.T, spaceId string, initialStatus pb.EventP2PStatusSta
|
|||
},
|
||||
},
|
||||
}).Maybe()
|
||||
status.RegisterSpace(spaceId)
|
||||
|
||||
err = status.Run(context.Background())
|
||||
assert.Nil(t, err)
|
||||
|
||||
status.RegisterSpace(spaceId)
|
||||
|
||||
f := &fixture{
|
||||
PeerToPeerStatus: status,
|
||||
sender: sender,
|
||||
service: service,
|
||||
store: store,
|
||||
pool: pool,
|
||||
hookRegister: hookRegister,
|
||||
p2pStatus: status.(*p2pStatus),
|
||||
sender: sender,
|
||||
service: service,
|
||||
store: store,
|
||||
pool: pool,
|
||||
hookRegister: hookRegister,
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
func waitForStatus(statusSender *p2pStatus, expectedStatus Status) error {
|
||||
func waitForStatus(spaceId string, statusSender *p2pStatus, expectedStatus Status) error {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
|
||||
defer cancel()
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
default:
|
||||
case <-time.After(time.Millisecond * 10):
|
||||
statusSender.Lock()
|
||||
if statusSender.status == expectedStatus {
|
||||
if status, ok := statusSender.spaceIds[spaceId]; !ok {
|
||||
statusSender.Unlock()
|
||||
return nil
|
||||
return fmt.Errorf("spaceId %s not found", spaceId)
|
||||
} else {
|
||||
if status.status == expectedStatus {
|
||||
statusSender.Unlock()
|
||||
return nil
|
||||
}
|
||||
}
|
||||
statusSender.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func checkStatus(t *testing.T, statusSender *p2pStatus, expectedStatus Status) {
|
||||
func checkStatus(t *testing.T, spaceId string, statusSender *p2pStatus, expectedStatus Status) {
|
||||
time.Sleep(time.Millisecond * 300)
|
||||
statusSender.Lock()
|
||||
defer statusSender.Unlock()
|
||||
assert.Equal(t, expectedStatus, statusSender.status)
|
||||
if status, ok := statusSender.spaceIds[spaceId]; !ok {
|
||||
assert.Fail(t, "spaceId %s not found", spaceId)
|
||||
} else {
|
||||
assert.Equal(t, expectedStatus, status.status)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -350,7 +350,7 @@ func (ctx *opCtx) reset() {
|
|||
ctx.groups = ctx.groups[:0]
|
||||
if ctx.outputs == nil {
|
||||
ctx.outputs = map[string][]*pb.EventMessage{
|
||||
defaultOutput: make([]*pb.EventMessage, 0, 10),
|
||||
defaultOutput: nil,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
75
core/subscription/fixture.go
Normal file
75
core/subscription/fixture.go
Normal file
|
@ -0,0 +1,75 @@
|
|||
package subscription
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/anyproto/any-sync/app"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/anyproto/anytype-heart/core/event/mock_event"
|
||||
"github.com/anyproto/anytype-heart/core/kanban"
|
||||
"github.com/anyproto/anytype-heart/pkg/lib/localstore/objectstore"
|
||||
"github.com/anyproto/anytype-heart/tests/testutil"
|
||||
)
|
||||
|
||||
type InternalTestService struct {
|
||||
Service
|
||||
*objectstore.StoreFixture
|
||||
}
|
||||
|
||||
func (s *InternalTestService) Init(a *app.App) error {
|
||||
return s.Service.Init(a)
|
||||
}
|
||||
|
||||
func (s *InternalTestService) Run(ctx context.Context) error {
|
||||
err := s.StoreFixture.Run(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return s.Service.Run(ctx)
|
||||
}
|
||||
|
||||
func (s *InternalTestService) Close(ctx context.Context) (err error) {
|
||||
_ = s.Service.Close(ctx)
|
||||
return s.StoreFixture.Close(ctx)
|
||||
}
|
||||
|
||||
func NewInternalTestService(t *testing.T) *InternalTestService {
|
||||
s := New()
|
||||
ctx := context.Background()
|
||||
|
||||
objectStore := objectstore.NewStoreFixture(t)
|
||||
|
||||
a := &app.App{}
|
||||
a.Register(objectStore)
|
||||
a.Register(kanban.New())
|
||||
a.Register(&collectionServiceMock{MockCollectionService: NewMockCollectionService(t)})
|
||||
a.Register(testutil.PrepareMock(ctx, a, mock_event.NewMockSender(t)))
|
||||
a.Register(s)
|
||||
err := a.Start(ctx)
|
||||
require.NoError(t, err)
|
||||
return &InternalTestService{Service: s, StoreFixture: objectStore}
|
||||
}
|
||||
|
||||
func RegisterSubscriptionService(t *testing.T, a *app.App) *InternalTestService {
|
||||
s := New()
|
||||
ctx := context.Background()
|
||||
objectStore := objectstore.NewStoreFixture(t)
|
||||
a.Register(objectStore).
|
||||
Register(kanban.New()).
|
||||
Register(&collectionServiceMock{MockCollectionService: NewMockCollectionService(t)}).
|
||||
Register(testutil.PrepareMock(ctx, a, mock_event.NewMockSender(t))).
|
||||
Register(s)
|
||||
return &InternalTestService{Service: s, StoreFixture: objectStore}
|
||||
}
|
||||
|
||||
type collectionServiceMock struct {
|
||||
*MockCollectionService
|
||||
}
|
||||
|
||||
func (c *collectionServiceMock) Name() string {
|
||||
return "collectionService"
|
||||
}
|
||||
|
||||
func (c *collectionServiceMock) Init(a *app.App) error { return nil }
|
|
@ -23,16 +23,6 @@ import (
|
|||
"github.com/anyproto/anytype-heart/util/testMock/mockKanban"
|
||||
)
|
||||
|
||||
type collectionServiceMock struct {
|
||||
*MockCollectionService
|
||||
}
|
||||
|
||||
func (c *collectionServiceMock) Name() string {
|
||||
return "collectionService"
|
||||
}
|
||||
|
||||
func (c *collectionServiceMock) Init(a *app.App) error { return nil }
|
||||
|
||||
type fixture struct {
|
||||
Service
|
||||
a *app.App
|
||||
|
|
|
@ -26,7 +26,7 @@ func wrapToEventMessages(vals []pb.IsEventMessageValue) []*pb.EventMessage {
|
|||
}
|
||||
|
||||
func TestInternalSubscriptionSingle(t *testing.T) {
|
||||
fx := newFixtureWithRealObjectStore(t)
|
||||
fx := NewInternalTestService(t)
|
||||
resp, err := fx.Search(SubscribeRequest{
|
||||
SubId: "test",
|
||||
Filters: []*model.BlockContentDataviewFilter{
|
||||
|
@ -44,7 +44,7 @@ func TestInternalSubscriptionSingle(t *testing.T) {
|
|||
require.Empty(t, resp.Records)
|
||||
|
||||
t.Run("amend details not related to filter", func(t *testing.T) {
|
||||
fx.store.AddObjects(t, []objectstore.TestObject{
|
||||
fx.AddObjects(t, []objectstore.TestObject{
|
||||
{
|
||||
bundle.RelationKeyId: pbtypes.String("id1"),
|
||||
bundle.RelationKeyName: pbtypes.String("task1"),
|
||||
|
@ -53,7 +53,7 @@ func TestInternalSubscriptionSingle(t *testing.T) {
|
|||
},
|
||||
})
|
||||
time.Sleep(batchTime)
|
||||
fx.store.AddObjects(t, []objectstore.TestObject{
|
||||
fx.AddObjects(t, []objectstore.TestObject{
|
||||
{
|
||||
bundle.RelationKeyId: pbtypes.String("id1"),
|
||||
bundle.RelationKeyName: pbtypes.String("task1 renamed"),
|
||||
|
@ -74,7 +74,7 @@ func TestInternalSubscriptionSingle(t *testing.T) {
|
|||
})
|
||||
|
||||
t.Run("amend details related to filter -- remove from subscription", func(t *testing.T) {
|
||||
fx.store.AddObjects(t, []objectstore.TestObject{
|
||||
fx.AddObjects(t, []objectstore.TestObject{
|
||||
{
|
||||
bundle.RelationKeyId: pbtypes.String("id2"),
|
||||
bundle.RelationKeyName: pbtypes.String("task2"),
|
||||
|
@ -83,7 +83,7 @@ func TestInternalSubscriptionSingle(t *testing.T) {
|
|||
})
|
||||
time.Sleep(batchTime)
|
||||
|
||||
fx.store.AddObjects(t, []objectstore.TestObject{
|
||||
fx.AddObjects(t, []objectstore.TestObject{
|
||||
{
|
||||
bundle.RelationKeyId: pbtypes.String("id2"),
|
||||
bundle.RelationKeyName: pbtypes.String("task2"),
|
||||
|
@ -112,7 +112,7 @@ func TestInternalSubscriptionSingle(t *testing.T) {
|
|||
|
||||
t.Run("try to add after close", func(t *testing.T) {
|
||||
time.Sleep(batchTime)
|
||||
fx.store.AddObjects(t, []objectstore.TestObject{
|
||||
fx.AddObjects(t, []objectstore.TestObject{
|
||||
{
|
||||
bundle.RelationKeyId: pbtypes.String("id3"),
|
||||
bundle.RelationKeyName: pbtypes.String("task2"),
|
||||
|
|
|
@ -125,11 +125,11 @@ func (s *service) Init(a *app.App) (err error) {
|
|||
s.ds = newDependencyService(s)
|
||||
s.subscriptions = make(map[string]subscription)
|
||||
s.customOutput = map[string]*mb2.MB[*pb.EventMessage]{}
|
||||
s.objectStore = a.MustComponent(objectstore.CName).(objectstore.ObjectStore)
|
||||
s.kanban = a.MustComponent(kanban.CName).(kanban.Service)
|
||||
s.objectStore = app.MustComponent[objectstore.ObjectStore](a)
|
||||
s.kanban = app.MustComponent[kanban.Service](a)
|
||||
s.recBatch = mb.New(0)
|
||||
s.collectionService = app.MustComponent[CollectionService](a)
|
||||
s.eventSender = a.MustComponent(event.CName).(event.Sender)
|
||||
s.eventSender = app.MustComponent[event.Sender](a)
|
||||
s.ctxBuf = &opCtx{c: s.cache}
|
||||
s.initDebugger()
|
||||
return
|
||||
|
@ -574,18 +574,18 @@ func (s *service) onChange(entries []*entry) time.Duration {
|
|||
handleTime := time.Since(st)
|
||||
|
||||
// Reset output buffer
|
||||
for subId, msgs := range s.ctxBuf.outputs {
|
||||
for subId := range s.ctxBuf.outputs {
|
||||
if subId == defaultOutput {
|
||||
s.ctxBuf.outputs[subId] = msgs[:0]
|
||||
s.ctxBuf.outputs[subId] = nil
|
||||
} else if _, ok := s.customOutput[subId]; ok {
|
||||
s.ctxBuf.outputs[subId] = msgs[:0]
|
||||
s.ctxBuf.outputs[subId] = nil
|
||||
} else {
|
||||
delete(s.ctxBuf.outputs, subId)
|
||||
}
|
||||
}
|
||||
for subId := range s.customOutput {
|
||||
if _, ok := s.ctxBuf.outputs[subId]; !ok {
|
||||
s.ctxBuf.outputs[subId] = make([]*pb.EventMessage, 0, 10)
|
||||
s.ctxBuf.outputs[subId] = nil
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -5,8 +5,6 @@ package mock_detailsupdater
|
|||
import (
|
||||
app "github.com/anyproto/any-sync/app"
|
||||
|
||||
domain "github.com/anyproto/anytype-heart/core/domain"
|
||||
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
)
|
||||
|
||||
|
@ -114,35 +112,69 @@ func (_c *MockSpaceStatusUpdater_Name_Call) RunAndReturn(run func() string) *Moc
|
|||
return _c
|
||||
}
|
||||
|
||||
// SendUpdate provides a mock function with given fields: status
|
||||
func (_m *MockSpaceStatusUpdater) SendUpdate(status *domain.SpaceSync) {
|
||||
_m.Called(status)
|
||||
// Refresh provides a mock function with given fields: spaceId
|
||||
func (_m *MockSpaceStatusUpdater) Refresh(spaceId string) {
|
||||
_m.Called(spaceId)
|
||||
}
|
||||
|
||||
// MockSpaceStatusUpdater_SendUpdate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SendUpdate'
|
||||
type MockSpaceStatusUpdater_SendUpdate_Call struct {
|
||||
// MockSpaceStatusUpdater_Refresh_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Refresh'
|
||||
type MockSpaceStatusUpdater_Refresh_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// SendUpdate is a helper method to define mock.On call
|
||||
// - status *domain.SpaceSync
|
||||
func (_e *MockSpaceStatusUpdater_Expecter) SendUpdate(status interface{}) *MockSpaceStatusUpdater_SendUpdate_Call {
|
||||
return &MockSpaceStatusUpdater_SendUpdate_Call{Call: _e.mock.On("SendUpdate", status)}
|
||||
// Refresh is a helper method to define mock.On call
|
||||
// - spaceId string
|
||||
func (_e *MockSpaceStatusUpdater_Expecter) Refresh(spaceId interface{}) *MockSpaceStatusUpdater_Refresh_Call {
|
||||
return &MockSpaceStatusUpdater_Refresh_Call{Call: _e.mock.On("Refresh", spaceId)}
|
||||
}
|
||||
|
||||
func (_c *MockSpaceStatusUpdater_SendUpdate_Call) Run(run func(status *domain.SpaceSync)) *MockSpaceStatusUpdater_SendUpdate_Call {
|
||||
func (_c *MockSpaceStatusUpdater_Refresh_Call) Run(run func(spaceId string)) *MockSpaceStatusUpdater_Refresh_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(*domain.SpaceSync))
|
||||
run(args[0].(string))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockSpaceStatusUpdater_SendUpdate_Call) Return() *MockSpaceStatusUpdater_SendUpdate_Call {
|
||||
func (_c *MockSpaceStatusUpdater_Refresh_Call) Return() *MockSpaceStatusUpdater_Refresh_Call {
|
||||
_c.Call.Return()
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockSpaceStatusUpdater_SendUpdate_Call) RunAndReturn(run func(*domain.SpaceSync)) *MockSpaceStatusUpdater_SendUpdate_Call {
|
||||
func (_c *MockSpaceStatusUpdater_Refresh_Call) RunAndReturn(run func(string)) *MockSpaceStatusUpdater_Refresh_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// UpdateMissingIds provides a mock function with given fields: spaceId, ids
|
||||
func (_m *MockSpaceStatusUpdater) UpdateMissingIds(spaceId string, ids []string) {
|
||||
_m.Called(spaceId, ids)
|
||||
}
|
||||
|
||||
// MockSpaceStatusUpdater_UpdateMissingIds_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateMissingIds'
|
||||
type MockSpaceStatusUpdater_UpdateMissingIds_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// UpdateMissingIds is a helper method to define mock.On call
|
||||
// - spaceId string
|
||||
// - ids []string
|
||||
func (_e *MockSpaceStatusUpdater_Expecter) UpdateMissingIds(spaceId interface{}, ids interface{}) *MockSpaceStatusUpdater_UpdateMissingIds_Call {
|
||||
return &MockSpaceStatusUpdater_UpdateMissingIds_Call{Call: _e.mock.On("UpdateMissingIds", spaceId, ids)}
|
||||
}
|
||||
|
||||
func (_c *MockSpaceStatusUpdater_UpdateMissingIds_Call) Run(run func(spaceId string, ids []string)) *MockSpaceStatusUpdater_UpdateMissingIds_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(string), args[1].([]string))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockSpaceStatusUpdater_UpdateMissingIds_Call) Return() *MockSpaceStatusUpdater_UpdateMissingIds_Call {
|
||||
_c.Call.Return()
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockSpaceStatusUpdater_UpdateMissingIds_Call) RunAndReturn(run func(string, []string)) *MockSpaceStatusUpdater_UpdateMissingIds_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
|
|
@ -12,18 +12,18 @@ import (
|
|||
"github.com/cheggaaa/mb/v3"
|
||||
"github.com/gogo/protobuf/types"
|
||||
|
||||
"github.com/anyproto/anytype-heart/core/block/editor/basic"
|
||||
"github.com/anyproto/anytype-heart/core/block/editor/smartblock"
|
||||
"github.com/anyproto/anytype-heart/core/domain"
|
||||
"github.com/anyproto/anytype-heart/core/syncstatus/detailsupdater/helper"
|
||||
"github.com/anyproto/anytype-heart/core/syncstatus/filesyncstatus"
|
||||
"github.com/anyproto/anytype-heart/core/syncstatus/syncsubscriptions"
|
||||
"github.com/anyproto/anytype-heart/pkg/lib/bundle"
|
||||
"github.com/anyproto/anytype-heart/pkg/lib/database"
|
||||
"github.com/anyproto/anytype-heart/pkg/lib/localstore/objectstore"
|
||||
"github.com/anyproto/anytype-heart/pkg/lib/logging"
|
||||
"github.com/anyproto/anytype-heart/pkg/lib/pb/model"
|
||||
"github.com/anyproto/anytype-heart/space"
|
||||
"github.com/anyproto/anytype-heart/util/pbtypes"
|
||||
"github.com/anyproto/anytype-heart/util/slice"
|
||||
)
|
||||
|
||||
var log = logging.Logger(CName)
|
||||
|
@ -31,29 +31,31 @@ var log = logging.Logger(CName)
|
|||
const CName = "core.syncstatus.objectsyncstatus.updater"
|
||||
|
||||
type syncStatusDetails struct {
|
||||
objectIds []string
|
||||
status domain.ObjectSyncStatus
|
||||
syncError domain.SyncError
|
||||
spaceId string
|
||||
objectId string
|
||||
status domain.ObjectSyncStatus
|
||||
spaceId string
|
||||
}
|
||||
|
||||
type Updater interface {
|
||||
app.ComponentRunnable
|
||||
UpdateDetails(objectId []string, status domain.ObjectSyncStatus, syncError domain.SyncError, spaceId string)
|
||||
UpdateSpaceDetails(existing, missing []string, spaceId string)
|
||||
UpdateDetails(objectId string, status domain.ObjectSyncStatus, spaceId string)
|
||||
}
|
||||
|
||||
type SpaceStatusUpdater interface {
|
||||
app.Component
|
||||
SendUpdate(status *domain.SpaceSync)
|
||||
Refresh(spaceId string)
|
||||
UpdateMissingIds(spaceId string, ids []string)
|
||||
}
|
||||
|
||||
type syncStatusUpdater struct {
|
||||
objectStore objectstore.ObjectStore
|
||||
ctx context.Context
|
||||
ctxCancel context.CancelFunc
|
||||
batcher *mb.MB[*syncStatusDetails]
|
||||
spaceService space.Service
|
||||
spaceSyncStatus SpaceStatusUpdater
|
||||
objectStore objectstore.ObjectStore
|
||||
ctx context.Context
|
||||
ctxCancel context.CancelFunc
|
||||
batcher *mb.MB[string]
|
||||
spaceService space.Service
|
||||
spaceSyncStatus SpaceStatusUpdater
|
||||
syncSubscriptions syncsubscriptions.SyncSubscriptions
|
||||
|
||||
entries map[string]*syncStatusDetails
|
||||
mx sync.Mutex
|
||||
|
@ -61,9 +63,9 @@ type syncStatusUpdater struct {
|
|||
finish chan struct{}
|
||||
}
|
||||
|
||||
func NewUpdater() Updater {
|
||||
func New() Updater {
|
||||
return &syncStatusUpdater{
|
||||
batcher: mb.New[*syncStatusDetails](0),
|
||||
batcher: mb.New[string](0),
|
||||
finish: make(chan struct{}),
|
||||
entries: make(map[string]*syncStatusDetails, 0),
|
||||
}
|
||||
|
@ -87,6 +89,7 @@ func (u *syncStatusUpdater) Init(a *app.App) (err error) {
|
|||
u.objectStore = app.MustComponent[objectstore.ObjectStore](a)
|
||||
u.spaceService = app.MustComponent[space.Service](a)
|
||||
u.spaceSyncStatus = app.MustComponent[SpaceStatusUpdater](a)
|
||||
u.syncSubscriptions = app.MustComponent[syncsubscriptions.SyncSubscriptions](a)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -94,94 +97,121 @@ func (u *syncStatusUpdater) Name() (name string) {
|
|||
return CName
|
||||
}
|
||||
|
||||
func (u *syncStatusUpdater) UpdateDetails(objectId []string, status domain.ObjectSyncStatus, syncError domain.SyncError, spaceId string) {
|
||||
func (u *syncStatusUpdater) UpdateDetails(objectId string, status domain.ObjectSyncStatus, spaceId string) {
|
||||
if spaceId == u.spaceService.TechSpaceId() {
|
||||
return
|
||||
}
|
||||
for _, id := range objectId {
|
||||
u.mx.Lock()
|
||||
u.entries[id] = &syncStatusDetails{
|
||||
status: status,
|
||||
syncError: syncError,
|
||||
spaceId: spaceId,
|
||||
}
|
||||
u.mx.Unlock()
|
||||
}
|
||||
err := u.batcher.TryAdd(&syncStatusDetails{
|
||||
objectIds: objectId,
|
||||
status: status,
|
||||
syncError: syncError,
|
||||
spaceId: spaceId,
|
||||
err := u.addToQueue(&syncStatusDetails{
|
||||
objectId: objectId,
|
||||
status: status,
|
||||
spaceId: spaceId,
|
||||
})
|
||||
if err != nil {
|
||||
log.Errorf("failed to add sync details update to queue: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (u *syncStatusUpdater) updateDetails(syncStatusDetails *syncStatusDetails) {
|
||||
details := u.extractObjectDetails(syncStatusDetails)
|
||||
for _, detail := range details {
|
||||
id := pbtypes.GetString(detail.Details, bundle.RelationKeyId.String())
|
||||
err := u.setObjectDetails(syncStatusDetails, detail.Details, id)
|
||||
func (u *syncStatusUpdater) addToQueue(details *syncStatusDetails) error {
|
||||
u.mx.Lock()
|
||||
u.entries[details.objectId] = details
|
||||
u.mx.Unlock()
|
||||
return u.batcher.TryAdd(details.objectId)
|
||||
}
|
||||
|
||||
func (u *syncStatusUpdater) processEvents() {
|
||||
defer close(u.finish)
|
||||
|
||||
for {
|
||||
objectId, err := u.batcher.WaitOne(u.ctx)
|
||||
if err != nil {
|
||||
log.Errorf("failed to update object details %s", err)
|
||||
return
|
||||
}
|
||||
u.updateSpecificObject(objectId)
|
||||
}
|
||||
}
|
||||
|
||||
func (u *syncStatusUpdater) updateSpecificObject(objectId string) {
|
||||
u.mx.Lock()
|
||||
objectStatus := u.entries[objectId]
|
||||
delete(u.entries, objectId)
|
||||
u.mx.Unlock()
|
||||
|
||||
if objectStatus != nil {
|
||||
err := u.updateObjectDetails(objectStatus, objectId)
|
||||
if err != nil {
|
||||
log.Errorf("failed to update details %s", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (u *syncStatusUpdater) extractObjectDetails(syncStatusDetails *syncStatusDetails) []database.Record {
|
||||
details, err := u.objectStore.Query(database.Query{
|
||||
Filters: []*model.BlockContentDataviewFilter{
|
||||
{
|
||||
RelationKey: bundle.RelationKeySyncStatus.String(),
|
||||
Condition: model.BlockContentDataviewFilter_NotEqual,
|
||||
Value: pbtypes.Int64(int64(syncStatusDetails.status)),
|
||||
},
|
||||
{
|
||||
RelationKey: bundle.RelationKeySpaceId.String(),
|
||||
Condition: model.BlockContentDataviewFilter_Equal,
|
||||
Value: pbtypes.String(syncStatusDetails.spaceId),
|
||||
},
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
log.Errorf("failed to update object details %s", err)
|
||||
func (u *syncStatusUpdater) UpdateSpaceDetails(existing, missing []string, spaceId string) {
|
||||
if spaceId == u.spaceService.TechSpaceId() {
|
||||
return
|
||||
}
|
||||
return details
|
||||
u.spaceSyncStatus.UpdateMissingIds(spaceId, missing)
|
||||
ids := u.getSyncingObjects(spaceId)
|
||||
|
||||
// removed contains ids that are not yet marked as syncing
|
||||
// added contains ids that were syncing, but appeared as synced, because they are not in existing list
|
||||
removed, added := slice.DifferenceRemovedAdded(existing, ids)
|
||||
if len(removed)+len(added) == 0 {
|
||||
u.spaceSyncStatus.Refresh(spaceId)
|
||||
return
|
||||
}
|
||||
for _, id := range added {
|
||||
err := u.addToQueue(&syncStatusDetails{
|
||||
objectId: id,
|
||||
status: domain.ObjectSyncStatusSynced,
|
||||
spaceId: spaceId,
|
||||
})
|
||||
if err != nil {
|
||||
log.Errorf("failed to add sync details update to queue: %s", err)
|
||||
}
|
||||
}
|
||||
for _, id := range removed {
|
||||
err := u.addToQueue(&syncStatusDetails{
|
||||
objectId: id,
|
||||
status: domain.ObjectSyncStatusSyncing,
|
||||
spaceId: spaceId,
|
||||
})
|
||||
if err != nil {
|
||||
log.Errorf("failed to add sync details update to queue: %s", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (u *syncStatusUpdater) getSyncingObjects(spaceId string) []string {
|
||||
sub, err := u.syncSubscriptions.GetSubscription(spaceId)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
ids := make([]string, 0, sub.GetObjectSubscription().Len())
|
||||
sub.GetObjectSubscription().Iterate(func(id string, _ struct{}) bool {
|
||||
ids = append(ids, id)
|
||||
return true
|
||||
})
|
||||
return ids
|
||||
}
|
||||
|
||||
func (u *syncStatusUpdater) updateObjectDetails(syncStatusDetails *syncStatusDetails, objectId string) error {
|
||||
record, err := u.objectStore.GetDetails(objectId)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return u.setObjectDetails(syncStatusDetails, record.Details, objectId)
|
||||
}
|
||||
|
||||
func (u *syncStatusUpdater) setObjectDetails(syncStatusDetails *syncStatusDetails, record *types.Struct, objectId string) error {
|
||||
status := syncStatusDetails.status
|
||||
syncError := syncStatusDetails.syncError
|
||||
if fileStatus, ok := record.GetFields()[bundle.RelationKeyFileBackupStatus.String()]; ok {
|
||||
status, syncError = mapFileStatus(filesyncstatus.Status(int(fileStatus.GetNumberValue())))
|
||||
}
|
||||
changed := u.hasRelationsChange(record, status, syncError)
|
||||
if !changed {
|
||||
return nil
|
||||
}
|
||||
if !u.isLayoutSuitableForSyncRelations(record) {
|
||||
return nil
|
||||
}
|
||||
syncError := domain.SyncErrorNull
|
||||
spc, err := u.spaceService.Get(u.ctx, syncStatusDetails.spaceId)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
spaceStatus := mapObjectSyncToSpaceSyncStatus(status, syncError)
|
||||
defer u.sendSpaceStatusUpdate(err, syncStatusDetails, spaceStatus, syncError)
|
||||
defer u.spaceSyncStatus.Refresh(syncStatusDetails.spaceId)
|
||||
err = spc.DoLockedIfNotExists(objectId, func() error {
|
||||
return u.objectStore.ModifyObjectDetails(objectId, func(details *types.Struct) (*types.Struct, error) {
|
||||
if details == nil || details.Fields == nil {
|
||||
details = &types.Struct{Fields: map[string]*types.Value{}}
|
||||
}
|
||||
if !u.isLayoutSuitableForSyncRelations(details) {
|
||||
return details, nil
|
||||
}
|
||||
if fileStatus, ok := details.GetFields()[bundle.RelationKeyFileBackupStatus.String()]; ok {
|
||||
status, syncError = getSyncStatusForFile(status, syncError, filesyncstatus.Status(int(fileStatus.GetNumberValue())))
|
||||
}
|
||||
details.Fields[bundle.RelationKeySyncStatus.String()] = pbtypes.Int64(int64(status))
|
||||
details.Fields[bundle.RelationKeySyncError.String()] = pbtypes.Int64(int64(syncError))
|
||||
details.Fields[bundle.RelationKeySyncDate.String()] = pbtypes.Int64(time.Now().Unix())
|
||||
|
@ -199,120 +229,71 @@ func (u *syncStatusUpdater) setObjectDetails(syncStatusDetails *syncStatusDetail
|
|||
})
|
||||
}
|
||||
|
||||
func (u *syncStatusUpdater) isLayoutSuitableForSyncRelations(details *types.Struct) bool {
|
||||
layoutsWithoutSyncRelations := []float64{
|
||||
float64(model.ObjectType_participant),
|
||||
float64(model.ObjectType_dashboard),
|
||||
float64(model.ObjectType_spaceView),
|
||||
float64(model.ObjectType_space),
|
||||
float64(model.ObjectType_date),
|
||||
}
|
||||
layout := details.Fields[bundle.RelationKeyLayout.String()].GetNumberValue()
|
||||
return !slices.Contains(layoutsWithoutSyncRelations, layout)
|
||||
}
|
||||
|
||||
func mapObjectSyncToSpaceSyncStatus(status domain.ObjectSyncStatus, syncError domain.SyncError) domain.SpaceSyncStatus {
|
||||
switch status {
|
||||
case domain.ObjectSynced:
|
||||
return domain.Synced
|
||||
case domain.ObjectSyncing, domain.ObjectQueued:
|
||||
return domain.Syncing
|
||||
case domain.ObjectError:
|
||||
// don't send error to space if file were oversized
|
||||
if syncError != domain.Oversized {
|
||||
return domain.Error
|
||||
}
|
||||
}
|
||||
return domain.Synced
|
||||
}
|
||||
|
||||
func (u *syncStatusUpdater) sendSpaceStatusUpdate(err error, syncStatusDetails *syncStatusDetails, status domain.SpaceSyncStatus, syncError domain.SyncError) {
|
||||
if err == nil {
|
||||
u.spaceSyncStatus.SendUpdate(domain.MakeSyncStatus(syncStatusDetails.spaceId, status, syncError, domain.Objects))
|
||||
}
|
||||
}
|
||||
|
||||
func mapFileStatus(status filesyncstatus.Status) (domain.ObjectSyncStatus, domain.SyncError) {
|
||||
var syncError domain.SyncError
|
||||
switch status {
|
||||
case filesyncstatus.Syncing:
|
||||
return domain.ObjectSyncing, domain.Null
|
||||
case filesyncstatus.Queued:
|
||||
return domain.ObjectQueued, domain.Null
|
||||
case filesyncstatus.Limited:
|
||||
syncError = domain.Oversized
|
||||
return domain.ObjectError, syncError
|
||||
case filesyncstatus.Unknown:
|
||||
syncError = domain.NetworkError
|
||||
return domain.ObjectError, syncError
|
||||
default:
|
||||
return domain.ObjectSynced, domain.Null
|
||||
}
|
||||
}
|
||||
|
||||
func (u *syncStatusUpdater) setSyncDetails(sb smartblock.SmartBlock, status domain.ObjectSyncStatus, syncError domain.SyncError) error {
|
||||
if !slices.Contains(helper.SyncRelationsSmartblockTypes(), sb.Type()) {
|
||||
return nil
|
||||
}
|
||||
if d, ok := sb.(basic.DetailsSettable); ok {
|
||||
syncStatusDetails := []*model.Detail{
|
||||
{
|
||||
Key: bundle.RelationKeySyncStatus.String(),
|
||||
Value: pbtypes.Int64(int64(status)),
|
||||
},
|
||||
}
|
||||
syncStatusDetails = append(syncStatusDetails, &model.Detail{
|
||||
Key: bundle.RelationKeySyncError.String(),
|
||||
Value: pbtypes.Int64(int64(syncError)),
|
||||
})
|
||||
syncStatusDetails = append(syncStatusDetails, &model.Detail{
|
||||
Key: bundle.RelationKeySyncDate.String(),
|
||||
Value: pbtypes.Int64(time.Now().Unix()),
|
||||
})
|
||||
return d.SetDetails(nil, syncStatusDetails, false)
|
||||
if !u.isLayoutSuitableForSyncRelations(sb.Details()) {
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
st := sb.NewState()
|
||||
if fileStatus, ok := st.Details().GetFields()[bundle.RelationKeyFileBackupStatus.String()]; ok {
|
||||
status, syncError = getSyncStatusForFile(status, syncError, filesyncstatus.Status(int(fileStatus.GetNumberValue())))
|
||||
}
|
||||
st.SetDetailAndBundledRelation(bundle.RelationKeySyncStatus, pbtypes.Int64(int64(status)))
|
||||
st.SetDetailAndBundledRelation(bundle.RelationKeySyncError, pbtypes.Int64(int64(syncError)))
|
||||
st.SetDetailAndBundledRelation(bundle.RelationKeySyncDate, pbtypes.Int64(time.Now().Unix()))
|
||||
|
||||
return sb.Apply(st, smartblock.KeepInternalFlags /* do not erase flags */)
|
||||
}
|
||||
|
||||
func (u *syncStatusUpdater) hasRelationsChange(record *types.Struct, status domain.ObjectSyncStatus, syncError domain.SyncError) bool {
|
||||
var changed bool
|
||||
if record == nil || len(record.GetFields()) == 0 {
|
||||
changed = true
|
||||
}
|
||||
if pbtypes.Get(record, bundle.RelationKeySyncStatus.String()) == nil ||
|
||||
pbtypes.Get(record, bundle.RelationKeySyncError.String()) == nil {
|
||||
changed = true
|
||||
}
|
||||
if pbtypes.GetInt64(record, bundle.RelationKeySyncStatus.String()) != int64(status) {
|
||||
changed = true
|
||||
}
|
||||
if pbtypes.GetInt64(record, bundle.RelationKeySyncError.String()) != int64(syncError) {
|
||||
changed = true
|
||||
}
|
||||
return changed
|
||||
var suitableLayouts = map[model.ObjectTypeLayout]struct{}{
|
||||
model.ObjectType_basic: {},
|
||||
model.ObjectType_profile: {},
|
||||
model.ObjectType_todo: {},
|
||||
model.ObjectType_set: {},
|
||||
model.ObjectType_objectType: {},
|
||||
model.ObjectType_relation: {},
|
||||
model.ObjectType_file: {},
|
||||
model.ObjectType_image: {},
|
||||
model.ObjectType_note: {},
|
||||
model.ObjectType_bookmark: {},
|
||||
model.ObjectType_relationOption: {},
|
||||
model.ObjectType_collection: {},
|
||||
model.ObjectType_audio: {},
|
||||
model.ObjectType_video: {},
|
||||
model.ObjectType_pdf: {},
|
||||
}
|
||||
|
||||
func (u *syncStatusUpdater) processEvents() {
|
||||
defer close(u.finish)
|
||||
for {
|
||||
status, err := u.batcher.WaitOne(u.ctx)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
for _, id := range status.objectIds {
|
||||
u.mx.Lock()
|
||||
objectStatus := u.entries[id]
|
||||
delete(u.entries, id)
|
||||
u.mx.Unlock()
|
||||
if objectStatus != nil {
|
||||
err := u.updateObjectDetails(objectStatus, id)
|
||||
if err != nil {
|
||||
log.Errorf("failed to update details %s", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(status.objectIds) == 0 {
|
||||
u.updateDetails(status)
|
||||
}
|
||||
func (u *syncStatusUpdater) isLayoutSuitableForSyncRelations(details *types.Struct) bool {
|
||||
layout := model.ObjectTypeLayout(pbtypes.GetInt64(details, bundle.RelationKeyLayout.String()))
|
||||
_, ok := suitableLayouts[layout]
|
||||
return ok
|
||||
}
|
||||
|
||||
func getSyncStatusForFile(objectStatus domain.ObjectSyncStatus, objectSyncError domain.SyncError, fileStatus filesyncstatus.Status) (domain.ObjectSyncStatus, domain.SyncError) {
|
||||
statusFromFile, errFromFile := mapFileStatus(fileStatus)
|
||||
// If file status is synced, then prioritize object's status, otherwise pick file status
|
||||
if statusFromFile != domain.ObjectSyncStatusSynced {
|
||||
objectStatus = statusFromFile
|
||||
}
|
||||
if errFromFile != domain.SyncErrorNull {
|
||||
objectSyncError = errFromFile
|
||||
}
|
||||
return objectStatus, objectSyncError
|
||||
}
|
||||
|
||||
func mapFileStatus(status filesyncstatus.Status) (domain.ObjectSyncStatus, domain.SyncError) {
|
||||
switch status {
|
||||
case filesyncstatus.Syncing:
|
||||
return domain.ObjectSyncStatusSyncing, domain.SyncErrorNull
|
||||
case filesyncstatus.Queued:
|
||||
return domain.ObjectSyncStatusQueued, domain.SyncErrorNull
|
||||
case filesyncstatus.Limited:
|
||||
return domain.ObjectSyncStatusError, domain.SyncErrorOversized
|
||||
case filesyncstatus.Unknown:
|
||||
return domain.ObjectSyncStatusError, domain.SyncErrorNetworkError
|
||||
default:
|
||||
return domain.ObjectSyncStatusSynced, domain.SyncErrorNull
|
||||
}
|
||||
}
|
||||
|
|
|
@ -3,19 +3,24 @@ package detailsupdater
|
|||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/anyproto/any-sync/app"
|
||||
"github.com/anyproto/any-sync/app/ocache"
|
||||
"github.com/cheggaaa/mb/v3"
|
||||
"github.com/gogo/protobuf/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/anyproto/anytype-heart/core/block/editor"
|
||||
"github.com/anyproto/anytype-heart/core/block/editor/smartblock"
|
||||
"github.com/anyproto/anytype-heart/core/block/editor/smartblock/smarttest"
|
||||
"github.com/anyproto/anytype-heart/core/block/editor/state"
|
||||
domain "github.com/anyproto/anytype-heart/core/domain"
|
||||
"github.com/anyproto/anytype-heart/core/subscription"
|
||||
"github.com/anyproto/anytype-heart/core/syncstatus/detailsupdater/mock_detailsupdater"
|
||||
"github.com/anyproto/anytype-heart/core/syncstatus/filesyncstatus"
|
||||
"github.com/anyproto/anytype-heart/core/syncstatus/syncsubscriptions"
|
||||
"github.com/anyproto/anytype-heart/pkg/lib/bundle"
|
||||
coresb "github.com/anyproto/anytype-heart/pkg/lib/core/smartblock"
|
||||
"github.com/anyproto/anytype-heart/pkg/lib/localstore/objectstore"
|
||||
|
@ -26,232 +31,286 @@ import (
|
|||
"github.com/anyproto/anytype-heart/util/pbtypes"
|
||||
)
|
||||
|
||||
func TestSyncStatusUpdater_UpdateDetails(t *testing.T) {
|
||||
t.Run("update sync status and date - no changes", func(t *testing.T) {
|
||||
// given
|
||||
fixture := newFixture(t)
|
||||
fixture.storeFixture.AddObjects(t, []objectstore.TestObject{
|
||||
{
|
||||
bundle.RelationKeyId: pbtypes.String("id"),
|
||||
bundle.RelationKeySyncStatus: pbtypes.Int64(int64(domain.Synced)),
|
||||
bundle.RelationKeySyncError: pbtypes.Int64(int64(domain.Null)),
|
||||
},
|
||||
})
|
||||
|
||||
// when
|
||||
err := fixture.updater.updateObjectDetails(&syncStatusDetails{[]string{"id"}, domain.ObjectSynced, domain.Null, "spaceId"}, "id")
|
||||
|
||||
// then
|
||||
assert.Nil(t, err)
|
||||
fixture.service.AssertNotCalled(t, "Get")
|
||||
})
|
||||
t.Run("update sync status and date - details exist in store", func(t *testing.T) {
|
||||
// given
|
||||
fixture := newFixture(t)
|
||||
space := mock_clientspace.NewMockSpace(t)
|
||||
fixture.service.EXPECT().Get(fixture.updater.ctx, "spaceId").Return(space, nil)
|
||||
fixture.storeFixture.AddObjects(t, []objectstore.TestObject{
|
||||
{
|
||||
bundle.RelationKeyId: pbtypes.String("id"),
|
||||
},
|
||||
})
|
||||
space.EXPECT().DoLockedIfNotExists("id", mock.Anything).Return(nil)
|
||||
|
||||
// when
|
||||
fixture.statusUpdater.EXPECT().SendUpdate(domain.MakeSyncStatus("spaceId", domain.Synced, domain.Null, domain.Objects))
|
||||
err := fixture.updater.updateObjectDetails(&syncStatusDetails{[]string{"id"}, domain.ObjectSynced, domain.Null, "spaceId"}, "id")
|
||||
|
||||
// then
|
||||
assert.Nil(t, err)
|
||||
})
|
||||
t.Run("update sync status and date - object not exist in cache", func(t *testing.T) {
|
||||
// given
|
||||
fixture := newFixture(t)
|
||||
space := mock_clientspace.NewMockSpace(t)
|
||||
fixture.service.EXPECT().Get(fixture.updater.ctx, "spaceId").Return(space, nil)
|
||||
fixture.storeFixture.AddObjects(t, []objectstore.TestObject{
|
||||
{
|
||||
bundle.RelationKeyId: pbtypes.String("id"),
|
||||
bundle.RelationKeySyncStatus: pbtypes.Int64(int64(domain.Error)),
|
||||
bundle.RelationKeySyncError: pbtypes.Int64(int64(domain.NetworkError)),
|
||||
},
|
||||
})
|
||||
space.EXPECT().DoLockedIfNotExists("id", mock.Anything).Return(nil)
|
||||
|
||||
// when
|
||||
fixture.statusUpdater.EXPECT().SendUpdate(domain.MakeSyncStatus("spaceId", domain.Synced, domain.Null, domain.Objects))
|
||||
err := fixture.updater.updateObjectDetails(&syncStatusDetails{[]string{"id"}, domain.ObjectSynced, domain.Null, "spaceId"}, "id")
|
||||
|
||||
// then
|
||||
assert.Nil(t, err)
|
||||
})
|
||||
t.Run("update sync status and date - object exist in cache", func(t *testing.T) {
|
||||
// given
|
||||
fixture := newFixture(t)
|
||||
space := mock_clientspace.NewMockSpace(t)
|
||||
fixture.service.EXPECT().Get(fixture.updater.ctx, "spaceId").Return(space, nil)
|
||||
space.EXPECT().DoLockedIfNotExists("id", mock.Anything).Return(ocache.ErrExists)
|
||||
space.EXPECT().DoCtx(fixture.updater.ctx, "id", mock.Anything).Return(nil)
|
||||
|
||||
// when
|
||||
fixture.statusUpdater.EXPECT().SendUpdate(domain.MakeSyncStatus("spaceId", domain.Synced, domain.Null, domain.Objects))
|
||||
err := fixture.updater.updateObjectDetails(&syncStatusDetails{[]string{"id"}, domain.ObjectSynced, domain.Null, "spaceId"}, "id")
|
||||
|
||||
// then
|
||||
assert.Nil(t, err)
|
||||
})
|
||||
|
||||
t.Run("update sync status and date - file status", func(t *testing.T) {
|
||||
// given
|
||||
fixture := newFixture(t)
|
||||
space := mock_clientspace.NewMockSpace(t)
|
||||
fixture.service.EXPECT().Get(fixture.updater.ctx, "spaceId").Return(space, nil)
|
||||
fixture.storeFixture.AddObjects(t, []objectstore.TestObject{
|
||||
{
|
||||
bundle.RelationKeyId: pbtypes.String("id"),
|
||||
bundle.RelationKeyFileBackupStatus: pbtypes.Int64(int64(filesyncstatus.Syncing)),
|
||||
},
|
||||
})
|
||||
space.EXPECT().DoLockedIfNotExists("id", mock.Anything).Return(nil)
|
||||
|
||||
// when
|
||||
fixture.statusUpdater.EXPECT().SendUpdate(domain.MakeSyncStatus("spaceId", domain.Syncing, domain.Null, domain.Objects))
|
||||
err := fixture.updater.updateObjectDetails(&syncStatusDetails{[]string{"id"}, domain.ObjectSynced, domain.Null, "spaceId"}, "id")
|
||||
|
||||
// then
|
||||
assert.Nil(t, err)
|
||||
})
|
||||
t.Run("update sync status and date - unknown file status", func(t *testing.T) {
|
||||
// given
|
||||
fixture := newFixture(t)
|
||||
space := mock_clientspace.NewMockSpace(t)
|
||||
fixture.service.EXPECT().Get(fixture.updater.ctx, "spaceId").Return(space, nil)
|
||||
fixture.storeFixture.AddObjects(t, []objectstore.TestObject{
|
||||
{
|
||||
bundle.RelationKeyId: pbtypes.String("id"),
|
||||
bundle.RelationKeyFileBackupStatus: pbtypes.Int64(int64(filesyncstatus.Unknown)),
|
||||
},
|
||||
})
|
||||
space.EXPECT().DoLockedIfNotExists("id", mock.Anything).Return(nil)
|
||||
|
||||
// when
|
||||
fixture.statusUpdater.EXPECT().SendUpdate(domain.MakeSyncStatus("spaceId", domain.Error, domain.NetworkError, domain.Objects))
|
||||
err := fixture.updater.updateObjectDetails(&syncStatusDetails{[]string{"id"}, domain.ObjectSynced, domain.Null, "spaceId"}, "id")
|
||||
|
||||
// then
|
||||
assert.Nil(t, err)
|
||||
})
|
||||
t.Run("update sync status and date - queued file status", func(t *testing.T) {
|
||||
// given
|
||||
fixture := newFixture(t)
|
||||
space := mock_clientspace.NewMockSpace(t)
|
||||
fixture.service.EXPECT().Get(fixture.updater.ctx, "spaceId").Return(space, nil)
|
||||
fixture.storeFixture.AddObjects(t, []objectstore.TestObject{
|
||||
{
|
||||
bundle.RelationKeyId: pbtypes.String("id"),
|
||||
bundle.RelationKeyFileBackupStatus: pbtypes.Int64(int64(filesyncstatus.Queued)),
|
||||
},
|
||||
})
|
||||
space.EXPECT().DoLockedIfNotExists("id", mock.Anything).Return(nil)
|
||||
|
||||
// when
|
||||
fixture.statusUpdater.EXPECT().SendUpdate(domain.MakeSyncStatus("spaceId", domain.Syncing, domain.Null, domain.Objects))
|
||||
err := fixture.updater.updateObjectDetails(&syncStatusDetails{[]string{"id"}, domain.ObjectSyncing, domain.Null, "spaceId"}, "id")
|
||||
|
||||
// then
|
||||
assert.Nil(t, err)
|
||||
})
|
||||
t.Run("update sync status and date - synced file status", func(t *testing.T) {
|
||||
// given
|
||||
fixture := newFixture(t)
|
||||
space := mock_clientspace.NewMockSpace(t)
|
||||
fixture.service.EXPECT().Get(fixture.updater.ctx, "spaceId").Return(space, nil)
|
||||
fixture.storeFixture.AddObjects(t, []objectstore.TestObject{
|
||||
{
|
||||
bundle.RelationKeyId: pbtypes.String("id"),
|
||||
bundle.RelationKeyFileBackupStatus: pbtypes.Int64(int64(filesyncstatus.Synced)),
|
||||
},
|
||||
})
|
||||
space.EXPECT().DoLockedIfNotExists("id", mock.Anything).Return(nil)
|
||||
|
||||
// when
|
||||
fixture.statusUpdater.EXPECT().SendUpdate(domain.MakeSyncStatus("spaceId", domain.Synced, domain.Null, domain.Objects))
|
||||
err := fixture.updater.updateObjectDetails(&syncStatusDetails{[]string{"id"}, domain.ObjectSynced, domain.Null, "spaceId"}, "id")
|
||||
|
||||
// then
|
||||
assert.Nil(t, err)
|
||||
})
|
||||
type updateTester struct {
|
||||
t *testing.T
|
||||
waitCh chan struct{}
|
||||
minEventsCount int
|
||||
maxEventsCount int
|
||||
}
|
||||
|
||||
func TestSyncStatusUpdater_Run(t *testing.T) {
|
||||
t.Run("run", func(t *testing.T) {
|
||||
// given
|
||||
fixture := newFixture(t)
|
||||
func newUpdateTester(t *testing.T, minEventsCount int, maxEventsCount int) *updateTester {
|
||||
return &updateTester{
|
||||
t: t,
|
||||
minEventsCount: minEventsCount,
|
||||
maxEventsCount: maxEventsCount,
|
||||
waitCh: make(chan struct{}, maxEventsCount),
|
||||
}
|
||||
}
|
||||
|
||||
func (t *updateTester) done() {
|
||||
t.waitCh <- struct{}{}
|
||||
}
|
||||
|
||||
// wait waits for at least one event up to t.maxEventsCount events
|
||||
func (t *updateTester) wait() {
|
||||
timeout := time.After(1 * time.Second)
|
||||
minReceivedTimer := time.After(10 * time.Millisecond)
|
||||
var eventsReceived int
|
||||
for i := 0; i < t.maxEventsCount; i++ {
|
||||
select {
|
||||
case <-minReceivedTimer:
|
||||
if eventsReceived >= t.minEventsCount {
|
||||
return
|
||||
}
|
||||
case <-t.waitCh:
|
||||
eventsReceived++
|
||||
case <-timeout:
|
||||
t.t.Fatal("timeout")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func newUpdateDetailsFixture(t *testing.T) *fixture {
|
||||
fx := newFixture(t)
|
||||
fx.spaceService.EXPECT().TechSpaceId().Return("techSpace")
|
||||
err := fx.Run(context.Background())
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() {
|
||||
err := fx.Close(context.Background())
|
||||
require.NoError(t, err)
|
||||
})
|
||||
return fx
|
||||
}
|
||||
|
||||
func TestSyncStatusUpdater_UpdateDetails(t *testing.T) {
|
||||
t.Run("ignore tech space", func(t *testing.T) {
|
||||
fx := newUpdateDetailsFixture(t)
|
||||
|
||||
fx.UpdateDetails("spaceView1", domain.ObjectSyncStatusSynced, "techSpace")
|
||||
})
|
||||
|
||||
t.Run("updates to the same object", func(t *testing.T) {
|
||||
fx := newUpdateDetailsFixture(t)
|
||||
updTester := newUpdateTester(t, 1, 4)
|
||||
|
||||
// when
|
||||
fixture.service.EXPECT().TechSpaceId().Return("techSpaceId")
|
||||
space := mock_clientspace.NewMockSpace(t)
|
||||
fixture.service.EXPECT().Get(mock.Anything, mock.Anything).Return(space, nil).Maybe()
|
||||
space.EXPECT().DoLockedIfNotExists(mock.Anything, mock.Anything).Return(nil).Maybe()
|
||||
space.EXPECT().DoCtx(mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe()
|
||||
err := fixture.updater.Run(context.Background())
|
||||
fixture.statusUpdater.EXPECT().SendUpdate(mock.Anything).Return().Maybe()
|
||||
assert.Nil(t, err)
|
||||
fixture.updater.UpdateDetails([]string{"id"}, domain.ObjectSynced, domain.Null, "spaceId")
|
||||
fx.spaceService.EXPECT().Get(mock.Anything, "space1").Return(space, nil)
|
||||
space.EXPECT().DoLockedIfNotExists(mock.Anything, mock.Anything).Return(ocache.ErrExists).Times(0)
|
||||
space.EXPECT().DoCtx(mock.Anything, mock.Anything, mock.Anything).Run(func(ctx context.Context, objectId string, apply func(smartblock.SmartBlock) error) {
|
||||
sb := smarttest.New(objectId)
|
||||
st := sb.Doc.(*state.State)
|
||||
st.SetDetailAndBundledRelation(bundle.RelationKeyLayout, pbtypes.Int64(int64(model.ObjectType_basic)))
|
||||
err := apply(sb)
|
||||
require.NoError(t, err)
|
||||
|
||||
// then
|
||||
err = fixture.updater.Close(context.Background())
|
||||
assert.Nil(t, err)
|
||||
det := sb.Doc.LocalDetails()
|
||||
assert.Contains(t, det.GetFields(), bundle.RelationKeySyncStatus.String())
|
||||
assert.Contains(t, det.GetFields(), bundle.RelationKeySyncDate.String())
|
||||
assert.Contains(t, det.GetFields(), bundle.RelationKeySyncError.String())
|
||||
|
||||
fx.spaceStatusUpdater.EXPECT().Refresh("space1")
|
||||
|
||||
updTester.done()
|
||||
}).Return(nil).Times(0)
|
||||
|
||||
fx.UpdateDetails("id1", domain.ObjectSyncStatusSyncing, "space1")
|
||||
fx.UpdateDetails("id1", domain.ObjectSyncStatusError, "space1")
|
||||
fx.UpdateDetails("id1", domain.ObjectSyncStatusSyncing, "space1")
|
||||
fx.UpdateDetails("id1", domain.ObjectSyncStatusSynced, "space1")
|
||||
|
||||
updTester.wait()
|
||||
})
|
||||
|
||||
t.Run("run 2 time for 1 object", func(t *testing.T) {
|
||||
// given
|
||||
fixture := newFixture(t)
|
||||
t.Run("updates to object not in cache", func(t *testing.T) {
|
||||
fx := newUpdateDetailsFixture(t)
|
||||
updTester := newUpdateTester(t, 1, 1)
|
||||
|
||||
// when
|
||||
fixture.service.EXPECT().TechSpaceId().Return("techSpaceId").Times(2)
|
||||
fixture.updater.UpdateDetails([]string{"id"}, domain.ObjectSynced, domain.Null, "spaceId")
|
||||
fixture.updater.UpdateDetails([]string{"id"}, domain.ObjectSyncing, domain.Null, "spaceId")
|
||||
fx.subscriptionService.StoreFixture.AddObjects(t, []objectstore.TestObject{
|
||||
{
|
||||
bundle.RelationKeyId: pbtypes.String("id1"),
|
||||
bundle.RelationKeySpaceId: pbtypes.String("space1"),
|
||||
bundle.RelationKeyLayout: pbtypes.Int64(int64(model.ObjectType_basic)),
|
||||
},
|
||||
})
|
||||
|
||||
// then
|
||||
assert.Equal(t, &syncStatusDetails{status: domain.ObjectSyncing, syncError: domain.Null, spaceId: "spaceId"}, fixture.updater.entries["id"])
|
||||
space := mock_clientspace.NewMockSpace(t)
|
||||
fx.spaceService.EXPECT().Get(mock.Anything, "space1").Return(space, nil)
|
||||
space.EXPECT().DoLockedIfNotExists(mock.Anything, mock.Anything).Run(func(objectId string, proc func() error) {
|
||||
err := proc()
|
||||
require.NoError(t, err)
|
||||
|
||||
details, err := fx.objectStore.GetDetails(objectId)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.True(t, pbtypes.GetInt64(details.Details, bundle.RelationKeySyncStatus.String()) == int64(domain.ObjectSyncStatusError))
|
||||
assert.True(t, pbtypes.GetInt64(details.Details, bundle.RelationKeySyncError.String()) == int64(domain.SyncErrorNull))
|
||||
assert.Contains(t, details.Details.GetFields(), bundle.RelationKeySyncDate.String())
|
||||
updTester.done()
|
||||
}).Return(nil).Times(0)
|
||||
|
||||
fx.UpdateDetails("id1", domain.ObjectSyncStatusError, "space1")
|
||||
|
||||
fx.spaceStatusUpdater.EXPECT().Refresh("space1")
|
||||
|
||||
updTester.wait()
|
||||
})
|
||||
|
||||
t.Run("updates in file object", func(t *testing.T) {
|
||||
t.Run("file backup status limited", func(t *testing.T) {
|
||||
fx := newUpdateDetailsFixture(t)
|
||||
updTester := newUpdateTester(t, 1, 1)
|
||||
|
||||
space := mock_clientspace.NewMockSpace(t)
|
||||
fx.spaceService.EXPECT().Get(mock.Anything, "space1").Return(space, nil)
|
||||
space.EXPECT().DoLockedIfNotExists(mock.Anything, mock.Anything).Return(ocache.ErrExists)
|
||||
space.EXPECT().DoCtx(mock.Anything, mock.Anything, mock.Anything).Run(func(ctx context.Context, objectId string, apply func(smartblock.SmartBlock) error) {
|
||||
sb := smarttest.New(objectId)
|
||||
st := sb.Doc.(*state.State)
|
||||
st.SetDetailAndBundledRelation(bundle.RelationKeyLayout, pbtypes.Int64(int64(model.ObjectType_file)))
|
||||
st.SetDetailAndBundledRelation(bundle.RelationKeyFileBackupStatus, pbtypes.Int64(int64(filesyncstatus.Limited)))
|
||||
err := apply(sb)
|
||||
require.NoError(t, err)
|
||||
|
||||
det := sb.Doc.LocalDetails()
|
||||
assert.True(t, pbtypes.GetInt64(det, bundle.RelationKeySyncStatus.String()) == int64(domain.ObjectSyncStatusError))
|
||||
assert.True(t, pbtypes.GetInt64(det, bundle.RelationKeySyncError.String()) == int64(domain.SyncErrorOversized))
|
||||
assert.Contains(t, det.GetFields(), bundle.RelationKeySyncDate.String())
|
||||
|
||||
fx.spaceStatusUpdater.EXPECT().Refresh("space1")
|
||||
|
||||
updTester.done()
|
||||
}).Return(nil)
|
||||
|
||||
fx.UpdateDetails("id2", domain.ObjectSyncStatusSynced, "space1")
|
||||
|
||||
updTester.wait()
|
||||
})
|
||||
t.Run("prioritize object status", func(t *testing.T) {
|
||||
fx := newUpdateDetailsFixture(t)
|
||||
updTester := newUpdateTester(t, 1, 1)
|
||||
|
||||
space := mock_clientspace.NewMockSpace(t)
|
||||
fx.spaceService.EXPECT().Get(mock.Anything, "space1").Return(space, nil)
|
||||
space.EXPECT().DoLockedIfNotExists(mock.Anything, mock.Anything).Return(ocache.ErrExists)
|
||||
space.EXPECT().DoCtx(mock.Anything, mock.Anything, mock.Anything).Run(func(ctx context.Context, objectId string, apply func(smartblock.SmartBlock) error) {
|
||||
sb := smarttest.New(objectId)
|
||||
st := sb.Doc.(*state.State)
|
||||
st.SetDetailAndBundledRelation(bundle.RelationKeyLayout, pbtypes.Int64(int64(model.ObjectType_file)))
|
||||
st.SetDetailAndBundledRelation(bundle.RelationKeyFileBackupStatus, pbtypes.Int64(int64(filesyncstatus.Synced)))
|
||||
err := apply(sb)
|
||||
require.NoError(t, err)
|
||||
|
||||
det := sb.Doc.LocalDetails()
|
||||
assert.True(t, pbtypes.GetInt64(det, bundle.RelationKeySyncStatus.String()) == int64(domain.ObjectSyncStatusSyncing))
|
||||
assert.Contains(t, det.GetFields(), bundle.RelationKeySyncError.String())
|
||||
assert.Contains(t, det.GetFields(), bundle.RelationKeySyncDate.String())
|
||||
|
||||
fx.spaceStatusUpdater.EXPECT().Refresh("space1")
|
||||
|
||||
updTester.done()
|
||||
}).Return(nil)
|
||||
|
||||
fx.UpdateDetails("id3", domain.ObjectSyncStatusSyncing, "space1")
|
||||
|
||||
updTester.wait()
|
||||
})
|
||||
})
|
||||
|
||||
// TODO Test DoLockedIfNotExists
|
||||
}
|
||||
|
||||
func TestSyncStatusUpdater_UpdateSpaceDetails(t *testing.T) {
|
||||
fx := newUpdateDetailsFixture(t)
|
||||
updTester := newUpdateTester(t, 3, 3)
|
||||
|
||||
fx.subscriptionService.StoreFixture.AddObjects(t, []objectstore.TestObject{
|
||||
{
|
||||
bundle.RelationKeyId: pbtypes.String("id1"),
|
||||
bundle.RelationKeySpaceId: pbtypes.String("space1"),
|
||||
bundle.RelationKeyLayout: pbtypes.Int64(int64(model.ObjectType_basic)),
|
||||
bundle.RelationKeySyncStatus: pbtypes.Int64(int64(domain.ObjectSyncStatusSyncing)),
|
||||
},
|
||||
{
|
||||
bundle.RelationKeyId: pbtypes.String("id4"),
|
||||
bundle.RelationKeySpaceId: pbtypes.String("space1"),
|
||||
bundle.RelationKeyLayout: pbtypes.Int64(int64(model.ObjectType_basic)),
|
||||
bundle.RelationKeySyncStatus: pbtypes.Int64(int64(domain.ObjectSyncStatusSyncing)),
|
||||
},
|
||||
})
|
||||
|
||||
space := mock_clientspace.NewMockSpace(t)
|
||||
fx.spaceService.EXPECT().Get(mock.Anything, "space1").Return(space, nil)
|
||||
space.EXPECT().DoLockedIfNotExists(mock.Anything, mock.Anything).Return(ocache.ErrExists).Times(0)
|
||||
|
||||
assertUpdate := func(objectId string, status domain.ObjectSyncStatus) {
|
||||
space.EXPECT().DoCtx(mock.Anything, objectId, mock.Anything).Run(func(ctx context.Context, objectId string, apply func(smartblock.SmartBlock) error) {
|
||||
sb := smarttest.New(objectId)
|
||||
st := sb.Doc.(*state.State)
|
||||
st.SetDetailAndBundledRelation(bundle.RelationKeyLayout, pbtypes.Int64(int64(model.ObjectType_basic)))
|
||||
err := apply(sb)
|
||||
require.NoError(t, err)
|
||||
|
||||
det := sb.Doc.LocalDetails()
|
||||
assert.True(t, pbtypes.GetInt64(det, bundle.RelationKeySyncStatus.String()) == int64(status))
|
||||
assert.Contains(t, det.GetFields(), bundle.RelationKeySyncDate.String())
|
||||
assert.Contains(t, det.GetFields(), bundle.RelationKeySyncError.String())
|
||||
|
||||
fx.spaceStatusUpdater.EXPECT().Refresh("space1")
|
||||
|
||||
updTester.done()
|
||||
}).Return(nil).Times(0)
|
||||
}
|
||||
|
||||
assertUpdate("id2", domain.ObjectSyncStatusSyncing)
|
||||
assertUpdate("id4", domain.ObjectSyncStatusSynced)
|
||||
|
||||
fx.spaceStatusUpdater.EXPECT().UpdateMissingIds("space1", []string{"id3"})
|
||||
fx.UpdateSpaceDetails([]string{"id1", "id2"}, []string{"id3"}, "space1")
|
||||
|
||||
fx.spaceStatusUpdater.EXPECT().UpdateMissingIds("space1", []string{"id3"})
|
||||
fx.spaceStatusUpdater.EXPECT().Refresh("space1")
|
||||
fx.UpdateSpaceDetails([]string{"id1", "id2"}, []string{"id3"}, "space1")
|
||||
|
||||
updTester.wait()
|
||||
}
|
||||
|
||||
func TestSyncStatusUpdater_setSyncDetails(t *testing.T) {
|
||||
t.Run("set smartblock details", func(t *testing.T) {
|
||||
// given
|
||||
fixture := newFixture(t)
|
||||
fx := newFixture(t)
|
||||
sb := smarttest.New("id")
|
||||
|
||||
// when
|
||||
err := fixture.updater.setSyncDetails(fixture.sb, domain.ObjectError, domain.NetworkError)
|
||||
err := fx.setSyncDetails(sb, domain.ObjectSyncStatusError, domain.SyncErrorNetworkError)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// then
|
||||
details := fixture.sb.NewState().CombinedDetails().GetFields()
|
||||
details := sb.NewState().CombinedDetails().GetFields()
|
||||
assert.NotNil(t, details)
|
||||
assert.Equal(t, pbtypes.Int64(int64(domain.Error)), details[bundle.RelationKeySyncStatus.String()])
|
||||
assert.Equal(t, pbtypes.Int64(int64(domain.NetworkError)), details[bundle.RelationKeySyncError.String()])
|
||||
assert.Equal(t, pbtypes.Int64(int64(domain.SpaceSyncStatusError)), details[bundle.RelationKeySyncStatus.String()])
|
||||
assert.Equal(t, pbtypes.Int64(int64(domain.SyncErrorNetworkError)), details[bundle.RelationKeySyncError.String()])
|
||||
assert.NotNil(t, details[bundle.RelationKeySyncDate.String()])
|
||||
})
|
||||
t.Run("not set smartblock details, because it doesn't implement interface DetailsSettable", func(t *testing.T) {
|
||||
// given
|
||||
fixture := newFixture(t)
|
||||
fx := newFixture(t)
|
||||
sb := smarttest.New("id")
|
||||
|
||||
// when
|
||||
fixture.sb.SetType(coresb.SmartBlockTypePage)
|
||||
err := fixture.updater.setSyncDetails(editor.NewMissingObject(fixture.sb), domain.ObjectError, domain.NetworkError)
|
||||
sb.SetType(coresb.SmartBlockTypePage)
|
||||
err := fx.setSyncDetails(editor.NewMissingObject(sb), domain.ObjectSyncStatusError, domain.SyncErrorNetworkError)
|
||||
|
||||
// then
|
||||
assert.Nil(t, err)
|
||||
})
|
||||
t.Run("not set smartblock details, because it doesn't need details", func(t *testing.T) {
|
||||
// given
|
||||
fixture := newFixture(t)
|
||||
fx := newFixture(t)
|
||||
sb := smarttest.New("id")
|
||||
|
||||
// when
|
||||
fixture.sb.SetType(coresb.SmartBlockTypeHome)
|
||||
err := fixture.updater.setSyncDetails(fixture.sb, domain.ObjectError, domain.NetworkError)
|
||||
sb.SetType(coresb.SmartBlockTypeHome)
|
||||
err := fx.setSyncDetails(sb, domain.ObjectSyncStatusError, domain.SyncErrorNetworkError)
|
||||
|
||||
// then
|
||||
assert.Nil(t, err)
|
||||
|
@ -261,13 +320,13 @@ func TestSyncStatusUpdater_setSyncDetails(t *testing.T) {
|
|||
func TestSyncStatusUpdater_isLayoutSuitableForSyncRelations(t *testing.T) {
|
||||
t.Run("isLayoutSuitableForSyncRelations - participant details", func(t *testing.T) {
|
||||
// given
|
||||
fixture := newFixture(t)
|
||||
fx := newFixture(t)
|
||||
|
||||
// when
|
||||
details := &types.Struct{Fields: map[string]*types.Value{
|
||||
bundle.RelationKeyLayout.String(): pbtypes.Float64(float64(model.ObjectType_participant)),
|
||||
}}
|
||||
isSuitable := fixture.updater.isLayoutSuitableForSyncRelations(details)
|
||||
isSuitable := fx.isLayoutSuitableForSyncRelations(details)
|
||||
|
||||
// then
|
||||
assert.False(t, isSuitable)
|
||||
|
@ -275,13 +334,13 @@ func TestSyncStatusUpdater_isLayoutSuitableForSyncRelations(t *testing.T) {
|
|||
|
||||
t.Run("isLayoutSuitableForSyncRelations - basic details", func(t *testing.T) {
|
||||
// given
|
||||
fixture := newFixture(t)
|
||||
fx := newFixture(t)
|
||||
|
||||
// when
|
||||
details := &types.Struct{Fields: map[string]*types.Value{
|
||||
bundle.RelationKeyLayout.String(): pbtypes.Float64(float64(model.ObjectType_basic)),
|
||||
}}
|
||||
isSuitable := fixture.updater.isLayoutSuitableForSyncRelations(details)
|
||||
isSuitable := fx.isLayoutSuitableForSyncRelations(details)
|
||||
|
||||
// then
|
||||
assert.True(t, isSuitable)
|
||||
|
@ -289,34 +348,36 @@ func TestSyncStatusUpdater_isLayoutSuitableForSyncRelations(t *testing.T) {
|
|||
}
|
||||
|
||||
func newFixture(t *testing.T) *fixture {
|
||||
smartTest := smarttest.New("id")
|
||||
storeFixture := objectstore.NewStoreFixture(t)
|
||||
service := mock_space.NewMockService(t)
|
||||
updater := &syncStatusUpdater{
|
||||
batcher: mb.New[*syncStatusDetails](0),
|
||||
finish: make(chan struct{}),
|
||||
entries: map[string]*syncStatusDetails{},
|
||||
}
|
||||
updater := New()
|
||||
statusUpdater := mock_detailsupdater.NewMockSpaceStatusUpdater(t)
|
||||
|
||||
syncSub := syncsubscriptions.New()
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
a := &app.App{}
|
||||
a.Register(storeFixture).
|
||||
Register(testutil.PrepareMock(context.Background(), a, service)).
|
||||
Register(testutil.PrepareMock(context.Background(), a, statusUpdater))
|
||||
subscriptionService := subscription.RegisterSubscriptionService(t, a)
|
||||
a.Register(syncSub)
|
||||
a.Register(testutil.PrepareMock(ctx, a, service))
|
||||
a.Register(testutil.PrepareMock(ctx, a, statusUpdater))
|
||||
err := updater.Init(a)
|
||||
assert.Nil(t, err)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = a.Start(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
return &fixture{
|
||||
updater: updater,
|
||||
sb: smartTest,
|
||||
storeFixture: storeFixture,
|
||||
service: service,
|
||||
statusUpdater: statusUpdater,
|
||||
syncStatusUpdater: updater.(*syncStatusUpdater),
|
||||
spaceService: service,
|
||||
spaceStatusUpdater: statusUpdater,
|
||||
subscriptionService: subscriptionService,
|
||||
}
|
||||
}
|
||||
|
||||
type fixture struct {
|
||||
sb *smarttest.SmartTest
|
||||
updater *syncStatusUpdater
|
||||
storeFixture *objectstore.StoreFixture
|
||||
service *mock_space.MockService
|
||||
statusUpdater *mock_detailsupdater.MockSpaceStatusUpdater
|
||||
*syncStatusUpdater
|
||||
spaceService *mock_space.MockService
|
||||
spaceStatusUpdater *mock_detailsupdater.MockSpaceStatusUpdater
|
||||
subscriptionService *subscription.InternalTestService
|
||||
}
|
||||
|
|
|
@ -3,51 +3,37 @@ package syncstatus
|
|||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/anyproto/anytype-heart/core/block/cache"
|
||||
"github.com/anyproto/anytype-heart/core/block/editor/basic"
|
||||
"github.com/anyproto/anytype-heart/core/block/editor/smartblock"
|
||||
"github.com/anyproto/anytype-heart/core/domain"
|
||||
"github.com/anyproto/anytype-heart/core/syncstatus/filesyncstatus"
|
||||
"github.com/anyproto/anytype-heart/pkg/lib/bundle"
|
||||
"github.com/anyproto/anytype-heart/pkg/lib/pb/model"
|
||||
"github.com/anyproto/anytype-heart/util/pbtypes"
|
||||
)
|
||||
|
||||
const limitReachErrorPercentage = 0.01
|
||||
|
||||
func (s *service) onFileUploadStarted(objectId string, _ domain.FullFileId) error {
|
||||
return s.indexFileSyncStatus(objectId, filesyncstatus.Syncing, 0)
|
||||
return s.indexFileSyncStatus(objectId, filesyncstatus.Syncing)
|
||||
}
|
||||
|
||||
func (s *service) onFileUploaded(objectId string, _ domain.FullFileId) error {
|
||||
return s.indexFileSyncStatus(objectId, filesyncstatus.Synced, 0)
|
||||
return s.indexFileSyncStatus(objectId, filesyncstatus.Synced)
|
||||
}
|
||||
|
||||
func (s *service) onFileLimited(objectId string, _ domain.FullFileId, bytesLeftPercentage float64) error {
|
||||
return s.indexFileSyncStatus(objectId, filesyncstatus.Limited, bytesLeftPercentage)
|
||||
return s.indexFileSyncStatus(objectId, filesyncstatus.Limited)
|
||||
}
|
||||
|
||||
func (s *service) OnFileDelete(fileId domain.FullFileId) {
|
||||
s.sendSpaceStatusUpdate(filesyncstatus.Synced, fileId.SpaceId, 0)
|
||||
}
|
||||
|
||||
func (s *service) indexFileSyncStatus(fileObjectId string, status filesyncstatus.Status, bytesLeftPercentage float64) error {
|
||||
var spaceId string
|
||||
func (s *service) indexFileSyncStatus(fileObjectId string, status filesyncstatus.Status) error {
|
||||
err := cache.Do(s.objectGetter, fileObjectId, func(sb smartblock.SmartBlock) (err error) {
|
||||
spaceId = sb.SpaceID()
|
||||
prevStatus := pbtypes.GetInt64(sb.Details(), bundle.RelationKeyFileBackupStatus.String())
|
||||
newStatus := int64(status)
|
||||
if prevStatus == newStatus {
|
||||
return nil
|
||||
}
|
||||
detailsSetter, ok := sb.(basic.DetailsSettable)
|
||||
if !ok {
|
||||
return fmt.Errorf("setting of details is not supported for %T", sb)
|
||||
}
|
||||
details := provideFileStatusDetails(status, newStatus)
|
||||
return detailsSetter.SetDetails(nil, details, true)
|
||||
st := sb.NewState()
|
||||
st.SetDetailAndBundledRelation(bundle.RelationKeyFileBackupStatus, pbtypes.Int64(newStatus))
|
||||
return sb.Apply(st)
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("get object: %w", err)
|
||||
|
@ -56,79 +42,5 @@ func (s *service) indexFileSyncStatus(fileObjectId string, status filesyncstatus
|
|||
if err != nil {
|
||||
return fmt.Errorf("update tree: %w", err)
|
||||
}
|
||||
s.sendSpaceStatusUpdate(status, spaceId, bytesLeftPercentage)
|
||||
return nil
|
||||
}
|
||||
|
||||
func provideFileStatusDetails(status filesyncstatus.Status, newStatus int64) []*model.Detail {
|
||||
syncStatus, syncError := getFileObjectStatus(status)
|
||||
details := make([]*model.Detail, 0, 4)
|
||||
details = append(details, &model.Detail{
|
||||
Key: bundle.RelationKeySyncStatus.String(),
|
||||
Value: pbtypes.Int64(int64(syncStatus)),
|
||||
})
|
||||
details = append(details, &model.Detail{
|
||||
Key: bundle.RelationKeySyncError.String(),
|
||||
Value: pbtypes.Int64(int64(syncError)),
|
||||
})
|
||||
details = append(details, &model.Detail{
|
||||
Key: bundle.RelationKeySyncDate.String(),
|
||||
Value: pbtypes.Int64(time.Now().Unix()),
|
||||
})
|
||||
details = append(details, &model.Detail{
|
||||
Key: bundle.RelationKeyFileBackupStatus.String(),
|
||||
Value: pbtypes.Int64(newStatus),
|
||||
})
|
||||
return details
|
||||
}
|
||||
|
||||
func (s *service) sendSpaceStatusUpdate(status filesyncstatus.Status, spaceId string, bytesLeftPercentage float64) {
|
||||
spaceStatus, spaceError := getSyncStatus(status, bytesLeftPercentage)
|
||||
syncStatus := domain.MakeSyncStatus(spaceId, spaceStatus, spaceError, domain.Files)
|
||||
s.spaceSyncStatus.SendUpdate(syncStatus)
|
||||
}
|
||||
|
||||
func getFileObjectStatus(status filesyncstatus.Status) (domain.ObjectSyncStatus, domain.SyncError) {
|
||||
var (
|
||||
objectSyncStatus domain.ObjectSyncStatus
|
||||
objectError domain.SyncError
|
||||
)
|
||||
switch status {
|
||||
case filesyncstatus.Synced:
|
||||
objectSyncStatus = domain.ObjectSynced
|
||||
case filesyncstatus.Syncing:
|
||||
objectSyncStatus = domain.ObjectSyncing
|
||||
case filesyncstatus.Queued:
|
||||
objectSyncStatus = domain.ObjectQueued
|
||||
case filesyncstatus.Limited:
|
||||
objectError = domain.Oversized
|
||||
objectSyncStatus = domain.ObjectError
|
||||
case filesyncstatus.Unknown:
|
||||
objectSyncStatus = domain.ObjectError
|
||||
objectError = domain.NetworkError
|
||||
}
|
||||
return objectSyncStatus, objectError
|
||||
}
|
||||
|
||||
func getSyncStatus(status filesyncstatus.Status, bytesLeftPercentage float64) (domain.SpaceSyncStatus, domain.SyncError) {
|
||||
var (
|
||||
spaceStatus domain.SpaceSyncStatus
|
||||
spaceError domain.SyncError
|
||||
)
|
||||
switch status {
|
||||
case filesyncstatus.Synced:
|
||||
spaceStatus = domain.Synced
|
||||
case filesyncstatus.Syncing, filesyncstatus.Queued:
|
||||
spaceStatus = domain.Syncing
|
||||
case filesyncstatus.Limited:
|
||||
spaceStatus = domain.Synced
|
||||
if bytesLeftPercentage <= limitReachErrorPercentage {
|
||||
spaceStatus = domain.Error
|
||||
spaceError = domain.StorageLimitExceed
|
||||
}
|
||||
case filesyncstatus.Unknown:
|
||||
spaceStatus = domain.Error
|
||||
spaceError = domain.NetworkError
|
||||
}
|
||||
return spaceStatus, spaceError
|
||||
}
|
||||
|
|
|
@ -1,79 +0,0 @@
|
|||
package syncstatus
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/anyproto/anytype-heart/core/domain"
|
||||
"github.com/anyproto/anytype-heart/core/syncstatus/filesyncstatus"
|
||||
"github.com/anyproto/anytype-heart/core/syncstatus/spacesyncstatus/mock_spacesyncstatus"
|
||||
)
|
||||
|
||||
func Test_sendSpaceStatusUpdate(t *testing.T) {
|
||||
t.Run("file limited", func(t *testing.T) {
|
||||
// given
|
||||
updater := mock_spacesyncstatus.NewMockUpdater(t)
|
||||
s := &service{
|
||||
spaceSyncStatus: updater,
|
||||
}
|
||||
|
||||
// when
|
||||
updater.EXPECT().SendUpdate(domain.MakeSyncStatus("spaceId", domain.Error, domain.StorageLimitExceed, domain.Files)).Return()
|
||||
s.sendSpaceStatusUpdate(filesyncstatus.Limited, "spaceId", 0)
|
||||
})
|
||||
t.Run("file limited, but over 1% of storage is available", func(t *testing.T) {
|
||||
// given
|
||||
updater := mock_spacesyncstatus.NewMockUpdater(t)
|
||||
s := &service{
|
||||
spaceSyncStatus: updater,
|
||||
}
|
||||
|
||||
// when
|
||||
updater.EXPECT().SendUpdate(domain.MakeSyncStatus("spaceId", domain.Synced, domain.Null, domain.Files)).Return()
|
||||
s.sendSpaceStatusUpdate(filesyncstatus.Limited, "spaceId", 0.9)
|
||||
})
|
||||
t.Run("file synced", func(t *testing.T) {
|
||||
// given
|
||||
updater := mock_spacesyncstatus.NewMockUpdater(t)
|
||||
s := &service{
|
||||
spaceSyncStatus: updater,
|
||||
}
|
||||
|
||||
// when
|
||||
updater.EXPECT().SendUpdate(domain.MakeSyncStatus("spaceId", domain.Synced, domain.Null, domain.Files)).Return()
|
||||
s.sendSpaceStatusUpdate(filesyncstatus.Synced, "spaceId", 0)
|
||||
})
|
||||
t.Run("file queued", func(t *testing.T) {
|
||||
// given
|
||||
updater := mock_spacesyncstatus.NewMockUpdater(t)
|
||||
s := &service{
|
||||
spaceSyncStatus: updater,
|
||||
}
|
||||
|
||||
// when
|
||||
updater.EXPECT().SendUpdate(domain.MakeSyncStatus("spaceId", domain.Syncing, domain.Null, domain.Files)).Return()
|
||||
s.sendSpaceStatusUpdate(filesyncstatus.Queued, "spaceId", 0)
|
||||
})
|
||||
t.Run("file syncing", func(t *testing.T) {
|
||||
// given
|
||||
updater := mock_spacesyncstatus.NewMockUpdater(t)
|
||||
s := &service{
|
||||
spaceSyncStatus: updater,
|
||||
}
|
||||
|
||||
// when
|
||||
updater.EXPECT().SendUpdate(domain.MakeSyncStatus("spaceId", domain.Syncing, domain.Null, domain.Files)).Return()
|
||||
s.sendSpaceStatusUpdate(filesyncstatus.Syncing, "spaceId", 0)
|
||||
})
|
||||
t.Run("file unknown status", func(t *testing.T) {
|
||||
// given
|
||||
updater := mock_spacesyncstatus.NewMockUpdater(t)
|
||||
s := &service{
|
||||
spaceSyncStatus: updater,
|
||||
}
|
||||
|
||||
// when
|
||||
updater.EXPECT().SendUpdate(domain.MakeSyncStatus("spaceId", domain.Error, domain.NetworkError, domain.Files)).Return()
|
||||
s.sendSpaceStatusUpdate(filesyncstatus.Unknown, "spaceId", 0)
|
||||
})
|
||||
|
||||
}
|
208
core/syncstatus/nodestatus/mock_nodestatus/mock_NodeStatus.go
Normal file
208
core/syncstatus/nodestatus/mock_nodestatus/mock_NodeStatus.go
Normal file
|
@ -0,0 +1,208 @@
|
|||
// Code generated by mockery. DO NOT EDIT.
|
||||
|
||||
package mock_nodestatus
|
||||
|
||||
import (
|
||||
app "github.com/anyproto/any-sync/app"
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
|
||||
nodestatus "github.com/anyproto/anytype-heart/core/syncstatus/nodestatus"
|
||||
)
|
||||
|
||||
// MockNodeStatus is an autogenerated mock type for the NodeStatus type
|
||||
type MockNodeStatus struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
type MockNodeStatus_Expecter struct {
|
||||
mock *mock.Mock
|
||||
}
|
||||
|
||||
func (_m *MockNodeStatus) EXPECT() *MockNodeStatus_Expecter {
|
||||
return &MockNodeStatus_Expecter{mock: &_m.Mock}
|
||||
}
|
||||
|
||||
// GetNodeStatus provides a mock function with given fields: spaceId
|
||||
func (_m *MockNodeStatus) GetNodeStatus(spaceId string) nodestatus.ConnectionStatus {
|
||||
ret := _m.Called(spaceId)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for GetNodeStatus")
|
||||
}
|
||||
|
||||
var r0 nodestatus.ConnectionStatus
|
||||
if rf, ok := ret.Get(0).(func(string) nodestatus.ConnectionStatus); ok {
|
||||
r0 = rf(spaceId)
|
||||
} else {
|
||||
r0 = ret.Get(0).(nodestatus.ConnectionStatus)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// MockNodeStatus_GetNodeStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetNodeStatus'
|
||||
type MockNodeStatus_GetNodeStatus_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// GetNodeStatus is a helper method to define mock.On call
|
||||
// - spaceId string
|
||||
func (_e *MockNodeStatus_Expecter) GetNodeStatus(spaceId interface{}) *MockNodeStatus_GetNodeStatus_Call {
|
||||
return &MockNodeStatus_GetNodeStatus_Call{Call: _e.mock.On("GetNodeStatus", spaceId)}
|
||||
}
|
||||
|
||||
func (_c *MockNodeStatus_GetNodeStatus_Call) Run(run func(spaceId string)) *MockNodeStatus_GetNodeStatus_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(string))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockNodeStatus_GetNodeStatus_Call) Return(_a0 nodestatus.ConnectionStatus) *MockNodeStatus_GetNodeStatus_Call {
|
||||
_c.Call.Return(_a0)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockNodeStatus_GetNodeStatus_Call) RunAndReturn(run func(string) nodestatus.ConnectionStatus) *MockNodeStatus_GetNodeStatus_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// Init provides a mock function with given fields: a
|
||||
func (_m *MockNodeStatus) Init(a *app.App) error {
|
||||
ret := _m.Called(a)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for Init")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(*app.App) error); ok {
|
||||
r0 = rf(a)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// MockNodeStatus_Init_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Init'
|
||||
type MockNodeStatus_Init_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// Init is a helper method to define mock.On call
|
||||
// - a *app.App
|
||||
func (_e *MockNodeStatus_Expecter) Init(a interface{}) *MockNodeStatus_Init_Call {
|
||||
return &MockNodeStatus_Init_Call{Call: _e.mock.On("Init", a)}
|
||||
}
|
||||
|
||||
func (_c *MockNodeStatus_Init_Call) Run(run func(a *app.App)) *MockNodeStatus_Init_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(*app.App))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockNodeStatus_Init_Call) Return(err error) *MockNodeStatus_Init_Call {
|
||||
_c.Call.Return(err)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockNodeStatus_Init_Call) RunAndReturn(run func(*app.App) error) *MockNodeStatus_Init_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// Name provides a mock function with given fields:
|
||||
func (_m *MockNodeStatus) Name() string {
|
||||
ret := _m.Called()
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for Name")
|
||||
}
|
||||
|
||||
var r0 string
|
||||
if rf, ok := ret.Get(0).(func() string); ok {
|
||||
r0 = rf()
|
||||
} else {
|
||||
r0 = ret.Get(0).(string)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// MockNodeStatus_Name_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Name'
|
||||
type MockNodeStatus_Name_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// Name is a helper method to define mock.On call
|
||||
func (_e *MockNodeStatus_Expecter) Name() *MockNodeStatus_Name_Call {
|
||||
return &MockNodeStatus_Name_Call{Call: _e.mock.On("Name")}
|
||||
}
|
||||
|
||||
func (_c *MockNodeStatus_Name_Call) Run(run func()) *MockNodeStatus_Name_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run()
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockNodeStatus_Name_Call) Return(name string) *MockNodeStatus_Name_Call {
|
||||
_c.Call.Return(name)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockNodeStatus_Name_Call) RunAndReturn(run func() string) *MockNodeStatus_Name_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNodesStatus provides a mock function with given fields: spaceId, status
|
||||
func (_m *MockNodeStatus) SetNodesStatus(spaceId string, status nodestatus.ConnectionStatus) {
|
||||
_m.Called(spaceId, status)
|
||||
}
|
||||
|
||||
// MockNodeStatus_SetNodesStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetNodesStatus'
|
||||
type MockNodeStatus_SetNodesStatus_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// SetNodesStatus is a helper method to define mock.On call
|
||||
// - spaceId string
|
||||
// - status nodestatus.ConnectionStatus
|
||||
func (_e *MockNodeStatus_Expecter) SetNodesStatus(spaceId interface{}, status interface{}) *MockNodeStatus_SetNodesStatus_Call {
|
||||
return &MockNodeStatus_SetNodesStatus_Call{Call: _e.mock.On("SetNodesStatus", spaceId, status)}
|
||||
}
|
||||
|
||||
func (_c *MockNodeStatus_SetNodesStatus_Call) Run(run func(spaceId string, status nodestatus.ConnectionStatus)) *MockNodeStatus_SetNodesStatus_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(string), args[1].(nodestatus.ConnectionStatus))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockNodeStatus_SetNodesStatus_Call) Return() *MockNodeStatus_SetNodesStatus_Call {
|
||||
_c.Call.Return()
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockNodeStatus_SetNodesStatus_Call) RunAndReturn(run func(string, nodestatus.ConnectionStatus)) *MockNodeStatus_SetNodesStatus_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// NewMockNodeStatus creates a new instance of MockNodeStatus. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
// The first argument is typically a *testing.T value.
|
||||
func NewMockNodeStatus(t interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}) *MockNodeStatus {
|
||||
mock := &MockNodeStatus{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
|
|
@ -1,19 +1,16 @@
|
|||
package nodestatus
|
||||
|
||||
import (
|
||||
"slices"
|
||||
"sync"
|
||||
|
||||
"github.com/anyproto/any-sync/app"
|
||||
"github.com/anyproto/any-sync/nodeconf"
|
||||
)
|
||||
|
||||
const CName = "core.syncstatus.nodestatus"
|
||||
|
||||
type nodeStatus struct {
|
||||
sync.Mutex
|
||||
configuration nodeconf.NodeConf
|
||||
nodeStatus map[string]ConnectionStatus
|
||||
nodeStatus map[string]ConnectionStatus
|
||||
}
|
||||
|
||||
type ConnectionStatus int
|
||||
|
@ -26,16 +23,15 @@ const (
|
|||
|
||||
type NodeStatus interface {
|
||||
app.Component
|
||||
SetNodesStatus(spaceId string, senderId string, status ConnectionStatus)
|
||||
SetNodesStatus(spaceId string, status ConnectionStatus)
|
||||
GetNodeStatus(spaceId string) ConnectionStatus
|
||||
}
|
||||
|
||||
func NewNodeStatus() NodeStatus {
|
||||
return &nodeStatus{nodeStatus: make(map[string]ConnectionStatus, 0)}
|
||||
return &nodeStatus{nodeStatus: make(map[string]ConnectionStatus)}
|
||||
}
|
||||
|
||||
func (n *nodeStatus) Init(a *app.App) (err error) {
|
||||
n.configuration = app.MustComponent[nodeconf.NodeConf](a)
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -49,17 +45,8 @@ func (n *nodeStatus) GetNodeStatus(spaceId string) ConnectionStatus {
|
|||
return n.nodeStatus[spaceId]
|
||||
}
|
||||
|
||||
func (n *nodeStatus) SetNodesStatus(spaceId string, senderId string, status ConnectionStatus) {
|
||||
if !n.isSenderResponsible(senderId, spaceId) {
|
||||
return
|
||||
}
|
||||
|
||||
func (n *nodeStatus) SetNodesStatus(spaceId string, status ConnectionStatus) {
|
||||
n.Lock()
|
||||
defer n.Unlock()
|
||||
|
||||
n.nodeStatus[spaceId] = status
|
||||
}
|
||||
|
||||
func (n *nodeStatus) isSenderResponsible(senderId string, spaceId string) bool {
|
||||
return slices.Contains(n.configuration.NodeIds(spaceId), senderId)
|
||||
}
|
||||
|
|
|
@ -3,79 +3,13 @@ package nodestatus
|
|||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/anyproto/any-sync/app"
|
||||
"github.com/anyproto/any-sync/nodeconf/mock_nodeconf"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"go.uber.org/mock/gomock"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
type fixture struct {
|
||||
*nodeStatus
|
||||
nodeConf *mock_nodeconf.MockService
|
||||
}
|
||||
|
||||
func TestNodeStatus_SetNodesStatus(t *testing.T) {
|
||||
t.Run("peer is responsible", func(t *testing.T) {
|
||||
// given
|
||||
f := newFixture(t)
|
||||
f.nodeConf.EXPECT().NodeIds("spaceId").Return([]string{"peerId"})
|
||||
|
||||
// when
|
||||
f.SetNodesStatus("spaceId", "peerId", Online)
|
||||
|
||||
// then
|
||||
assert.Equal(t, Online, f.nodeStatus.nodeStatus["spaceId"])
|
||||
})
|
||||
t.Run("peer is not responsible", func(t *testing.T) {
|
||||
// given
|
||||
f := newFixture(t)
|
||||
f.nodeConf.EXPECT().NodeIds("spaceId").Return([]string{"peerId2"})
|
||||
|
||||
// when
|
||||
f.SetNodesStatus("spaceId", "peerId", ConnectionError)
|
||||
|
||||
// then
|
||||
assert.NotEqual(t, ConnectionError, f.nodeStatus.nodeStatus["spaceId"])
|
||||
})
|
||||
}
|
||||
|
||||
func TestNodeStatus_GetNodeStatus(t *testing.T) {
|
||||
t.Run("get default status", func(t *testing.T) {
|
||||
// given
|
||||
f := newFixture(t)
|
||||
|
||||
// when
|
||||
status := f.GetNodeStatus("")
|
||||
|
||||
// then
|
||||
assert.Equal(t, Online, status)
|
||||
})
|
||||
t.Run("get updated status", func(t *testing.T) {
|
||||
// given
|
||||
f := newFixture(t)
|
||||
f.nodeConf.EXPECT().NodeIds("spaceId").Return([]string{"peerId"})
|
||||
|
||||
// when
|
||||
f.SetNodesStatus("spaceId", "peerId", ConnectionError)
|
||||
status := f.GetNodeStatus("spaceId")
|
||||
|
||||
// then
|
||||
assert.Equal(t, ConnectionError, status)
|
||||
})
|
||||
}
|
||||
|
||||
func newFixture(t *testing.T) *fixture {
|
||||
ctrl := gomock.NewController(t)
|
||||
nodeConf := mock_nodeconf.NewMockService(ctrl)
|
||||
nodeStatus := &nodeStatus{
|
||||
nodeStatus: map[string]ConnectionStatus{},
|
||||
}
|
||||
a := &app.App{}
|
||||
a.Register(nodeConf)
|
||||
err := nodeStatus.Init(a)
|
||||
assert.Nil(t, err)
|
||||
return &fixture{
|
||||
nodeStatus: nodeStatus,
|
||||
nodeConf: nodeConf,
|
||||
}
|
||||
func TestNodeStatus(t *testing.T) {
|
||||
st := NewNodeStatus()
|
||||
st.SetNodesStatus("spaceId", Online)
|
||||
require.Equal(t, Online, st.GetNodeStatus("spaceId"))
|
||||
st.SetNodesStatus("spaceId", ConnectionError)
|
||||
require.Equal(t, ConnectionError, st.GetNodeStatus("spaceId"))
|
||||
}
|
||||
|
|
|
@ -112,9 +112,9 @@ func (_c *MockUpdater_Name_Call) RunAndReturn(run func() string) *MockUpdater_Na
|
|||
return _c
|
||||
}
|
||||
|
||||
// UpdateDetails provides a mock function with given fields: objectId, status, syncError, spaceId
|
||||
func (_m *MockUpdater) UpdateDetails(objectId []string, status domain.ObjectSyncStatus, syncError domain.SyncError, spaceId string) {
|
||||
_m.Called(objectId, status, syncError, spaceId)
|
||||
// UpdateDetails provides a mock function with given fields: objectId, status, spaceId
|
||||
func (_m *MockUpdater) UpdateDetails(objectId string, status domain.ObjectSyncStatus, spaceId string) {
|
||||
_m.Called(objectId, status, spaceId)
|
||||
}
|
||||
|
||||
// MockUpdater_UpdateDetails_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateDetails'
|
||||
|
@ -123,17 +123,16 @@ type MockUpdater_UpdateDetails_Call struct {
|
|||
}
|
||||
|
||||
// UpdateDetails is a helper method to define mock.On call
|
||||
// - objectId []string
|
||||
// - objectId string
|
||||
// - status domain.ObjectSyncStatus
|
||||
// - syncError domain.SyncError
|
||||
// - spaceId string
|
||||
func (_e *MockUpdater_Expecter) UpdateDetails(objectId interface{}, status interface{}, syncError interface{}, spaceId interface{}) *MockUpdater_UpdateDetails_Call {
|
||||
return &MockUpdater_UpdateDetails_Call{Call: _e.mock.On("UpdateDetails", objectId, status, syncError, spaceId)}
|
||||
func (_e *MockUpdater_Expecter) UpdateDetails(objectId interface{}, status interface{}, spaceId interface{}) *MockUpdater_UpdateDetails_Call {
|
||||
return &MockUpdater_UpdateDetails_Call{Call: _e.mock.On("UpdateDetails", objectId, status, spaceId)}
|
||||
}
|
||||
|
||||
func (_c *MockUpdater_UpdateDetails_Call) Run(run func(objectId []string, status domain.ObjectSyncStatus, syncError domain.SyncError, spaceId string)) *MockUpdater_UpdateDetails_Call {
|
||||
func (_c *MockUpdater_UpdateDetails_Call) Run(run func(objectId string, status domain.ObjectSyncStatus, spaceId string)) *MockUpdater_UpdateDetails_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].([]string), args[1].(domain.ObjectSyncStatus), args[2].(domain.SyncError), args[3].(string))
|
||||
run(args[0].(string), args[1].(domain.ObjectSyncStatus), args[2].(string))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
@ -143,7 +142,7 @@ func (_c *MockUpdater_UpdateDetails_Call) Return() *MockUpdater_UpdateDetails_Ca
|
|||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockUpdater_UpdateDetails_Call) RunAndReturn(run func([]string, domain.ObjectSyncStatus, domain.SyncError, string)) *MockUpdater_UpdateDetails_Call {
|
||||
func (_c *MockUpdater_UpdateDetails_Call) RunAndReturn(run func(string, domain.ObjectSyncStatus, string)) *MockUpdater_UpdateDetails_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
|
|
@ -2,29 +2,28 @@ package objectsyncstatus
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/anyproto/any-sync/app"
|
||||
"github.com/anyproto/any-sync/app/logger"
|
||||
"github.com/anyproto/any-sync/commonspace/object/tree/treestorage"
|
||||
"github.com/anyproto/any-sync/commonspace/spacestate"
|
||||
"github.com/anyproto/any-sync/commonspace/syncstatus"
|
||||
|
||||
"github.com/anyproto/any-sync/commonspace/object/tree/treestorage"
|
||||
"github.com/anyproto/any-sync/commonspace/spacestorage"
|
||||
"github.com/anyproto/any-sync/nodeconf"
|
||||
"github.com/anyproto/any-sync/util/periodicsync"
|
||||
"github.com/anyproto/any-sync/util/slice"
|
||||
"golang.org/x/exp/slices"
|
||||
|
||||
"github.com/anyproto/anytype-heart/core/anytype/config"
|
||||
"github.com/anyproto/anytype-heart/core/domain"
|
||||
"github.com/anyproto/anytype-heart/core/syncstatus/nodestatus"
|
||||
"github.com/anyproto/anytype-heart/util/slice"
|
||||
)
|
||||
|
||||
const (
|
||||
syncUpdateInterval = 5
|
||||
syncUpdateInterval = 3
|
||||
syncTimeout = time.Second
|
||||
)
|
||||
|
||||
|
@ -43,9 +42,16 @@ const (
|
|||
StatusNotSynced
|
||||
)
|
||||
|
||||
type treeHeadsEntry struct {
|
||||
heads []string
|
||||
syncStatus SyncStatus
|
||||
}
|
||||
|
||||
type StatusUpdater interface {
|
||||
HeadsChange(treeId string, heads []string)
|
||||
HeadsReceive(senderId, treeId string, heads []string)
|
||||
HeadsApply(senderId, treeId string, heads []string, allAdded bool)
|
||||
ObjectReceive(senderId, treeId string, heads []string)
|
||||
RemoveAllExcept(senderId string, differentRemoteIds []string)
|
||||
}
|
||||
|
||||
|
@ -61,12 +67,6 @@ type StatusService interface {
|
|||
StatusWatcher
|
||||
}
|
||||
|
||||
type treeHeadsEntry struct {
|
||||
heads []string
|
||||
stateCounter uint64
|
||||
syncStatus SyncStatus
|
||||
}
|
||||
|
||||
type treeStatus struct {
|
||||
treeId string
|
||||
status SyncStatus
|
||||
|
@ -74,22 +74,20 @@ type treeStatus struct {
|
|||
|
||||
type Updater interface {
|
||||
app.Component
|
||||
UpdateDetails(objectId []string, status domain.ObjectSyncStatus, syncError domain.SyncError, spaceId string)
|
||||
UpdateDetails(objectId string, status domain.ObjectSyncStatus, spaceId string)
|
||||
}
|
||||
|
||||
type syncStatusService struct {
|
||||
sync.Mutex
|
||||
configuration nodeconf.NodeConf
|
||||
periodicSync periodicsync.PeriodicSync
|
||||
updateReceiver UpdateReceiver
|
||||
storage spacestorage.SpaceStorage
|
||||
|
||||
spaceId string
|
||||
treeHeads map[string]treeHeadsEntry
|
||||
watchers map[string]struct{}
|
||||
stateCounter uint64
|
||||
|
||||
treeStatusBuf []treeStatus
|
||||
spaceId string
|
||||
synced []string
|
||||
tempSynced map[string]struct{}
|
||||
treeHeads map[string]treeHeadsEntry
|
||||
watchers map[string]struct{}
|
||||
|
||||
updateIntervalSecs int
|
||||
updateTimeout time.Duration
|
||||
|
@ -102,8 +100,9 @@ type syncStatusService struct {
|
|||
|
||||
func NewSyncStatusService() StatusService {
|
||||
return &syncStatusService{
|
||||
treeHeads: map[string]treeHeadsEntry{},
|
||||
watchers: map[string]struct{}{},
|
||||
tempSynced: map[string]struct{}{},
|
||||
treeHeads: map[string]treeHeadsEntry{},
|
||||
watchers: map[string]struct{}{},
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -112,7 +111,6 @@ func (s *syncStatusService) Init(a *app.App) (err error) {
|
|||
s.updateIntervalSecs = syncUpdateInterval
|
||||
s.updateTimeout = syncTimeout
|
||||
s.spaceId = sharedState.SpaceId
|
||||
s.configuration = app.MustComponent[nodeconf.NodeConf](a)
|
||||
s.storage = app.MustComponent[spacestorage.SpaceStorage](a)
|
||||
s.periodicSync = periodicsync.NewPeriodicSync(
|
||||
s.updateIntervalSecs,
|
||||
|
@ -143,85 +141,101 @@ func (s *syncStatusService) Run(ctx context.Context) error {
|
|||
}
|
||||
|
||||
func (s *syncStatusService) HeadsChange(treeId string, heads []string) {
|
||||
s.Lock()
|
||||
s.addTreeHead(treeId, heads, StatusNotSynced)
|
||||
s.Unlock()
|
||||
s.updateDetails(treeId, domain.ObjectSyncStatusSyncing)
|
||||
}
|
||||
|
||||
func (s *syncStatusService) ObjectReceive(senderId, treeId string, heads []string) {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
var headsCopy []string
|
||||
headsCopy = append(headsCopy, heads...)
|
||||
|
||||
s.treeHeads[treeId] = treeHeadsEntry{
|
||||
heads: headsCopy,
|
||||
stateCounter: s.stateCounter,
|
||||
syncStatus: StatusNotSynced,
|
||||
if len(heads) == 0 || !s.isSenderResponsible(senderId) {
|
||||
s.tempSynced[treeId] = struct{}{}
|
||||
return
|
||||
}
|
||||
s.synced = append(s.synced, treeId)
|
||||
}
|
||||
|
||||
func (s *syncStatusService) HeadsApply(senderId, treeId string, heads []string, allAdded bool) {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
if len(heads) == 0 || !s.isSenderResponsible(senderId) {
|
||||
if allAdded {
|
||||
s.tempSynced[treeId] = struct{}{}
|
||||
}
|
||||
return
|
||||
}
|
||||
if !allAdded {
|
||||
return
|
||||
}
|
||||
s.synced = append(s.synced, treeId)
|
||||
if curTreeHeads, ok := s.treeHeads[treeId]; ok {
|
||||
// checking if we received the head that we are interested in
|
||||
for _, head := range heads {
|
||||
if idx, found := slices.BinarySearch(curTreeHeads.heads, head); found {
|
||||
curTreeHeads.heads = slice.RemoveIndex(curTreeHeads.heads, idx)
|
||||
}
|
||||
}
|
||||
if len(curTreeHeads.heads) == 0 {
|
||||
curTreeHeads.syncStatus = StatusSynced
|
||||
}
|
||||
s.treeHeads[treeId] = curTreeHeads
|
||||
}
|
||||
s.stateCounter++
|
||||
s.updateDetails(treeId, domain.ObjectSyncing)
|
||||
}
|
||||
|
||||
func (s *syncStatusService) update(ctx context.Context) (err error) {
|
||||
s.treeStatusBuf = s.treeStatusBuf[:0]
|
||||
|
||||
s.Lock()
|
||||
var (
|
||||
updateDetailsStatuses = make([]treeStatus, 0, len(s.synced))
|
||||
updateThreadStatuses = make([]treeStatus, 0, len(s.watchers))
|
||||
)
|
||||
if s.updateReceiver == nil {
|
||||
s.Unlock()
|
||||
return
|
||||
}
|
||||
for _, treeId := range s.synced {
|
||||
updateDetailsStatuses = append(updateDetailsStatuses, treeStatus{treeId, StatusSynced})
|
||||
}
|
||||
for treeId := range s.watchers {
|
||||
// that means that we haven't yet got the status update
|
||||
treeHeads, exists := s.treeHeads[treeId]
|
||||
if !exists {
|
||||
err = fmt.Errorf("treeHeads should always exist for watchers")
|
||||
s.Unlock()
|
||||
return
|
||||
continue
|
||||
}
|
||||
s.treeStatusBuf = append(s.treeStatusBuf, treeStatus{treeId, treeHeads.syncStatus})
|
||||
updateThreadStatuses = append(updateThreadStatuses, treeStatus{treeId, treeHeads.syncStatus})
|
||||
}
|
||||
s.synced = s.synced[:0]
|
||||
s.Unlock()
|
||||
s.updateReceiver.UpdateNodeStatus()
|
||||
for _, entry := range s.treeStatusBuf {
|
||||
for _, entry := range updateDetailsStatuses {
|
||||
s.updateDetails(entry.treeId, mapStatus(entry.status))
|
||||
}
|
||||
for _, entry := range updateThreadStatuses {
|
||||
err = s.updateReceiver.UpdateTree(ctx, entry.treeId, entry.status)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
s.updateDetails(entry.treeId, mapStatus(entry.status))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func mapStatus(status SyncStatus) domain.ObjectSyncStatus {
|
||||
if status == StatusSynced {
|
||||
return domain.ObjectSynced
|
||||
return domain.ObjectSyncStatusSynced
|
||||
}
|
||||
return domain.ObjectSyncing
|
||||
return domain.ObjectSyncStatusSyncing
|
||||
}
|
||||
|
||||
func (s *syncStatusService) HeadsReceive(senderId, treeId string, heads []string) {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
}
|
||||
|
||||
curTreeHeads, ok := s.treeHeads[treeId]
|
||||
if !ok || curTreeHeads.syncStatus == StatusSynced {
|
||||
return
|
||||
func (s *syncStatusService) addTreeHead(treeId string, heads []string, status SyncStatus) {
|
||||
headsCopy := slice.Copy(heads)
|
||||
slices.Sort(headsCopy)
|
||||
s.treeHeads[treeId] = treeHeadsEntry{
|
||||
heads: headsCopy,
|
||||
syncStatus: status,
|
||||
}
|
||||
|
||||
// checking if other node is responsible
|
||||
if len(heads) == 0 || !s.isSenderResponsible(senderId) {
|
||||
return
|
||||
}
|
||||
|
||||
// checking if we received the head that we are interested in
|
||||
for _, head := range heads {
|
||||
if idx, found := slices.BinarySearch(curTreeHeads.heads, head); found {
|
||||
curTreeHeads.heads[idx] = ""
|
||||
}
|
||||
}
|
||||
curTreeHeads.heads = slice.DiscardFromSlice(curTreeHeads.heads, func(h string) bool {
|
||||
return h == ""
|
||||
})
|
||||
if len(curTreeHeads.heads) == 0 {
|
||||
curTreeHeads.syncStatus = StatusSynced
|
||||
}
|
||||
s.treeHeads[treeId] = curTreeHeads
|
||||
}
|
||||
|
||||
func (s *syncStatusService) Watch(treeId string) (err error) {
|
||||
|
@ -241,13 +255,7 @@ func (s *syncStatusService) Watch(treeId string) (err error) {
|
|||
if err != nil {
|
||||
return
|
||||
}
|
||||
slices.Sort(heads)
|
||||
s.stateCounter++
|
||||
s.treeHeads[treeId] = treeHeadsEntry{
|
||||
heads: heads,
|
||||
stateCounter: s.stateCounter,
|
||||
syncStatus: StatusUnknown,
|
||||
}
|
||||
s.addTreeHead(treeId, heads, StatusUnknown)
|
||||
}
|
||||
|
||||
s.watchers[treeId] = struct{}{}
|
||||
|
@ -271,14 +279,17 @@ func (s *syncStatusService) RemoveAllExcept(senderId string, differentRemoteIds
|
|||
|
||||
slices.Sort(differentRemoteIds)
|
||||
for treeId, entry := range s.treeHeads {
|
||||
// if the current update is outdated
|
||||
if entry.stateCounter > s.stateCounter {
|
||||
continue
|
||||
}
|
||||
// if we didn't find our treeId in heads ids which are different from us and node
|
||||
if _, found := slices.BinarySearch(differentRemoteIds, treeId); !found {
|
||||
entry.syncStatus = StatusSynced
|
||||
s.treeHeads[treeId] = entry
|
||||
if entry.syncStatus != StatusSynced {
|
||||
entry.syncStatus = StatusSynced
|
||||
s.treeHeads[treeId] = entry
|
||||
}
|
||||
}
|
||||
}
|
||||
for treeId := range s.tempSynced {
|
||||
delete(s.tempSynced, treeId)
|
||||
if _, found := slices.BinarySearch(differentRemoteIds, treeId); !found {
|
||||
s.synced = append(s.synced, treeId)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -289,18 +300,9 @@ func (s *syncStatusService) Close(ctx context.Context) error {
|
|||
}
|
||||
|
||||
func (s *syncStatusService) isSenderResponsible(senderId string) bool {
|
||||
return slices.Contains(s.configuration.NodeIds(s.spaceId), senderId)
|
||||
return slices.Contains(s.nodeConfService.NodeIds(s.spaceId), senderId)
|
||||
}
|
||||
|
||||
func (s *syncStatusService) updateDetails(treeId string, status domain.ObjectSyncStatus) {
|
||||
var syncErr domain.SyncError
|
||||
if s.nodeStatus.GetNodeStatus(s.spaceId) != nodestatus.Online || s.config.IsLocalOnlyMode() {
|
||||
syncErr = domain.NetworkError
|
||||
status = domain.ObjectError
|
||||
}
|
||||
if s.nodeConfService.NetworkCompatibilityStatus() == nodeconf.NetworkCompatibilityStatusIncompatible {
|
||||
syncErr = domain.IncompatibleVersion
|
||||
status = domain.ObjectError
|
||||
}
|
||||
s.syncDetailsUpdater.UpdateDetails([]string{treeId}, status, syncErr, s.spaceId)
|
||||
s.syncDetailsUpdater.UpdateDetails(treeId, status, s.spaceId)
|
||||
}
|
||||
|
|
|
@ -5,257 +5,141 @@ import (
|
|||
"testing"
|
||||
|
||||
"github.com/anyproto/any-sync/app"
|
||||
"github.com/anyproto/any-sync/commonspace/object/accountdata"
|
||||
"github.com/anyproto/any-sync/commonspace/object/acl/list"
|
||||
"github.com/anyproto/any-sync/commonspace/object/tree/objecttree"
|
||||
"github.com/anyproto/any-sync/commonspace/object/tree/treechangeproto"
|
||||
"github.com/anyproto/any-sync/commonspace/object/tree/treestorage"
|
||||
"github.com/anyproto/any-sync/commonspace/spacestate"
|
||||
"github.com/anyproto/any-sync/commonspace/spacestorage/mock_spacestorage"
|
||||
"github.com/anyproto/any-sync/nodeconf"
|
||||
"github.com/anyproto/any-sync/nodeconf/mock_nodeconf"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/mock/gomock"
|
||||
|
||||
"github.com/anyproto/anytype-heart/core/anytype/config"
|
||||
"github.com/anyproto/anytype-heart/core/domain"
|
||||
"github.com/anyproto/anytype-heart/core/syncstatus/nodestatus"
|
||||
"github.com/anyproto/anytype-heart/core/syncstatus/objectsyncstatus/mock_objectsyncstatus"
|
||||
"github.com/anyproto/anytype-heart/pb"
|
||||
"github.com/anyproto/anytype-heart/tests/testutil"
|
||||
)
|
||||
|
||||
func Test_HeadsChange(t *testing.T) {
|
||||
func Test_UseCases(t *testing.T) {
|
||||
t.Run("HeadsChange: new object", func(t *testing.T) {
|
||||
// given
|
||||
s := newFixture(t)
|
||||
s.service.EXPECT().NetworkCompatibilityStatus().Return(nodeconf.NetworkCompatibilityStatusOk)
|
||||
s.detailsUpdater.EXPECT().UpdateDetails([]string{"id"}, domain.ObjectSyncing, domain.Null, "spaceId")
|
||||
s := newFixture(t, "spaceId")
|
||||
s.syncDetailsUpdater.EXPECT().UpdateDetails("id", domain.ObjectSyncStatusSyncing, "spaceId")
|
||||
|
||||
// when
|
||||
s.HeadsChange("id", []string{"head1", "head2"})
|
||||
|
||||
// then
|
||||
assert.NotNil(t, s.treeHeads["id"])
|
||||
assert.Equal(t, []string{"head1", "head2"}, s.treeHeads["id"].heads)
|
||||
})
|
||||
t.Run("local only", func(t *testing.T) {
|
||||
// given
|
||||
s := newFixture(t)
|
||||
s.config.NetworkMode = pb.RpcAccount_LocalOnly
|
||||
s.service.EXPECT().NetworkCompatibilityStatus().Return(nodeconf.NetworkCompatibilityStatusOk)
|
||||
s.detailsUpdater.EXPECT().UpdateDetails([]string{"id"}, domain.ObjectError, domain.NetworkError, "spaceId")
|
||||
t.Run("HeadsChange then HeadsApply: responsible", func(t *testing.T) {
|
||||
s := newFixture(t, "spaceId")
|
||||
s.syncDetailsUpdater.EXPECT().UpdateDetails("id", domain.ObjectSyncStatusSyncing, "spaceId")
|
||||
|
||||
// when
|
||||
s.HeadsChange("id", []string{"head1", "head2"})
|
||||
|
||||
// then
|
||||
assert.NotNil(t, s.treeHeads["id"])
|
||||
assert.Equal(t, []string{"head1", "head2"}, s.treeHeads["id"].heads)
|
||||
})
|
||||
t.Run("HeadsChange: update existing object", func(t *testing.T) {
|
||||
// given
|
||||
s := newFixture(t)
|
||||
s.config.NetworkMode = pb.RpcAccount_DefaultConfig
|
||||
s.detailsUpdater.EXPECT().UpdateDetails([]string{"id"}, domain.ObjectSyncing, domain.Null, "spaceId")
|
||||
s.service.EXPECT().NetworkCompatibilityStatus().Return(nodeconf.NetworkCompatibilityStatusOk).Times(2)
|
||||
|
||||
// when
|
||||
s.HeadsChange("id", []string{"head1", "head2"})
|
||||
s.HeadsChange("id", []string{"head3"})
|
||||
s.nodeConfService.EXPECT().NodeIds("spaceId").Return([]string{"peerId"})
|
||||
|
||||
// then
|
||||
assert.NotNil(t, s.treeHeads["id"])
|
||||
assert.Equal(t, []string{"head3"}, s.treeHeads["id"].heads)
|
||||
})
|
||||
t.Run("HeadsChange: node offline", func(t *testing.T) {
|
||||
// given
|
||||
s := newFixture(t)
|
||||
s.service.EXPECT().NodeIds("spaceId").Return([]string{"peerId"})
|
||||
s.nodeStatus.SetNodesStatus("spaceId", "peerId", nodestatus.ConnectionError)
|
||||
s.detailsUpdater.EXPECT().UpdateDetails([]string{"id"}, domain.ObjectError, domain.NetworkError, "spaceId")
|
||||
s.service.EXPECT().NetworkCompatibilityStatus().Return(nodeconf.NetworkCompatibilityStatusOk).Times(2)
|
||||
s.HeadsApply("peerId", "id", []string{"head1", "head2"}, true)
|
||||
|
||||
// when
|
||||
s.HeadsChange("id", []string{"head1", "head2"})
|
||||
s.HeadsChange("id", []string{"head3"})
|
||||
|
||||
// then
|
||||
assert.NotNil(t, s.treeHeads["id"])
|
||||
assert.Equal(t, []string{"head3"}, s.treeHeads["id"].heads)
|
||||
})
|
||||
t.Run("HeadsChange: network incompatible", func(t *testing.T) {
|
||||
// given
|
||||
s := newFixture(t)
|
||||
s.detailsUpdater.EXPECT().UpdateDetails([]string{"id"}, domain.ObjectError, domain.IncompatibleVersion, "spaceId")
|
||||
s.service.EXPECT().NetworkCompatibilityStatus().Return(nodeconf.NetworkCompatibilityStatusIncompatible).Times(1)
|
||||
|
||||
// when
|
||||
s.HeadsChange("id", []string{"head3"})
|
||||
|
||||
// then
|
||||
assert.NotNil(t, s.treeHeads["id"])
|
||||
assert.Equal(t, []string{"head3"}, s.treeHeads["id"].heads)
|
||||
})
|
||||
}
|
||||
|
||||
func TestSyncStatusService_HeadsReceive(t *testing.T) {
|
||||
t.Run("HeadsReceive: heads not changed ", func(t *testing.T) {
|
||||
// given
|
||||
s := newFixture(t)
|
||||
|
||||
// when
|
||||
s.HeadsReceive("peerId", "id", []string{"head1", "head2"})
|
||||
|
||||
// then
|
||||
_, ok := s.treeHeads["id"]
|
||||
assert.False(t, ok)
|
||||
})
|
||||
t.Run("HeadsReceive: object synced", func(t *testing.T) {
|
||||
// given
|
||||
s := newFixture(t)
|
||||
|
||||
// when
|
||||
s.treeHeads["id"] = treeHeadsEntry{
|
||||
syncStatus: StatusSynced,
|
||||
}
|
||||
s.HeadsReceive("peerId", "id", []string{"head1", "head2"})
|
||||
|
||||
// then
|
||||
assert.NotNil(t, s.treeHeads["id"])
|
||||
assert.Equal(t, StatusSynced, s.treeHeads["id"].syncStatus)
|
||||
assert.Equal(t, s.synced, []string{"id"})
|
||||
})
|
||||
t.Run("HeadsReceive: sender in not responsible", func(t *testing.T) {
|
||||
// given
|
||||
s := newFixture(t)
|
||||
s.service.EXPECT().NodeIds(s.spaceId).Return([]string{"peerId2"})
|
||||
s.detailsUpdater.EXPECT().UpdateDetails([]string{"id"}, domain.ObjectSyncing, domain.Null, "spaceId")
|
||||
s.service.EXPECT().NetworkCompatibilityStatus().Return(nodeconf.NetworkCompatibilityStatusOk)
|
||||
t.Run("HeadsChange then HeadsApply: not responsible", func(t *testing.T) {
|
||||
s := newFixture(t, "spaceId")
|
||||
s.syncDetailsUpdater.EXPECT().UpdateDetails("id", domain.ObjectSyncStatusSyncing, "spaceId")
|
||||
|
||||
// when
|
||||
s.HeadsChange("id", []string{"head1"})
|
||||
s.HeadsReceive("peerId", "id", []string{"head2"})
|
||||
s.HeadsChange("id", []string{"head1", "head2"})
|
||||
|
||||
assert.NotNil(t, s.treeHeads["id"])
|
||||
assert.Equal(t, []string{"head1", "head2"}, s.treeHeads["id"].heads)
|
||||
|
||||
s.nodeConfService.EXPECT().NodeIds("spaceId").Return([]string{"peerId1"})
|
||||
|
||||
s.HeadsApply("peerId", "id", []string{"head1", "head2"}, true)
|
||||
|
||||
// then
|
||||
assert.NotNil(t, s.treeHeads["id"])
|
||||
assert.Equal(t, StatusNotSynced, s.treeHeads["id"].syncStatus)
|
||||
assert.Contains(t, s.tempSynced, "id")
|
||||
assert.Nil(t, s.synced)
|
||||
})
|
||||
t.Run("HeadsReceive: object is synced", func(t *testing.T) {
|
||||
// given
|
||||
s := newFixture(t)
|
||||
s.service.EXPECT().NodeIds(s.spaceId).Return([]string{"peerId"})
|
||||
s.service.EXPECT().NetworkCompatibilityStatus().Return(nodeconf.NetworkCompatibilityStatusOk)
|
||||
t.Run("ObjectReceive: responsible", func(t *testing.T) {
|
||||
s := newFixture(t, "spaceId")
|
||||
s.nodeConfService.EXPECT().NodeIds("spaceId").Return([]string{"peerId"})
|
||||
|
||||
// when
|
||||
s.detailsUpdater.EXPECT().UpdateDetails([]string{"id"}, domain.ObjectSyncing, domain.Null, "spaceId")
|
||||
s.HeadsChange("id", []string{"head1"})
|
||||
s.HeadsReceive("peerId", "id", []string{"head1"})
|
||||
s.ObjectReceive("peerId", "id", []string{"head1", "head2"})
|
||||
|
||||
// then
|
||||
assert.NotNil(t, s.treeHeads["id"])
|
||||
assert.Equal(t, StatusSynced, s.treeHeads["id"].syncStatus)
|
||||
assert.Equal(t, s.synced, []string{"id"})
|
||||
})
|
||||
t.Run("ObjectReceive: not responsible, but then sync with responsible", func(t *testing.T) {
|
||||
s := newFixture(t, "spaceId")
|
||||
s.nodeConfService.EXPECT().NodeIds("spaceId").Return([]string{"peerId1"})
|
||||
|
||||
s.ObjectReceive("peerId", "id", []string{"head1", "head2"})
|
||||
|
||||
require.Contains(t, s.tempSynced, "id")
|
||||
|
||||
s.nodeConfService.EXPECT().NodeIds("spaceId").Return([]string{"peerId1"})
|
||||
|
||||
s.RemoveAllExcept("peerId1", []string{})
|
||||
|
||||
assert.Equal(t, s.synced, []string{"id"})
|
||||
})
|
||||
}
|
||||
|
||||
func TestSyncStatusService_Watch(t *testing.T) {
|
||||
t.Run("Watch: object exist", func(t *testing.T) {
|
||||
// given
|
||||
s := newFixture(t)
|
||||
func TestSyncStatusService_Watch_Unwatch(t *testing.T) {
|
||||
t.Run("watch", func(t *testing.T) {
|
||||
s := newFixture(t, "spaceId")
|
||||
|
||||
// when
|
||||
s.detailsUpdater.EXPECT().UpdateDetails([]string{"id"}, domain.ObjectSyncing, domain.Null, "spaceId")
|
||||
s.service.EXPECT().NetworkCompatibilityStatus().Return(nodeconf.NetworkCompatibilityStatusOk)
|
||||
s.HeadsChange("id", []string{"head1"})
|
||||
s.spaceStorage.EXPECT().TreeStorage("id").Return(treestorage.NewInMemoryTreeStorage(&treechangeproto.RawTreeChangeWithId{Id: "id"}, []string{"head3", "head2", "head1"}, nil))
|
||||
err := s.Watch("id")
|
||||
|
||||
// then
|
||||
assert.Nil(t, err)
|
||||
_, ok := s.watchers["id"]
|
||||
assert.True(t, ok)
|
||||
assert.Contains(t, s.watchers, "id")
|
||||
assert.Equal(t, []string{"head1", "head2", "head3"}, s.treeHeads["id"].heads, "should be sorted")
|
||||
})
|
||||
t.Run("Watch: object not exist", func(t *testing.T) {
|
||||
// given
|
||||
s := newFixture(t)
|
||||
accountKeys, err := accountdata.NewRandom()
|
||||
assert.Nil(t, err)
|
||||
acl, err := list.NewTestDerivedAcl("spaceId", accountKeys)
|
||||
assert.Nil(t, err)
|
||||
t.Run("unwatch", func(t *testing.T) {
|
||||
s := newFixture(t, "spaceId")
|
||||
|
||||
root, err := objecttree.CreateObjectTreeRoot(objecttree.ObjectTreeCreatePayload{
|
||||
PrivKey: accountKeys.SignKey,
|
||||
ChangeType: "changeType",
|
||||
ChangePayload: nil,
|
||||
SpaceId: "spaceId",
|
||||
IsEncrypted: true,
|
||||
}, acl)
|
||||
storage, err := treestorage.NewInMemoryTreeStorage(root, []string{"head1"}, nil)
|
||||
assert.Nil(t, err)
|
||||
|
||||
s.storage.EXPECT().TreeStorage("id").Return(storage, nil)
|
||||
|
||||
// when
|
||||
err = s.Watch("id")
|
||||
|
||||
// then
|
||||
assert.Nil(t, err)
|
||||
_, ok := s.watchers["id"]
|
||||
assert.True(t, ok)
|
||||
assert.NotNil(t, s.treeHeads["id"])
|
||||
assert.Equal(t, StatusUnknown, s.treeHeads["id"].syncStatus)
|
||||
})
|
||||
}
|
||||
|
||||
func TestSyncStatusService_Unwatch(t *testing.T) {
|
||||
t.Run("Unwatch: object exist", func(t *testing.T) {
|
||||
// given
|
||||
s := newFixture(t)
|
||||
|
||||
// when
|
||||
s.detailsUpdater.EXPECT().UpdateDetails([]string{"id"}, domain.ObjectSyncing, domain.Null, "spaceId")
|
||||
s.service.EXPECT().NetworkCompatibilityStatus().Return(nodeconf.NetworkCompatibilityStatusOk)
|
||||
s.HeadsChange("id", []string{"head1"})
|
||||
s.spaceStorage.EXPECT().TreeStorage("id").Return(treestorage.NewInMemoryTreeStorage(&treechangeproto.RawTreeChangeWithId{Id: "id"}, []string{"headId"}, nil))
|
||||
err := s.Watch("id")
|
||||
assert.Nil(t, err)
|
||||
|
||||
s.Unwatch("id")
|
||||
|
||||
// then
|
||||
_, ok := s.watchers["id"]
|
||||
assert.False(t, ok)
|
||||
assert.NotContains(t, s.watchers, "id")
|
||||
assert.Equal(t, []string{"headId"}, s.treeHeads["id"].heads)
|
||||
})
|
||||
}
|
||||
|
||||
func TestSyncStatusService_update(t *testing.T) {
|
||||
t.Run("update: got updates on objects", func(t *testing.T) {
|
||||
// given
|
||||
s := newFixture(t)
|
||||
s := newFixture(t, "spaceId")
|
||||
updateReceiver := NewMockUpdateReceiver(t)
|
||||
updateReceiver.EXPECT().UpdateNodeStatus().Return()
|
||||
updateReceiver.EXPECT().UpdateTree(context.Background(), "id", StatusNotSynced).Return(nil)
|
||||
updateReceiver.EXPECT().UpdateTree(context.Background(), "id", StatusSynced).Return(nil)
|
||||
updateReceiver.EXPECT().UpdateTree(context.Background(), "id2", StatusNotSynced).Return(nil)
|
||||
s.SetUpdateReceiver(updateReceiver)
|
||||
|
||||
// when
|
||||
s.detailsUpdater.EXPECT().UpdateDetails([]string{"id"}, domain.ObjectSyncing, domain.Null, "spaceId")
|
||||
s.service.EXPECT().NetworkCompatibilityStatus().Return(nodeconf.NetworkCompatibilityStatusOk).Times(2)
|
||||
s.HeadsChange("id", []string{"head1"})
|
||||
err := s.Watch("id")
|
||||
assert.Nil(t, err)
|
||||
err = s.update(context.Background())
|
||||
|
||||
// then
|
||||
assert.Nil(t, err)
|
||||
updateReceiver.AssertCalled(t, "UpdateTree", context.Background(), "id", StatusNotSynced)
|
||||
s.syncDetailsUpdater.EXPECT().UpdateDetails("id3", domain.ObjectSyncStatusSynced, "spaceId")
|
||||
s.synced = []string{"id3"}
|
||||
s.tempSynced["id4"] = struct{}{}
|
||||
s.treeHeads["id"] = treeHeadsEntry{syncStatus: StatusSynced, heads: []string{"headId"}}
|
||||
s.treeHeads["id2"] = treeHeadsEntry{syncStatus: StatusNotSynced, heads: []string{"headId"}}
|
||||
s.watchers["id"] = struct{}{}
|
||||
s.watchers["id2"] = struct{}{}
|
||||
err := s.update(context.Background())
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestSyncStatusService_Run(t *testing.T) {
|
||||
t.Run("successful run", func(t *testing.T) {
|
||||
// given
|
||||
s := newFixture(t)
|
||||
s := newFixture(t, "spaceId")
|
||||
|
||||
// when
|
||||
err := s.Run(context.Background())
|
||||
|
||||
// then
|
||||
assert.Nil(t, err)
|
||||
err = s.Close(context.Background())
|
||||
assert.Nil(t, err)
|
||||
|
@ -264,81 +148,67 @@ func TestSyncStatusService_Run(t *testing.T) {
|
|||
|
||||
func TestSyncStatusService_RemoveAllExcept(t *testing.T) {
|
||||
t.Run("no existing id", func(t *testing.T) {
|
||||
// given
|
||||
f := newFixture(t)
|
||||
f.treeHeads["heads"] = treeHeadsEntry{syncStatus: StatusNotSynced}
|
||||
f := newFixture(t, "spaceId")
|
||||
f.treeHeads["id"] = treeHeadsEntry{syncStatus: StatusNotSynced, heads: []string{"heads"}}
|
||||
|
||||
// when
|
||||
f.service.EXPECT().NodeIds(f.spaceId).Return([]string{"peerId"})
|
||||
f.nodeConfService.EXPECT().NodeIds(f.spaceId).Return([]string{"peerId"})
|
||||
f.RemoveAllExcept("peerId", nil)
|
||||
|
||||
// then
|
||||
assert.Equal(t, StatusSynced, f.treeHeads["heads"].syncStatus)
|
||||
assert.Equal(t, StatusSynced, f.treeHeads["id"].syncStatus)
|
||||
})
|
||||
t.Run("same ids", func(t *testing.T) {
|
||||
// given
|
||||
f := newFixture(t)
|
||||
f.treeHeads["heads1"] = treeHeadsEntry{syncStatus: StatusNotSynced}
|
||||
f := newFixture(t, "id")
|
||||
f.treeHeads["id"] = treeHeadsEntry{syncStatus: StatusNotSynced, heads: []string{"heads"}}
|
||||
|
||||
// when
|
||||
f.service.EXPECT().NodeIds(f.spaceId).Return([]string{"peerId"})
|
||||
f.RemoveAllExcept("peerId", []string{"heads", "heads"})
|
||||
f.nodeConfService.EXPECT().NodeIds(f.spaceId).Return([]string{"peerId"})
|
||||
f.RemoveAllExcept("peerId", []string{"id"})
|
||||
|
||||
// then
|
||||
assert.Equal(t, StatusSynced, f.treeHeads["heads1"].syncStatus)
|
||||
assert.Equal(t, StatusNotSynced, f.treeHeads["id"].syncStatus)
|
||||
})
|
||||
t.Run("sender not responsible", func(t *testing.T) {
|
||||
// given
|
||||
f := newFixture(t)
|
||||
f.treeHeads["heads1"] = treeHeadsEntry{syncStatus: StatusNotSynced}
|
||||
f := newFixture(t, "spaceId")
|
||||
f.treeHeads["id"] = treeHeadsEntry{syncStatus: StatusNotSynced, heads: []string{"heads"}}
|
||||
|
||||
// when
|
||||
f.service.EXPECT().NodeIds(f.spaceId).Return([]string{})
|
||||
f.RemoveAllExcept("peerId", []string{"heads"})
|
||||
f.nodeConfService.EXPECT().NodeIds(f.spaceId).Return([]string{"peerId1"})
|
||||
f.RemoveAllExcept("peerId", nil)
|
||||
|
||||
// then
|
||||
assert.Equal(t, StatusNotSynced, f.treeHeads["heads1"].syncStatus)
|
||||
assert.Equal(t, StatusNotSynced, f.treeHeads["id"].syncStatus)
|
||||
})
|
||||
t.Run("current state is outdated", func(t *testing.T) {
|
||||
// given
|
||||
f := newFixture(t)
|
||||
f.treeHeads["heads1"] = treeHeadsEntry{syncStatus: StatusNotSynced, stateCounter: 1}
|
||||
}
|
||||
|
||||
// when
|
||||
f.service.EXPECT().NodeIds(f.spaceId).Return([]string{})
|
||||
f.RemoveAllExcept("peerId", []string{"heads"})
|
||||
func TestHeadsChange(t *testing.T) {
|
||||
fx := newFixture(t, "space1")
|
||||
fx.syncDetailsUpdater.EXPECT().UpdateDetails("obj1", domain.ObjectSyncStatusSyncing, "space1")
|
||||
inputHeads := []string{"b", "c", "a"}
|
||||
|
||||
// then
|
||||
assert.Equal(t, StatusNotSynced, f.treeHeads["heads1"].syncStatus)
|
||||
})
|
||||
t.Run("tree is not synced", func(t *testing.T) {
|
||||
// given
|
||||
f := newFixture(t)
|
||||
f.treeHeads["heads"] = treeHeadsEntry{syncStatus: StatusNotSynced}
|
||||
fx.HeadsChange("obj1", inputHeads)
|
||||
|
||||
// when
|
||||
f.service.EXPECT().NodeIds(f.spaceId).Return([]string{})
|
||||
f.RemoveAllExcept("peerId", []string{"heads"})
|
||||
got, ok := fx.treeHeads["obj1"]
|
||||
require.True(t, ok)
|
||||
|
||||
want := treeHeadsEntry{
|
||||
heads: []string{"a", "b", "c"},
|
||||
syncStatus: StatusNotSynced,
|
||||
}
|
||||
assert.Equal(t, want, got)
|
||||
assert.Equal(t, []string{"b", "c", "a"}, inputHeads, "heads should be copied")
|
||||
|
||||
// then
|
||||
assert.Equal(t, StatusNotSynced, f.treeHeads["heads"].syncStatus)
|
||||
})
|
||||
}
|
||||
|
||||
type fixture struct {
|
||||
*syncStatusService
|
||||
service *mock_nodeconf.MockService
|
||||
storage *mock_spacestorage.MockSpaceStorage
|
||||
config *config.Config
|
||||
detailsUpdater *mock_objectsyncstatus.MockUpdater
|
||||
nodeStatus nodestatus.NodeStatus
|
||||
nodeConfService *mock_nodeconf.MockService
|
||||
spaceStorage *mock_spacestorage.MockSpaceStorage
|
||||
config *config.Config
|
||||
syncDetailsUpdater *mock_objectsyncstatus.MockUpdater
|
||||
nodeStatus nodestatus.NodeStatus
|
||||
}
|
||||
|
||||
func newFixture(t *testing.T) *fixture {
|
||||
func newFixture(t *testing.T, spaceId string) *fixture {
|
||||
ctrl := gomock.NewController(t)
|
||||
service := mock_nodeconf.NewMockService(ctrl)
|
||||
storage := mock_spacestorage.NewMockSpaceStorage(ctrl)
|
||||
spaceState := &spacestate.SpaceState{SpaceId: "spaceId"}
|
||||
spaceState := &spacestate.SpaceState{SpaceId: spaceId}
|
||||
config := &config.Config{}
|
||||
detailsUpdater := mock_objectsyncstatus.NewMockUpdater(t)
|
||||
nodeStatus := nodestatus.NewNodeStatus()
|
||||
|
@ -354,18 +224,15 @@ func newFixture(t *testing.T) *fixture {
|
|||
err := nodeStatus.Init(a)
|
||||
assert.Nil(t, err)
|
||||
|
||||
syncStatusService := &syncStatusService{
|
||||
treeHeads: map[string]treeHeadsEntry{},
|
||||
watchers: map[string]struct{}{},
|
||||
}
|
||||
err = syncStatusService.Init(a)
|
||||
statusService := NewSyncStatusService()
|
||||
err = statusService.Init(a)
|
||||
assert.Nil(t, err)
|
||||
return &fixture{
|
||||
syncStatusService: syncStatusService,
|
||||
service: service,
|
||||
storage: storage,
|
||||
config: config,
|
||||
detailsUpdater: detailsUpdater,
|
||||
nodeStatus: nodeStatus,
|
||||
syncStatusService: statusService.(*syncStatusService),
|
||||
nodeConfService: service,
|
||||
spaceStorage: storage,
|
||||
config: config,
|
||||
syncDetailsUpdater: detailsUpdater,
|
||||
nodeStatus: nodeStatus,
|
||||
}
|
||||
}
|
||||
|
|
|
@ -14,7 +14,6 @@ import (
|
|||
"github.com/anyproto/anytype-heart/core/filestorage/filesync"
|
||||
"github.com/anyproto/anytype-heart/core/syncstatus/nodestatus"
|
||||
"github.com/anyproto/anytype-heart/core/syncstatus/objectsyncstatus"
|
||||
"github.com/anyproto/anytype-heart/core/syncstatus/spacesyncstatus"
|
||||
"github.com/anyproto/anytype-heart/pkg/lib/localstore/objectstore"
|
||||
)
|
||||
|
||||
|
@ -39,8 +38,6 @@ type service struct {
|
|||
|
||||
objectStore objectstore.ObjectStore
|
||||
objectGetter cache.ObjectGetter
|
||||
|
||||
spaceSyncStatus spacesyncstatus.Updater
|
||||
}
|
||||
|
||||
func New() Service {
|
||||
|
@ -52,12 +49,10 @@ func New() Service {
|
|||
func (s *service) Init(a *app.App) (err error) {
|
||||
s.fileSyncService = app.MustComponent[filesync.FileSync](a)
|
||||
s.objectStore = app.MustComponent[objectstore.ObjectStore](a)
|
||||
s.spaceSyncStatus = app.MustComponent[spacesyncstatus.Updater](a)
|
||||
s.objectGetter = app.MustComponent[cache.ObjectGetter](a)
|
||||
s.fileSyncService.OnUploaded(s.onFileUploaded)
|
||||
s.fileSyncService.OnUploadStarted(s.onFileUploadStarted)
|
||||
s.fileSyncService.OnLimited(s.onFileLimited)
|
||||
s.fileSyncService.OnDelete(s.OnFileDelete)
|
||||
|
||||
nodeConfService := app.MustComponent[nodeconf.Service](a)
|
||||
cfg := app.MustComponent[*config.Config](a)
|
||||
|
@ -81,7 +76,7 @@ func (s *service) RegisterSpace(space commonspace.Space, sw objectsyncstatus.Sta
|
|||
|
||||
sw.SetUpdateReceiver(s.updateReceiver)
|
||||
s.objectWatchers[space.Id()] = sw
|
||||
s.updateReceiver.spaceId = space.Id()
|
||||
s.updateReceiver.setSpaceId(space.Id())
|
||||
}
|
||||
|
||||
func (s *service) UnregisterSpace(space commonspace.Space) {
|
||||
|
|
|
@ -1,105 +0,0 @@
|
|||
package spacesyncstatus
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/anyproto/anytype-heart/core/domain"
|
||||
"github.com/anyproto/anytype-heart/core/syncstatus/filesyncstatus"
|
||||
"github.com/anyproto/anytype-heart/pkg/lib/bundle"
|
||||
"github.com/anyproto/anytype-heart/pkg/lib/database"
|
||||
"github.com/anyproto/anytype-heart/pkg/lib/localstore/objectstore"
|
||||
"github.com/anyproto/anytype-heart/pkg/lib/pb/model"
|
||||
"github.com/anyproto/anytype-heart/util/pbtypes"
|
||||
)
|
||||
|
||||
type FileState struct {
|
||||
fileSyncCountBySpace map[string]int
|
||||
fileSyncStatusBySpace map[string]domain.SpaceSyncStatus
|
||||
filesErrorBySpace map[string]domain.SyncError
|
||||
sync.Mutex
|
||||
|
||||
store objectstore.ObjectStore
|
||||
}
|
||||
|
||||
func NewFileState(store objectstore.ObjectStore) *FileState {
|
||||
return &FileState{
|
||||
fileSyncCountBySpace: make(map[string]int, 0),
|
||||
fileSyncStatusBySpace: make(map[string]domain.SpaceSyncStatus, 0),
|
||||
filesErrorBySpace: make(map[string]domain.SyncError, 0),
|
||||
|
||||
store: store,
|
||||
}
|
||||
}
|
||||
|
||||
func (f *FileState) SetObjectsNumber(status *domain.SpaceSync) {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
switch status.Status {
|
||||
case domain.Error, domain.Offline:
|
||||
f.fileSyncCountBySpace[status.SpaceId] = 0
|
||||
default:
|
||||
records, err := f.store.Query(database.Query{
|
||||
Filters: []*model.BlockContentDataviewFilter{
|
||||
{
|
||||
RelationKey: bundle.RelationKeyFileBackupStatus.String(),
|
||||
Condition: model.BlockContentDataviewFilter_In,
|
||||
Value: pbtypes.IntList(int(filesyncstatus.Syncing), int(filesyncstatus.Queued)),
|
||||
},
|
||||
{
|
||||
RelationKey: bundle.RelationKeySpaceId.String(),
|
||||
Condition: model.BlockContentDataviewFilter_Equal,
|
||||
Value: pbtypes.String(status.SpaceId),
|
||||
},
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
log.Errorf("failed to query file status: %s", err)
|
||||
}
|
||||
f.fileSyncCountBySpace[status.SpaceId] = len(records)
|
||||
}
|
||||
}
|
||||
|
||||
func (f *FileState) SetSyncStatusAndErr(status domain.SpaceSyncStatus, syncErr domain.SyncError, spaceId string) {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
switch status {
|
||||
case domain.Synced:
|
||||
f.fileSyncStatusBySpace[spaceId] = domain.Synced
|
||||
f.filesErrorBySpace[spaceId] = syncErr
|
||||
if number := f.fileSyncCountBySpace[spaceId]; number > 0 {
|
||||
f.fileSyncStatusBySpace[spaceId] = domain.Syncing
|
||||
return
|
||||
}
|
||||
case domain.Error, domain.Syncing, domain.Offline:
|
||||
f.fileSyncStatusBySpace[spaceId] = status
|
||||
f.filesErrorBySpace[spaceId] = syncErr
|
||||
}
|
||||
}
|
||||
|
||||
func (f *FileState) GetSyncStatus(spaceId string) domain.SpaceSyncStatus {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
if status, ok := f.fileSyncStatusBySpace[spaceId]; ok {
|
||||
return status
|
||||
}
|
||||
return domain.Unknown
|
||||
}
|
||||
|
||||
func (f *FileState) GetSyncObjectCount(spaceId string) int {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
return f.fileSyncCountBySpace[spaceId]
|
||||
}
|
||||
|
||||
func (f *FileState) ResetSpaceErrorStatus(spaceId string, syncError domain.SyncError) {
|
||||
// show StorageLimitExceed only once
|
||||
if syncError == domain.StorageLimitExceed {
|
||||
f.SetSyncStatusAndErr(domain.Synced, domain.Null, spaceId)
|
||||
}
|
||||
}
|
||||
|
||||
func (f *FileState) GetSyncErr(spaceId string) domain.SyncError {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
return f.filesErrorBySpace[spaceId]
|
||||
}
|
|
@ -1,161 +0,0 @@
|
|||
package spacesyncstatus
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/anyproto/anytype-heart/core/domain"
|
||||
"github.com/anyproto/anytype-heart/core/syncstatus/filesyncstatus"
|
||||
"github.com/anyproto/anytype-heart/pkg/lib/bundle"
|
||||
"github.com/anyproto/anytype-heart/pkg/lib/localstore/objectstore"
|
||||
"github.com/anyproto/anytype-heart/util/pbtypes"
|
||||
)
|
||||
|
||||
func TestFileState_GetSyncObjectCount(t *testing.T) {
|
||||
t.Run("GetSyncObjectCount", func(t *testing.T) {
|
||||
// given
|
||||
fileState := NewFileState(nil)
|
||||
|
||||
// when
|
||||
fileState.fileSyncCountBySpace["spaceId"] = 1
|
||||
objectCount := fileState.GetSyncObjectCount("spaceId")
|
||||
|
||||
// then
|
||||
assert.Equal(t, 1, objectCount)
|
||||
})
|
||||
t.Run("GetSyncObjectCount: zero value", func(t *testing.T) {
|
||||
// given
|
||||
fileState := NewFileState(nil)
|
||||
|
||||
// when
|
||||
objectCount := fileState.GetSyncObjectCount("spaceId")
|
||||
|
||||
// then
|
||||
assert.Equal(t, 0, objectCount)
|
||||
})
|
||||
}
|
||||
|
||||
func TestFileState_GetSyncStatus(t *testing.T) {
|
||||
t.Run("GetSyncStatus", func(t *testing.T) {
|
||||
// given
|
||||
fileState := NewFileState(nil)
|
||||
|
||||
// when
|
||||
fileState.fileSyncStatusBySpace["spaceId"] = domain.Syncing
|
||||
syncStatus := fileState.GetSyncStatus("spaceId")
|
||||
|
||||
// then
|
||||
assert.Equal(t, domain.Syncing, syncStatus)
|
||||
})
|
||||
t.Run("GetSyncStatus: zero value", func(t *testing.T) {
|
||||
// given
|
||||
fileState := NewFileState(nil)
|
||||
|
||||
// when
|
||||
syncStatus := fileState.GetSyncStatus("spaceId")
|
||||
|
||||
// then
|
||||
assert.Equal(t, domain.Unknown, syncStatus)
|
||||
})
|
||||
}
|
||||
|
||||
func TestFileState_SetObjectsNumber(t *testing.T) {
|
||||
t.Run("SetObjectsNumber", func(t *testing.T) {
|
||||
// given
|
||||
storeFixture := objectstore.NewStoreFixture(t)
|
||||
storeFixture.AddObjects(t, []objectstore.TestObject{
|
||||
{
|
||||
bundle.RelationKeyId: pbtypes.String("id1"),
|
||||
bundle.RelationKeyFileBackupStatus: pbtypes.Int64(int64(filesyncstatus.Syncing)),
|
||||
bundle.RelationKeySpaceId: pbtypes.String("spaceId"),
|
||||
},
|
||||
{
|
||||
bundle.RelationKeyId: pbtypes.String("id2"),
|
||||
bundle.RelationKeyFileBackupStatus: pbtypes.Int64(int64(filesyncstatus.Synced)),
|
||||
bundle.RelationKeySpaceId: pbtypes.String("spaceId"),
|
||||
},
|
||||
{
|
||||
bundle.RelationKeyId: pbtypes.String("id3"),
|
||||
bundle.RelationKeyFileBackupStatus: pbtypes.Int64(int64(filesyncstatus.Syncing)),
|
||||
bundle.RelationKeySpaceId: pbtypes.String("spaceId"),
|
||||
},
|
||||
})
|
||||
fileState := NewFileState(storeFixture)
|
||||
syncStatus := domain.MakeSyncStatus("spaceId", domain.Syncing, domain.Null, domain.Files)
|
||||
|
||||
// when
|
||||
fileState.SetObjectsNumber(syncStatus)
|
||||
|
||||
// then
|
||||
assert.Equal(t, 2, fileState.GetSyncObjectCount("spaceId"))
|
||||
})
|
||||
t.Run("SetObjectsNumber: no file object", func(t *testing.T) {
|
||||
// given
|
||||
storeFixture := objectstore.NewStoreFixture(t)
|
||||
fileState := NewFileState(storeFixture)
|
||||
syncStatus := domain.MakeSyncStatus("spaceId", domain.Synced, domain.Null, domain.Files)
|
||||
|
||||
// when
|
||||
fileState.SetObjectsNumber(syncStatus)
|
||||
|
||||
// then
|
||||
assert.Equal(t, 0, fileState.GetSyncObjectCount("spaceId"))
|
||||
})
|
||||
}
|
||||
|
||||
func TestFileState_SetSyncStatus(t *testing.T) {
|
||||
t.Run("SetSyncStatusAndErr, status synced", func(t *testing.T) {
|
||||
// given
|
||||
fileState := NewFileState(objectstore.NewStoreFixture(t))
|
||||
|
||||
// when
|
||||
fileState.SetSyncStatusAndErr(domain.Synced, domain.Null, "spaceId")
|
||||
|
||||
// then
|
||||
assert.Equal(t, domain.Synced, fileState.GetSyncStatus("spaceId"))
|
||||
})
|
||||
t.Run("SetSyncStatusAndErr, sync in progress", func(t *testing.T) {
|
||||
// given
|
||||
fileState := NewFileState(objectstore.NewStoreFixture(t))
|
||||
|
||||
// when
|
||||
syncStatus := domain.MakeSyncStatus("spaceId", domain.Syncing, domain.Null, domain.Files)
|
||||
fileState.SetSyncStatusAndErr(syncStatus.Status, domain.Null, syncStatus.SpaceId)
|
||||
|
||||
// then
|
||||
assert.Equal(t, domain.Syncing, fileState.GetSyncStatus("spaceId"))
|
||||
})
|
||||
t.Run("SetSyncStatusAndErr, sync is finished with error", func(t *testing.T) {
|
||||
// given
|
||||
fileState := NewFileState(objectstore.NewStoreFixture(t))
|
||||
|
||||
// when
|
||||
syncStatus := domain.MakeSyncStatus("spaceId", domain.Error, domain.Null, domain.Files)
|
||||
fileState.SetSyncStatusAndErr(syncStatus.Status, domain.Null, syncStatus.SpaceId)
|
||||
|
||||
// then
|
||||
assert.Equal(t, domain.Error, fileState.GetSyncStatus("spaceId"))
|
||||
})
|
||||
t.Run("SetSyncStatusAndErr, offline", func(t *testing.T) {
|
||||
// given
|
||||
fileState := NewFileState(objectstore.NewStoreFixture(t))
|
||||
|
||||
// when
|
||||
fileState.SetSyncStatusAndErr(domain.Offline, domain.Null, "spaceId")
|
||||
|
||||
// then
|
||||
assert.Equal(t, domain.Offline, fileState.GetSyncStatus("spaceId"))
|
||||
})
|
||||
t.Run("SetSyncStatusAndErr, syncing status", func(t *testing.T) {
|
||||
// given
|
||||
fileState := NewFileState(objectstore.NewStoreFixture(t))
|
||||
|
||||
// when
|
||||
fileState.fileSyncCountBySpace["spaceId"] = 1
|
||||
fileState.SetSyncStatusAndErr(domain.Synced, domain.Null, "spaceId")
|
||||
|
||||
// then
|
||||
assert.Equal(t, domain.Syncing, fileState.GetSyncStatus("spaceId"))
|
||||
})
|
||||
}
|
|
@ -0,0 +1,173 @@
|
|||
// Code generated by mockery. DO NOT EDIT.
|
||||
|
||||
package mock_spacesyncstatus
|
||||
|
||||
import (
|
||||
app "github.com/anyproto/any-sync/app"
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
|
||||
pb "github.com/anyproto/anytype-heart/pb"
|
||||
)
|
||||
|
||||
// MockNetworkConfig is an autogenerated mock type for the NetworkConfig type
|
||||
type MockNetworkConfig struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
type MockNetworkConfig_Expecter struct {
|
||||
mock *mock.Mock
|
||||
}
|
||||
|
||||
func (_m *MockNetworkConfig) EXPECT() *MockNetworkConfig_Expecter {
|
||||
return &MockNetworkConfig_Expecter{mock: &_m.Mock}
|
||||
}
|
||||
|
||||
// GetNetworkMode provides a mock function with given fields:
|
||||
func (_m *MockNetworkConfig) GetNetworkMode() pb.RpcAccountNetworkMode {
|
||||
ret := _m.Called()
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for GetNetworkMode")
|
||||
}
|
||||
|
||||
var r0 pb.RpcAccountNetworkMode
|
||||
if rf, ok := ret.Get(0).(func() pb.RpcAccountNetworkMode); ok {
|
||||
r0 = rf()
|
||||
} else {
|
||||
r0 = ret.Get(0).(pb.RpcAccountNetworkMode)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// MockNetworkConfig_GetNetworkMode_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetNetworkMode'
|
||||
type MockNetworkConfig_GetNetworkMode_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// GetNetworkMode is a helper method to define mock.On call
|
||||
func (_e *MockNetworkConfig_Expecter) GetNetworkMode() *MockNetworkConfig_GetNetworkMode_Call {
|
||||
return &MockNetworkConfig_GetNetworkMode_Call{Call: _e.mock.On("GetNetworkMode")}
|
||||
}
|
||||
|
||||
func (_c *MockNetworkConfig_GetNetworkMode_Call) Run(run func()) *MockNetworkConfig_GetNetworkMode_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run()
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockNetworkConfig_GetNetworkMode_Call) Return(_a0 pb.RpcAccountNetworkMode) *MockNetworkConfig_GetNetworkMode_Call {
|
||||
_c.Call.Return(_a0)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockNetworkConfig_GetNetworkMode_Call) RunAndReturn(run func() pb.RpcAccountNetworkMode) *MockNetworkConfig_GetNetworkMode_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// Init provides a mock function with given fields: a
|
||||
func (_m *MockNetworkConfig) Init(a *app.App) error {
|
||||
ret := _m.Called(a)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for Init")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(*app.App) error); ok {
|
||||
r0 = rf(a)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// MockNetworkConfig_Init_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Init'
|
||||
type MockNetworkConfig_Init_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// Init is a helper method to define mock.On call
|
||||
// - a *app.App
|
||||
func (_e *MockNetworkConfig_Expecter) Init(a interface{}) *MockNetworkConfig_Init_Call {
|
||||
return &MockNetworkConfig_Init_Call{Call: _e.mock.On("Init", a)}
|
||||
}
|
||||
|
||||
func (_c *MockNetworkConfig_Init_Call) Run(run func(a *app.App)) *MockNetworkConfig_Init_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(*app.App))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockNetworkConfig_Init_Call) Return(err error) *MockNetworkConfig_Init_Call {
|
||||
_c.Call.Return(err)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockNetworkConfig_Init_Call) RunAndReturn(run func(*app.App) error) *MockNetworkConfig_Init_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// Name provides a mock function with given fields:
|
||||
func (_m *MockNetworkConfig) Name() string {
|
||||
ret := _m.Called()
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for Name")
|
||||
}
|
||||
|
||||
var r0 string
|
||||
if rf, ok := ret.Get(0).(func() string); ok {
|
||||
r0 = rf()
|
||||
} else {
|
||||
r0 = ret.Get(0).(string)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// MockNetworkConfig_Name_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Name'
|
||||
type MockNetworkConfig_Name_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// Name is a helper method to define mock.On call
|
||||
func (_e *MockNetworkConfig_Expecter) Name() *MockNetworkConfig_Name_Call {
|
||||
return &MockNetworkConfig_Name_Call{Call: _e.mock.On("Name")}
|
||||
}
|
||||
|
||||
func (_c *MockNetworkConfig_Name_Call) Run(run func()) *MockNetworkConfig_Name_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run()
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockNetworkConfig_Name_Call) Return(name string) *MockNetworkConfig_Name_Call {
|
||||
_c.Call.Return(name)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockNetworkConfig_Name_Call) RunAndReturn(run func() string) *MockNetworkConfig_Name_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// NewMockNetworkConfig creates a new instance of MockNetworkConfig. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
// The first argument is typically a *testing.T value.
|
||||
func NewMockNetworkConfig(t interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}) *MockNetworkConfig {
|
||||
mock := &MockNetworkConfig{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
|
|
@ -0,0 +1,189 @@
|
|||
// Code generated by mockery. DO NOT EDIT.
|
||||
|
||||
package mock_spacesyncstatus
|
||||
|
||||
import (
|
||||
context "context"
|
||||
|
||||
app "github.com/anyproto/any-sync/app"
|
||||
|
||||
files "github.com/anyproto/anytype-heart/core/files"
|
||||
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
)
|
||||
|
||||
// MockNodeUsage is an autogenerated mock type for the NodeUsage type
|
||||
type MockNodeUsage struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
type MockNodeUsage_Expecter struct {
|
||||
mock *mock.Mock
|
||||
}
|
||||
|
||||
func (_m *MockNodeUsage) EXPECT() *MockNodeUsage_Expecter {
|
||||
return &MockNodeUsage_Expecter{mock: &_m.Mock}
|
||||
}
|
||||
|
||||
// GetNodeUsage provides a mock function with given fields: ctx
|
||||
func (_m *MockNodeUsage) GetNodeUsage(ctx context.Context) (*files.NodeUsageResponse, error) {
|
||||
ret := _m.Called(ctx)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for GetNodeUsage")
|
||||
}
|
||||
|
||||
var r0 *files.NodeUsageResponse
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context) (*files.NodeUsageResponse, error)); ok {
|
||||
return rf(ctx)
|
||||
}
|
||||
if rf, ok := ret.Get(0).(func(context.Context) *files.NodeUsageResponse); ok {
|
||||
r0 = rf(ctx)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*files.NodeUsageResponse)
|
||||
}
|
||||
}
|
||||
|
||||
if rf, ok := ret.Get(1).(func(context.Context) error); ok {
|
||||
r1 = rf(ctx)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// MockNodeUsage_GetNodeUsage_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetNodeUsage'
|
||||
type MockNodeUsage_GetNodeUsage_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// GetNodeUsage is a helper method to define mock.On call
|
||||
// - ctx context.Context
|
||||
func (_e *MockNodeUsage_Expecter) GetNodeUsage(ctx interface{}) *MockNodeUsage_GetNodeUsage_Call {
|
||||
return &MockNodeUsage_GetNodeUsage_Call{Call: _e.mock.On("GetNodeUsage", ctx)}
|
||||
}
|
||||
|
||||
func (_c *MockNodeUsage_GetNodeUsage_Call) Run(run func(ctx context.Context)) *MockNodeUsage_GetNodeUsage_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(context.Context))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockNodeUsage_GetNodeUsage_Call) Return(_a0 *files.NodeUsageResponse, _a1 error) *MockNodeUsage_GetNodeUsage_Call {
|
||||
_c.Call.Return(_a0, _a1)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockNodeUsage_GetNodeUsage_Call) RunAndReturn(run func(context.Context) (*files.NodeUsageResponse, error)) *MockNodeUsage_GetNodeUsage_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// Init provides a mock function with given fields: a
|
||||
func (_m *MockNodeUsage) Init(a *app.App) error {
|
||||
ret := _m.Called(a)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for Init")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(*app.App) error); ok {
|
||||
r0 = rf(a)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// MockNodeUsage_Init_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Init'
|
||||
type MockNodeUsage_Init_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// Init is a helper method to define mock.On call
|
||||
// - a *app.App
|
||||
func (_e *MockNodeUsage_Expecter) Init(a interface{}) *MockNodeUsage_Init_Call {
|
||||
return &MockNodeUsage_Init_Call{Call: _e.mock.On("Init", a)}
|
||||
}
|
||||
|
||||
func (_c *MockNodeUsage_Init_Call) Run(run func(a *app.App)) *MockNodeUsage_Init_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(*app.App))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockNodeUsage_Init_Call) Return(err error) *MockNodeUsage_Init_Call {
|
||||
_c.Call.Return(err)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockNodeUsage_Init_Call) RunAndReturn(run func(*app.App) error) *MockNodeUsage_Init_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// Name provides a mock function with given fields:
|
||||
func (_m *MockNodeUsage) Name() string {
|
||||
ret := _m.Called()
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for Name")
|
||||
}
|
||||
|
||||
var r0 string
|
||||
if rf, ok := ret.Get(0).(func() string); ok {
|
||||
r0 = rf()
|
||||
} else {
|
||||
r0 = ret.Get(0).(string)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// MockNodeUsage_Name_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Name'
|
||||
type MockNodeUsage_Name_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// Name is a helper method to define mock.On call
|
||||
func (_e *MockNodeUsage_Expecter) Name() *MockNodeUsage_Name_Call {
|
||||
return &MockNodeUsage_Name_Call{Call: _e.mock.On("Name")}
|
||||
}
|
||||
|
||||
func (_c *MockNodeUsage_Name_Call) Run(run func()) *MockNodeUsage_Name_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run()
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockNodeUsage_Name_Call) Return(name string) *MockNodeUsage_Name_Call {
|
||||
_c.Call.Return(name)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockNodeUsage_Name_Call) RunAndReturn(run func() string) *MockNodeUsage_Name_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// NewMockNodeUsage creates a new instance of MockNodeUsage. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
// The first argument is typically a *testing.T value.
|
||||
func NewMockNodeUsage(t interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}) *MockNodeUsage {
|
||||
mock := &MockNodeUsage{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
|
|
@ -7,8 +7,6 @@ import (
|
|||
|
||||
app "github.com/anyproto/any-sync/app"
|
||||
|
||||
domain "github.com/anyproto/anytype-heart/core/domain"
|
||||
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
)
|
||||
|
||||
|
@ -162,6 +160,39 @@ func (_c *MockUpdater_Name_Call) RunAndReturn(run func() string) *MockUpdater_Na
|
|||
return _c
|
||||
}
|
||||
|
||||
// Refresh provides a mock function with given fields: spaceId
|
||||
func (_m *MockUpdater) Refresh(spaceId string) {
|
||||
_m.Called(spaceId)
|
||||
}
|
||||
|
||||
// MockUpdater_Refresh_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Refresh'
|
||||
type MockUpdater_Refresh_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// Refresh is a helper method to define mock.On call
|
||||
// - spaceId string
|
||||
func (_e *MockUpdater_Expecter) Refresh(spaceId interface{}) *MockUpdater_Refresh_Call {
|
||||
return &MockUpdater_Refresh_Call{Call: _e.mock.On("Refresh", spaceId)}
|
||||
}
|
||||
|
||||
func (_c *MockUpdater_Refresh_Call) Run(run func(spaceId string)) *MockUpdater_Refresh_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(string))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockUpdater_Refresh_Call) Return() *MockUpdater_Refresh_Call {
|
||||
_c.Call.Return()
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockUpdater_Refresh_Call) RunAndReturn(run func(string)) *MockUpdater_Refresh_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// Run provides a mock function with given fields: ctx
|
||||
func (_m *MockUpdater) Run(ctx context.Context) error {
|
||||
ret := _m.Called(ctx)
|
||||
|
@ -208,35 +239,36 @@ func (_c *MockUpdater_Run_Call) RunAndReturn(run func(context.Context) error) *M
|
|||
return _c
|
||||
}
|
||||
|
||||
// SendUpdate provides a mock function with given fields: spaceSync
|
||||
func (_m *MockUpdater) SendUpdate(spaceSync *domain.SpaceSync) {
|
||||
_m.Called(spaceSync)
|
||||
// UpdateMissingIds provides a mock function with given fields: spaceId, ids
|
||||
func (_m *MockUpdater) UpdateMissingIds(spaceId string, ids []string) {
|
||||
_m.Called(spaceId, ids)
|
||||
}
|
||||
|
||||
// MockUpdater_SendUpdate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SendUpdate'
|
||||
type MockUpdater_SendUpdate_Call struct {
|
||||
// MockUpdater_UpdateMissingIds_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateMissingIds'
|
||||
type MockUpdater_UpdateMissingIds_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// SendUpdate is a helper method to define mock.On call
|
||||
// - spaceSync *domain.SpaceSync
|
||||
func (_e *MockUpdater_Expecter) SendUpdate(spaceSync interface{}) *MockUpdater_SendUpdate_Call {
|
||||
return &MockUpdater_SendUpdate_Call{Call: _e.mock.On("SendUpdate", spaceSync)}
|
||||
// UpdateMissingIds is a helper method to define mock.On call
|
||||
// - spaceId string
|
||||
// - ids []string
|
||||
func (_e *MockUpdater_Expecter) UpdateMissingIds(spaceId interface{}, ids interface{}) *MockUpdater_UpdateMissingIds_Call {
|
||||
return &MockUpdater_UpdateMissingIds_Call{Call: _e.mock.On("UpdateMissingIds", spaceId, ids)}
|
||||
}
|
||||
|
||||
func (_c *MockUpdater_SendUpdate_Call) Run(run func(spaceSync *domain.SpaceSync)) *MockUpdater_SendUpdate_Call {
|
||||
func (_c *MockUpdater_UpdateMissingIds_Call) Run(run func(spaceId string, ids []string)) *MockUpdater_UpdateMissingIds_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(*domain.SpaceSync))
|
||||
run(args[0].(string), args[1].([]string))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockUpdater_SendUpdate_Call) Return() *MockUpdater_SendUpdate_Call {
|
||||
func (_c *MockUpdater_UpdateMissingIds_Call) Return() *MockUpdater_UpdateMissingIds_Call {
|
||||
_c.Call.Return()
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockUpdater_SendUpdate_Call) RunAndReturn(run func(*domain.SpaceSync)) *MockUpdater_SendUpdate_Call {
|
||||
func (_c *MockUpdater_UpdateMissingIds_Call) RunAndReturn(run func(string, []string)) *MockUpdater_UpdateMissingIds_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
|
|
@ -1,112 +0,0 @@
|
|||
package spacesyncstatus
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/anyproto/anytype-heart/core/domain"
|
||||
"github.com/anyproto/anytype-heart/pkg/lib/bundle"
|
||||
"github.com/anyproto/anytype-heart/pkg/lib/database"
|
||||
"github.com/anyproto/anytype-heart/pkg/lib/localstore/objectstore"
|
||||
"github.com/anyproto/anytype-heart/pkg/lib/pb/model"
|
||||
"github.com/anyproto/anytype-heart/util/pbtypes"
|
||||
)
|
||||
|
||||
type ObjectState struct {
|
||||
objectSyncStatusBySpace map[string]domain.SpaceSyncStatus
|
||||
objectSyncCountBySpace map[string]int
|
||||
objectSyncErrBySpace map[string]domain.SyncError
|
||||
sync.Mutex
|
||||
|
||||
store objectstore.ObjectStore
|
||||
}
|
||||
|
||||
func NewObjectState(store objectstore.ObjectStore) *ObjectState {
|
||||
return &ObjectState{
|
||||
objectSyncCountBySpace: make(map[string]int, 0),
|
||||
objectSyncStatusBySpace: make(map[string]domain.SpaceSyncStatus, 0),
|
||||
objectSyncErrBySpace: make(map[string]domain.SyncError, 0),
|
||||
store: store,
|
||||
}
|
||||
}
|
||||
|
||||
func (o *ObjectState) SetObjectsNumber(status *domain.SpaceSync) {
|
||||
o.Lock()
|
||||
defer o.Unlock()
|
||||
switch status.Status {
|
||||
case domain.Error, domain.Offline:
|
||||
o.objectSyncCountBySpace[status.SpaceId] = 0
|
||||
default:
|
||||
records := o.getSyncingObjects(status)
|
||||
o.objectSyncCountBySpace[status.SpaceId] = len(records)
|
||||
}
|
||||
}
|
||||
|
||||
func (o *ObjectState) getSyncingObjects(status *domain.SpaceSync) []database.Record {
|
||||
records, err := o.store.Query(database.Query{
|
||||
Filters: []*model.BlockContentDataviewFilter{
|
||||
{
|
||||
RelationKey: bundle.RelationKeySyncStatus.String(),
|
||||
Condition: model.BlockContentDataviewFilter_Equal,
|
||||
Value: pbtypes.Int64(int64(domain.Syncing)),
|
||||
},
|
||||
{
|
||||
RelationKey: bundle.RelationKeyLayout.String(),
|
||||
Condition: model.BlockContentDataviewFilter_NotIn,
|
||||
Value: pbtypes.IntList(
|
||||
int(model.ObjectType_file),
|
||||
int(model.ObjectType_image),
|
||||
int(model.ObjectType_video),
|
||||
int(model.ObjectType_audio),
|
||||
),
|
||||
},
|
||||
{
|
||||
RelationKey: bundle.RelationKeySpaceId.String(),
|
||||
Condition: model.BlockContentDataviewFilter_Equal,
|
||||
Value: pbtypes.String(status.SpaceId),
|
||||
},
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
log.Errorf("failed to query file status: %s", err)
|
||||
}
|
||||
return records
|
||||
}
|
||||
|
||||
func (o *ObjectState) SetSyncStatusAndErr(status domain.SpaceSyncStatus, syncErr domain.SyncError, spaceId string) {
|
||||
o.Lock()
|
||||
defer o.Unlock()
|
||||
if objectNumber, ok := o.objectSyncCountBySpace[spaceId]; ok && objectNumber > 0 {
|
||||
o.objectSyncStatusBySpace[spaceId] = domain.Syncing
|
||||
o.objectSyncErrBySpace[spaceId] = domain.Null
|
||||
return
|
||||
} else if ok && objectNumber == 0 && status == domain.Syncing {
|
||||
o.objectSyncStatusBySpace[spaceId] = domain.Synced
|
||||
o.objectSyncErrBySpace[spaceId] = domain.Null
|
||||
return
|
||||
}
|
||||
o.objectSyncStatusBySpace[spaceId] = status
|
||||
o.objectSyncErrBySpace[spaceId] = syncErr
|
||||
}
|
||||
|
||||
func (o *ObjectState) GetSyncStatus(spaceId string) domain.SpaceSyncStatus {
|
||||
o.Lock()
|
||||
defer o.Unlock()
|
||||
if status, ok := o.objectSyncStatusBySpace[spaceId]; ok {
|
||||
return status
|
||||
}
|
||||
return domain.Unknown
|
||||
}
|
||||
|
||||
func (o *ObjectState) GetSyncObjectCount(spaceId string) int {
|
||||
o.Lock()
|
||||
defer o.Unlock()
|
||||
return o.objectSyncCountBySpace[spaceId]
|
||||
}
|
||||
|
||||
func (o *ObjectState) GetSyncErr(spaceId string) domain.SyncError {
|
||||
o.Lock()
|
||||
defer o.Unlock()
|
||||
return o.objectSyncErrBySpace[spaceId]
|
||||
}
|
||||
|
||||
func (o *ObjectState) ResetSpaceErrorStatus(string, domain.SyncError) {}
|
|
@ -1,157 +0,0 @@
|
|||
package spacesyncstatus
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/anyproto/anytype-heart/core/domain"
|
||||
"github.com/anyproto/anytype-heart/pkg/lib/bundle"
|
||||
"github.com/anyproto/anytype-heart/pkg/lib/localstore/objectstore"
|
||||
"github.com/anyproto/anytype-heart/util/pbtypes"
|
||||
)
|
||||
|
||||
func TestObjectState_GetSyncObjectCount(t *testing.T) {
|
||||
t.Run("GetSyncObjectCount", func(t *testing.T) {
|
||||
// given
|
||||
objectState := NewObjectState(objectstore.NewStoreFixture(t))
|
||||
|
||||
// when
|
||||
objectState.objectSyncCountBySpace["spaceId"] = 1
|
||||
objectCount := objectState.GetSyncObjectCount("spaceId")
|
||||
|
||||
// then
|
||||
assert.Equal(t, 1, objectCount)
|
||||
})
|
||||
t.Run("GetSyncObjectCount: zero value", func(t *testing.T) {
|
||||
// given
|
||||
objectState := NewObjectState(objectstore.NewStoreFixture(t))
|
||||
|
||||
// when
|
||||
objectCount := objectState.GetSyncObjectCount("spaceId")
|
||||
|
||||
// then
|
||||
assert.Equal(t, 0, objectCount)
|
||||
})
|
||||
}
|
||||
|
||||
func TestObjectState_GetSyncStatus(t *testing.T) {
|
||||
t.Run("GetSyncStatus", func(t *testing.T) {
|
||||
// given
|
||||
objectState := NewObjectState(objectstore.NewStoreFixture(t))
|
||||
|
||||
// when
|
||||
objectState.objectSyncStatusBySpace["spaceId"] = domain.Syncing
|
||||
syncStatus := objectState.GetSyncStatus("spaceId")
|
||||
|
||||
// then
|
||||
assert.Equal(t, domain.Syncing, syncStatus)
|
||||
})
|
||||
t.Run("GetSyncStatus: zero value", func(t *testing.T) {
|
||||
// given
|
||||
objectState := NewObjectState(objectstore.NewStoreFixture(t))
|
||||
|
||||
// when
|
||||
syncStatus := objectState.GetSyncStatus("spaceId")
|
||||
|
||||
// then
|
||||
assert.Equal(t, domain.Unknown, syncStatus)
|
||||
})
|
||||
}
|
||||
|
||||
func TestObjectState_SetObjectsNumber(t *testing.T) {
|
||||
t.Run("SetObjectsNumber", func(t *testing.T) {
|
||||
// given
|
||||
storeFixture := objectstore.NewStoreFixture(t)
|
||||
objectState := NewObjectState(storeFixture)
|
||||
syncStatus := domain.MakeSyncStatus("spaceId", domain.Syncing, domain.Null, domain.Objects)
|
||||
storeFixture.AddObjects(t, []objectstore.TestObject{
|
||||
{
|
||||
bundle.RelationKeyId: pbtypes.String("id1"),
|
||||
bundle.RelationKeySyncStatus: pbtypes.Int64(int64(domain.Syncing)),
|
||||
bundle.RelationKeySpaceId: pbtypes.String("spaceId"),
|
||||
},
|
||||
{
|
||||
bundle.RelationKeyId: pbtypes.String("id2"),
|
||||
bundle.RelationKeySyncStatus: pbtypes.Int64(int64(domain.Syncing)),
|
||||
bundle.RelationKeySpaceId: pbtypes.String("spaceId"),
|
||||
},
|
||||
})
|
||||
|
||||
// when
|
||||
objectState.SetObjectsNumber(syncStatus)
|
||||
|
||||
// then
|
||||
assert.Equal(t, 2, objectState.GetSyncObjectCount("spaceId"))
|
||||
})
|
||||
t.Run("SetObjectsNumber: no object", func(t *testing.T) {
|
||||
// given
|
||||
objectState := NewObjectState(objectstore.NewStoreFixture(t))
|
||||
syncStatus := domain.MakeSyncStatus("spaceId", domain.Synced, domain.Null, domain.Objects)
|
||||
|
||||
// when
|
||||
objectState.SetObjectsNumber(syncStatus)
|
||||
|
||||
// then
|
||||
assert.Equal(t, 0, objectState.GetSyncObjectCount("spaceId"))
|
||||
})
|
||||
}
|
||||
|
||||
func TestObjectState_SetSyncStatus(t *testing.T) {
|
||||
t.Run("SetSyncStatusAndErr, status synced", func(t *testing.T) {
|
||||
// given
|
||||
objectState := NewObjectState(objectstore.NewStoreFixture(t))
|
||||
|
||||
// when
|
||||
syncStatus := domain.MakeSyncStatus("spaceId", domain.Synced, domain.Null, domain.Objects)
|
||||
objectState.SetSyncStatusAndErr(syncStatus.Status, domain.Null, syncStatus.SpaceId)
|
||||
|
||||
// then
|
||||
assert.Equal(t, domain.Synced, objectState.GetSyncStatus("spaceId"))
|
||||
})
|
||||
t.Run("SetSyncStatusAndErr, sync in progress", func(t *testing.T) {
|
||||
// given
|
||||
objectState := NewObjectState(objectstore.NewStoreFixture(t))
|
||||
|
||||
// when
|
||||
syncStatus := domain.MakeSyncStatus("spaceId", domain.Syncing, domain.Null, domain.Objects)
|
||||
objectState.SetSyncStatusAndErr(syncStatus.Status, domain.Null, syncStatus.SpaceId)
|
||||
|
||||
// then
|
||||
assert.Equal(t, domain.Syncing, objectState.GetSyncStatus("spaceId"))
|
||||
})
|
||||
t.Run("SetSyncStatusAndErr, sync is finished with error", func(t *testing.T) {
|
||||
// given
|
||||
objectState := NewObjectState(objectstore.NewStoreFixture(t))
|
||||
|
||||
// when
|
||||
syncStatus := domain.MakeSyncStatus("spaceId", domain.Error, domain.Null, domain.Objects)
|
||||
objectState.SetSyncStatusAndErr(syncStatus.Status, domain.Null, syncStatus.SpaceId)
|
||||
|
||||
// then
|
||||
assert.Equal(t, domain.Error, objectState.GetSyncStatus("spaceId"))
|
||||
})
|
||||
t.Run("SetSyncStatusAndErr, offline", func(t *testing.T) {
|
||||
// given
|
||||
objectState := NewObjectState(objectstore.NewStoreFixture(t))
|
||||
|
||||
// when
|
||||
syncStatus := domain.MakeSyncStatus("spaceId", domain.Offline, domain.Null, domain.Objects)
|
||||
objectState.SetSyncStatusAndErr(syncStatus.Status, domain.Null, syncStatus.SpaceId)
|
||||
|
||||
// then
|
||||
assert.Equal(t, domain.Offline, objectState.GetSyncStatus("spaceId"))
|
||||
})
|
||||
t.Run("SetSyncStatusAndErr, syncing", func(t *testing.T) {
|
||||
// given
|
||||
objectState := NewObjectState(objectstore.NewStoreFixture(t))
|
||||
|
||||
// when
|
||||
syncStatus := domain.MakeSyncStatus("spaceId", domain.Syncing, domain.Null, domain.Objects)
|
||||
objectState.SetObjectsNumber(syncStatus)
|
||||
objectState.SetSyncStatusAndErr(syncStatus.Status, domain.Null, syncStatus.SpaceId)
|
||||
|
||||
// then
|
||||
assert.Equal(t, domain.Synced, objectState.GetSyncStatus("spaceId"))
|
||||
})
|
||||
}
|
|
@ -2,25 +2,38 @@ package spacesyncstatus
|
|||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/anyproto/any-sync/app"
|
||||
"github.com/cheggaaa/mb/v3"
|
||||
"github.com/anyproto/any-sync/app/logger"
|
||||
"github.com/anyproto/any-sync/nodeconf"
|
||||
"github.com/anyproto/any-sync/util/periodicsync"
|
||||
"github.com/samber/lo"
|
||||
|
||||
"github.com/anyproto/anytype-heart/core/domain"
|
||||
"github.com/anyproto/anytype-heart/core/event"
|
||||
"github.com/anyproto/anytype-heart/core/files"
|
||||
"github.com/anyproto/anytype-heart/core/session"
|
||||
"github.com/anyproto/anytype-heart/core/syncstatus/nodestatus"
|
||||
"github.com/anyproto/anytype-heart/core/syncstatus/syncsubscriptions"
|
||||
"github.com/anyproto/anytype-heart/pb"
|
||||
"github.com/anyproto/anytype-heart/pkg/lib/localstore/objectstore"
|
||||
"github.com/anyproto/anytype-heart/pkg/lib/logging"
|
||||
"github.com/anyproto/anytype-heart/util/slice"
|
||||
)
|
||||
|
||||
const service = "core.syncstatus.spacesyncstatus"
|
||||
const CName = "core.syncstatus.spacesyncstatus"
|
||||
|
||||
var log = logging.Logger("anytype-mw-space-status")
|
||||
var log = logging.Logger(CName)
|
||||
|
||||
type Updater interface {
|
||||
app.ComponentRunnable
|
||||
SendUpdate(spaceSync *domain.SpaceSync)
|
||||
Refresh(spaceId string)
|
||||
UpdateMissingIds(spaceId string, ids []string)
|
||||
}
|
||||
|
||||
type NodeUsage interface {
|
||||
app.Component
|
||||
GetNodeUsage(ctx context.Context) (*files.NodeUsageResponse, error)
|
||||
}
|
||||
|
||||
type SpaceIdGetter interface {
|
||||
|
@ -29,52 +42,55 @@ type SpaceIdGetter interface {
|
|||
AllSpaceIds() []string
|
||||
}
|
||||
|
||||
type State interface {
|
||||
SetObjectsNumber(status *domain.SpaceSync)
|
||||
SetSyncStatusAndErr(status domain.SpaceSyncStatus, syncError domain.SyncError, spaceId string)
|
||||
GetSyncStatus(spaceId string) domain.SpaceSyncStatus
|
||||
GetSyncObjectCount(spaceId string) int
|
||||
GetSyncErr(spaceId string) domain.SyncError
|
||||
ResetSpaceErrorStatus(spaceId string, syncError domain.SyncError)
|
||||
}
|
||||
|
||||
type NetworkConfig interface {
|
||||
app.Component
|
||||
GetNetworkMode() pb.RpcAccountNetworkMode
|
||||
}
|
||||
|
||||
type spaceSyncStatus struct {
|
||||
eventSender event.Sender
|
||||
networkConfig NetworkConfig
|
||||
batcher *mb.MB[*domain.SpaceSync]
|
||||
nodeStatus nodestatus.NodeStatus
|
||||
nodeConf nodeconf.Service
|
||||
nodeUsage NodeUsage
|
||||
subs syncsubscriptions.SyncSubscriptions
|
||||
|
||||
filesState State
|
||||
objectsState State
|
||||
|
||||
ctx context.Context
|
||||
ctxCancel context.CancelFunc
|
||||
spaceIdGetter SpaceIdGetter
|
||||
|
||||
finish chan struct{}
|
||||
spaceIdGetter SpaceIdGetter
|
||||
curStatuses map[string]struct{}
|
||||
missingIds map[string][]string
|
||||
lastSentEvents map[string]pb.EventSpaceSyncStatusUpdate
|
||||
mx sync.Mutex
|
||||
periodicCall periodicsync.PeriodicSync
|
||||
loopInterval time.Duration
|
||||
isLocal bool
|
||||
}
|
||||
|
||||
func NewSpaceSyncStatus() Updater {
|
||||
return &spaceSyncStatus{batcher: mb.New[*domain.SpaceSync](0), finish: make(chan struct{})}
|
||||
return &spaceSyncStatus{
|
||||
loopInterval: time.Second * 1,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *spaceSyncStatus) Init(a *app.App) (err error) {
|
||||
s.eventSender = app.MustComponent[event.Sender](a)
|
||||
s.networkConfig = app.MustComponent[NetworkConfig](a)
|
||||
store := app.MustComponent[objectstore.ObjectStore](a)
|
||||
s.filesState = NewFileState(store)
|
||||
s.objectsState = NewObjectState(store)
|
||||
s.nodeStatus = app.MustComponent[nodestatus.NodeStatus](a)
|
||||
s.nodeConf = app.MustComponent[nodeconf.Service](a)
|
||||
s.nodeUsage = app.MustComponent[NodeUsage](a)
|
||||
s.curStatuses = make(map[string]struct{})
|
||||
s.subs = app.MustComponent[syncsubscriptions.SyncSubscriptions](a)
|
||||
s.missingIds = make(map[string][]string)
|
||||
s.lastSentEvents = make(map[string]pb.EventSpaceSyncStatusUpdate)
|
||||
s.spaceIdGetter = app.MustComponent[SpaceIdGetter](a)
|
||||
s.isLocal = s.networkConfig.GetNetworkMode() == pb.RpcAccount_LocalOnly
|
||||
sessionHookRunner := app.MustComponent[session.HookRunner](a)
|
||||
sessionHookRunner.RegisterHook(s.sendSyncEventForNewSession)
|
||||
s.periodicCall = periodicsync.NewPeriodicSyncDuration(s.loopInterval, time.Second*5, s.update, logger.CtxLogger{Logger: log.Desugar()})
|
||||
return
|
||||
}
|
||||
|
||||
func (s *spaceSyncStatus) Name() (name string) {
|
||||
return service
|
||||
return CName
|
||||
}
|
||||
|
||||
func (s *spaceSyncStatus) sendSyncEventForNewSession(ctx session.Context) error {
|
||||
|
@ -85,24 +101,58 @@ func (s *spaceSyncStatus) sendSyncEventForNewSession(ctx session.Context) error
|
|||
return nil
|
||||
}
|
||||
|
||||
func (s *spaceSyncStatus) UpdateMissingIds(spaceId string, ids []string) {
|
||||
s.mx.Lock()
|
||||
defer s.mx.Unlock()
|
||||
s.missingIds[spaceId] = ids
|
||||
}
|
||||
|
||||
func (s *spaceSyncStatus) Run(ctx context.Context) (err error) {
|
||||
if s.networkConfig.GetNetworkMode() == pb.RpcAccount_LocalOnly {
|
||||
s.sendLocalOnlyEvent()
|
||||
close(s.finish)
|
||||
return
|
||||
} else {
|
||||
s.sendStartEvent(s.spaceIdGetter.AllSpaceIds())
|
||||
}
|
||||
s.ctx, s.ctxCancel = context.WithCancel(context.Background())
|
||||
go s.processEvents()
|
||||
s.sendStartEvent(s.spaceIdGetter.AllSpaceIds())
|
||||
s.periodicCall.Run()
|
||||
return
|
||||
}
|
||||
|
||||
func (s *spaceSyncStatus) getMissingIds(spaceId string) []string {
|
||||
s.mx.Lock()
|
||||
defer s.mx.Unlock()
|
||||
return slice.Copy(s.missingIds[spaceId])
|
||||
}
|
||||
|
||||
func (s *spaceSyncStatus) update(ctx context.Context) error {
|
||||
s.mx.Lock()
|
||||
statuses := lo.MapToSlice(s.curStatuses, func(key string, value struct{}) string {
|
||||
delete(s.curStatuses, key)
|
||||
return key
|
||||
})
|
||||
s.mx.Unlock()
|
||||
for _, spaceId := range statuses {
|
||||
if spaceId == s.spaceIdGetter.TechSpaceId() {
|
||||
continue
|
||||
}
|
||||
// if the there are too many updates and this hurts performance,
|
||||
// we may skip some iterations and not do the updates for example
|
||||
s.updateSpaceSyncStatus(spaceId)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *spaceSyncStatus) sendEventToSession(spaceId, token string) {
|
||||
if s.isLocal {
|
||||
s.sendLocalOnlyEventToSession(spaceId, token)
|
||||
return
|
||||
}
|
||||
params := syncParams{
|
||||
bytesLeftPercentage: s.getBytesLeftPercentage(spaceId),
|
||||
connectionStatus: s.nodeStatus.GetNodeStatus(spaceId),
|
||||
compatibility: s.nodeConf.NetworkCompatibilityStatus(),
|
||||
filesSyncingCount: s.getFileSyncingObjectsCount(spaceId),
|
||||
objectsSyncingCount: s.getObjectSyncingObjectsCount(spaceId, s.getMissingIds(spaceId)),
|
||||
}
|
||||
s.eventSender.SendToSession(token, &pb.Event{
|
||||
Messages: []*pb.EventMessage{{
|
||||
Value: &pb.EventMessageValueOfSpaceSyncStatusUpdate{
|
||||
SpaceSyncStatusUpdate: s.makeSpaceSyncEvent(spaceId),
|
||||
SpaceSyncStatusUpdate: s.makeSyncEvent(spaceId, params),
|
||||
},
|
||||
}},
|
||||
})
|
||||
|
@ -110,21 +160,16 @@ func (s *spaceSyncStatus) sendEventToSession(spaceId, token string) {
|
|||
|
||||
func (s *spaceSyncStatus) sendStartEvent(spaceIds []string) {
|
||||
for _, id := range spaceIds {
|
||||
s.eventSender.Broadcast(&pb.Event{
|
||||
Messages: []*pb.EventMessage{{
|
||||
Value: &pb.EventMessageValueOfSpaceSyncStatusUpdate{
|
||||
SpaceSyncStatusUpdate: s.makeSpaceSyncEvent(id),
|
||||
},
|
||||
}},
|
||||
})
|
||||
s.updateSpaceSyncStatus(id)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *spaceSyncStatus) sendLocalOnlyEvent() {
|
||||
s.eventSender.Broadcast(&pb.Event{
|
||||
func (s *spaceSyncStatus) sendLocalOnlyEvent(spaceId string) {
|
||||
s.broadcast(&pb.Event{
|
||||
Messages: []*pb.EventMessage{{
|
||||
Value: &pb.EventMessageValueOfSpaceSyncStatusUpdate{
|
||||
SpaceSyncStatusUpdate: &pb.EventSpaceSyncStatusUpdate{
|
||||
Id: spaceId,
|
||||
Status: pb.EventSpace_Offline,
|
||||
Network: pb.EventSpace_LocalOnly,
|
||||
},
|
||||
|
@ -133,164 +178,136 @@ func (s *spaceSyncStatus) sendLocalOnlyEvent() {
|
|||
})
|
||||
}
|
||||
|
||||
func (s *spaceSyncStatus) SendUpdate(status *domain.SpaceSync) {
|
||||
e := s.batcher.Add(context.Background(), status)
|
||||
if e != nil {
|
||||
log.Errorf("failed to add space sync event to queue %s", e)
|
||||
}
|
||||
func eventsEqual(a, b pb.EventSpaceSyncStatusUpdate) bool {
|
||||
return a.Id == b.Id &&
|
||||
a.Status == b.Status &&
|
||||
a.Network == b.Network &&
|
||||
a.Error == b.Error &&
|
||||
a.SyncingObjectsCounter == b.SyncingObjectsCounter
|
||||
}
|
||||
|
||||
func (s *spaceSyncStatus) processEvents() {
|
||||
defer close(s.finish)
|
||||
for {
|
||||
status, err := s.batcher.WaitOne(s.ctx)
|
||||
if err != nil {
|
||||
log.Errorf("failed to get event from batcher: %s", err)
|
||||
return
|
||||
}
|
||||
if status.SpaceId == s.spaceIdGetter.TechSpaceId() {
|
||||
continue
|
||||
}
|
||||
s.updateSpaceSyncStatus(status)
|
||||
func (s *spaceSyncStatus) broadcast(event *pb.Event) {
|
||||
s.mx.Lock()
|
||||
val := *event.Messages[0].Value.(*pb.EventMessageValueOfSpaceSyncStatusUpdate).SpaceSyncStatusUpdate
|
||||
ev, ok := s.lastSentEvents[val.Id]
|
||||
s.lastSentEvents[val.Id] = val
|
||||
s.mx.Unlock()
|
||||
if ok && eventsEqual(ev, val) {
|
||||
return
|
||||
}
|
||||
s.eventSender.Broadcast(event)
|
||||
}
|
||||
|
||||
func (s *spaceSyncStatus) updateSpaceSyncStatus(receivedStatus *domain.SpaceSync) {
|
||||
currSyncStatus := s.getSpaceSyncStatus(receivedStatus.SpaceId)
|
||||
if s.isStatusNotChanged(receivedStatus, currSyncStatus) {
|
||||
return
|
||||
}
|
||||
state := s.getCurrentState(receivedStatus)
|
||||
prevObjectNumber := s.getObjectNumber(receivedStatus.SpaceId)
|
||||
state.SetObjectsNumber(receivedStatus)
|
||||
newObjectNumber := s.getObjectNumber(receivedStatus.SpaceId)
|
||||
state.SetSyncStatusAndErr(receivedStatus.Status, receivedStatus.SyncError, receivedStatus.SpaceId)
|
||||
|
||||
spaceStatus := s.getSpaceSyncStatus(receivedStatus.SpaceId)
|
||||
|
||||
// send synced event only if files and objects are all synced
|
||||
if !s.needToSendEvent(spaceStatus, currSyncStatus, prevObjectNumber, newObjectNumber) {
|
||||
return
|
||||
}
|
||||
s.eventSender.Broadcast(&pb.Event{
|
||||
func (s *spaceSyncStatus) sendLocalOnlyEventToSession(spaceId, token string) {
|
||||
s.eventSender.SendToSession(token, &pb.Event{
|
||||
Messages: []*pb.EventMessage{{
|
||||
Value: &pb.EventMessageValueOfSpaceSyncStatusUpdate{
|
||||
SpaceSyncStatusUpdate: s.makeSpaceSyncEvent(receivedStatus.SpaceId),
|
||||
SpaceSyncStatusUpdate: &pb.EventSpaceSyncStatusUpdate{
|
||||
Id: spaceId,
|
||||
Status: pb.EventSpace_Offline,
|
||||
Network: pb.EventSpace_LocalOnly,
|
||||
},
|
||||
},
|
||||
}},
|
||||
})
|
||||
state.ResetSpaceErrorStatus(receivedStatus.SpaceId, receivedStatus.SyncError)
|
||||
}
|
||||
|
||||
func (s *spaceSyncStatus) isStatusNotChanged(status *domain.SpaceSync, syncStatus domain.SpaceSyncStatus) bool {
|
||||
if status.Status == domain.Syncing {
|
||||
// we need to check if number of syncing object is changed first
|
||||
return false
|
||||
}
|
||||
syncErrNotChanged := s.getError(status.SpaceId) == mapError(status.SyncError)
|
||||
if syncStatus == domain.Unknown {
|
||||
return false
|
||||
}
|
||||
statusNotChanged := syncStatus == status.Status
|
||||
if syncErrNotChanged && statusNotChanged {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
func (s *spaceSyncStatus) Refresh(spaceId string) {
|
||||
s.mx.Lock()
|
||||
defer s.mx.Unlock()
|
||||
s.curStatuses[spaceId] = struct{}{}
|
||||
}
|
||||
|
||||
func (s *spaceSyncStatus) needToSendEvent(status domain.SpaceSyncStatus, currSyncStatus domain.SpaceSyncStatus, prevObjectNumber int64, newObjectNumber int64) bool {
|
||||
// that because we get update on syncing objects count, so we need to send updated object counter to client
|
||||
return (status == domain.Syncing && prevObjectNumber != newObjectNumber) || currSyncStatus != status
|
||||
func (s *spaceSyncStatus) getObjectSyncingObjectsCount(spaceId string, missingObjects []string) int {
|
||||
curSub, err := s.subs.GetSubscription(spaceId)
|
||||
if err != nil {
|
||||
log.Errorf("failed to get subscription: %s", err)
|
||||
return 0
|
||||
}
|
||||
return curSub.SyncingObjectsCount(missingObjects)
|
||||
}
|
||||
|
||||
func (s *spaceSyncStatus) getFileSyncingObjectsCount(spaceId string) int {
|
||||
curSub, err := s.subs.GetSubscription(spaceId)
|
||||
if err != nil {
|
||||
log.Errorf("failed to get subscription: %s", err)
|
||||
return 0
|
||||
}
|
||||
return curSub.FileSyncingObjectsCount()
|
||||
}
|
||||
|
||||
func (s *spaceSyncStatus) getBytesLeftPercentage(spaceId string) float64 {
|
||||
nodeUsage, err := s.nodeUsage.GetNodeUsage(context.Background())
|
||||
if err != nil {
|
||||
log.Errorf("failed to get node usage: %s", err)
|
||||
return 0
|
||||
}
|
||||
return float64(nodeUsage.Usage.BytesLeft) / float64(nodeUsage.Usage.AccountBytesLimit)
|
||||
}
|
||||
|
||||
func (s *spaceSyncStatus) updateSpaceSyncStatus(spaceId string) {
|
||||
if s.isLocal {
|
||||
s.sendLocalOnlyEvent(spaceId)
|
||||
return
|
||||
}
|
||||
missingObjects := s.getMissingIds(spaceId)
|
||||
params := syncParams{
|
||||
bytesLeftPercentage: s.getBytesLeftPercentage(spaceId),
|
||||
connectionStatus: s.nodeStatus.GetNodeStatus(spaceId),
|
||||
compatibility: s.nodeConf.NetworkCompatibilityStatus(),
|
||||
filesSyncingCount: s.getFileSyncingObjectsCount(spaceId),
|
||||
objectsSyncingCount: s.getObjectSyncingObjectsCount(spaceId, missingObjects),
|
||||
}
|
||||
s.broadcast(&pb.Event{
|
||||
Messages: []*pb.EventMessage{{
|
||||
Value: &pb.EventMessageValueOfSpaceSyncStatusUpdate{
|
||||
SpaceSyncStatusUpdate: s.makeSyncEvent(spaceId, params),
|
||||
},
|
||||
}},
|
||||
})
|
||||
}
|
||||
|
||||
func (s *spaceSyncStatus) Close(ctx context.Context) (err error) {
|
||||
if s.ctxCancel != nil {
|
||||
s.ctxCancel()
|
||||
}
|
||||
<-s.finish
|
||||
return s.batcher.Close()
|
||||
s.periodicCall.Close()
|
||||
return
|
||||
}
|
||||
|
||||
func (s *spaceSyncStatus) makeSpaceSyncEvent(spaceId string) *pb.EventSpaceSyncStatusUpdate {
|
||||
type syncParams struct {
|
||||
bytesLeftPercentage float64
|
||||
connectionStatus nodestatus.ConnectionStatus
|
||||
compatibility nodeconf.NetworkCompatibilityStatus
|
||||
filesSyncingCount int
|
||||
objectsSyncingCount int
|
||||
}
|
||||
|
||||
func (s *spaceSyncStatus) makeSyncEvent(spaceId string, params syncParams) *pb.EventSpaceSyncStatusUpdate {
|
||||
status := pb.EventSpace_Synced
|
||||
err := pb.EventSpace_Null
|
||||
syncingObjectsCount := int64(params.objectsSyncingCount + params.filesSyncingCount)
|
||||
if syncingObjectsCount > 0 {
|
||||
status = pb.EventSpace_Syncing
|
||||
}
|
||||
if params.bytesLeftPercentage < 0.1 {
|
||||
status = pb.EventSpace_Error
|
||||
err = pb.EventSpace_StorageLimitExceed
|
||||
}
|
||||
if params.connectionStatus == nodestatus.ConnectionError {
|
||||
status = pb.EventSpace_Offline
|
||||
err = pb.EventSpace_NetworkError
|
||||
}
|
||||
if params.compatibility == nodeconf.NetworkCompatibilityStatusIncompatible {
|
||||
status = pb.EventSpace_Error
|
||||
err = pb.EventSpace_IncompatibleVersion
|
||||
}
|
||||
return &pb.EventSpaceSyncStatusUpdate{
|
||||
Id: spaceId,
|
||||
Status: mapStatus(s.getSpaceSyncStatus(spaceId)),
|
||||
Status: status,
|
||||
Network: mapNetworkMode(s.networkConfig.GetNetworkMode()),
|
||||
Error: s.getError(spaceId),
|
||||
SyncingObjectsCounter: s.getObjectNumber(spaceId),
|
||||
Error: err,
|
||||
SyncingObjectsCounter: syncingObjectsCount,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *spaceSyncStatus) getObjectNumber(spaceId string) int64 {
|
||||
return int64(s.filesState.GetSyncObjectCount(spaceId) + s.objectsState.GetSyncObjectCount(spaceId))
|
||||
}
|
||||
|
||||
func (s *spaceSyncStatus) getSpaceSyncStatus(spaceId string) domain.SpaceSyncStatus {
|
||||
filesStatus := s.filesState.GetSyncStatus(spaceId)
|
||||
objectsStatus := s.objectsState.GetSyncStatus(spaceId)
|
||||
|
||||
if s.isUnknown(filesStatus, objectsStatus) {
|
||||
return domain.Unknown
|
||||
}
|
||||
if s.isOfflineStatus(filesStatus, objectsStatus) {
|
||||
return domain.Offline
|
||||
}
|
||||
|
||||
if s.isSyncedStatus(filesStatus, objectsStatus) {
|
||||
return domain.Synced
|
||||
}
|
||||
|
||||
if s.isErrorStatus(filesStatus, objectsStatus) {
|
||||
return domain.Error
|
||||
}
|
||||
|
||||
if s.isSyncingStatus(filesStatus, objectsStatus) {
|
||||
return domain.Syncing
|
||||
}
|
||||
return domain.Synced
|
||||
}
|
||||
|
||||
func (s *spaceSyncStatus) isSyncingStatus(filesStatus domain.SpaceSyncStatus, objectsStatus domain.SpaceSyncStatus) bool {
|
||||
return filesStatus == domain.Syncing || objectsStatus == domain.Syncing
|
||||
}
|
||||
|
||||
func (s *spaceSyncStatus) isErrorStatus(filesStatus domain.SpaceSyncStatus, objectsStatus domain.SpaceSyncStatus) bool {
|
||||
return filesStatus == domain.Error || objectsStatus == domain.Error
|
||||
}
|
||||
|
||||
func (s *spaceSyncStatus) isSyncedStatus(filesStatus domain.SpaceSyncStatus, objectsStatus domain.SpaceSyncStatus) bool {
|
||||
return filesStatus == domain.Synced && objectsStatus == domain.Synced
|
||||
}
|
||||
|
||||
func (s *spaceSyncStatus) isOfflineStatus(filesStatus domain.SpaceSyncStatus, objectsStatus domain.SpaceSyncStatus) bool {
|
||||
return filesStatus == domain.Offline || objectsStatus == domain.Offline
|
||||
}
|
||||
|
||||
func (s *spaceSyncStatus) getCurrentState(status *domain.SpaceSync) State {
|
||||
if status.SyncType == domain.Files {
|
||||
return s.filesState
|
||||
}
|
||||
return s.objectsState
|
||||
}
|
||||
|
||||
func (s *spaceSyncStatus) getError(spaceId string) pb.EventSpaceSyncError {
|
||||
syncErr := s.filesState.GetSyncErr(spaceId)
|
||||
if syncErr != domain.Null {
|
||||
return mapError(syncErr)
|
||||
}
|
||||
|
||||
syncErr = s.objectsState.GetSyncErr(spaceId)
|
||||
if syncErr != domain.Null {
|
||||
return mapError(syncErr)
|
||||
}
|
||||
|
||||
return pb.EventSpace_Null
|
||||
}
|
||||
|
||||
func (s *spaceSyncStatus) isUnknown(filesStatus domain.SpaceSyncStatus, objectsStatus domain.SpaceSyncStatus) bool {
|
||||
return filesStatus == domain.Unknown && objectsStatus == domain.Unknown
|
||||
}
|
||||
|
||||
func mapNetworkMode(mode pb.RpcAccountNetworkMode) pb.EventSpaceNetwork {
|
||||
switch mode {
|
||||
case pb.RpcAccount_LocalOnly:
|
||||
|
@ -301,29 +318,3 @@ func mapNetworkMode(mode pb.RpcAccountNetworkMode) pb.EventSpaceNetwork {
|
|||
return pb.EventSpace_Anytype
|
||||
}
|
||||
}
|
||||
|
||||
func mapStatus(status domain.SpaceSyncStatus) pb.EventSpaceStatus {
|
||||
switch status {
|
||||
case domain.Syncing:
|
||||
return pb.EventSpace_Syncing
|
||||
case domain.Offline:
|
||||
return pb.EventSpace_Offline
|
||||
case domain.Error:
|
||||
return pb.EventSpace_Error
|
||||
default:
|
||||
return pb.EventSpace_Synced
|
||||
}
|
||||
}
|
||||
|
||||
func mapError(err domain.SyncError) pb.EventSpaceSyncError {
|
||||
switch err {
|
||||
case domain.NetworkError:
|
||||
return pb.EventSpace_NetworkError
|
||||
case domain.IncompatibleVersion:
|
||||
return pb.EventSpace_IncompatibleVersion
|
||||
case domain.StorageLimitExceed:
|
||||
return pb.EventSpace_StorageLimitExceed
|
||||
default:
|
||||
return pb.EventSpace_Null
|
||||
}
|
||||
}
|
||||
|
|
File diff suppressed because it is too large
Load diff
159
core/syncstatus/syncsubscriptions/objectsubscription.go
Normal file
159
core/syncstatus/syncsubscriptions/objectsubscription.go
Normal file
|
@ -0,0 +1,159 @@
|
|||
package syncsubscriptions
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
"github.com/cheggaaa/mb/v3"
|
||||
"github.com/gogo/protobuf/types"
|
||||
|
||||
"github.com/anyproto/anytype-heart/core/subscription"
|
||||
"github.com/anyproto/anytype-heart/pb"
|
||||
"github.com/anyproto/anytype-heart/pkg/lib/bundle"
|
||||
"github.com/anyproto/anytype-heart/util/pbtypes"
|
||||
)
|
||||
|
||||
type entry[T any] struct {
|
||||
data T
|
||||
}
|
||||
|
||||
func newEmptyEntry[T any]() *entry[T] {
|
||||
return &entry[T]{}
|
||||
}
|
||||
|
||||
func newEntry[T any](data T) *entry[T] {
|
||||
return &entry[T]{data: data}
|
||||
}
|
||||
|
||||
type (
|
||||
extract[T any] func(*types.Struct) (string, T)
|
||||
update[T any] func(string, *types.Value, T) T
|
||||
unset[T any] func([]string, T) T
|
||||
)
|
||||
|
||||
type SubscriptionParams[T any] struct {
|
||||
Request subscription.SubscribeRequest
|
||||
Extract extract[T]
|
||||
Update update[T]
|
||||
Unset unset[T]
|
||||
}
|
||||
|
||||
func NewIdSubscription(service subscription.Service, request subscription.SubscribeRequest) *ObjectSubscription[struct{}] {
|
||||
return &ObjectSubscription[struct{}]{
|
||||
request: request,
|
||||
service: service,
|
||||
ch: make(chan struct{}),
|
||||
extract: func(t *types.Struct) (string, struct{}) {
|
||||
return pbtypes.GetString(t, bundle.RelationKeyId.String()), struct{}{}
|
||||
},
|
||||
update: func(s string, value *types.Value, s2 struct{}) struct{} {
|
||||
return struct{}{}
|
||||
},
|
||||
unset: func(strings []string, s struct{}) struct{} {
|
||||
return struct{}{}
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func NewObjectSubscription[T any](service subscription.Service, params SubscriptionParams[T]) *ObjectSubscription[T] {
|
||||
return &ObjectSubscription[T]{
|
||||
request: params.Request,
|
||||
service: service,
|
||||
ch: make(chan struct{}),
|
||||
extract: params.Extract,
|
||||
update: params.Update,
|
||||
unset: params.Unset,
|
||||
}
|
||||
}
|
||||
|
||||
type ObjectSubscription[T any] struct {
|
||||
request subscription.SubscribeRequest
|
||||
service subscription.Service
|
||||
ch chan struct{}
|
||||
events *mb.MB[*pb.EventMessage]
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
sub map[string]*entry[T]
|
||||
extract extract[T]
|
||||
update update[T]
|
||||
unset unset[T]
|
||||
mx sync.Mutex
|
||||
}
|
||||
|
||||
func (o *ObjectSubscription[T]) Run() error {
|
||||
resp, err := o.service.Search(o.request)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o.ctx, o.cancel = context.WithCancel(context.Background())
|
||||
o.events = resp.Output
|
||||
o.sub = map[string]*entry[T]{}
|
||||
for _, rec := range resp.Records {
|
||||
id, data := o.extract(rec)
|
||||
o.sub[id] = newEntry(data)
|
||||
}
|
||||
go o.read()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *ObjectSubscription[T]) Close() {
|
||||
o.cancel()
|
||||
<-o.ch
|
||||
}
|
||||
|
||||
func (o *ObjectSubscription[T]) Len() int {
|
||||
o.mx.Lock()
|
||||
defer o.mx.Unlock()
|
||||
return len(o.sub)
|
||||
}
|
||||
|
||||
func (o *ObjectSubscription[T]) Iterate(iter func(id string, data T) bool) {
|
||||
o.mx.Lock()
|
||||
defer o.mx.Unlock()
|
||||
for id, ent := range o.sub {
|
||||
if !iter(id, ent.data) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// read consumes subscription events until the context is cancelled or the
// queue fails, applying each event to the local sub map. It closes o.ch
// on exit so Close can wait for termination.
func (o *ObjectSubscription[T]) read() {
	defer close(o.ch)
	// readEvent applies one event to the map under the lock.
	readEvent := func(event *pb.EventMessage) {
		o.mx.Lock()
		defer o.mx.Unlock()
		switch v := event.Value.(type) {
		case *pb.EventMessageValueOfSubscriptionAdd:
			// A new object entered the subscription; its details arrive in a
			// separate set/amend event, so start from an empty payload.
			o.sub[v.SubscriptionAdd.Id] = newEmptyEntry[T]()
		case *pb.EventMessageValueOfSubscriptionRemove:
			delete(o.sub, v.SubscriptionRemove.Id)
		case *pb.EventMessageValueOfObjectDetailsAmend:
			curEntry := o.sub[v.ObjectDetailsAmend.Id]
			if curEntry == nil {
				// Details for an object we are not tracking — ignore.
				return
			}
			// Fold each changed key/value into the payload.
			for _, value := range v.ObjectDetailsAmend.Details {
				curEntry.data = o.update(value.Key, value.Value, curEntry.data)
			}
		case *pb.EventMessageValueOfObjectDetailsUnset:
			curEntry := o.sub[v.ObjectDetailsUnset.Id]
			if curEntry == nil {
				return
			}
			curEntry.data = o.unset(v.ObjectDetailsUnset.Keys, curEntry.data)
		case *pb.EventMessageValueOfObjectDetailsSet:
			curEntry := o.sub[v.ObjectDetailsSet.Id]
			if curEntry == nil {
				return
			}
			// Full replacement: recompute the payload from the new details.
			_, curEntry.data = o.extract(v.ObjectDetailsSet.Details)
		}
	}
	for {
		event, err := o.events.WaitOne(o.ctx)
		if err != nil {
			// Context cancelled (Close) or queue closed — stop reading.
			return
		}
		readEvent(event)
	}
}
|
130
core/syncstatus/syncsubscriptions/objectsubscription_test.go
Normal file
130
core/syncstatus/syncsubscriptions/objectsubscription_test.go
Normal file
|
@ -0,0 +1,130 @@
|
|||
package syncsubscriptions
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/cheggaaa/mb/v3"
|
||||
"github.com/gogo/protobuf/types"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/anyproto/anytype-heart/core/subscription"
|
||||
"github.com/anyproto/anytype-heart/core/subscription/mock_subscription"
|
||||
"github.com/anyproto/anytype-heart/pb"
|
||||
"github.com/anyproto/anytype-heart/pkg/lib/bundle"
|
||||
"github.com/anyproto/anytype-heart/util/pbtypes"
|
||||
)
|
||||
|
||||
// makeSubscriptionAdd builds a SubscriptionAdd event for the given object id.
func makeSubscriptionAdd(id string) *pb.EventMessage {
	return &pb.EventMessage{
		Value: &pb.EventMessageValueOfSubscriptionAdd{
			SubscriptionAdd: &pb.EventObjectSubscriptionAdd{
				Id: id,
			},
		},
	}
}
|
||||
|
||||
// makeSubscriptionRemove builds a SubscriptionRemove event for the given object id.
func makeSubscriptionRemove(id string) *pb.EventMessage {
	return &pb.EventMessage{
		Value: &pb.EventMessageValueOfSubscriptionRemove{
			SubscriptionRemove: &pb.EventObjectSubscriptionRemove{
				Id: id,
			},
		},
	}
}
|
||||
|
||||
// makeDetailsSet builds an ObjectDetailsSet event carrying a single
// key1=value1 detail for the given object id.
func makeDetailsSet(id string) *pb.EventMessage {
	return &pb.EventMessage{
		Value: &pb.EventMessageValueOfObjectDetailsSet{
			ObjectDetailsSet: &pb.EventObjectDetailsSet{
				Id: id,
				Details: &types.Struct{
					Fields: map[string]*types.Value{
						"key1": pbtypes.String("value1"),
					},
				},
			},
		},
	}
}
|
||||
|
||||
// makeDetailsUnset builds an ObjectDetailsUnset event removing key1 and
// key2 from the given object id.
func makeDetailsUnset(id string) *pb.EventMessage {
	return &pb.EventMessage{
		Value: &pb.EventMessageValueOfObjectDetailsUnset{
			ObjectDetailsUnset: &pb.EventObjectDetailsUnset{
				Id:   id,
				Keys: []string{"key1", "key2"},
			},
		},
	}
}
|
||||
|
||||
// makeDetailsAmend builds an ObjectDetailsAmend event setting key3=value3
// on the given object id.
func makeDetailsAmend(id string) *pb.EventMessage {
	return &pb.EventMessage{
		Value: &pb.EventMessageValueOfObjectDetailsAmend{
			ObjectDetailsAmend: &pb.EventObjectDetailsAmend{
				Id: id,
				Details: []*pb.EventObjectDetailsAmendKeyValue{
					{
						Key:   "key3",
						Value: pbtypes.String("value3"),
					},
				},
			},
		},
	}
}
|
||||
|
||||
// makeStructs builds one minimal object record per id, containing only
// the id relation — the shape consumed by NewIdSubscription's extractor.
func makeStructs(ids []string) []*types.Struct {
	structs := make([]*types.Struct, len(ids))
	for i, id := range ids {
		structs[i] = &types.Struct{
			Fields: map[string]*types.Value{
				bundle.RelationKeyId.String(): pbtypes.String(id),
			},
		}
	}
	return structs
}
|
||||
|
||||
// TestIdSubscription seeds an id subscription with records {1,2,3}, feeds
// it a sequence of add/remove/details events and checks the surviving id
// set: removes of 2, 1 and a no-op remove of 5, adds of 4 and 3 leave {3,4}.
func TestIdSubscription(t *testing.T) {
	subService := mock_subscription.NewMockService(t)
	events := mb.New[*pb.EventMessage](0)
	records := makeStructs([]string{"1", "2", "3"})
	// for details amend, set and unset we just check that we handle them correctly (i.e. do nothing)
	messages := []*pb.EventMessage{
		makeSubscriptionRemove("2"),
		makeDetailsSet("1"),
		makeDetailsUnset("2"),
		makeDetailsAmend("3"),
		makeSubscriptionAdd("4"),
		makeSubscriptionRemove("1"),
		makeSubscriptionAdd("3"),
		makeSubscriptionRemove("5"),
	}
	for _, msg := range messages {
		err := events.Add(context.Background(), msg)
		require.NoError(t, err)
	}
	subscribeResponse := &subscription.SubscribeResponse{
		Output:  events,
		Records: records,
	}
	subService.EXPECT().Search(mock.Anything).Return(subscribeResponse, nil)
	sub := NewIdSubscription(subService, subscription.SubscribeRequest{})
	err := sub.Run()
	require.NoError(t, err)
	// NOTE(review): the sleep gives the background reader time to drain the
	// queue; an explicit synchronization point would be less flaky.
	time.Sleep(100 * time.Millisecond)
	ids := make(map[string]struct{})
	sub.Iterate(func(id string, _ struct{}) bool {
		ids[id] = struct{}{}
		return true
	})
	require.Len(t, ids, 2)
	require.Contains(t, ids, "3")
	require.Contains(t, ids, "4")
}
|
111
core/syncstatus/syncsubscriptions/syncingobjects.go
Normal file
111
core/syncstatus/syncsubscriptions/syncingobjects.go
Normal file
|
@ -0,0 +1,111 @@
|
|||
package syncsubscriptions
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/anyproto/anytype-heart/core/domain"
|
||||
"github.com/anyproto/anytype-heart/core/subscription"
|
||||
"github.com/anyproto/anytype-heart/core/syncstatus/filesyncstatus"
|
||||
"github.com/anyproto/anytype-heart/pkg/lib/bundle"
|
||||
"github.com/anyproto/anytype-heart/pkg/lib/pb/model"
|
||||
"github.com/anyproto/anytype-heart/util/pbtypes"
|
||||
"github.com/anyproto/anytype-heart/util/slice"
|
||||
)
|
||||
|
||||
// syncingObjects tracks, for a single space, the ids of regular objects
// and file objects that are currently syncing, via two id subscriptions.
type syncingObjects struct {
	// fileSubscription and objectSubscription are created in Run.
	fileSubscription   *ObjectSubscription[struct{}]
	objectSubscription *ObjectSubscription[struct{}]
	service            subscription.Service
	spaceId            string
}
|
||||
|
||||
func newSyncingObjects(spaceId string, service subscription.Service) *syncingObjects {
|
||||
return &syncingObjects{
|
||||
service: service,
|
||||
spaceId: spaceId,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *syncingObjects) Run() error {
|
||||
objectReq := subscription.SubscribeRequest{
|
||||
SubId: fmt.Sprintf("spacestatus.objects.%s", s.spaceId),
|
||||
Internal: true,
|
||||
NoDepSubscription: true,
|
||||
Keys: []string{bundle.RelationKeyId.String()},
|
||||
Filters: []*model.BlockContentDataviewFilter{
|
||||
{
|
||||
RelationKey: bundle.RelationKeySyncStatus.String(),
|
||||
Condition: model.BlockContentDataviewFilter_Equal,
|
||||
Value: pbtypes.Int64(int64(domain.SpaceSyncStatusSyncing)),
|
||||
},
|
||||
{
|
||||
RelationKey: bundle.RelationKeyLayout.String(),
|
||||
Condition: model.BlockContentDataviewFilter_NotIn,
|
||||
Value: pbtypes.IntList(
|
||||
int(model.ObjectType_file),
|
||||
int(model.ObjectType_image),
|
||||
int(model.ObjectType_video),
|
||||
int(model.ObjectType_audio),
|
||||
),
|
||||
},
|
||||
{
|
||||
RelationKey: bundle.RelationKeySpaceId.String(),
|
||||
Condition: model.BlockContentDataviewFilter_Equal,
|
||||
Value: pbtypes.String(s.spaceId),
|
||||
},
|
||||
},
|
||||
}
|
||||
fileReq := subscription.SubscribeRequest{
|
||||
SubId: fmt.Sprintf("spacestatus.files.%s", s.spaceId),
|
||||
Internal: true,
|
||||
NoDepSubscription: true,
|
||||
Keys: []string{bundle.RelationKeyId.String()},
|
||||
Filters: []*model.BlockContentDataviewFilter{
|
||||
{
|
||||
RelationKey: bundle.RelationKeyFileBackupStatus.String(),
|
||||
Condition: model.BlockContentDataviewFilter_In,
|
||||
Value: pbtypes.IntList(int(filesyncstatus.Syncing), int(filesyncstatus.Queued)),
|
||||
},
|
||||
{
|
||||
RelationKey: bundle.RelationKeySpaceId.String(),
|
||||
Condition: model.BlockContentDataviewFilter_Equal,
|
||||
Value: pbtypes.String(s.spaceId),
|
||||
},
|
||||
},
|
||||
}
|
||||
s.fileSubscription = NewIdSubscription(s.service, fileReq)
|
||||
s.objectSubscription = NewIdSubscription(s.service, objectReq)
|
||||
errFiles := s.fileSubscription.Run()
|
||||
errObjects := s.objectSubscription.Run()
|
||||
if errFiles != nil || errObjects != nil {
|
||||
return fmt.Errorf("error running syncing objects: %w %w", errFiles, errObjects)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close stops both underlying subscriptions. It must only be called
// after a successful Run, which is where the subscriptions are created.
func (s *syncingObjects) Close() {
	s.fileSubscription.Close()
	s.objectSubscription.Close()
}
|
||||
|
||||
// GetFileSubscription returns the subscription tracking syncing/queued file objects.
func (s *syncingObjects) GetFileSubscription() *ObjectSubscription[struct{}] {
	return s.fileSubscription
}
|
||||
|
||||
// GetObjectSubscription returns the subscription tracking syncing non-file objects.
func (s *syncingObjects) GetObjectSubscription() *ObjectSubscription[struct{}] {
	return s.objectSubscription
}
|
||||
|
||||
// SyncingObjectsCount returns the number of tracked syncing objects plus
// the ids from missing that are not already tracked, so no object is
// counted twice.
func (s *syncingObjects) SyncingObjectsCount(missing []string) int {
	ids := make([]string, 0, s.objectSubscription.Len())
	s.objectSubscription.Iterate(func(id string, _ struct{}) bool {
		ids = append(ids, id)
		return true
	})
	// added holds the elements of missing absent from ids
	// (DifferenceRemovedAdded(prev, next) returns (removed, added)).
	_, added := slice.DifferenceRemovedAdded(ids, missing)
	return len(ids) + len(added)
}
|
||||
|
||||
// FileSyncingObjectsCount returns how many file objects are currently syncing or queued.
func (s *syncingObjects) FileSyncingObjectsCount() int {
	return s.fileSubscription.Len()
}
|
23
core/syncstatus/syncsubscriptions/syncingobjects_test.go
Normal file
23
core/syncstatus/syncsubscriptions/syncingobjects_test.go
Normal file
|
@ -0,0 +1,23 @@
|
|||
package syncsubscriptions
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/anyproto/anytype-heart/core/subscription"
|
||||
)
|
||||
|
||||
// TestCount checks that SyncingObjectsCount adds only the missing ids not
// already tracked: tracked {1,2,4} plus missing-only {3} gives 4.
func TestCount(t *testing.T) {
	objSubscription := NewIdSubscription(nil, subscription.SubscribeRequest{})
	objSubscription.sub = map[string]*entry[struct{}]{
		"1": newEmptyEntry[struct{}](),
		"2": newEmptyEntry[struct{}](),
		"4": newEmptyEntry[struct{}](),
	}
	syncing := &syncingObjects{
		objectSubscription: objSubscription,
	}
	cnt := syncing.SyncingObjectsCount([]string{"1", "2", "3"})
	require.Equal(t, 4, cnt)
}
|
80
core/syncstatus/syncsubscriptions/syncsubscriptions.go
Normal file
80
core/syncstatus/syncsubscriptions/syncsubscriptions.go
Normal file
|
@ -0,0 +1,80 @@
|
|||
package syncsubscriptions
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
"github.com/anyproto/any-sync/app"
|
||||
"github.com/samber/lo"
|
||||
|
||||
"github.com/anyproto/anytype-heart/core/subscription"
|
||||
)
|
||||
|
||||
// CName is the application component name of this package.
const CName = "client.syncstatus.syncsubscriptions"

// SyncSubscription tracks, for a single space, the sets of regular and
// file objects that are currently syncing.
type SyncSubscription interface {
	Run() error
	Close()
	GetFileSubscription() *ObjectSubscription[struct{}]
	GetObjectSubscription() *ObjectSubscription[struct{}]
	SyncingObjectsCount(missing []string) int
	FileSyncingObjectsCount() int
}
|
||||
|
||||
// SyncSubscriptions is an app component that lazily creates and caches
// one SyncSubscription per space id.
type SyncSubscriptions interface {
	app.ComponentRunnable
	GetSubscription(id string) (SyncSubscription, error)
}
|
||||
|
||||
func New() SyncSubscriptions {
|
||||
return &syncSubscriptions{
|
||||
subs: make(map[string]SyncSubscription),
|
||||
}
|
||||
}
|
||||
|
||||
// syncSubscriptions implements SyncSubscriptions. The embedded Mutex
// guards subs; service is resolved in Init.
type syncSubscriptions struct {
	sync.Mutex
	service subscription.Service
	subs    map[string]SyncSubscription
}
|
||||
|
||||
// Init resolves the subscription service dependency from the app container.
func (s *syncSubscriptions) Init(a *app.App) (err error) {
	s.service = app.MustComponent[subscription.Service](a)
	return
}
|
||||
|
||||
// Name returns the component name used for registration and lookup.
func (s *syncSubscriptions) Name() (name string) {
	return CName
}
|
||||
|
||||
// Run implements app.ComponentRunnable; per-space subscriptions are
// created lazily in GetSubscription, so nothing is started here.
func (s *syncSubscriptions) Run(ctx context.Context) (err error) {
	return nil
}
|
||||
|
||||
func (s *syncSubscriptions) GetSubscription(id string) (SyncSubscription, error) {
|
||||
s.Lock()
|
||||
curSub := s.subs[id]
|
||||
s.Unlock()
|
||||
if curSub != nil {
|
||||
return curSub, nil
|
||||
}
|
||||
sub := newSyncingObjects(id, s.service)
|
||||
err := sub.Run()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s.Lock()
|
||||
s.subs[id] = sub
|
||||
s.Unlock()
|
||||
return sub, nil
|
||||
}
|
||||
|
||||
func (s *syncSubscriptions) Close(ctx context.Context) (err error) {
|
||||
s.Lock()
|
||||
subs := lo.Values(s.subs)
|
||||
s.Unlock()
|
||||
for _, sub := range subs {
|
||||
sub.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
116
core/syncstatus/syncsubscriptions/syncsubscriptions_test.go
Normal file
116
core/syncstatus/syncsubscriptions/syncsubscriptions_test.go
Normal file
|
@ -0,0 +1,116 @@
|
|||
package syncsubscriptions
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/anyproto/anytype-heart/core/domain"
|
||||
"github.com/anyproto/anytype-heart/core/subscription"
|
||||
"github.com/anyproto/anytype-heart/core/syncstatus/filesyncstatus"
|
||||
"github.com/anyproto/anytype-heart/pkg/lib/bundle"
|
||||
"github.com/anyproto/anytype-heart/pkg/lib/localstore/objectstore"
|
||||
"github.com/anyproto/anytype-heart/pkg/lib/pb/model"
|
||||
"github.com/anyproto/anytype-heart/util/pbtypes"
|
||||
)
|
||||
|
||||
func mapFileStatus(status filesyncstatus.Status) domain.ObjectSyncStatus {
|
||||
switch status {
|
||||
case filesyncstatus.Syncing:
|
||||
return domain.ObjectSyncStatusSyncing
|
||||
case filesyncstatus.Queued:
|
||||
return domain.ObjectSyncStatusSyncing
|
||||
case filesyncstatus.Limited:
|
||||
return domain.ObjectSyncStatusError
|
||||
default:
|
||||
return domain.ObjectSyncStatusSynced
|
||||
}
|
||||
}
|
||||
|
||||
// genFileObject builds a file-layout test object with a random id whose
// SyncStatus relation is derived from the given file backup status.
func genFileObject(fileStatus filesyncstatus.Status, spaceId string) objectstore.TestObject {
	id := fmt.Sprintf("%d", rand.Int())
	return objectstore.TestObject{
		bundle.RelationKeyId:               pbtypes.String(id),
		bundle.RelationKeySyncStatus:       pbtypes.Int64(int64(mapFileStatus(fileStatus))),
		bundle.RelationKeyFileBackupStatus: pbtypes.Int64(int64(fileStatus)),
		bundle.RelationKeyLayout:           pbtypes.Int64(int64(model.ObjectType_file)),
		bundle.RelationKeyName:             pbtypes.String("name" + id),
		bundle.RelationKeySpaceId:          pbtypes.String(spaceId),
	}
}
|
||||
|
||||
// genObject builds a basic-layout test object with a random id and the
// given sync status.
func genObject(syncStatus domain.ObjectSyncStatus, spaceId string) objectstore.TestObject {
	id := fmt.Sprintf("%d", rand.Int())
	return objectstore.TestObject{
		bundle.RelationKeyId:         pbtypes.String(id),
		bundle.RelationKeySyncStatus: pbtypes.Int64(int64(syncStatus)),
		bundle.RelationKeyLayout:     pbtypes.Int64(int64(model.ObjectType_basic)),
		bundle.RelationKeyName:       pbtypes.String("name" + id),
		bundle.RelationKeySpaceId:    pbtypes.String(spaceId),
	}
}
|
||||
|
||||
// TestSyncSubscriptions exercises the full pipeline: it seeds the store
// with syncing and synced objects and files, checks both counters and the
// iterated id sets, then flips the syncing objects to synced and checks
// the count drops to just the "missing" ids.
func TestSyncSubscriptions(t *testing.T) {
	testSubs := subscription.NewInternalTestService(t)
	var objects []objectstore.TestObject
	fileObjs := map[string]struct{}{}
	objs := map[string]struct{}{}
	// 10 syncing objects (tracked) and 10 synced ones (ignored).
	for i := 0; i < 10; i++ {
		obj := genObject(domain.ObjectSyncStatusSyncing, "spaceId")
		objects = append(objects, obj)
		objs[obj[bundle.RelationKeyId].GetStringValue()] = struct{}{}
	}
	for i := 0; i < 10; i++ {
		objects = append(objects, genObject(domain.ObjectSyncStatusSynced, "spaceId"))
	}
	// 10 syncing + 10 queued files (both tracked) and 10 synced files (ignored).
	for i := 0; i < 10; i++ {
		obj := genFileObject(filesyncstatus.Syncing, "spaceId")
		objects = append(objects, obj)
		fileObjs[obj[bundle.RelationKeyId].GetStringValue()] = struct{}{}
	}
	for i := 0; i < 10; i++ {
		obj := genFileObject(filesyncstatus.Queued, "spaceId")
		objects = append(objects, obj)
		fileObjs[obj[bundle.RelationKeyId].GetStringValue()] = struct{}{}
	}
	for i := 0; i < 10; i++ {
		objects = append(objects, genFileObject(filesyncstatus.Synced, "spaceId"))
	}
	testSubs.AddObjects(t, objects)
	subs := New()
	subs.(*syncSubscriptions).service = testSubs
	err := subs.Run(context.Background())
	require.NoError(t, err)
	// NOTE(review): the sleeps give the background readers time to catch
	// up; an explicit sync point would be less flaky.
	time.Sleep(100 * time.Millisecond)
	spaceSub, err := subs.GetSubscription("spaceId")
	require.NoError(t, err)
	// 10 tracked objects + 2 missing ids not present in the subscription.
	syncCnt := spaceSub.SyncingObjectsCount([]string{"1", "2"})
	fileCnt := spaceSub.FileSyncingObjectsCount()
	require.Equal(t, 12, syncCnt)
	require.Equal(t, 20, fileCnt)
	require.Len(t, fileObjs, 20)
	require.Len(t, objs, 10)
	// The tracked id sets must match the generated ones exactly.
	spaceSub.GetFileSubscription().Iterate(func(id string, data struct{}) bool {
		delete(fileObjs, id)
		return true
	})
	spaceSub.GetObjectSubscription().Iterate(func(id string, data struct{}) bool {
		delete(objs, id)
		return true
	})
	require.Empty(t, fileObjs)
	require.Empty(t, objs)
	// Mark the first ten (syncing) objects as synced: only the 2 missing
	// ids should remain counted.
	for i := 0; i < 10; i++ {
		objects[i][bundle.RelationKeySyncStatus] = pbtypes.Int64(int64(domain.ObjectSyncStatusSynced))
		testSubs.AddObjects(t, []objectstore.TestObject{objects[i]})
	}
	time.Sleep(100 * time.Millisecond)
	syncCnt = spaceSub.SyncingObjectsCount([]string{"1", "2"})
	require.Equal(t, 2, syncCnt)
	err = subs.Close(context.Background())
	require.NoError(t, err)
}
|
|
@ -21,11 +21,11 @@ type updateReceiver struct {
|
|||
eventSender event.Sender
|
||||
|
||||
nodeConfService nodeconf.Service
|
||||
sync.Mutex
|
||||
nodeConnected bool
|
||||
objectStore objectstore.ObjectStore
|
||||
nodeStatus nodestatus.NodeStatus
|
||||
spaceId string
|
||||
lock sync.Mutex
|
||||
nodeConnected bool
|
||||
objectStore objectstore.ObjectStore
|
||||
nodeStatus nodestatus.NodeStatus
|
||||
spaceId string
|
||||
}
|
||||
|
||||
func newUpdateReceiver(
|
||||
|
@ -90,14 +90,21 @@ func (r *updateReceiver) getObjectSyncStatus(objectId string, status objectsyncs
|
|||
}
|
||||
|
||||
func (r *updateReceiver) isNodeConnected() bool {
|
||||
r.Lock()
|
||||
defer r.Unlock()
|
||||
r.lock.Lock()
|
||||
defer r.lock.Unlock()
|
||||
return r.nodeConnected
|
||||
}
|
||||
|
||||
func (r *updateReceiver) setSpaceId(spaceId string) {
|
||||
r.lock.Lock()
|
||||
defer r.lock.Unlock()
|
||||
|
||||
r.spaceId = spaceId
|
||||
}
|
||||
|
||||
func (r *updateReceiver) UpdateNodeStatus() {
|
||||
r.Lock()
|
||||
defer r.Unlock()
|
||||
r.lock.Lock()
|
||||
defer r.lock.Unlock()
|
||||
r.nodeConnected = r.nodeStatus.GetNodeStatus(r.spaceId) == nodestatus.Online
|
||||
}
|
||||
|
||||
|
|
12
go.mod
12
go.mod
|
@ -7,7 +7,7 @@ require (
|
|||
github.com/PuerkitoBio/goquery v1.9.2
|
||||
github.com/VividCortex/ewma v1.2.0
|
||||
github.com/adrium/goheif v0.0.0-20230113233934-ca402e77a786
|
||||
github.com/anyproto/any-sync v0.4.21
|
||||
github.com/anyproto/any-sync v0.4.22
|
||||
github.com/anyproto/go-naturaldate/v2 v2.0.2-0.20230524105841-9829cfd13438
|
||||
github.com/araddon/dateparse v0.0.0-20210429162001-6b43995a97de
|
||||
github.com/avast/retry-go/v4 v4.6.0
|
||||
|
@ -56,8 +56,8 @@ require (
|
|||
github.com/joho/godotenv v1.5.1
|
||||
github.com/jsummers/gobmp v0.0.0-20151104160322-e2ba15ffa76e
|
||||
github.com/kelseyhightower/envconfig v1.4.0
|
||||
github.com/klauspost/compress v1.17.7
|
||||
github.com/libp2p/go-libp2p v0.33.2
|
||||
github.com/klauspost/compress v1.17.8
|
||||
github.com/libp2p/go-libp2p v0.35.1
|
||||
github.com/libp2p/zeroconf/v2 v2.2.0
|
||||
github.com/logrusorgru/aurora v2.0.3+incompatible
|
||||
github.com/magiconair/properties v1.8.7
|
||||
|
@ -144,7 +144,7 @@ require (
|
|||
github.com/chigopher/pathlib v0.19.1 // indirect
|
||||
github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 // indirect
|
||||
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect
|
||||
github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f // indirect
|
||||
github.com/dgraph-io/ristretto v0.1.1 // indirect
|
||||
github.com/dsoprea/go-iptc v0.0.0-20200609062250-162ae6b44feb // indirect
|
||||
|
@ -210,7 +210,7 @@ require (
|
|||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/mschoch/smat v0.2.0 // indirect
|
||||
github.com/multiformats/go-base36 v0.2.0 // indirect
|
||||
github.com/multiformats/go-multiaddr v0.12.3 // indirect
|
||||
github.com/multiformats/go-multiaddr v0.12.4 // indirect
|
||||
github.com/multiformats/go-multicodec v0.9.0 // indirect
|
||||
github.com/multiformats/go-multistream v0.5.0 // indirect
|
||||
github.com/multiformats/go-varint v0.0.7 // indirect
|
||||
|
@ -221,7 +221,7 @@ require (
|
|||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/polydawn/refmt v0.89.0 // indirect
|
||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
|
||||
github.com/prometheus/client_model v0.6.0 // indirect
|
||||
github.com/prometheus/client_model v0.6.1 // indirect
|
||||
github.com/prometheus/common v0.48.0 // indirect
|
||||
github.com/prometheus/procfs v0.12.0 // indirect
|
||||
github.com/pseudomuto/protokit v0.2.1 // indirect
|
||||
|
|
56
go.sum
56
go.sum
|
@ -83,8 +83,8 @@ github.com/andybalholm/cascadia v1.2.0/go.mod h1:YCyR8vOZT9aZ1CHEd8ap0gMVm2aFgxB
|
|||
github.com/andybalholm/cascadia v1.3.1/go.mod h1:R4bJ1UQfqADjvDa4P6HZHLh/3OxWWEqc0Sk8XGwHqvA=
|
||||
github.com/andybalholm/cascadia v1.3.2 h1:3Xi6Dw5lHF15JtdcmAHD3i1+T8plmv7BQ/nsViSLyss=
|
||||
github.com/andybalholm/cascadia v1.3.2/go.mod h1:7gtRlve5FxPPgIgX36uWBX58OdBsSS6lUvCFb+h7KvU=
|
||||
github.com/anyproto/any-sync v0.4.21 h1:L0/IUUrliZWm74RQgvrf9YKzfuvn9Mya+iuAbDKvU+Q=
|
||||
github.com/anyproto/any-sync v0.4.21/go.mod h1:sO/zUrmnCZKnH/3KaRH3JQSZMuINS3X7ZJa+d4YgfkA=
|
||||
github.com/anyproto/any-sync v0.4.22 h1:f9iAbCv/clTzYtzOzkX1IOXahVM/Art1WkUtIgnwl8U=
|
||||
github.com/anyproto/any-sync v0.4.22/go.mod h1:qHIG2zMvGIthEb2FmcjQN5YZZwV8kPHv7/T0ib7YSDg=
|
||||
github.com/anyproto/badger/v4 v4.2.1-0.20240110160636-80743fa3d580 h1:Ba80IlCCxkZ9H1GF+7vFu/TSpPvbpDCxXJ5ogc4euYc=
|
||||
github.com/anyproto/badger/v4 v4.2.1-0.20240110160636-80743fa3d580/go.mod h1:T/uWAYxrXdaXw64ihI++9RMbKTCpKd/yE9+saARew7k=
|
||||
github.com/anyproto/go-chash v0.1.0 h1:I9meTPjXFRfXZHRJzjOHC/XF7Q5vzysKkiT/grsogXY=
|
||||
|
@ -249,8 +249,8 @@ github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR
|
|||
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
|
||||
github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f h1:U5y3Y5UE0w7amNe7Z5G/twsBW0KEalRQXZzf8ufSh9I=
|
||||
github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE=
|
||||
github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ=
|
||||
|
@ -752,8 +752,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
|
|||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
|
||||
github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
|
||||
github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
|
||||
github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg=
|
||||
github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
|
||||
github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=
|
||||
github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
|
||||
github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
|
||||
github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
|
||||
|
@ -796,8 +796,8 @@ github.com/libp2p/go-libp2p v0.7.0/go.mod h1:hZJf8txWeCduQRDC/WSqBGMxaTHCOYHt2xS
|
|||
github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniVO7zIHGMw=
|
||||
github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qDKwXujH5o=
|
||||
github.com/libp2p/go-libp2p v0.13.0/go.mod h1:pM0beYdACRfHO1WcJlp65WXyG2A6NqYM+t2DTVAJxMo=
|
||||
github.com/libp2p/go-libp2p v0.33.2 h1:vCdwnFxoGOXMKmaGHlDSnL4bM3fQeW8pgIa9DECnb40=
|
||||
github.com/libp2p/go-libp2p v0.33.2/go.mod h1:zTeppLuCvUIkT118pFVzA8xzP/p2dJYOMApCkFh0Yww=
|
||||
github.com/libp2p/go-libp2p v0.35.1 h1:Hm7Ub2BF+GCb14ojcsEK6WAy5it5smPDK02iXSZLl50=
|
||||
github.com/libp2p/go-libp2p v0.35.1/go.mod h1:Dnkgba5hsfSv5dvvXC8nfqk44hH0gIKKno+HOMU0fdc=
|
||||
github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94=
|
||||
github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8=
|
||||
github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8=
|
||||
|
@ -1154,6 +1154,38 @@ github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp
|
|||
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o=
|
||||
github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
|
||||
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pion/datachannel v1.5.6 h1:1IxKJntfSlYkpUj8LlYRSWpYiTTC02nUrOE8T3DqGeg=
|
||||
github.com/pion/datachannel v1.5.6/go.mod h1:1eKT6Q85pRnr2mHiWHxJwO50SfZRtWHTsNIVb/NfGW4=
|
||||
github.com/pion/dtls/v2 v2.2.11 h1:9U/dpCYl1ySttROPWJgqWKEylUdT0fXp/xst6JwY5Ks=
|
||||
github.com/pion/dtls/v2 v2.2.11/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE=
|
||||
github.com/pion/ice/v2 v2.3.25 h1:M5rJA07dqhi3nobJIg+uPtcVjFECTrhcR3n0ns8kDZs=
|
||||
github.com/pion/ice/v2 v2.3.25/go.mod h1:KXJJcZK7E8WzrBEYnV4UtqEZsGeWfHxsNqhVcVvgjxw=
|
||||
github.com/pion/interceptor v0.1.29 h1:39fsnlP1U8gw2JzOFWdfCU82vHvhW9o0rZnZF56wF+M=
|
||||
github.com/pion/interceptor v0.1.29/go.mod h1:ri+LGNjRUc5xUNtDEPzfdkmSqISixVTBF/z/Zms/6T4=
|
||||
github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY=
|
||||
github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms=
|
||||
github.com/pion/mdns v0.0.12 h1:CiMYlY+O0azojWDmxdNr7ADGrnZ+V6Ilfner+6mSVK8=
|
||||
github.com/pion/mdns v0.0.12/go.mod h1:VExJjv8to/6Wqm1FXK+Ii/Z9tsVk/F5sD/N70cnYFbk=
|
||||
github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA=
|
||||
github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8=
|
||||
github.com/pion/rtcp v1.2.14 h1:KCkGV3vJ+4DAJmvP0vaQShsb0xkRfWkO540Gy102KyE=
|
||||
github.com/pion/rtcp v1.2.14/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9L4=
|
||||
github.com/pion/rtp v1.8.6 h1:MTmn/b0aWWsAzux2AmP8WGllusBVw4NPYPVFFd7jUPw=
|
||||
github.com/pion/rtp v1.8.6/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU=
|
||||
github.com/pion/sctp v1.8.16 h1:PKrMs+o9EMLRvFfXq59WFsC+V8mN1wnKzqrv+3D/gYY=
|
||||
github.com/pion/sctp v1.8.16/go.mod h1:P6PbDVA++OJMrVNg2AL3XtYHV4uD6dvfyOovCgMs0PE=
|
||||
github.com/pion/sdp/v3 v3.0.9 h1:pX++dCHoHUwq43kuwf3PyJfHlwIj4hXA7Vrifiq0IJY=
|
||||
github.com/pion/sdp/v3 v3.0.9/go.mod h1:B5xmvENq5IXJimIO4zfp6LAe1fD9N+kFv+V/1lOdz8M=
|
||||
github.com/pion/srtp/v2 v2.0.18 h1:vKpAXfawO9RtTRKZJbG4y0v1b11NZxQnxRl85kGuUlo=
|
||||
github.com/pion/srtp/v2 v2.0.18/go.mod h1:0KJQjA99A6/a0DOVTu1PhDSw0CXF2jTkqOoMg3ODqdA=
|
||||
github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4=
|
||||
github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8=
|
||||
github.com/pion/transport/v2 v2.2.5 h1:iyi25i/21gQck4hfRhomF6SktmUQjRsRW4WJdhfc3Kc=
|
||||
github.com/pion/transport/v2 v2.2.5/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0=
|
||||
github.com/pion/turn/v2 v2.1.6 h1:Xr2niVsiPTB0FPtt+yAWKFUkU1eotQbGgpTIld4x1Gc=
|
||||
github.com/pion/turn/v2 v2.1.6/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY=
|
||||
github.com/pion/webrtc/v3 v3.2.40 h1:Wtfi6AZMQg+624cvCXUuSmrKWepSB7zfgYDOYqsSOVU=
|
||||
github.com/pion/webrtc/v3 v3.2.40/go.mod h1:M1RAe3TNTD1tzyvqHrbVODfwdPGSXOUo/OgpoGGJqFY=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
|
@ -1184,8 +1216,8 @@ github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:
|
|||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos=
|
||||
github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8=
|
||||
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
|
||||
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
|
||||
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
|
@ -1213,8 +1245,8 @@ github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo=
|
|||
github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A=
|
||||
github.com/quic-go/quic-go v0.44.0 h1:So5wOr7jyO4vzL2sd8/pD9Kesciv91zSk8BoFngItQ0=
|
||||
github.com/quic-go/quic-go v0.44.0/go.mod h1:z4cx/9Ny9UtGITIPzmPTXh1ULfOyWh4qGQlpnPcWmek=
|
||||
github.com/quic-go/webtransport-go v0.6.0 h1:CvNsKqc4W2HljHJnoT+rMmbRJybShZ0YPFDD3NxaZLY=
|
||||
github.com/quic-go/webtransport-go v0.6.0/go.mod h1:9KjU4AEBqEQidGHNDkZrb8CAa1abRaosM2yGOyiikEc=
|
||||
github.com/quic-go/webtransport-go v0.8.0 h1:HxSrwun11U+LlmwpgM1kEqIqH90IT4N8auv/cD7QFJg=
|
||||
github.com/quic-go/webtransport-go v0.8.0/go.mod h1:N99tjprW432Ut5ONql/aUhSLT0YVSlwHohQsuac9WaM=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||
|
|
|
@ -9,6 +9,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/samber/lo"
|
||||
"github.com/valyala/fastjson"
|
||||
"go.uber.org/atomic"
|
||||
"google.golang.org/grpc"
|
||||
|
||||
|
@ -21,6 +22,8 @@ const (
|
|||
unexpectedErrorCode = -1
|
||||
parsingErrorCode = -2
|
||||
accountSelect = "AccountSelect"
|
||||
accountStop = "AccountStop"
|
||||
accountStopJson = "account_stop.json"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -61,18 +64,98 @@ func SharedTraceInterceptor(ctx context.Context, req any, methodName string, act
|
|||
start := time.Now().UnixMilli()
|
||||
resp, err := actualCall(ctx, req)
|
||||
delta := time.Now().UnixMilli() - start
|
||||
var event *MethodEvent
|
||||
if methodName == accountSelect {
|
||||
if hotSync {
|
||||
SendMethodEvent(methodName+"Hot", err, resp, delta)
|
||||
event = toEvent(methodName+"Hot", err, resp, delta)
|
||||
} else {
|
||||
SendMethodEvent(methodName+"Cold", err, resp, delta)
|
||||
event = toEvent(methodName+"Cold", err, resp, delta)
|
||||
}
|
||||
_ = trySendAccountStop()
|
||||
} else {
|
||||
SendMethodEvent(methodName, err, resp, delta)
|
||||
event = toEvent(methodName, err, resp, delta)
|
||||
}
|
||||
|
||||
if event != nil {
|
||||
if methodName == accountStop {
|
||||
_ = saveAccountStop(event)
|
||||
} else {
|
||||
Service.Send(event)
|
||||
}
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func saveAccountStop(event *MethodEvent) error {
|
||||
arena := &fastjson.Arena{}
|
||||
|
||||
json := arena.NewObject()
|
||||
json.Set("method_name", arena.NewString(event.methodName))
|
||||
json.Set("middle_time", arena.NewNumberInt(int(event.middleTime)))
|
||||
json.Set("error_code", arena.NewNumberInt(int(event.errorCode)))
|
||||
json.Set("description", arena.NewString(event.description))
|
||||
|
||||
data := json.MarshalTo(nil)
|
||||
jsonPath := filepath.Join(Service.getWorkingDir(), accountStopJson)
|
||||
_ = os.Remove(jsonPath)
|
||||
return os.WriteFile(jsonPath, data, 0600)
|
||||
}
|
||||
|
||||
func trySendAccountStop() error {
|
||||
jsonPath := filepath.Join(Service.getWorkingDir(), accountStopJson)
|
||||
data, err := os.ReadFile(jsonPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_ = os.Remove(jsonPath)
|
||||
|
||||
parsedJson, err := fastjson.ParseBytes(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
Service.Send(&MethodEvent{
|
||||
methodName: string(parsedJson.GetStringBytes("method_name")),
|
||||
middleTime: parsedJson.GetInt64("middle_time"),
|
||||
errorCode: parsedJson.GetInt64("error_code"),
|
||||
description: string(parsedJson.GetStringBytes("description")),
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func toEvent(method string, err error, resp any, delta int64) *MethodEvent {
|
||||
if !lo.Contains(excludedMethods, method) {
|
||||
if err != nil {
|
||||
return &MethodEvent{
|
||||
methodName: method,
|
||||
errorCode: unexpectedErrorCode,
|
||||
description: err.Error(),
|
||||
}
|
||||
}
|
||||
errorCode, description, err := reflection.GetError(resp)
|
||||
if err != nil {
|
||||
return &MethodEvent{
|
||||
methodName: method,
|
||||
errorCode: parsingErrorCode,
|
||||
}
|
||||
}
|
||||
if errorCode > 0 {
|
||||
return &MethodEvent{
|
||||
methodName: method,
|
||||
errorCode: errorCode,
|
||||
description: description,
|
||||
}
|
||||
}
|
||||
return &MethodEvent{
|
||||
methodName: method,
|
||||
middleTime: delta,
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func LongMethodsInterceptor(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) {
|
||||
return SharedLongMethodsInterceptor(ctx, req, extractMethodName(info.FullMethod), handler)
|
||||
}
|
||||
|
@ -149,57 +232,3 @@ func dirExists(path string) (bool, error) {
|
|||
func stackTraceHasMethod(method string, stackTrace []byte) bool {
|
||||
return bytes.Contains(stackTrace, []byte("core.(*Middleware)."+method+"("))
|
||||
}
|
||||
|
||||
func SendMethodEvent(method string, err error, resp any, delta int64) {
|
||||
if !lo.Contains(excludedMethods, method) {
|
||||
if err != nil {
|
||||
sendUnexpectedError(method, err.Error())
|
||||
}
|
||||
errorCode, description, err := reflection.GetError(resp)
|
||||
if err != nil {
|
||||
sendErrorParsingError(method)
|
||||
}
|
||||
if errorCode > 0 {
|
||||
sendExpectedError(method, errorCode, description)
|
||||
}
|
||||
sendSuccess(method, delta)
|
||||
}
|
||||
}
|
||||
|
||||
func sendSuccess(method string, delta int64) {
|
||||
Service.Send(
|
||||
&MethodEvent{
|
||||
methodName: method,
|
||||
middleTime: delta,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func sendExpectedError(method string, code int64, description string) {
|
||||
Service.Send(
|
||||
&MethodEvent{
|
||||
methodName: method,
|
||||
errorCode: code,
|
||||
description: description,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func sendErrorParsingError(method string) {
|
||||
Service.Send(
|
||||
&MethodEvent{
|
||||
methodName: method,
|
||||
errorCode: parsingErrorCode,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func sendUnexpectedError(method string, description string) {
|
||||
Service.Send(
|
||||
&MethodEvent{
|
||||
methodName: method,
|
||||
errorCode: unexpectedErrorCode,
|
||||
description: description,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
|
|
@ -3,6 +3,7 @@ package metrics
|
|||
|
||||
import (
|
||||
"context"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
|
@ -37,7 +38,9 @@ type SamplableEvent interface {
|
|||
|
||||
type MetricsService interface {
|
||||
InitWithKeys(inHouseKey string)
|
||||
SetAppVersion(v string)
|
||||
SetWorkingDir(workingDir string, accountId string)
|
||||
SetAppVersion(path string)
|
||||
getWorkingDir() string
|
||||
SetStartVersion(v string)
|
||||
SetDeviceId(t string)
|
||||
SetPlatform(p string)
|
||||
|
@ -57,6 +60,7 @@ type service struct {
|
|||
userId string
|
||||
deviceId string
|
||||
platform string
|
||||
workingDir string
|
||||
clients [1]*client
|
||||
alreadyRunning bool
|
||||
}
|
||||
|
@ -83,6 +87,18 @@ func NewService() MetricsService {
|
|||
}
|
||||
}
|
||||
|
||||
func (s *service) SetWorkingDir(workingDir string, accountId string) {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
s.workingDir = filepath.Join(workingDir, accountId)
|
||||
}
|
||||
|
||||
func (s *service) getWorkingDir() string {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
return s.workingDir
|
||||
}
|
||||
|
||||
func (s *service) InitWithKeys(inHouseKey string) {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
|
|
|
@ -463,7 +463,7 @@ func (s *dsObjectStore) QueryObjectIDs(q database.Query) (ids []string, total in
|
|||
for _, rec := range recs {
|
||||
ids = append(ids, pbtypes.GetString(rec.Details, bundle.RelationKeyId.String()))
|
||||
}
|
||||
return ids, 0, nil
|
||||
return ids, len(recs), nil
|
||||
}
|
||||
|
||||
func (s *dsObjectStore) QueryByID(ids []string) (records []database.Record, err error) {
|
||||
|
|
|
@ -3,6 +3,7 @@ package spaceloader
|
|||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/anyproto/any-sync/app/logger"
|
||||
|
@ -36,8 +37,11 @@ type loadingSpace struct {
|
|||
disableRemoteLoad bool
|
||||
latestAclHeadId string
|
||||
space clientspace.Space
|
||||
loadErr error
|
||||
loadCh chan struct{}
|
||||
|
||||
loadCh chan struct{}
|
||||
|
||||
lock sync.Mutex
|
||||
loadErr error
|
||||
}
|
||||
|
||||
func (s *spaceLoader) newLoadingSpace(ctx context.Context, stopIfMandatoryFail, disableRemoteLoad bool, aclHeadId string) *loadingSpace {
|
||||
|
@ -53,9 +57,21 @@ func (s *spaceLoader) newLoadingSpace(ctx context.Context, stopIfMandatoryFail,
|
|||
return ls
|
||||
}
|
||||
|
||||
func (ls *loadingSpace) getLoadErr() error {
|
||||
ls.lock.Lock()
|
||||
defer ls.lock.Unlock()
|
||||
return ls.loadErr
|
||||
}
|
||||
|
||||
func (ls *loadingSpace) setLoadErr(err error) {
|
||||
ls.lock.Lock()
|
||||
defer ls.lock.Unlock()
|
||||
ls.loadErr = err
|
||||
}
|
||||
|
||||
func (ls *loadingSpace) loadRetry(ctx context.Context) {
|
||||
defer func() {
|
||||
if err := ls.spaceServiceProvider.onLoad(ls.space, ls.loadErr); err != nil {
|
||||
if err := ls.spaceServiceProvider.onLoad(ls.space, ls.getLoadErr()); err != nil {
|
||||
log.WarnCtx(ctx, "space onLoad error", zap.Error(err))
|
||||
}
|
||||
close(ls.loadCh)
|
||||
|
@ -67,7 +83,7 @@ func (ls *loadingSpace) loadRetry(ctx context.Context) {
|
|||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
ls.loadErr = ctx.Err()
|
||||
ls.setLoadErr(ctx.Err())
|
||||
return
|
||||
case <-time.After(timeout):
|
||||
if ls.load(ctx) {
|
||||
|
@ -90,7 +106,7 @@ func (ls *loadingSpace) load(ctx context.Context) (ok bool) {
|
|||
err = sp.WaitMandatoryObjects(ctx)
|
||||
if errors.Is(err, treechangeproto.ErrGetTree) || errors.Is(err, objecttree.ErrHasInvalidChanges) || errors.Is(err, list.ErrNoReadKey) {
|
||||
if ls.stopIfMandatoryFail {
|
||||
ls.loadErr = err
|
||||
ls.setLoadErr(err)
|
||||
return true
|
||||
}
|
||||
return ls.disableRemoteLoad
|
||||
|
@ -103,7 +119,7 @@ func (ls *loadingSpace) load(ctx context.Context) (ok bool) {
|
|||
log.WarnCtx(ctx, "space close error", zap.Error(closeErr))
|
||||
}
|
||||
}
|
||||
ls.loadErr = err
|
||||
ls.setLoadErr(err)
|
||||
} else {
|
||||
if ls.latestAclHeadId != "" && !ls.disableRemoteLoad {
|
||||
acl := sp.CommonSpace().Acl()
|
||||
|
|
|
@ -129,7 +129,7 @@ func (s *spaceLoader) WaitLoad(ctx context.Context) (sp clientspace.Space, err e
|
|||
case spaceinfo.LocalStatusLoading:
|
||||
// loading in progress, wait channel and retry
|
||||
waitCh := s.loading.loadCh
|
||||
loadErr := s.loading.loadErr
|
||||
loadErr := s.loading.getLoadErr()
|
||||
s.mx.Unlock()
|
||||
if loadErr != nil {
|
||||
return nil, loadErr
|
||||
|
@ -142,7 +142,7 @@ func (s *spaceLoader) WaitLoad(ctx context.Context) (sp clientspace.Space, err e
|
|||
return s.WaitLoad(ctx)
|
||||
case spaceinfo.LocalStatusMissing:
|
||||
// local missing state means the loader ended with an error
|
||||
err = s.loading.loadErr
|
||||
err = s.loading.getLoadErr()
|
||||
case spaceinfo.LocalStatusOk:
|
||||
sp = s.space
|
||||
default:
|
||||
|
|
|
@ -15,7 +15,6 @@ import (
|
|||
"github.com/anyproto/any-sync/net/peer"
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/anyproto/anytype-heart/core/domain"
|
||||
"github.com/anyproto/anytype-heart/core/syncstatus/nodestatus"
|
||||
"github.com/anyproto/anytype-heart/space/spacecore/peerstore"
|
||||
)
|
||||
|
@ -29,17 +28,16 @@ var (
|
|||
|
||||
type NodeStatus interface {
|
||||
app.Component
|
||||
SetNodesStatus(spaceId string, senderId string, status nodestatus.ConnectionStatus)
|
||||
SetNodesStatus(spaceId string, status nodestatus.ConnectionStatus)
|
||||
GetNodeStatus(string) nodestatus.ConnectionStatus
|
||||
}
|
||||
|
||||
type Updater interface {
|
||||
app.ComponentRunnable
|
||||
SendUpdate(spaceSync *domain.SpaceSync)
|
||||
Refresh(spaceId string)
|
||||
}
|
||||
|
||||
type PeerToPeerStatus interface {
|
||||
CheckPeerStatus()
|
||||
RegisterSpace(spaceId string)
|
||||
UnregisterSpace(spaceId string)
|
||||
}
|
||||
|
@ -73,7 +71,6 @@ func (n *clientPeerManager) Init(a *app.App) (err error) {
|
|||
n.nodeStatus = app.MustComponent[NodeStatus](a)
|
||||
n.spaceSyncService = app.MustComponent[Updater](a)
|
||||
n.peerToPeerStatus = app.MustComponent[PeerToPeerStatus](a)
|
||||
n.peerToPeerStatus.RegisterSpace(n.spaceId)
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -82,6 +79,7 @@ func (n *clientPeerManager) Name() (name string) {
|
|||
}
|
||||
|
||||
func (n *clientPeerManager) Run(ctx context.Context) (err error) {
|
||||
go n.peerToPeerStatus.RegisterSpace(n.spaceId)
|
||||
go n.manageResponsiblePeers()
|
||||
return
|
||||
}
|
||||
|
@ -170,20 +168,16 @@ func (n *clientPeerManager) getStreamResponsiblePeers(ctx context.Context) (peer
|
|||
peerIds = []string{p.Id()}
|
||||
}
|
||||
peerIds = append(peerIds, n.peerStore.LocalPeerIds(n.spaceId)...)
|
||||
var needUpdate bool
|
||||
for _, peerId := range peerIds {
|
||||
p, err := n.p.pool.Get(ctx, peerId)
|
||||
if err != nil {
|
||||
n.peerStore.RemoveLocalPeer(peerId)
|
||||
log.Warn("failed to get peer from stream pool", zap.String("peerId", peerId), zap.Error(err))
|
||||
needUpdate = true
|
||||
continue
|
||||
}
|
||||
peers = append(peers, p)
|
||||
}
|
||||
if needUpdate {
|
||||
n.peerToPeerStatus.CheckPeerStatus()
|
||||
}
|
||||
|
||||
// set node error if no local peers
|
||||
if len(peers) == 0 {
|
||||
err = fmt.Errorf("failed to get peers for stream")
|
||||
|
@ -208,30 +202,22 @@ func (n *clientPeerManager) fetchResponsiblePeers() {
|
|||
p, err := n.p.pool.GetOneOf(n.ctx, n.responsibleNodeIds)
|
||||
if err == nil {
|
||||
peers = []peer.Peer{p}
|
||||
n.nodeStatus.SetNodesStatus(n.spaceId, p.Id(), nodestatus.Online)
|
||||
n.nodeStatus.SetNodesStatus(n.spaceId, nodestatus.Online)
|
||||
} else {
|
||||
log.Info("can't get node peers", zap.Error(err))
|
||||
for _, p := range n.responsiblePeers {
|
||||
n.nodeStatus.SetNodesStatus(n.spaceId, p.Id(), nodestatus.ConnectionError)
|
||||
}
|
||||
n.spaceSyncService.SendUpdate(domain.MakeSyncStatus(n.spaceId, domain.Offline, domain.Null, domain.Objects))
|
||||
n.nodeStatus.SetNodesStatus(n.spaceId, nodestatus.ConnectionError)
|
||||
}
|
||||
|
||||
n.spaceSyncService.Refresh(n.spaceId)
|
||||
peerIds := n.peerStore.LocalPeerIds(n.spaceId)
|
||||
var needUpdate bool
|
||||
for _, peerId := range peerIds {
|
||||
p, err := n.p.pool.Get(n.ctx, peerId)
|
||||
if err != nil {
|
||||
n.peerStore.RemoveLocalPeer(peerId)
|
||||
log.Warn("failed to get local from net pool", zap.String("peerId", peerId), zap.Error(err))
|
||||
needUpdate = true
|
||||
continue
|
||||
}
|
||||
peers = append(peers, p)
|
||||
}
|
||||
if needUpdate {
|
||||
n.peerToPeerStatus.CheckPeerStatus()
|
||||
}
|
||||
|
||||
n.Lock()
|
||||
defer n.Unlock()
|
||||
|
@ -275,7 +261,3 @@ func (n *clientPeerManager) Close(ctx context.Context) (err error) {
|
|||
n.peerToPeerStatus.UnregisterSpace(n.spaceId)
|
||||
return
|
||||
}
|
||||
|
||||
func (n *clientPeerManager) IsPeerOffline(senderId string) bool {
|
||||
return n.nodeStatus.GetNodeStatus(n.spaceId) != nodestatus.Online
|
||||
}
|
||||
|
|
|
@ -17,7 +17,6 @@ import (
|
|||
"go.uber.org/mock/gomock"
|
||||
"storj.io/drpc"
|
||||
|
||||
"github.com/anyproto/anytype-heart/core/domain"
|
||||
"github.com/anyproto/anytype-heart/core/syncstatus/nodestatus"
|
||||
"github.com/anyproto/anytype-heart/space/spacecore/peermanager/mock_peermanager"
|
||||
"github.com/anyproto/anytype-heart/space/spacecore/peerstore"
|
||||
|
@ -92,29 +91,18 @@ func TestClientPeerManager_GetResponsiblePeers_Deadline(t *testing.T) {
|
|||
func Test_fetchResponsiblePeers(t *testing.T) {
|
||||
spaceId := "spaceId"
|
||||
t.Run("node offline", func(t *testing.T) {
|
||||
// given
|
||||
f := newFixtureManager(t, spaceId)
|
||||
|
||||
// when
|
||||
f.pool.EXPECT().GetOneOf(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("failed"))
|
||||
status := domain.MakeSyncStatus(f.cm.spaceId, domain.Offline, domain.Null, domain.Objects)
|
||||
f.updater.EXPECT().SendUpdate(status)
|
||||
f.updater.EXPECT().Refresh(spaceId)
|
||||
f.cm.fetchResponsiblePeers()
|
||||
|
||||
// then
|
||||
f.updater.AssertCalled(t, "SendUpdate", status)
|
||||
require.Equal(t, f.cm.nodeStatus.GetNodeStatus("spaceId"), nodestatus.ConnectionError)
|
||||
})
|
||||
t.Run("no local peers", func(t *testing.T) {
|
||||
// given
|
||||
f := newFixtureManager(t, spaceId)
|
||||
|
||||
// when
|
||||
f.conf.EXPECT().NodeIds(f.cm.spaceId).Return([]string{"id"})
|
||||
f.pool.EXPECT().GetOneOf(gomock.Any(), gomock.Any()).Return(newTestPeer("id"), nil)
|
||||
f.updater.EXPECT().Refresh(spaceId)
|
||||
f.cm.fetchResponsiblePeers()
|
||||
|
||||
// then
|
||||
f.peerToPeerStatus.AssertNotCalled(t, "CheckPeerStatus")
|
||||
require.Equal(t, f.cm.nodeStatus.GetNodeStatus("spaceId"), nodestatus.Online)
|
||||
})
|
||||
t.Run("local peers connected", func(t *testing.T) {
|
||||
// given
|
||||
|
@ -122,28 +110,22 @@ func Test_fetchResponsiblePeers(t *testing.T) {
|
|||
f.store.UpdateLocalPeer("peerId", []string{spaceId})
|
||||
|
||||
// when
|
||||
f.conf.EXPECT().NodeIds(f.cm.spaceId).Return([]string{"id"})
|
||||
f.pool.EXPECT().GetOneOf(gomock.Any(), gomock.Any()).Return(newTestPeer("id"), nil)
|
||||
f.pool.EXPECT().Get(f.cm.ctx, "peerId").Return(newTestPeer("id1"), nil)
|
||||
f.updater.EXPECT().Refresh(spaceId)
|
||||
f.cm.fetchResponsiblePeers()
|
||||
|
||||
// then
|
||||
f.peerToPeerStatus.AssertNotCalled(t, "CheckPeerStatus")
|
||||
})
|
||||
t.Run("local peer not connected", func(t *testing.T) {
|
||||
// given
|
||||
f := newFixtureManager(t, spaceId)
|
||||
f.store.UpdateLocalPeer("peerId", []string{spaceId})
|
||||
f.peerToPeerStatus.EXPECT().CheckPeerStatus().Return()
|
||||
|
||||
// when
|
||||
f.conf.EXPECT().NodeIds(f.cm.spaceId).Return([]string{"id"})
|
||||
f.pool.EXPECT().GetOneOf(gomock.Any(), gomock.Any()).Return(newTestPeer("id"), nil)
|
||||
f.pool.EXPECT().Get(f.cm.ctx, "peerId").Return(nil, fmt.Errorf("error"))
|
||||
f.updater.EXPECT().Refresh(spaceId)
|
||||
f.cm.fetchResponsiblePeers()
|
||||
|
||||
// then
|
||||
f.peerToPeerStatus.AssertCalled(t, "CheckPeerStatus")
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -161,7 +143,6 @@ func Test_getStreamResponsiblePeers(t *testing.T) {
|
|||
// then
|
||||
assert.Nil(t, err)
|
||||
assert.Len(t, peers, 1)
|
||||
f.peerToPeerStatus.AssertNotCalled(t, "CheckPeerStatus")
|
||||
})
|
||||
t.Run("local peers connected", func(t *testing.T) {
|
||||
// given
|
||||
|
@ -177,13 +158,11 @@ func Test_getStreamResponsiblePeers(t *testing.T) {
|
|||
// then
|
||||
assert.Nil(t, err)
|
||||
assert.Len(t, peers, 2)
|
||||
f.peerToPeerStatus.AssertNotCalled(t, "CheckPeerStatus")
|
||||
})
|
||||
t.Run("local peer not connected", func(t *testing.T) {
|
||||
// given
|
||||
f := newFixtureManager(t, spaceId)
|
||||
f.store.UpdateLocalPeer("peerId", []string{spaceId})
|
||||
f.peerToPeerStatus.EXPECT().CheckPeerStatus().Return()
|
||||
|
||||
// when
|
||||
f.pool.EXPECT().GetOneOf(gomock.Any(), gomock.Any()).Return(newTestPeer("id"), nil)
|
||||
|
@ -194,7 +173,6 @@ func Test_getStreamResponsiblePeers(t *testing.T) {
|
|||
// then
|
||||
assert.Nil(t, err)
|
||||
assert.Len(t, peers, 1)
|
||||
f.peerToPeerStatus.AssertCalled(t, "CheckPeerStatus")
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -297,14 +275,15 @@ func newFixtureManager(t *testing.T, spaceId string) *fixture {
|
|||
updater := mock_peermanager.NewMockUpdater(t)
|
||||
peerToPeerStatus := mock_peermanager.NewMockPeerToPeerStatus(t)
|
||||
cm := &clientPeerManager{
|
||||
p: provider,
|
||||
spaceId: spaceId,
|
||||
peerStore: store,
|
||||
watchingPeers: map[string]struct{}{},
|
||||
ctx: context.Background(),
|
||||
nodeStatus: ns,
|
||||
spaceSyncService: updater,
|
||||
peerToPeerStatus: peerToPeerStatus,
|
||||
responsibleNodeIds: []string{"nodeId"},
|
||||
p: provider,
|
||||
spaceId: spaceId,
|
||||
peerStore: store,
|
||||
watchingPeers: map[string]struct{}{},
|
||||
ctx: context.Background(),
|
||||
nodeStatus: ns,
|
||||
spaceSyncService: updater,
|
||||
peerToPeerStatus: peerToPeerStatus,
|
||||
}
|
||||
return &fixture{
|
||||
cm: cm,
|
||||
|
|
|
@ -17,38 +17,6 @@ func (_m *MockPeerToPeerStatus) EXPECT() *MockPeerToPeerStatus_Expecter {
|
|||
return &MockPeerToPeerStatus_Expecter{mock: &_m.Mock}
|
||||
}
|
||||
|
||||
// CheckPeerStatus provides a mock function with given fields:
|
||||
func (_m *MockPeerToPeerStatus) CheckPeerStatus() {
|
||||
_m.Called()
|
||||
}
|
||||
|
||||
// MockPeerToPeerStatus_CheckPeerStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CheckPeerStatus'
|
||||
type MockPeerToPeerStatus_CheckPeerStatus_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// CheckPeerStatus is a helper method to define mock.On call
|
||||
func (_e *MockPeerToPeerStatus_Expecter) CheckPeerStatus() *MockPeerToPeerStatus_CheckPeerStatus_Call {
|
||||
return &MockPeerToPeerStatus_CheckPeerStatus_Call{Call: _e.mock.On("CheckPeerStatus")}
|
||||
}
|
||||
|
||||
func (_c *MockPeerToPeerStatus_CheckPeerStatus_Call) Run(run func()) *MockPeerToPeerStatus_CheckPeerStatus_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run()
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockPeerToPeerStatus_CheckPeerStatus_Call) Return() *MockPeerToPeerStatus_CheckPeerStatus_Call {
|
||||
_c.Call.Return()
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockPeerToPeerStatus_CheckPeerStatus_Call) RunAndReturn(run func()) *MockPeerToPeerStatus_CheckPeerStatus_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// RegisterSpace provides a mock function with given fields: spaceId
|
||||
func (_m *MockPeerToPeerStatus) RegisterSpace(spaceId string) {
|
||||
_m.Called(spaceId)
|
||||
|
|
|
@ -7,8 +7,6 @@ import (
|
|||
|
||||
app "github.com/anyproto/any-sync/app"
|
||||
|
||||
domain "github.com/anyproto/anytype-heart/core/domain"
|
||||
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
)
|
||||
|
||||
|
@ -162,6 +160,39 @@ func (_c *MockUpdater_Name_Call) RunAndReturn(run func() string) *MockUpdater_Na
|
|||
return _c
|
||||
}
|
||||
|
||||
// Refresh provides a mock function with given fields: spaceId
|
||||
func (_m *MockUpdater) Refresh(spaceId string) {
|
||||
_m.Called(spaceId)
|
||||
}
|
||||
|
||||
// MockUpdater_Refresh_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Refresh'
|
||||
type MockUpdater_Refresh_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// Refresh is a helper method to define mock.On call
|
||||
// - spaceId string
|
||||
func (_e *MockUpdater_Expecter) Refresh(spaceId interface{}) *MockUpdater_Refresh_Call {
|
||||
return &MockUpdater_Refresh_Call{Call: _e.mock.On("Refresh", spaceId)}
|
||||
}
|
||||
|
||||
func (_c *MockUpdater_Refresh_Call) Run(run func(spaceId string)) *MockUpdater_Refresh_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(string))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockUpdater_Refresh_Call) Return() *MockUpdater_Refresh_Call {
|
||||
_c.Call.Return()
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockUpdater_Refresh_Call) RunAndReturn(run func(string)) *MockUpdater_Refresh_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// Run provides a mock function with given fields: ctx
|
||||
func (_m *MockUpdater) Run(ctx context.Context) error {
|
||||
ret := _m.Called(ctx)
|
||||
|
@ -208,39 +239,6 @@ func (_c *MockUpdater_Run_Call) RunAndReturn(run func(context.Context) error) *M
|
|||
return _c
|
||||
}
|
||||
|
||||
// SendUpdate provides a mock function with given fields: spaceSync
|
||||
func (_m *MockUpdater) SendUpdate(spaceSync *domain.SpaceSync) {
|
||||
_m.Called(spaceSync)
|
||||
}
|
||||
|
||||
// MockUpdater_SendUpdate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SendUpdate'
|
||||
type MockUpdater_SendUpdate_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// SendUpdate is a helper method to define mock.On call
|
||||
// - spaceSync *domain.SpaceSync
|
||||
func (_e *MockUpdater_Expecter) SendUpdate(spaceSync interface{}) *MockUpdater_SendUpdate_Call {
|
||||
return &MockUpdater_SendUpdate_Call{Call: _e.mock.On("SendUpdate", spaceSync)}
|
||||
}
|
||||
|
||||
func (_c *MockUpdater_SendUpdate_Call) Run(run func(spaceSync *domain.SpaceSync)) *MockUpdater_SendUpdate_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(*domain.SpaceSync))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockUpdater_SendUpdate_Call) Return() *MockUpdater_SendUpdate_Call {
|
||||
_c.Call.Return()
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockUpdater_SendUpdate_Call) RunAndReturn(run func(*domain.SpaceSync)) *MockUpdater_SendUpdate_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// NewMockUpdater creates a new instance of MockUpdater. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
// The first argument is typically a *testing.T value.
|
||||
func NewMockUpdater(t interface {
|
||||
|
|
|
@ -32,7 +32,8 @@ func New() PeerStore {
|
|||
}
|
||||
}
|
||||
|
||||
type Observer func(peerId string, spaceIds []string)
|
||||
// Observer is a function that will be called when a peer is updated
|
||||
type Observer func(peerId string, spaceIdsBefore []string, spaceIdsAfter []string, peerRemoved bool)
|
||||
|
||||
type peerStore struct {
|
||||
nodeConf nodeconf.Service
|
||||
|
@ -61,6 +62,10 @@ func (p *peerStore) AddObserver(observer Observer) {
|
|||
func (p *peerStore) UpdateLocalPeer(peerId string, spaceIds []string) {
|
||||
notify := true
|
||||
p.Lock()
|
||||
var (
|
||||
oldIds []string
|
||||
ok bool
|
||||
)
|
||||
defer func() {
|
||||
observers := p.observers
|
||||
p.Unlock()
|
||||
|
@ -69,10 +74,10 @@ func (p *peerStore) UpdateLocalPeer(peerId string, spaceIds []string) {
|
|||
}
|
||||
|
||||
for _, ob := range observers {
|
||||
ob(peerId, spaceIds)
|
||||
ob(peerId, oldIds, spaceIds, false)
|
||||
}
|
||||
}()
|
||||
if oldIds, ok := p.spacesByLocalPeerIds[peerId]; ok {
|
||||
if oldIds, ok = p.spacesByLocalPeerIds[peerId]; ok {
|
||||
slices.Sort(oldIds)
|
||||
slices.Sort(spaceIds)
|
||||
if slices.Equal(oldIds, spaceIds) {
|
||||
|
@ -126,11 +131,18 @@ func (p *peerStore) LocalPeerIds(spaceId string) []string {
|
|||
|
||||
func (p *peerStore) RemoveLocalPeer(peerId string) {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
spaceIds, exists := p.spacesByLocalPeerIds[peerId]
|
||||
if !exists {
|
||||
p.Unlock()
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
observers := p.observers
|
||||
p.Unlock()
|
||||
for _, ob := range observers {
|
||||
ob(peerId, spaceIds, nil, true)
|
||||
}
|
||||
}()
|
||||
// TODO: do we need to notify observer here
|
||||
for _, spaceId := range spaceIds {
|
||||
peerIds := p.localPeerIdsBySpace[spaceId]
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue