mirror of
https://github.com/anyproto/any-sync.git
synced 2025-06-07 21:47:02 +09:00
Merge branch 'main' into go-4493-inbox-for-invites
This commit is contained in:
commit
a6dbae6aa4
114 changed files with 6580 additions and 597 deletions
|
@ -5,7 +5,6 @@
|
|||
//
|
||||
// mockgen -destination mock_accountservice/mock_accountservice.go github.com/anyproto/any-sync/accountservice Service
|
||||
//
|
||||
|
||||
// Package mock_accountservice is a generated GoMock package.
|
||||
package mock_accountservice
|
||||
|
||||
|
|
|
@ -5,7 +5,6 @@
|
|||
//
|
||||
// mockgen -destination mock_acl/mock_acl.go github.com/anyproto/any-sync/acl AclService
|
||||
//
|
||||
|
||||
// Package mock_acl is a generated GoMock package.
|
||||
package mock_acl
|
||||
|
||||
|
|
|
@ -5,7 +5,6 @@
|
|||
//
|
||||
// mockgen -destination mock_debugstat/mock_debugstat.go github.com/anyproto/any-sync/app/debugstat StatService
|
||||
//
|
||||
|
||||
// Package mock_debugstat is a generated GoMock package.
|
||||
package mock_debugstat
|
||||
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
// Package ldiff provides a container of elements with fixed id and changeable content.
|
||||
// Diff can calculate the difference with another diff container (you can make it remote) with minimum hops and traffic.
|
||||
//
|
||||
//go:generate mockgen -destination mock_ldiff/mock_ldiff.go github.com/anyproto/any-sync/app/ldiff Diff,Remote
|
||||
//go:generate mockgen -destination mock_ldiff/mock_ldiff.go github.com/anyproto/any-sync/app/ldiff Diff,Remote,DiffContainer
|
||||
package ldiff
|
||||
|
||||
import (
|
||||
|
@ -15,6 +15,8 @@ import (
|
|||
"github.com/cespare/xxhash"
|
||||
"github.com/huandu/skiplist"
|
||||
"github.com/zeebo/blake3"
|
||||
|
||||
"github.com/anyproto/any-sync/commonspace/spacesyncproto"
|
||||
)
|
||||
|
||||
// Diff contains elements and can compare it with Remote diff
|
||||
|
@ -36,6 +38,13 @@ type Diff interface {
|
|||
Hash() string
|
||||
// Len returns count of elements in the diff
|
||||
Len() int
|
||||
// DiffType returns the diff type (diff logic and parameters)
|
||||
DiffType() spacesyncproto.DiffType
|
||||
}
|
||||
|
||||
type CompareDiff interface {
|
||||
CompareDiff(ctx context.Context, dl Remote) (newIds, ourChangedIds, theirChangedIds, removedIds []string, err error)
|
||||
Diff
|
||||
}
|
||||
|
||||
// New creates precalculated Diff container
|
||||
|
@ -141,6 +150,10 @@ func (d *diff) Compare(lhs, rhs interface{}) int {
|
|||
}
|
||||
}
|
||||
|
||||
func (d *diff) DiffType() spacesyncproto.DiffType {
|
||||
return spacesyncproto.DiffType_V2
|
||||
}
|
||||
|
||||
// CalcScore implements skiplist interface
|
||||
func (d *diff) CalcScore(key interface{}) float64 {
|
||||
return 0
|
||||
|
@ -237,11 +250,10 @@ func (d *diff) getRange(r Range) (rr RangeResult) {
|
|||
if rng != nil {
|
||||
rr.Hash = rng.hash
|
||||
rr.Count = rng.elements
|
||||
if !r.Elements && rng.isDivided {
|
||||
if !r.Elements {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
el := d.sl.Find(&element{hash: r.From})
|
||||
rr.Elements = make([]Element, 0, d.divideFactor)
|
||||
for el != nil && el.Key().(*element).hash <= r.To {
|
||||
|
@ -266,17 +278,18 @@ func (d *diff) Ranges(ctx context.Context, ranges []Range, resBuf []RangeResult)
|
|||
}
|
||||
|
||||
type diffCtx struct {
|
||||
newIds, changedIds, removedIds []string
|
||||
newIds, changedIds, theirChangedIds, removedIds []string
|
||||
|
||||
toSend, prepare []Range
|
||||
myRes, otherRes []RangeResult
|
||||
compareFunc func(dctx *diffCtx, my, other []Element)
|
||||
}
|
||||
|
||||
var errMismatched = errors.New("query and results mismatched")
|
||||
|
||||
// Diff makes diff with remote container
|
||||
func (d *diff) Diff(ctx context.Context, dl Remote) (newIds, changedIds, removedIds []string, err error) {
|
||||
dctx := &diffCtx{}
|
||||
dctx := &diffCtx{compareFunc: d.compareElementsEqual}
|
||||
dctx.toSend = append(dctx.toSend, Range{
|
||||
From: 0,
|
||||
To: math.MaxUint64,
|
||||
|
@ -307,6 +320,38 @@ func (d *diff) Diff(ctx context.Context, dl Remote) (newIds, changedIds, removed
|
|||
return dctx.newIds, dctx.changedIds, dctx.removedIds, nil
|
||||
}
|
||||
|
||||
func (d *diff) CompareDiff(ctx context.Context, dl Remote) (newIds, ourChangedIds, theirChangedIds, removedIds []string, err error) {
|
||||
dctx := &diffCtx{compareFunc: d.compareElementsGreater}
|
||||
dctx.toSend = append(dctx.toSend, Range{
|
||||
From: 0,
|
||||
To: math.MaxUint64,
|
||||
})
|
||||
for len(dctx.toSend) > 0 {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
err = ctx.Err()
|
||||
return
|
||||
default:
|
||||
}
|
||||
if dctx.otherRes, err = dl.Ranges(ctx, dctx.toSend, dctx.otherRes); err != nil {
|
||||
return
|
||||
}
|
||||
if dctx.myRes, err = d.Ranges(ctx, dctx.toSend, dctx.myRes); err != nil {
|
||||
return
|
||||
}
|
||||
if len(dctx.otherRes) != len(dctx.toSend) || len(dctx.myRes) != len(dctx.toSend) {
|
||||
err = errMismatched
|
||||
return
|
||||
}
|
||||
for i, r := range dctx.toSend {
|
||||
d.compareResults(dctx, r, dctx.myRes[i], dctx.otherRes[i])
|
||||
}
|
||||
dctx.toSend, dctx.prepare = dctx.prepare, dctx.toSend
|
||||
dctx.prepare = dctx.prepare[:0]
|
||||
}
|
||||
return dctx.newIds, dctx.changedIds, dctx.theirChangedIds, dctx.removedIds, nil
|
||||
}
|
||||
|
||||
func (d *diff) compareResults(dctx *diffCtx, r Range, myRes, otherRes RangeResult) {
|
||||
// both hash equals - do nothing
|
||||
if bytes.Equal(myRes.Hash, otherRes.Hash) {
|
||||
|
@ -316,15 +361,14 @@ func (d *diff) compareResults(dctx *diffCtx, r Range, myRes, otherRes RangeResul
|
|||
// other has elements
|
||||
if len(otherRes.Elements) == otherRes.Count {
|
||||
if len(myRes.Elements) == myRes.Count {
|
||||
d.compareElements(dctx, myRes.Elements, otherRes.Elements)
|
||||
dctx.compareFunc(dctx, myRes.Elements, otherRes.Elements)
|
||||
} else {
|
||||
r.Elements = true
|
||||
d.compareElements(dctx, d.getRange(r).Elements, otherRes.Elements)
|
||||
dctx.compareFunc(dctx, d.getRange(r).Elements, otherRes.Elements)
|
||||
}
|
||||
return
|
||||
}
|
||||
// request all elements from other, because we don't have enough
|
||||
if len(myRes.Elements) == myRes.Count {
|
||||
if otherRes.Count <= d.compareThreshold && len(otherRes.Elements) == 0 || len(myRes.Elements) == myRes.Count {
|
||||
r.Elements = true
|
||||
dctx.prepare = append(dctx.prepare, r)
|
||||
return
|
||||
|
@ -336,7 +380,7 @@ func (d *diff) compareResults(dctx *diffCtx, r Range, myRes, otherRes RangeResul
|
|||
return
|
||||
}
|
||||
|
||||
func (d *diff) compareElements(dctx *diffCtx, my, other []Element) {
|
||||
func (d *diff) compareElementsEqual(dctx *diffCtx, my, other []Element) {
|
||||
find := func(list []Element, targetEl Element) (has, eq bool) {
|
||||
for _, el := range list {
|
||||
if el.Id == targetEl.Id {
|
||||
|
@ -364,3 +408,40 @@ func (d *diff) compareElements(dctx *diffCtx, my, other []Element) {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d *diff) compareElementsGreater(dctx *diffCtx, my, other []Element) {
|
||||
find := func(list []Element, targetEl Element) (has, equal, greater bool) {
|
||||
for _, el := range list {
|
||||
if el.Id == targetEl.Id {
|
||||
if el.Head == targetEl.Head {
|
||||
return true, true, false
|
||||
}
|
||||
return true, false, el.Head > targetEl.Head
|
||||
}
|
||||
}
|
||||
return false, false, false
|
||||
}
|
||||
|
||||
for _, el := range my {
|
||||
has, eq, theirGreater := find(other, el)
|
||||
if !has {
|
||||
dctx.removedIds = append(dctx.removedIds, el.Id)
|
||||
continue
|
||||
} else {
|
||||
if eq {
|
||||
continue
|
||||
}
|
||||
if theirGreater {
|
||||
dctx.theirChangedIds = append(dctx.theirChangedIds, el.Id)
|
||||
} else {
|
||||
dctx.changedIds = append(dctx.changedIds, el.Id)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, el := range other {
|
||||
if has, _, _ := find(my, el); !has {
|
||||
dctx.newIds = append(dctx.newIds, el.Id)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -139,6 +139,35 @@ func TestDiff_Diff(t *testing.T) {
|
|||
assert.Len(t, changedIds, length/2)
|
||||
assert.Len(t, removedIds, 0)
|
||||
})
|
||||
t.Run("compare diff", func(t *testing.T) {
|
||||
d1 := New(16, 128).(CompareDiff)
|
||||
d2 := New(16, 128)
|
||||
|
||||
length := 10000
|
||||
for i := 0; i < length; i++ {
|
||||
id := fmt.Sprint(i)
|
||||
head := "a" + uuid.NewString()
|
||||
d1.Set(Element{
|
||||
Id: id,
|
||||
Head: head,
|
||||
})
|
||||
}
|
||||
for i := 0; i < length; i++ {
|
||||
id := fmt.Sprint(i)
|
||||
head := "b" + uuid.NewString()
|
||||
d2.Set(Element{
|
||||
Id: id,
|
||||
Head: head,
|
||||
})
|
||||
}
|
||||
|
||||
newIds, changedIds, theirChangedIds, removedIds, err := d1.CompareDiff(ctx, d2)
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, newIds, 0)
|
||||
assert.Len(t, changedIds, 0)
|
||||
assert.Len(t, theirChangedIds, length)
|
||||
assert.Len(t, removedIds, 0)
|
||||
})
|
||||
t.Run("empty", func(t *testing.T) {
|
||||
d1 := New(16, 16)
|
||||
d2 := New(16, 16)
|
||||
|
|
65
app/ldiff/diffcontainer.go
Normal file
65
app/ldiff/diffcontainer.go
Normal file
|
@ -0,0 +1,65 @@
|
|||
package ldiff
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
|
||||
"github.com/zeebo/blake3"
|
||||
)
|
||||
|
||||
type RemoteTypeChecker interface {
|
||||
DiffTypeCheck(ctx context.Context, diffContainer DiffContainer) (needsSync bool, diff Diff, err error)
|
||||
}
|
||||
|
||||
type DiffContainer interface {
|
||||
DiffTypeCheck(ctx context.Context, typeChecker RemoteTypeChecker) (needsSync bool, diff Diff, err error)
|
||||
OldDiff() Diff
|
||||
NewDiff() Diff
|
||||
Set(elements ...Element)
|
||||
RemoveId(id string) error
|
||||
}
|
||||
|
||||
type diffContainer struct {
|
||||
newDiff Diff
|
||||
oldDiff Diff
|
||||
}
|
||||
|
||||
func (d *diffContainer) NewDiff() Diff {
|
||||
return d.newDiff
|
||||
}
|
||||
|
||||
func (d *diffContainer) OldDiff() Diff {
|
||||
return d.oldDiff
|
||||
}
|
||||
|
||||
func (d *diffContainer) Set(elements ...Element) {
|
||||
hasher := hashersPool.Get().(*blake3.Hasher)
|
||||
defer hashersPool.Put(hasher)
|
||||
for _, el := range elements {
|
||||
hasher.Reset()
|
||||
hasher.WriteString(el.Head)
|
||||
stringHash := hex.EncodeToString(hasher.Sum(nil))
|
||||
d.newDiff.Set(Element{
|
||||
Id: el.Id,
|
||||
Head: stringHash,
|
||||
})
|
||||
}
|
||||
d.oldDiff.Set(elements...)
|
||||
}
|
||||
|
||||
func (d *diffContainer) RemoveId(id string) error {
|
||||
_ = d.newDiff.RemoveId(id)
|
||||
_ = d.oldDiff.RemoveId(id)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *diffContainer) DiffTypeCheck(ctx context.Context, typeChecker RemoteTypeChecker) (needsSync bool, diff Diff, err error) {
|
||||
return typeChecker.DiffTypeCheck(ctx, d)
|
||||
}
|
||||
|
||||
func NewDiffContainer(new, old Diff) DiffContainer {
|
||||
return &diffContainer{
|
||||
newDiff: new,
|
||||
oldDiff: old,
|
||||
}
|
||||
}
|
|
@ -1,11 +1,10 @@
|
|||
// Code generated by MockGen. DO NOT EDIT.
|
||||
// Source: github.com/anyproto/any-sync/app/ldiff (interfaces: Diff,Remote)
|
||||
// Source: github.com/anyproto/any-sync/app/ldiff (interfaces: Diff,Remote,DiffContainer)
|
||||
//
|
||||
// Generated by this command:
|
||||
//
|
||||
// mockgen -destination mock_ldiff/mock_ldiff.go github.com/anyproto/any-sync/app/ldiff Diff,Remote
|
||||
// mockgen -destination mock_ldiff/mock_ldiff.go github.com/anyproto/any-sync/app/ldiff Diff,Remote,DiffContainer
|
||||
//
|
||||
|
||||
// Package mock_ldiff is a generated GoMock package.
|
||||
package mock_ldiff
|
||||
|
||||
|
@ -14,6 +13,7 @@ import (
|
|||
reflect "reflect"
|
||||
|
||||
ldiff "github.com/anyproto/any-sync/app/ldiff"
|
||||
spacesyncproto "github.com/anyproto/any-sync/commonspace/spacesyncproto"
|
||||
gomock "go.uber.org/mock/gomock"
|
||||
)
|
||||
|
||||
|
@ -57,6 +57,20 @@ func (mr *MockDiffMockRecorder) Diff(arg0, arg1 any) *gomock.Call {
|
|||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Diff", reflect.TypeOf((*MockDiff)(nil).Diff), arg0, arg1)
|
||||
}
|
||||
|
||||
// DiffType mocks base method.
|
||||
func (m *MockDiff) DiffType() spacesyncproto.DiffType {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "DiffType")
|
||||
ret0, _ := ret[0].(spacesyncproto.DiffType)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// DiffType indicates an expected call of DiffType.
|
||||
func (mr *MockDiffMockRecorder) DiffType() *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DiffType", reflect.TypeOf((*MockDiff)(nil).DiffType))
|
||||
}
|
||||
|
||||
// Element mocks base method.
|
||||
func (m *MockDiff) Element(arg0 string) (ldiff.Element, error) {
|
||||
m.ctrl.T.Helper()
|
||||
|
@ -210,3 +224,100 @@ func (mr *MockRemoteMockRecorder) Ranges(arg0, arg1, arg2 any) *gomock.Call {
|
|||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Ranges", reflect.TypeOf((*MockRemote)(nil).Ranges), arg0, arg1, arg2)
|
||||
}
|
||||
|
||||
// MockDiffContainer is a mock of DiffContainer interface.
|
||||
type MockDiffContainer struct {
|
||||
ctrl *gomock.Controller
|
||||
recorder *MockDiffContainerMockRecorder
|
||||
}
|
||||
|
||||
// MockDiffContainerMockRecorder is the mock recorder for MockDiffContainer.
|
||||
type MockDiffContainerMockRecorder struct {
|
||||
mock *MockDiffContainer
|
||||
}
|
||||
|
||||
// NewMockDiffContainer creates a new mock instance.
|
||||
func NewMockDiffContainer(ctrl *gomock.Controller) *MockDiffContainer {
|
||||
mock := &MockDiffContainer{ctrl: ctrl}
|
||||
mock.recorder = &MockDiffContainerMockRecorder{mock}
|
||||
return mock
|
||||
}
|
||||
|
||||
// EXPECT returns an object that allows the caller to indicate expected use.
|
||||
func (m *MockDiffContainer) EXPECT() *MockDiffContainerMockRecorder {
|
||||
return m.recorder
|
||||
}
|
||||
|
||||
// DiffTypeCheck mocks base method.
|
||||
func (m *MockDiffContainer) DiffTypeCheck(arg0 context.Context, arg1 ldiff.RemoteTypeChecker) (bool, ldiff.Diff, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "DiffTypeCheck", arg0, arg1)
|
||||
ret0, _ := ret[0].(bool)
|
||||
ret1, _ := ret[1].(ldiff.Diff)
|
||||
ret2, _ := ret[2].(error)
|
||||
return ret0, ret1, ret2
|
||||
}
|
||||
|
||||
// DiffTypeCheck indicates an expected call of DiffTypeCheck.
|
||||
func (mr *MockDiffContainerMockRecorder) DiffTypeCheck(arg0, arg1 any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DiffTypeCheck", reflect.TypeOf((*MockDiffContainer)(nil).DiffTypeCheck), arg0, arg1)
|
||||
}
|
||||
|
||||
// NewDiff mocks base method.
|
||||
func (m *MockDiffContainer) NewDiff() ldiff.Diff {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "NewDiff")
|
||||
ret0, _ := ret[0].(ldiff.Diff)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// NewDiff indicates an expected call of NewDiff.
|
||||
func (mr *MockDiffContainerMockRecorder) NewDiff() *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewDiff", reflect.TypeOf((*MockDiffContainer)(nil).NewDiff))
|
||||
}
|
||||
|
||||
// OldDiff mocks base method.
|
||||
func (m *MockDiffContainer) OldDiff() ldiff.Diff {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "OldDiff")
|
||||
ret0, _ := ret[0].(ldiff.Diff)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// OldDiff indicates an expected call of OldDiff.
|
||||
func (mr *MockDiffContainerMockRecorder) OldDiff() *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OldDiff", reflect.TypeOf((*MockDiffContainer)(nil).OldDiff))
|
||||
}
|
||||
|
||||
// RemoveId mocks base method.
|
||||
func (m *MockDiffContainer) RemoveId(arg0 string) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "RemoveId", arg0)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// RemoveId indicates an expected call of RemoveId.
|
||||
func (mr *MockDiffContainerMockRecorder) RemoveId(arg0 any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveId", reflect.TypeOf((*MockDiffContainer)(nil).RemoveId), arg0)
|
||||
}
|
||||
|
||||
// Set mocks base method.
|
||||
func (m *MockDiffContainer) Set(arg0 ...ldiff.Element) {
|
||||
m.ctrl.T.Helper()
|
||||
varargs := []any{}
|
||||
for _, a := range arg0 {
|
||||
varargs = append(varargs, a)
|
||||
}
|
||||
m.ctrl.Call(m, "Set", varargs...)
|
||||
}
|
||||
|
||||
// Set indicates an expected call of Set.
|
||||
func (mr *MockDiffContainerMockRecorder) Set(arg0 ...any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Set", reflect.TypeOf((*MockDiffContainer)(nil).Set), arg0...)
|
||||
}
|
||||
|
|
322
app/olddiff/diff.go
Normal file
322
app/olddiff/diff.go
Normal file
|
@ -0,0 +1,322 @@
|
|||
package olddiff
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"math"
|
||||
"sync"
|
||||
|
||||
"github.com/cespare/xxhash"
|
||||
"github.com/huandu/skiplist"
|
||||
"github.com/zeebo/blake3"
|
||||
|
||||
"github.com/anyproto/any-sync/app/ldiff"
|
||||
"github.com/anyproto/any-sync/commonspace/spacesyncproto"
|
||||
)
|
||||
|
||||
// New creates precalculated Diff container
|
||||
//
|
||||
// divideFactor - means how many hashes you want to ask for once
|
||||
//
|
||||
// it must be 2 or greater
|
||||
// normal value usually between 4 and 64
|
||||
//
|
||||
// compareThreshold - means the maximum count of elements remote diff will send directly
|
||||
//
|
||||
// if elements under range will be more - remote diff will send only hash
|
||||
// it must be 1 or greater
|
||||
// normal value between 8 and 64
|
||||
//
|
||||
// Less threshold and divideFactor - less traffic but more requests
|
||||
func New(divideFactor, compareThreshold int) ldiff.Diff {
|
||||
return newDiff(divideFactor, compareThreshold)
|
||||
}
|
||||
|
||||
func newDiff(divideFactor, compareThreshold int) ldiff.Diff {
|
||||
if divideFactor < 2 {
|
||||
divideFactor = 2
|
||||
}
|
||||
if compareThreshold < 1 {
|
||||
compareThreshold = 1
|
||||
}
|
||||
d := &diff{
|
||||
divideFactor: divideFactor,
|
||||
compareThreshold: compareThreshold,
|
||||
}
|
||||
d.sl = skiplist.New(d)
|
||||
d.ranges = newHashRanges(divideFactor, compareThreshold, d.sl)
|
||||
d.ranges.dirty[d.ranges.topRange] = struct{}{}
|
||||
d.ranges.recalculateHashes()
|
||||
return d
|
||||
}
|
||||
|
||||
var hashersPool = &sync.Pool{
|
||||
New: func() any {
|
||||
return blake3.New()
|
||||
},
|
||||
}
|
||||
|
||||
var ErrElementNotFound = errors.New("ldiff: element not found")
|
||||
|
||||
type element struct {
|
||||
ldiff.Element
|
||||
hash uint64
|
||||
}
|
||||
|
||||
// Diff contains elements and can compare it with Remote diff
|
||||
type diff struct {
|
||||
sl *skiplist.SkipList
|
||||
divideFactor int
|
||||
compareThreshold int
|
||||
ranges *hashRanges
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
// Compare implements skiplist interface
|
||||
func (d *diff) Compare(lhs, rhs interface{}) int {
|
||||
lhe := lhs.(*element)
|
||||
rhe := rhs.(*element)
|
||||
if lhe.Id == rhe.Id {
|
||||
return 0
|
||||
}
|
||||
if lhe.hash > rhe.hash {
|
||||
return 1
|
||||
} else if lhe.hash < rhe.hash {
|
||||
return -1
|
||||
}
|
||||
if lhe.Id > rhe.Id {
|
||||
return 1
|
||||
} else {
|
||||
return -1
|
||||
}
|
||||
}
|
||||
|
||||
// CalcScore implements skiplist interface
|
||||
func (d *diff) CalcScore(key interface{}) float64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
// Set adds or update element in container
|
||||
func (d *diff) Set(elements ...ldiff.Element) {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
for _, e := range elements {
|
||||
hash := xxhash.Sum64([]byte(e.Id))
|
||||
el := &element{Element: e, hash: hash}
|
||||
d.sl.Remove(el)
|
||||
d.sl.Set(el, nil)
|
||||
d.ranges.addElement(hash)
|
||||
}
|
||||
d.ranges.recalculateHashes()
|
||||
}
|
||||
|
||||
func (d *diff) Ids() (ids []string) {
|
||||
d.mu.RLock()
|
||||
defer d.mu.RUnlock()
|
||||
|
||||
ids = make([]string, 0, d.sl.Len())
|
||||
|
||||
cur := d.sl.Front()
|
||||
for cur != nil {
|
||||
el := cur.Key().(*element).Element
|
||||
ids = append(ids, el.Id)
|
||||
cur = cur.Next()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (d *diff) Len() int {
|
||||
d.mu.RLock()
|
||||
defer d.mu.RUnlock()
|
||||
return d.sl.Len()
|
||||
}
|
||||
|
||||
func (d *diff) DiffType() spacesyncproto.DiffType {
|
||||
return spacesyncproto.DiffType_V1
|
||||
}
|
||||
|
||||
func (d *diff) Elements() (elements []ldiff.Element) {
|
||||
d.mu.RLock()
|
||||
defer d.mu.RUnlock()
|
||||
|
||||
elements = make([]ldiff.Element, 0, d.sl.Len())
|
||||
|
||||
cur := d.sl.Front()
|
||||
for cur != nil {
|
||||
el := cur.Key().(*element).Element
|
||||
elements = append(elements, el)
|
||||
cur = cur.Next()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (d *diff) Element(id string) (ldiff.Element, error) {
|
||||
d.mu.RLock()
|
||||
defer d.mu.RUnlock()
|
||||
el := d.sl.Get(&element{Element: ldiff.Element{Id: id}, hash: xxhash.Sum64([]byte(id))})
|
||||
if el == nil {
|
||||
return ldiff.Element{}, ErrElementNotFound
|
||||
}
|
||||
if e, ok := el.Key().(*element); ok {
|
||||
return e.Element, nil
|
||||
}
|
||||
return ldiff.Element{}, ErrElementNotFound
|
||||
}
|
||||
|
||||
func (d *diff) Hash() string {
|
||||
d.mu.RLock()
|
||||
defer d.mu.RUnlock()
|
||||
return hex.EncodeToString(d.ranges.hash())
|
||||
}
|
||||
|
||||
// RemoveId removes element by id
|
||||
func (d *diff) RemoveId(id string) error {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
hash := xxhash.Sum64([]byte(id))
|
||||
el := &element{Element: ldiff.Element{
|
||||
Id: id,
|
||||
}, hash: hash}
|
||||
if d.sl.Remove(el) == nil {
|
||||
return ErrElementNotFound
|
||||
}
|
||||
d.ranges.removeElement(hash)
|
||||
d.ranges.recalculateHashes()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *diff) getRange(r ldiff.Range) (rr ldiff.RangeResult) {
|
||||
rng := d.ranges.getRange(r.From, r.To)
|
||||
// if we have the division for this range
|
||||
if rng != nil {
|
||||
rr.Hash = rng.hash
|
||||
rr.Count = rng.elements
|
||||
if !r.Elements && rng.isDivided {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
el := d.sl.Find(&element{hash: r.From})
|
||||
rr.Elements = make([]ldiff.Element, 0, d.divideFactor)
|
||||
for el != nil && el.Key().(*element).hash <= r.To {
|
||||
elem := el.Key().(*element).Element
|
||||
el = el.Next()
|
||||
rr.Elements = append(rr.Elements, elem)
|
||||
}
|
||||
rr.Count = len(rr.Elements)
|
||||
return
|
||||
}
|
||||
|
||||
// Ranges calculates given ranges and return results
|
||||
func (d *diff) Ranges(ctx context.Context, ranges []ldiff.Range, resBuf []ldiff.RangeResult) (results []ldiff.RangeResult, err error) {
|
||||
d.mu.RLock()
|
||||
defer d.mu.RUnlock()
|
||||
|
||||
results = resBuf[:0]
|
||||
for _, r := range ranges {
|
||||
results = append(results, d.getRange(r))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type diffCtx struct {
|
||||
newIds, changedIds, removedIds []string
|
||||
|
||||
toSend, prepare []ldiff.Range
|
||||
myRes, otherRes []ldiff.RangeResult
|
||||
}
|
||||
|
||||
var errMismatched = errors.New("query and results mismatched")
|
||||
|
||||
// Diff makes diff with remote container
|
||||
func (d *diff) Diff(ctx context.Context, dl ldiff.Remote) (newIds, changedIds, removedIds []string, err error) {
|
||||
dctx := &diffCtx{}
|
||||
dctx.toSend = append(dctx.toSend, ldiff.Range{
|
||||
From: 0,
|
||||
To: math.MaxUint64,
|
||||
})
|
||||
for len(dctx.toSend) > 0 {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
err = ctx.Err()
|
||||
return
|
||||
default:
|
||||
}
|
||||
if dctx.otherRes, err = dl.Ranges(ctx, dctx.toSend, dctx.otherRes); err != nil {
|
||||
return
|
||||
}
|
||||
if dctx.myRes, err = d.Ranges(ctx, dctx.toSend, dctx.myRes); err != nil {
|
||||
return
|
||||
}
|
||||
if len(dctx.otherRes) != len(dctx.toSend) || len(dctx.myRes) != len(dctx.toSend) {
|
||||
err = errMismatched
|
||||
return
|
||||
}
|
||||
for i, r := range dctx.toSend {
|
||||
d.compareResults(dctx, r, dctx.myRes[i], dctx.otherRes[i])
|
||||
}
|
||||
dctx.toSend, dctx.prepare = dctx.prepare, dctx.toSend
|
||||
dctx.prepare = dctx.prepare[:0]
|
||||
}
|
||||
return dctx.newIds, dctx.changedIds, dctx.removedIds, nil
|
||||
}
|
||||
|
||||
func (d *diff) compareResults(dctx *diffCtx, r ldiff.Range, myRes, otherRes ldiff.RangeResult) {
|
||||
// both hash equals - do nothing
|
||||
if bytes.Equal(myRes.Hash, otherRes.Hash) {
|
||||
return
|
||||
}
|
||||
|
||||
// other has elements
|
||||
if len(otherRes.Elements) == otherRes.Count {
|
||||
if len(myRes.Elements) == myRes.Count {
|
||||
d.compareElements(dctx, myRes.Elements, otherRes.Elements)
|
||||
} else {
|
||||
r.Elements = true
|
||||
d.compareElements(dctx, d.getRange(r).Elements, otherRes.Elements)
|
||||
}
|
||||
return
|
||||
}
|
||||
// request all elements from other, because we don't have enough
|
||||
if len(myRes.Elements) == myRes.Count {
|
||||
r.Elements = true
|
||||
dctx.prepare = append(dctx.prepare, r)
|
||||
return
|
||||
}
|
||||
rangeTuples := genTupleRanges(r.From, r.To, d.divideFactor)
|
||||
for _, tuple := range rangeTuples {
|
||||
dctx.prepare = append(dctx.prepare, ldiff.Range{From: tuple.from, To: tuple.to})
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (d *diff) compareElements(dctx *diffCtx, my, other []ldiff.Element) {
|
||||
find := func(list []ldiff.Element, targetEl ldiff.Element) (has, eq bool) {
|
||||
for _, el := range list {
|
||||
if el.Id == targetEl.Id {
|
||||
return true, el.Head == targetEl.Head
|
||||
}
|
||||
}
|
||||
return false, false
|
||||
}
|
||||
|
||||
for _, el := range my {
|
||||
has, eq := find(other, el)
|
||||
if !has {
|
||||
dctx.removedIds = append(dctx.removedIds, el.Id)
|
||||
continue
|
||||
} else {
|
||||
if !eq {
|
||||
dctx.changedIds = append(dctx.changedIds, el.Id)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, el := range other {
|
||||
if has, _ := find(my, el); !has {
|
||||
dctx.newIds = append(dctx.newIds, el.Id)
|
||||
}
|
||||
}
|
||||
}
|
428
app/olddiff/diff_test.go
Normal file
428
app/olddiff/diff_test.go
Normal file
|
@ -0,0 +1,428 @@
|
|||
package olddiff
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/exp/slices"
|
||||
|
||||
"github.com/anyproto/any-sync/app/ldiff"
|
||||
)
|
||||
|
||||
func TestDiff_fillRange(t *testing.T) {
|
||||
d := New(4, 4).(*diff)
|
||||
for i := 0; i < 10; i++ {
|
||||
el := ldiff.Element{
|
||||
Id: fmt.Sprint(i),
|
||||
Head: fmt.Sprint("h", i),
|
||||
}
|
||||
d.Set(el)
|
||||
}
|
||||
t.Log(d.sl.Len())
|
||||
|
||||
t.Run("elements", func(t *testing.T) {
|
||||
r := ldiff.Range{From: 0, To: math.MaxUint64}
|
||||
res := d.getRange(r)
|
||||
assert.NotNil(t, res.Hash)
|
||||
assert.Equal(t, res.Count, 10)
|
||||
})
|
||||
}
|
||||
|
||||
func TestDiff_Diff(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
t.Run("basic", func(t *testing.T) {
|
||||
d1 := New(16, 16)
|
||||
d2 := New(16, 16)
|
||||
for i := 0; i < 1000; i++ {
|
||||
id := fmt.Sprint(i)
|
||||
head := uuid.NewString()
|
||||
d1.Set(ldiff.Element{
|
||||
Id: id,
|
||||
Head: head,
|
||||
})
|
||||
d2.Set(ldiff.Element{
|
||||
Id: id,
|
||||
Head: head,
|
||||
})
|
||||
}
|
||||
|
||||
newIds, changedIds, removedIds, err := d1.Diff(ctx, d2)
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, newIds, 0)
|
||||
assert.Len(t, changedIds, 0)
|
||||
assert.Len(t, removedIds, 0)
|
||||
|
||||
d2.Set(ldiff.Element{
|
||||
Id: "newD1",
|
||||
Head: "newD1",
|
||||
})
|
||||
d2.Set(ldiff.Element{
|
||||
Id: "1",
|
||||
Head: "changed",
|
||||
})
|
||||
require.NoError(t, d2.RemoveId("0"))
|
||||
|
||||
newIds, changedIds, removedIds, err = d1.Diff(ctx, d2)
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, newIds, 1)
|
||||
assert.Len(t, changedIds, 1)
|
||||
assert.Len(t, removedIds, 1)
|
||||
})
|
||||
t.Run("complex", func(t *testing.T) {
|
||||
d1 := New(16, 128)
|
||||
d2 := New(16, 128)
|
||||
length := 10000
|
||||
for i := 0; i < length; i++ {
|
||||
id := fmt.Sprint(i)
|
||||
head := uuid.NewString()
|
||||
d1.Set(ldiff.Element{
|
||||
Id: id,
|
||||
Head: head,
|
||||
})
|
||||
}
|
||||
|
||||
newIds, changedIds, removedIds, err := d1.Diff(ctx, d2)
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, newIds, 0)
|
||||
assert.Len(t, changedIds, 0)
|
||||
assert.Len(t, removedIds, length)
|
||||
|
||||
for i := 0; i < length; i++ {
|
||||
id := fmt.Sprint(i)
|
||||
head := uuid.NewString()
|
||||
d2.Set(ldiff.Element{
|
||||
Id: id,
|
||||
Head: head,
|
||||
})
|
||||
}
|
||||
|
||||
newIds, changedIds, removedIds, err = d1.Diff(ctx, d2)
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, newIds, 0)
|
||||
assert.Len(t, changedIds, length)
|
||||
assert.Len(t, removedIds, 0)
|
||||
|
||||
for i := 0; i < length; i++ {
|
||||
id := fmt.Sprint(i)
|
||||
head := uuid.NewString()
|
||||
d2.Set(ldiff.Element{
|
||||
Id: id,
|
||||
Head: head,
|
||||
})
|
||||
}
|
||||
|
||||
res, err := d1.Ranges(
|
||||
context.Background(),
|
||||
[]ldiff.Range{{From: 0, To: math.MaxUint64, Elements: true}},
|
||||
nil)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, res, 1)
|
||||
for i, el := range res[0].Elements {
|
||||
if i < length/2 {
|
||||
continue
|
||||
}
|
||||
id := el.Id
|
||||
head := el.Head
|
||||
d2.Set(ldiff.Element{
|
||||
Id: id,
|
||||
Head: head,
|
||||
})
|
||||
}
|
||||
|
||||
newIds, changedIds, removedIds, err = d1.Diff(ctx, d2)
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, newIds, 0)
|
||||
assert.Len(t, changedIds, length/2)
|
||||
assert.Len(t, removedIds, 0)
|
||||
})
|
||||
t.Run("empty", func(t *testing.T) {
|
||||
d1 := New(16, 16)
|
||||
d2 := New(16, 16)
|
||||
newIds, changedIds, removedIds, err := d1.Diff(ctx, d2)
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, newIds, 0)
|
||||
assert.Len(t, changedIds, 0)
|
||||
assert.Len(t, removedIds, 0)
|
||||
})
|
||||
t.Run("one empty", func(t *testing.T) {
|
||||
d1 := New(4, 4)
|
||||
d2 := New(4, 4)
|
||||
length := 10000
|
||||
for i := 0; i < length; i++ {
|
||||
d2.Set(ldiff.Element{
|
||||
Id: fmt.Sprint(i),
|
||||
Head: uuid.NewString(),
|
||||
})
|
||||
}
|
||||
newIds, changedIds, removedIds, err := d1.Diff(ctx, d2)
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, newIds, length)
|
||||
assert.Len(t, changedIds, 0)
|
||||
assert.Len(t, removedIds, 0)
|
||||
})
|
||||
t.Run("not intersecting", func(t *testing.T) {
|
||||
d1 := New(16, 16)
|
||||
d2 := New(16, 16)
|
||||
length := 10000
|
||||
for i := 0; i < length; i++ {
|
||||
d1.Set(ldiff.Element{
|
||||
Id: fmt.Sprint(i),
|
||||
Head: uuid.NewString(),
|
||||
})
|
||||
}
|
||||
for i := length; i < length*2; i++ {
|
||||
d2.Set(ldiff.Element{
|
||||
Id: fmt.Sprint(i),
|
||||
Head: uuid.NewString(),
|
||||
})
|
||||
}
|
||||
newIds, changedIds, removedIds, err := d1.Diff(ctx, d2)
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, newIds, length)
|
||||
assert.Len(t, changedIds, 0)
|
||||
assert.Len(t, removedIds, length)
|
||||
})
|
||||
t.Run("context cancel", func(t *testing.T) {
|
||||
d1 := New(4, 4)
|
||||
d2 := New(4, 4)
|
||||
for i := 0; i < 10; i++ {
|
||||
d2.Set(ldiff.Element{
|
||||
Id: fmt.Sprint(i),
|
||||
Head: uuid.NewString(),
|
||||
})
|
||||
}
|
||||
var cancel func()
|
||||
ctx, cancel = context.WithCancel(ctx)
|
||||
cancel()
|
||||
_, _, _, err := d1.Diff(ctx, d2)
|
||||
assert.ErrorIs(t, err, context.Canceled)
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkDiff_Ranges(b *testing.B) {
|
||||
d := New(16, 16)
|
||||
for i := 0; i < 10000; i++ {
|
||||
id := fmt.Sprint(i)
|
||||
head := uuid.NewString()
|
||||
d.Set(ldiff.Element{
|
||||
Id: id,
|
||||
Head: head,
|
||||
})
|
||||
}
|
||||
ctx := context.Background()
|
||||
b.ResetTimer()
|
||||
b.ReportAllocs()
|
||||
var resBuf []ldiff.RangeResult
|
||||
var ranges = []ldiff.Range{{From: 0, To: math.MaxUint64}}
|
||||
for i := 0; i < b.N; i++ {
|
||||
d.Ranges(ctx, ranges, resBuf)
|
||||
resBuf = resBuf[:0]
|
||||
}
|
||||
}
|
||||
|
||||
func TestDiff_Hash(t *testing.T) {
|
||||
d := New(16, 16)
|
||||
h1 := d.Hash()
|
||||
assert.NotEmpty(t, h1)
|
||||
d.Set(ldiff.Element{Id: "1"})
|
||||
h2 := d.Hash()
|
||||
assert.NotEmpty(t, h2)
|
||||
assert.NotEqual(t, h1, h2)
|
||||
}
|
||||
|
||||
func TestDiff_Element(t *testing.T) {
|
||||
d := New(16, 16)
|
||||
for i := 0; i < 10; i++ {
|
||||
d.Set(ldiff.Element{Id: fmt.Sprint("id", i), Head: fmt.Sprint("head", i)})
|
||||
}
|
||||
_, err := d.Element("not found")
|
||||
assert.Equal(t, ErrElementNotFound, err)
|
||||
|
||||
el, err := d.Element("id5")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "head5", el.Head)
|
||||
|
||||
d.Set(ldiff.Element{"id5", "otherHead"})
|
||||
el, err = d.Element("id5")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "otherHead", el.Head)
|
||||
}
|
||||
|
||||
func TestDiff_Ids(t *testing.T) {
|
||||
d := New(16, 16)
|
||||
var ids []string
|
||||
for i := 0; i < 10; i++ {
|
||||
id := fmt.Sprint("id", i)
|
||||
d.Set(ldiff.Element{Id: id, Head: fmt.Sprint("head", i)})
|
||||
ids = append(ids, id)
|
||||
}
|
||||
gotIds := d.Ids()
|
||||
sort.Strings(gotIds)
|
||||
assert.Equal(t, ids, gotIds)
|
||||
assert.Equal(t, len(ids), d.Len())
|
||||
}
|
||||
|
||||
func TestDiff_Elements(t *testing.T) {
|
||||
d := New(16, 16)
|
||||
var els []ldiff.Element
|
||||
for i := 0; i < 10; i++ {
|
||||
id := fmt.Sprint("id", i)
|
||||
el := ldiff.Element{Id: id, Head: fmt.Sprint("head", i)}
|
||||
d.Set(el)
|
||||
els = append(els, el)
|
||||
}
|
||||
gotEls := d.Elements()
|
||||
sort.Slice(gotEls, func(i, j int) bool {
|
||||
return gotEls[i].Id < gotEls[j].Id
|
||||
})
|
||||
assert.Equal(t, els, gotEls)
|
||||
}
|
||||
|
||||
func TestRangesAddRemove(t *testing.T) {
|
||||
length := 10000
|
||||
divideFactor := 4
|
||||
compareThreshold := 4
|
||||
addTwice := func() string {
|
||||
d := New(divideFactor, compareThreshold)
|
||||
var els []ldiff.Element
|
||||
for i := 0; i < length; i++ {
|
||||
if i < length/20 {
|
||||
continue
|
||||
}
|
||||
els = append(els, ldiff.Element{
|
||||
Id: fmt.Sprint(i),
|
||||
Head: fmt.Sprint("h", i),
|
||||
})
|
||||
}
|
||||
d.Set(els...)
|
||||
els = els[:0]
|
||||
for i := 0; i < length/20; i++ {
|
||||
els = append(els, ldiff.Element{
|
||||
Id: fmt.Sprint(i),
|
||||
Head: fmt.Sprint("h", i),
|
||||
})
|
||||
}
|
||||
d.Set(els...)
|
||||
return d.Hash()
|
||||
}
|
||||
addOnce := func() string {
|
||||
d := New(divideFactor, compareThreshold)
|
||||
var els []ldiff.Element
|
||||
for i := 0; i < length; i++ {
|
||||
els = append(els, ldiff.Element{
|
||||
Id: fmt.Sprint(i),
|
||||
Head: fmt.Sprint("h", i),
|
||||
})
|
||||
}
|
||||
d.Set(els...)
|
||||
return d.Hash()
|
||||
}
|
||||
addRemove := func() string {
|
||||
d := New(divideFactor, compareThreshold)
|
||||
var els []ldiff.Element
|
||||
for i := 0; i < length; i++ {
|
||||
els = append(els, ldiff.Element{
|
||||
Id: fmt.Sprint(i),
|
||||
Head: fmt.Sprint("h", i),
|
||||
})
|
||||
}
|
||||
d.Set(els...)
|
||||
for i := 0; i < length/20; i++ {
|
||||
err := d.RemoveId(fmt.Sprint(i))
|
||||
require.NoError(t, err)
|
||||
}
|
||||
els = els[:0]
|
||||
for i := 0; i < length/20; i++ {
|
||||
els = append(els, ldiff.Element{
|
||||
Id: fmt.Sprint(i),
|
||||
Head: fmt.Sprint("h", i),
|
||||
})
|
||||
}
|
||||
d.Set(els...)
|
||||
return d.Hash()
|
||||
}
|
||||
require.Equal(t, addTwice(), addOnce(), addRemove())
|
||||
}
|
||||
|
||||
func printBestParams() {
|
||||
numTests := 10
|
||||
length := 100000
|
||||
calcParams := func(divideFactor, compareThreshold, length int) (total, maxLevel, avgLevel, zeroEls int) {
|
||||
d := New(divideFactor, compareThreshold).(*diff)
|
||||
var els []ldiff.Element
|
||||
for i := 0; i < length; i++ {
|
||||
els = append(els, ldiff.Element{
|
||||
Id: uuid.NewString(),
|
||||
Head: uuid.NewString(),
|
||||
})
|
||||
}
|
||||
d.Set(els...)
|
||||
for _, rng := range d.ranges.ranges {
|
||||
if rng.elements == 0 {
|
||||
zeroEls++
|
||||
}
|
||||
if rng.level > maxLevel {
|
||||
maxLevel = rng.level
|
||||
}
|
||||
avgLevel += rng.level
|
||||
}
|
||||
total = len(d.ranges.ranges)
|
||||
avgLevel = avgLevel / total
|
||||
return
|
||||
}
|
||||
type result struct {
|
||||
divFactor, compThreshold, numRanges, maxLevel, avgLevel, zeroEls int
|
||||
}
|
||||
sf := func(i, j result) int {
|
||||
if i.numRanges < j.numRanges {
|
||||
return -1
|
||||
} else if i.numRanges == j.numRanges {
|
||||
return 0
|
||||
} else {
|
||||
return 1
|
||||
}
|
||||
}
|
||||
var results []result
|
||||
for divFactor := 0; divFactor < 6; divFactor++ {
|
||||
df := 1 << divFactor
|
||||
for compThreshold := 0; compThreshold < 10; compThreshold++ {
|
||||
ct := 1 << compThreshold
|
||||
fmt.Println("starting, df:", df, "ct:", ct)
|
||||
var rngs []result
|
||||
for i := 0; i < numTests; i++ {
|
||||
total, maxLevel, avgLevel, zeroEls := calcParams(df, ct, length)
|
||||
rngs = append(rngs, result{
|
||||
divFactor: df,
|
||||
compThreshold: ct,
|
||||
numRanges: total,
|
||||
maxLevel: maxLevel,
|
||||
avgLevel: avgLevel,
|
||||
zeroEls: zeroEls,
|
||||
})
|
||||
}
|
||||
slices.SortFunc(rngs, sf)
|
||||
ranges := rngs[len(rngs)/2]
|
||||
results = append(results, ranges)
|
||||
}
|
||||
}
|
||||
slices.SortFunc(results, sf)
|
||||
fmt.Println(results)
|
||||
// 100000 - [{16 512 273 2 1 0} {4 512 341 4 3 0} {2 512 511 8 7 0} {1 512 511 8 7 0}
|
||||
// {8 256 585 3 2 0} {8 512 585 3 2 0} {1 256 1023 9 8 0} {2 256 1023 9 8 0}
|
||||
// {32 256 1057 2 1 0} {32 512 1057 2 1 0} {32 128 1089 3 1 0} {4 256 1365 5 4 0}
|
||||
// {4 128 1369 6 4 0} {2 128 2049 11 9 0} {1 128 2049 11 9 0} {1 64 4157 12 10 0}
|
||||
// {2 64 4159 12 10 0} {16 128 4369 3 2 0} {16 64 4369 3 2 0} {16 256 4369 3 2 0}
|
||||
// {8 64 4681 4 3 0} {8 128 4681 4 3 0} {4 64 5461 6 5 0} {4 32 6389 7 5 0}
|
||||
// {8 32 6505 5 4 17} {16 32 8033 4 3 374} {2 32 8619 13 11 0} {1 32 8621 13 11 0}
|
||||
// {2 16 17837 15 12 0} {1 16 17847 15 12 0} {4 16 21081 8 6 22} {32 64 33825 3 2 1578}
|
||||
// {32 32 33825 3 2 1559} {32 16 33825 3 2 1518} {8 16 35881 5 4 1313} {16 16 66737 4 3 13022}]
|
||||
// 1000000 - [{8 256 11753 5 4 0}]
|
||||
// 1000000 - [{16 128 69905 4 3 0}]
|
||||
// 1000000 - [{32 256 33825 3 2 0}]
|
||||
}
|
223
app/olddiff/hashrange.go
Normal file
223
app/olddiff/hashrange.go
Normal file
|
@ -0,0 +1,223 @@
|
|||
package olddiff
|
||||
|
||||
import (
|
||||
"math"
|
||||
|
||||
"github.com/huandu/skiplist"
|
||||
"github.com/zeebo/blake3"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
type hashRange struct {
|
||||
from, to uint64
|
||||
parent *hashRange
|
||||
isDivided bool
|
||||
elements int
|
||||
level int
|
||||
hash []byte
|
||||
}
|
||||
|
||||
type rangeTuple struct {
|
||||
from, to uint64
|
||||
}
|
||||
|
||||
type hashRanges struct {
|
||||
ranges map[rangeTuple]*hashRange
|
||||
topRange *hashRange
|
||||
sl *skiplist.SkipList
|
||||
dirty map[*hashRange]struct{}
|
||||
divideFactor int
|
||||
compareThreshold int
|
||||
}
|
||||
|
||||
func newHashRanges(divideFactor, compareThreshold int, sl *skiplist.SkipList) *hashRanges {
|
||||
h := &hashRanges{
|
||||
ranges: make(map[rangeTuple]*hashRange),
|
||||
dirty: make(map[*hashRange]struct{}),
|
||||
divideFactor: divideFactor,
|
||||
compareThreshold: compareThreshold,
|
||||
sl: sl,
|
||||
}
|
||||
h.topRange = &hashRange{
|
||||
from: 0,
|
||||
to: math.MaxUint64,
|
||||
isDivided: true,
|
||||
level: 0,
|
||||
}
|
||||
h.ranges[rangeTuple{from: 0, to: math.MaxUint64}] = h.topRange
|
||||
h.makeBottomRanges(h.topRange)
|
||||
return h
|
||||
}
|
||||
|
||||
func (h *hashRanges) hash() []byte {
|
||||
return h.topRange.hash
|
||||
}
|
||||
|
||||
func (h *hashRanges) addElement(elHash uint64) {
|
||||
rng := h.topRange
|
||||
rng.elements++
|
||||
for rng.isDivided {
|
||||
rng = h.getBottomRange(rng, elHash)
|
||||
rng.elements++
|
||||
}
|
||||
h.dirty[rng] = struct{}{}
|
||||
if rng.elements > h.compareThreshold {
|
||||
rng.isDivided = true
|
||||
h.makeBottomRanges(rng)
|
||||
}
|
||||
if rng.parent != nil {
|
||||
if _, ok := h.dirty[rng.parent]; ok {
|
||||
delete(h.dirty, rng.parent)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (h *hashRanges) removeElement(elHash uint64) {
|
||||
rng := h.topRange
|
||||
rng.elements--
|
||||
for rng.isDivided {
|
||||
rng = h.getBottomRange(rng, elHash)
|
||||
rng.elements--
|
||||
}
|
||||
parent := rng.parent
|
||||
if parent.elements <= h.compareThreshold && parent != h.topRange {
|
||||
ranges := genTupleRanges(parent.from, parent.to, h.divideFactor)
|
||||
for _, tuple := range ranges {
|
||||
child := h.ranges[tuple]
|
||||
delete(h.ranges, tuple)
|
||||
delete(h.dirty, child)
|
||||
}
|
||||
parent.isDivided = false
|
||||
h.dirty[parent] = struct{}{}
|
||||
} else {
|
||||
h.dirty[rng] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
func (h *hashRanges) recalculateHashes() {
|
||||
for len(h.dirty) > 0 {
|
||||
var slDirty []*hashRange
|
||||
for rng := range h.dirty {
|
||||
slDirty = append(slDirty, rng)
|
||||
}
|
||||
slices.SortFunc(slDirty, func(a, b *hashRange) int {
|
||||
if a.level < b.level {
|
||||
return -1
|
||||
} else if a.level > b.level {
|
||||
return 1
|
||||
} else {
|
||||
return 0
|
||||
}
|
||||
})
|
||||
for _, rng := range slDirty {
|
||||
if rng.isDivided {
|
||||
rng.hash = h.calcDividedHash(rng)
|
||||
} else {
|
||||
rng.hash, rng.elements = h.calcElementsHash(rng.from, rng.to)
|
||||
}
|
||||
delete(h.dirty, rng)
|
||||
if rng.parent != nil {
|
||||
h.dirty[rng.parent] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (h *hashRanges) getRange(from, to uint64) *hashRange {
|
||||
return h.ranges[rangeTuple{from: from, to: to}]
|
||||
}
|
||||
|
||||
func (h *hashRanges) getBottomRange(rng *hashRange, elHash uint64) *hashRange {
|
||||
df := uint64(h.divideFactor)
|
||||
perRange := (rng.to - rng.from) / df
|
||||
align := ((rng.to-rng.from)%df + 1) % df
|
||||
if align == 0 {
|
||||
perRange++
|
||||
}
|
||||
bucket := (elHash - rng.from) / perRange
|
||||
tuple := rangeTuple{from: rng.from + bucket*perRange, to: rng.from - 1 + (bucket+1)*perRange}
|
||||
if bucket == df-1 {
|
||||
tuple.to += align
|
||||
}
|
||||
return h.ranges[tuple]
|
||||
}
|
||||
|
||||
func (h *hashRanges) makeBottomRanges(rng *hashRange) {
|
||||
ranges := genTupleRanges(rng.from, rng.to, h.divideFactor)
|
||||
for _, tuple := range ranges {
|
||||
newRange := h.makeRange(tuple, rng)
|
||||
h.ranges[tuple] = newRange
|
||||
if newRange.elements > h.compareThreshold {
|
||||
if _, ok := h.dirty[rng]; ok {
|
||||
delete(h.dirty, rng)
|
||||
}
|
||||
h.dirty[newRange] = struct{}{}
|
||||
newRange.isDivided = true
|
||||
h.makeBottomRanges(newRange)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (h *hashRanges) makeRange(tuple rangeTuple, parent *hashRange) *hashRange {
|
||||
newRange := &hashRange{
|
||||
from: tuple.from,
|
||||
to: tuple.to,
|
||||
parent: parent,
|
||||
}
|
||||
hash, els := h.calcElementsHash(tuple.from, tuple.to)
|
||||
newRange.hash = hash
|
||||
newRange.level = parent.level + 1
|
||||
newRange.elements = els
|
||||
return newRange
|
||||
}
|
||||
|
||||
func (h *hashRanges) calcDividedHash(rng *hashRange) (hash []byte) {
|
||||
hasher := hashersPool.Get().(*blake3.Hasher)
|
||||
defer hashersPool.Put(hasher)
|
||||
hasher.Reset()
|
||||
ranges := genTupleRanges(rng.from, rng.to, h.divideFactor)
|
||||
for _, tuple := range ranges {
|
||||
child := h.ranges[tuple]
|
||||
hasher.Write(child.hash)
|
||||
}
|
||||
hash = hasher.Sum(nil)
|
||||
return
|
||||
}
|
||||
|
||||
func genTupleRanges(from, to uint64, divideFactor int) (prepare []rangeTuple) {
|
||||
df := uint64(divideFactor)
|
||||
perRange := (to - from) / df
|
||||
align := ((to-from)%df + 1) % df
|
||||
if align == 0 {
|
||||
perRange++
|
||||
}
|
||||
var j = from
|
||||
for i := 0; i < divideFactor; i++ {
|
||||
if i == divideFactor-1 {
|
||||
perRange += align
|
||||
}
|
||||
prepare = append(prepare, rangeTuple{from: j, to: j + perRange - 1})
|
||||
j += perRange
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (h *hashRanges) calcElementsHash(from, to uint64) (hash []byte, els int) {
|
||||
hasher := hashersPool.Get().(*blake3.Hasher)
|
||||
defer hashersPool.Put(hasher)
|
||||
hasher.Reset()
|
||||
|
||||
el := h.sl.Find(&element{hash: from})
|
||||
for el != nil && el.Key().(*element).hash <= to {
|
||||
elem := el.Key().(*element).Element
|
||||
el = el.Next()
|
||||
|
||||
hasher.WriteString(elem.Id)
|
||||
hasher.WriteString(elem.Head)
|
||||
els++
|
||||
}
|
||||
if els != 0 {
|
||||
hash = hasher.Sum(nil)
|
||||
}
|
||||
return
|
||||
}
|
|
@ -5,7 +5,6 @@
|
|||
//
|
||||
// mockgen -destination mock_aclclient/mock_aclclient.go github.com/anyproto/any-sync/commonspace/acl/aclclient AclJoiningClient,AclSpaceClient
|
||||
//
|
||||
|
||||
// Package mock_aclclient is a generated GoMock package.
|
||||
package mock_aclclient
|
||||
|
||||
|
|
|
@ -5,7 +5,6 @@
|
|||
//
|
||||
// mockgen -destination mock_credentialprovider/mock_credentialprovider.go github.com/anyproto/any-sync/commonspace/credentialprovider CredentialProvider
|
||||
//
|
||||
|
||||
// Package mock_credentialprovider is a generated GoMock package.
|
||||
package mock_credentialprovider
|
||||
|
||||
|
|
|
@ -11,6 +11,7 @@ import (
|
|||
|
||||
"github.com/anyproto/any-sync/commonspace/object/accountdata"
|
||||
"github.com/anyproto/any-sync/commonspace/object/tree/objecttree"
|
||||
"github.com/anyproto/any-sync/commonspace/spacepayloads"
|
||||
"github.com/anyproto/any-sync/commonspace/spacestorage"
|
||||
"github.com/anyproto/any-sync/commonspace/syncstatus"
|
||||
"github.com/anyproto/any-sync/util/crypto"
|
||||
|
@ -47,7 +48,7 @@ func TestSpaceDeleteIdsMarkDeleted(t *testing.T) {
|
|||
totalObjs := 1000
|
||||
|
||||
// creating space
|
||||
sp, err := fx.spaceService.CreateSpace(ctx, SpaceCreatePayload{
|
||||
sp, err := fx.spaceService.CreateSpace(ctx, spacepayloads.SpaceCreatePayload{
|
||||
SigningKey: acc.SignKey,
|
||||
SpaceType: "type",
|
||||
ReadKey: rk,
|
||||
|
@ -140,7 +141,7 @@ func TestSpaceDeleteIds(t *testing.T) {
|
|||
totalObjs := 1000
|
||||
|
||||
// creating space
|
||||
sp, err := fx.spaceService.CreateSpace(ctx, SpaceCreatePayload{
|
||||
sp, err := fx.spaceService.CreateSpace(ctx, spacepayloads.SpaceCreatePayload{
|
||||
SigningKey: acc.SignKey,
|
||||
SpaceType: "type",
|
||||
ReadKey: rk,
|
||||
|
|
|
@ -5,7 +5,6 @@
|
|||
//
|
||||
// mockgen -destination mock_deletionmanager/mock_deletionmanager.go github.com/anyproto/any-sync/commonspace/deletionmanager DeletionManager,Deleter
|
||||
//
|
||||
|
||||
// Package mock_deletionmanager is a generated GoMock package.
|
||||
package mock_deletionmanager
|
||||
|
||||
|
|
|
@ -5,7 +5,6 @@
|
|||
//
|
||||
// mockgen -destination mock_deletionstate/mock_deletionstate.go github.com/anyproto/any-sync/commonspace/deletionstate ObjectDeletionState
|
||||
//
|
||||
|
||||
// Package mock_deletionstate is a generated GoMock package.
|
||||
package mock_deletionstate
|
||||
|
||||
|
|
|
@ -8,6 +8,7 @@ import (
|
|||
"github.com/quic-go/quic-go"
|
||||
|
||||
"github.com/anyproto/any-sync/commonspace/headsync/headstorage"
|
||||
"github.com/anyproto/any-sync/commonspace/object/keyvalue/kvinterfaces"
|
||||
"github.com/anyproto/any-sync/commonspace/object/treesyncer"
|
||||
"github.com/anyproto/any-sync/net/rpc/rpcerr"
|
||||
|
||||
|
@ -36,12 +37,13 @@ const logPeriodSecs = 200
|
|||
|
||||
func newDiffSyncer(hs *headSync) DiffSyncer {
|
||||
return &diffSyncer{
|
||||
diff: hs.diff,
|
||||
diffContainer: hs.diffContainer,
|
||||
spaceId: hs.spaceId,
|
||||
storage: hs.storage,
|
||||
peerManager: hs.peerManager,
|
||||
clientFactory: spacesyncproto.ClientFactoryFunc(spacesyncproto.NewDRPCSpaceSyncClient),
|
||||
credentialProvider: hs.credentialProvider,
|
||||
keyValue: hs.keyValue,
|
||||
log: newSyncLogger(hs.log, logPeriodSecs),
|
||||
deletionState: hs.deletionState,
|
||||
syncAcl: hs.syncAcl,
|
||||
|
@ -51,7 +53,7 @@ func newDiffSyncer(hs *headSync) DiffSyncer {
|
|||
|
||||
type diffSyncer struct {
|
||||
spaceId string
|
||||
diff ldiff.Diff
|
||||
diffContainer ldiff.DiffContainer
|
||||
peerManager peermanager.PeerManager
|
||||
headUpdater *headUpdater
|
||||
treeManager treemanager.TreeManager
|
||||
|
@ -63,6 +65,7 @@ type diffSyncer struct {
|
|||
cancel context.CancelFunc
|
||||
deletionState deletionstate.ObjectDeletionState
|
||||
credentialProvider credentialprovider.CredentialProvider
|
||||
keyValue kvinterfaces.KeyValueService
|
||||
syncAcl syncacl.SyncAcl
|
||||
}
|
||||
|
||||
|
@ -82,7 +85,7 @@ func (d *diffSyncer) OnUpdate(headsUpdate headstorage.HeadsUpdate) {
|
|||
|
||||
func (d *diffSyncer) updateHeads(update headstorage.HeadsUpdate) {
|
||||
if update.DeletedStatus != nil {
|
||||
_ = d.diff.RemoveId(update.Id)
|
||||
_ = d.diffContainer.RemoveId(update.Id)
|
||||
} else {
|
||||
if d.deletionState.Exists(update.Id) {
|
||||
return
|
||||
|
@ -90,13 +93,22 @@ func (d *diffSyncer) updateHeads(update headstorage.HeadsUpdate) {
|
|||
if update.IsDerived != nil && *update.IsDerived && len(update.Heads) == 1 && update.Heads[0] == update.Id {
|
||||
return
|
||||
}
|
||||
d.diff.Set(ldiff.Element{
|
||||
Id: update.Id,
|
||||
Head: concatStrings(update.Heads),
|
||||
})
|
||||
if update.Id == d.keyValue.DefaultStore().Id() {
|
||||
d.diffContainer.NewDiff().Set(ldiff.Element{
|
||||
Id: update.Id,
|
||||
Head: concatStrings(update.Heads),
|
||||
})
|
||||
} else {
|
||||
d.diffContainer.Set(ldiff.Element{
|
||||
Id: update.Id,
|
||||
Head: concatStrings(update.Heads),
|
||||
})
|
||||
}
|
||||
}
|
||||
// probably we should somehow batch the updates
|
||||
err := d.storage.StateStorage().SetHash(d.ctx, d.diff.Hash())
|
||||
oldHash := d.diffContainer.OldDiff().Hash()
|
||||
newHash := d.diffContainer.NewDiff().Hash()
|
||||
err := d.storage.StateStorage().SetHash(d.ctx, oldHash, newHash)
|
||||
if err != nil {
|
||||
d.log.Warn("can't write space hash", zap.Error(err))
|
||||
}
|
||||
|
@ -117,13 +129,13 @@ func (d *diffSyncer) Sync(ctx context.Context) error {
|
|||
d.log.DebugCtx(ctx, "start diffsync", zap.Strings("peerIds", peerIds))
|
||||
for _, p := range peers {
|
||||
if err = d.syncWithPeer(peer.CtxWithPeerAddr(ctx, p.Id()), p); err != nil {
|
||||
if !errors.Is(err, &quic.IdleTimeoutError{}) && !errors.Is(err, context.DeadlineExceeded) {
|
||||
var idleTimeoutErr *quic.IdleTimeoutError
|
||||
if !errors.As(err, &idleTimeoutErr) && !errors.Is(err, context.DeadlineExceeded) {
|
||||
d.log.ErrorCtx(ctx, "can't sync with peer", zap.String("peer", p.Id()), zap.Error(err))
|
||||
}
|
||||
}
|
||||
}
|
||||
d.log.DebugCtx(ctx, "diff done", zap.String("spaceId", d.spaceId), zap.Duration("dur", time.Since(st)))
|
||||
|
||||
d.peerManager.KeepAlive(ctx)
|
||||
return nil
|
||||
}
|
||||
|
@ -150,28 +162,49 @@ func (d *diffSyncer) syncWithPeer(ctx context.Context, p peer.Peer) (err error)
|
|||
syncAclId = d.syncAcl.Id()
|
||||
newIds, changedIds, removedIds []string
|
||||
)
|
||||
|
||||
newIds, changedIds, removedIds, err = d.diff.Diff(ctx, rdiff)
|
||||
storageId := d.keyValue.DefaultStore().Id()
|
||||
needsSync, diff, err := d.diffContainer.DiffTypeCheck(ctx, rdiff)
|
||||
err = rpcerr.Unwrap(err)
|
||||
if err != nil {
|
||||
return d.onDiffError(ctx, p, cl, err)
|
||||
}
|
||||
if needsSync {
|
||||
newIds, changedIds, removedIds, err = diff.Diff(ctx, rdiff)
|
||||
err = rpcerr.Unwrap(err)
|
||||
if err != nil {
|
||||
return d.onDiffError(ctx, p, cl, err)
|
||||
}
|
||||
}
|
||||
totalLen := len(newIds) + len(changedIds) + len(removedIds)
|
||||
// not syncing ids which were removed through settings document
|
||||
missingIds := d.deletionState.Filter(newIds)
|
||||
existingIds := append(d.deletionState.Filter(removedIds), d.deletionState.Filter(changedIds)...)
|
||||
|
||||
prevExistingLen := len(existingIds)
|
||||
var (
|
||||
isStorage = false
|
||||
isAcl = false
|
||||
)
|
||||
existingIds = slice.DiscardFromSlice(existingIds, func(s string) bool {
|
||||
return s == syncAclId
|
||||
if s == storageId {
|
||||
isStorage = true
|
||||
return true
|
||||
}
|
||||
if s == syncAclId {
|
||||
isAcl = true
|
||||
return true
|
||||
}
|
||||
return false
|
||||
})
|
||||
|
||||
// if we removed acl head from the list
|
||||
if len(existingIds) < prevExistingLen {
|
||||
if isAcl {
|
||||
if syncErr := d.syncAcl.SyncWithPeer(ctx, p); syncErr != nil {
|
||||
log.Warn("failed to send acl sync message to peer", zap.String("aclId", syncAclId))
|
||||
}
|
||||
}
|
||||
if isStorage {
|
||||
if err = d.keyValue.SyncWithPeer(p); err != nil {
|
||||
log.Warn("failed to send storage sync message to peer", zap.String("storageId", storageId))
|
||||
}
|
||||
}
|
||||
|
||||
// treeSyncer should not get acl id, that's why we filter existing ids before
|
||||
err = d.treeSyncer.SyncAll(ctx, p, existingIds, missingIds)
|
||||
|
|
|
@ -80,6 +80,7 @@ func TestDiffSyncer(t *testing.T) {
|
|||
fx.peerManagerMock.EXPECT().
|
||||
GetResponsiblePeers(gomock.Any()).
|
||||
Return([]peer.Peer{mPeer}, nil)
|
||||
fx.diffContainerMock.EXPECT().DiffTypeCheck(gomock.Any(), gomock.Any()).Return(true, fx.diffMock, nil)
|
||||
fx.diffMock.EXPECT().
|
||||
Diff(gomock.Any(), gomock.Eq(NewRemoteDiff(fx.spaceState.SpaceId, fx.clientMock))).
|
||||
Return([]string{"new"}, []string{"changed"}, nil, nil)
|
||||
|
@ -103,6 +104,7 @@ func TestDiffSyncer(t *testing.T) {
|
|||
fx.peerManagerMock.EXPECT().
|
||||
GetResponsiblePeers(gomock.Any()).
|
||||
Return([]peer.Peer{mPeer}, nil)
|
||||
fx.diffContainerMock.EXPECT().DiffTypeCheck(gomock.Any(), gomock.Any()).Return(true, fx.diffMock, nil)
|
||||
fx.diffMock.EXPECT().
|
||||
Diff(gomock.Any(), gomock.Eq(remDiff)).
|
||||
Return([]string{"new"}, []string{"changed"}, nil, nil)
|
||||
|
@ -116,6 +118,31 @@ func TestDiffSyncer(t *testing.T) {
|
|||
require.NoError(t, fx.diffSyncer.Sync(ctx))
|
||||
})
|
||||
|
||||
t.Run("diff syncer sync, store changed", func(t *testing.T) {
|
||||
fx := newHeadSyncFixture(t)
|
||||
fx.initDiffSyncer(t)
|
||||
defer fx.stop()
|
||||
mPeer := rpctest.MockPeer{}
|
||||
remDiff := NewRemoteDiff(fx.spaceState.SpaceId, fx.clientMock)
|
||||
fx.treeSyncerMock.EXPECT().ShouldSync(gomock.Any()).Return(true)
|
||||
fx.aclMock.EXPECT().Id().AnyTimes().Return("aclId")
|
||||
fx.peerManagerMock.EXPECT().
|
||||
GetResponsiblePeers(gomock.Any()).
|
||||
Return([]peer.Peer{mPeer}, nil)
|
||||
fx.diffContainerMock.EXPECT().DiffTypeCheck(gomock.Any(), gomock.Any()).Return(true, fx.diffMock, nil)
|
||||
fx.diffMock.EXPECT().
|
||||
Diff(gomock.Any(), gomock.Eq(remDiff)).
|
||||
Return([]string{"new"}, []string{"changed"}, nil, nil)
|
||||
fx.deletionStateMock.EXPECT().Filter([]string{"new"}).Return([]string{"new"}).Times(1)
|
||||
fx.deletionStateMock.EXPECT().Filter([]string{"changed"}).Return([]string{"changed", "store"}).Times(1)
|
||||
fx.deletionStateMock.EXPECT().Filter(nil).Return(nil).Times(1)
|
||||
fx.treeSyncerMock.EXPECT().SyncAll(gomock.Any(), mPeer, []string{"changed"}, []string{"new"}).Return(nil)
|
||||
fx.kvMock.EXPECT().SyncWithPeer(mPeer).Return(nil)
|
||||
fx.peerManagerMock.EXPECT().KeepAlive(gomock.Any())
|
||||
|
||||
require.NoError(t, fx.diffSyncer.Sync(ctx))
|
||||
})
|
||||
|
||||
t.Run("diff syncer sync conf error", func(t *testing.T) {
|
||||
fx := newHeadSyncFixture(t)
|
||||
fx.initDiffSyncer(t)
|
||||
|
@ -133,9 +160,12 @@ func TestDiffSyncer(t *testing.T) {
|
|||
fx.initDiffSyncer(t)
|
||||
defer fx.stop()
|
||||
deletedId := "id"
|
||||
fx.diffMock.EXPECT().RemoveId(deletedId).Return(nil)
|
||||
fx.diffMock.EXPECT().Hash().Return("hash")
|
||||
fx.stateStorage.EXPECT().SetHash(gomock.Any(), "hash").Return(nil)
|
||||
fx.diffContainerMock.EXPECT().RemoveId(deletedId).Return(nil)
|
||||
fx.diffContainerMock.EXPECT().NewDiff().Return(fx.diffMock)
|
||||
fx.diffContainerMock.EXPECT().OldDiff().Return(fx.diffMock)
|
||||
fx.diffMock.EXPECT().Hash().AnyTimes().Return("hash")
|
||||
fx.stateStorage.EXPECT().SetHash(gomock.Any(), "hash", "hash").Return(nil)
|
||||
|
||||
upd := headstorage.DeletedStatusDeleted
|
||||
fx.diffSyncer.updateHeads(headstorage.HeadsUpdate{
|
||||
Id: "id",
|
||||
|
@ -150,11 +180,14 @@ func TestDiffSyncer(t *testing.T) {
|
|||
updatedId := "id"
|
||||
fx.diffMock.EXPECT().Hash().Return("hash")
|
||||
fx.deletionStateMock.EXPECT().Exists(updatedId).Return(false)
|
||||
fx.diffMock.EXPECT().Set(ldiff.Element{
|
||||
fx.diffContainerMock.EXPECT().Set(ldiff.Element{
|
||||
Id: updatedId,
|
||||
Head: "head",
|
||||
})
|
||||
fx.stateStorage.EXPECT().SetHash(gomock.Any(), "hash").Return(nil)
|
||||
fx.diffContainerMock.EXPECT().NewDiff().Return(fx.diffMock)
|
||||
fx.diffContainerMock.EXPECT().OldDiff().Return(fx.diffMock)
|
||||
fx.diffMock.EXPECT().Hash().AnyTimes().Return("hash")
|
||||
fx.stateStorage.EXPECT().SetHash(gomock.Any(), "hash", "hash").Return(nil)
|
||||
fx.diffSyncer.updateHeads(headstorage.HeadsUpdate{
|
||||
Id: "id",
|
||||
Heads: []string{"head"},
|
||||
|
@ -180,6 +213,7 @@ func TestDiffSyncer(t *testing.T) {
|
|||
fx.peerManagerMock.EXPECT().
|
||||
GetResponsiblePeers(gomock.Any()).
|
||||
Return([]peer.Peer{rpctest.MockPeer{}}, nil)
|
||||
fx.diffContainerMock.EXPECT().DiffTypeCheck(gomock.Any(), gomock.Any()).Return(true, fx.diffMock, nil)
|
||||
fx.diffMock.EXPECT().
|
||||
Diff(gomock.Any(), gomock.Eq(remDiff)).
|
||||
Return(nil, nil, nil, spacesyncproto.ErrSpaceMissing)
|
||||
|
@ -219,6 +253,8 @@ func TestDiffSyncer(t *testing.T) {
|
|||
fx.peerManagerMock.EXPECT().
|
||||
GetResponsiblePeers(gomock.Any()).
|
||||
Return([]peer.Peer{rpctest.MockPeer{}}, nil)
|
||||
|
||||
fx.diffContainerMock.EXPECT().DiffTypeCheck(gomock.Any(), gomock.Any()).Return(true, fx.diffMock, nil)
|
||||
fx.diffMock.EXPECT().
|
||||
Diff(gomock.Any(), gomock.Eq(remDiff)).
|
||||
Return(nil, nil, nil, spacesyncproto.ErrUnexpected)
|
||||
|
@ -232,15 +268,12 @@ func TestDiffSyncer(t *testing.T) {
|
|||
fx.initDiffSyncer(t)
|
||||
defer fx.stop()
|
||||
mPeer := rpctest.MockPeer{}
|
||||
remDiff := NewRemoteDiff(fx.spaceState.SpaceId, fx.clientMock)
|
||||
fx.treeSyncerMock.EXPECT().ShouldSync(gomock.Any()).Return(true)
|
||||
fx.aclMock.EXPECT().Id().AnyTimes().Return("aclId")
|
||||
fx.peerManagerMock.EXPECT().
|
||||
GetResponsiblePeers(gomock.Any()).
|
||||
Return([]peer.Peer{mPeer}, nil)
|
||||
fx.diffMock.EXPECT().
|
||||
Diff(gomock.Any(), gomock.Eq(remDiff)).
|
||||
Return(nil, nil, nil, spacesyncproto.ErrSpaceIsDeleted)
|
||||
fx.diffContainerMock.EXPECT().DiffTypeCheck(gomock.Any(), gomock.Any()).Return(true, fx.diffMock, spacesyncproto.ErrSpaceIsDeleted)
|
||||
fx.peerManagerMock.EXPECT().KeepAlive(gomock.Any())
|
||||
|
||||
require.NoError(t, fx.diffSyncer.Sync(ctx))
|
||||
|
|
|
@@ -5,7 +5,6 @@
//
// mockgen -destination mock_headstorage/mock_headstorage.go github.com/anyproto/any-sync/commonspace/headsync/headstorage HeadStorage
//
// Package mock_headstorage is a generated GoMock package.
package mock_headstorage

@@ -10,11 +10,13 @@ import (
    "github.com/anyproto/any-sync/app"
    "github.com/anyproto/any-sync/app/ldiff"
    "github.com/anyproto/any-sync/app/logger"
    "github.com/anyproto/any-sync/app/olddiff"
    "github.com/anyproto/any-sync/commonspace/config"
    "github.com/anyproto/any-sync/commonspace/credentialprovider"
    "github.com/anyproto/any-sync/commonspace/deletionstate"
    "github.com/anyproto/any-sync/commonspace/headsync/headstorage"
    "github.com/anyproto/any-sync/commonspace/object/acl/syncacl"
    "github.com/anyproto/any-sync/commonspace/object/keyvalue/kvinterfaces"
    "github.com/anyproto/any-sync/commonspace/object/treesyncer"
    "github.com/anyproto/any-sync/commonspace/peermanager"
    "github.com/anyproto/any-sync/commonspace/spacestate"
@@ -37,7 +39,6 @@ type TreeHeads struct {
type HeadSync interface {
    app.ComponentRunnable
    ExternalIds() []string
    DebugAllHeads() (res []TreeHeads)
    AllIds() []string
    HandleRangeRequest(ctx context.Context, req *spacesyncproto.HeadSyncRequest) (resp *spacesyncproto.HeadSyncResponse, err error)
}
@@ -49,7 +50,7 @@ type headSync struct {
    periodicSync  periodicsync.PeriodicSync
    storage       spacestorage.SpaceStorage
    diff          ldiff.Diff
    diffContainer ldiff.DiffContainer
    log           logger.CtxLogger
    syncer        DiffSyncer
    configuration nodeconf.NodeConf
@@ -58,6 +59,7 @@ type headSync struct {
    credentialProvider credentialprovider.CredentialProvider
    deletionState      deletionstate.ObjectDeletionState
    syncAcl            syncacl.SyncAcl
    keyValue           kvinterfaces.KeyValueService
}

func New() HeadSync {
@@ -75,11 +77,12 @@ func (h *headSync) Init(a *app.App) (err error) {
    h.configuration = a.MustComponent(nodeconf.CName).(nodeconf.NodeConf)
    h.log = log.With(zap.String("spaceId", h.spaceId))
    h.storage = a.MustComponent(spacestorage.CName).(spacestorage.SpaceStorage)
    h.diff = ldiff.New(32, 256)
    h.diffContainer = ldiff.NewDiffContainer(ldiff.New(32, 256), olddiff.New(32, 256))
    h.peerManager = a.MustComponent(peermanager.CName).(peermanager.PeerManager)
    h.credentialProvider = a.MustComponent(credentialprovider.CName).(credentialprovider.CredentialProvider)
    h.treeSyncer = a.MustComponent(treesyncer.CName).(treesyncer.TreeSyncer)
    h.deletionState = a.MustComponent(deletionstate.CName).(deletionstate.ObjectDeletionState)
    h.keyValue = a.MustComponent(kvinterfaces.CName).(kvinterfaces.KeyValueService)
    h.syncer = createDiffSyncer(h)
    sync := func(ctx context.Context) (err error) {
        return h.syncer.Sync(ctx)
@@ -103,39 +106,26 @@ func (h *headSync) Run(ctx context.Context) (err error) {
}

func (h *headSync) HandleRangeRequest(ctx context.Context, req *spacesyncproto.HeadSyncRequest) (resp *spacesyncproto.HeadSyncResponse, err error) {
    resp, err = HandleRangeRequest(ctx, h.diff, req)
    if err != nil {
        return
    if req.DiffType == spacesyncproto.DiffType_V2 {
        return HandleRangeRequest(ctx, h.diffContainer.NewDiff(), req)
    } else {
        return HandleRangeRequest(ctx, h.diffContainer.OldDiff(), req)
    }
    // this is done to fix the problem with compatibility with old clients
    resp.DiffType = spacesyncproto.DiffType_Precalculated
    return
}

func (h *headSync) AllIds() []string {
    return h.diff.Ids()
    return h.diffContainer.NewDiff().Ids()
}

func (h *headSync) ExternalIds() []string {
    settingsId := h.storage.StateStorage().SettingsId()
    aclId := h.syncAcl.Id()
    keyValueId := h.keyValue.DefaultStore().Id()
    return slice.DiscardFromSlice(h.AllIds(), func(id string) bool {
        return id == settingsId || id == aclId
        return id == settingsId || id == aclId || id == keyValueId
    })
}

func (h *headSync) DebugAllHeads() (res []TreeHeads) {
    els := h.diff.Elements()
    for _, el := range els {
        idHead := TreeHeads{
            Id:    el.Id,
            Heads: splitString(el.Head),
        }
        res = append(res, idHead)
    }
    return
}

func (h *headSync) Close(ctx context.Context) (err error) {
    h.syncer.Close()
    h.periodicSync.Close()
@@ -144,14 +134,23 @@ func (h *headSync) Close(ctx context.Context) (err error) {

func (h *headSync) fillDiff(ctx context.Context) error {
    var els = make([]ldiff.Element, 0, 100)
    var aclOrStorage []ldiff.Element
    err := h.storage.HeadStorage().IterateEntries(ctx, headstorage.IterOpts{}, func(entry headstorage.HeadsEntry) (bool, error) {
        if entry.IsDerived && entry.Heads[0] == entry.Id {
            return true, nil
        }
        els = append(els, ldiff.Element{
            Id:   entry.Id,
            Head: concatStrings(entry.Heads),
        })
        if entry.CommonSnapshot != "" {
            els = append(els, ldiff.Element{
                Id:   entry.Id,
                Head: concatStrings(entry.Heads),
            })
        } else {
            // this whole stuff is done to prevent storage hash from being set to old diff
            aclOrStorage = append(aclOrStorage, ldiff.Element{
                Id:   entry.Id,
                Head: concatStrings(entry.Heads),
            })
        }
        return true, nil
    })
    if err != nil {
@@ -162,8 +161,12 @@ func (h *headSync) fillDiff(ctx context.Context) error {
        Head: h.syncAcl.Head().Id,
    })
    log.Debug("setting acl", zap.String("aclId", h.syncAcl.Id()), zap.String("headId", h.syncAcl.Head().Id))
    h.diff.Set(els...)
    if err := h.storage.StateStorage().SetHash(ctx, h.diff.Hash()); err != nil {
    h.diffContainer.Set(els...)
    // acl will be set twice to the diff but it doesn't matter
    h.diffContainer.NewDiff().Set(aclOrStorage...)
    oldHash := h.diffContainer.OldDiff().Hash()
    newHash := h.diffContainer.NewDiff().Hash()
    if err := h.storage.StateStorage().SetHash(ctx, oldHash, newHash); err != nil {
        h.log.Error("can't write space hash", zap.Error(err))
        return err
    }

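For readers following the diff: the container introduced here keeps the legacy diff and the new V2 diff side by side, so a space can answer HeadSync requests from both old and new clients while their hashes diverge. The snippet below is a minimal sketch of that idea under the ldiff interfaces visible in this change set; it is not the repository's actual DiffContainer implementation.

```go
// Sketch only: a reduced container that mirrors writes into both diffs.
type diffContainer struct {
    newDiff ldiff.Diff // V2 hashing, reported as DiffType_V2
    oldDiff ldiff.Diff // legacy hashing, kept for old clients
}

func (c *diffContainer) NewDiff() ldiff.Diff { return c.newDiff }
func (c *diffContainer) OldDiff() ldiff.Diff { return c.oldDiff }

// Set feeds an element to both diffs so either hash can answer a peer,
// whichever DiffType it asks for.
func (c *diffContainer) Set(els ...ldiff.Element) {
    c.newDiff.Set(els...)
    c.oldDiff.Set(els...)
}

func (c *diffContainer) RemoveId(id string) error {
    if err := c.oldDiff.RemoveId(id); err != nil {
        return err
    }
    return c.newDiff.RemoveId(id)
}
```
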
|
|
@ -22,6 +22,9 @@ import (
|
|||
"github.com/anyproto/any-sync/commonspace/object/acl/list"
|
||||
"github.com/anyproto/any-sync/commonspace/object/acl/syncacl"
|
||||
"github.com/anyproto/any-sync/commonspace/object/acl/syncacl/mock_syncacl"
|
||||
"github.com/anyproto/any-sync/commonspace/object/keyvalue/keyvaluestorage/mock_keyvaluestorage"
|
||||
"github.com/anyproto/any-sync/commonspace/object/keyvalue/kvinterfaces"
|
||||
"github.com/anyproto/any-sync/commonspace/object/keyvalue/kvinterfaces/mock_kvinterfaces"
|
||||
"github.com/anyproto/any-sync/commonspace/object/treemanager"
|
||||
"github.com/anyproto/any-sync/commonspace/object/treemanager/mock_treemanager"
|
||||
"github.com/anyproto/any-sync/commonspace/object/treesyncer"
|
||||
|
@ -34,6 +37,7 @@ import (
|
|||
"github.com/anyproto/any-sync/commonspace/spacesyncproto/mock_spacesyncproto"
|
||||
"github.com/anyproto/any-sync/nodeconf"
|
||||
"github.com/anyproto/any-sync/nodeconf/mock_nodeconf"
|
||||
"github.com/anyproto/any-sync/testutil/anymock"
|
||||
)
|
||||
|
||||
type mockConfig struct {
|
||||
|
@ -57,6 +61,8 @@ type headSyncFixture struct {
|
|||
app *app.App
|
||||
|
||||
configurationMock *mock_nodeconf.MockService
|
||||
kvMock *mock_kvinterfaces.MockKeyValueService
|
||||
defStoreMock *mock_keyvaluestorage.MockStorage
|
||||
storageMock *mock_spacestorage.MockSpaceStorage
|
||||
peerManagerMock *mock_peermanager.MockPeerManager
|
||||
credentialProviderMock *mock_credentialprovider.MockCredentialProvider
|
||||
|
@ -65,6 +71,7 @@ type headSyncFixture struct {
|
|||
diffSyncerMock *mock_headsync.MockDiffSyncer
|
||||
treeSyncerMock *mock_treesyncer.MockTreeSyncer
|
||||
diffMock *mock_ldiff.MockDiff
|
||||
diffContainerMock *mock_ldiff.MockDiffContainer
|
||||
clientMock *mock_spacesyncproto.MockDRPCSpaceSyncClient
|
||||
aclMock *mock_syncacl.MockSyncAcl
|
||||
headStorage *mock_headstorage.MockHeadStorage
|
||||
|
@ -91,9 +98,15 @@ func newHeadSyncFixture(t *testing.T) *headSyncFixture {
|
|||
deletionStateMock := mock_deletionstate.NewMockObjectDeletionState(ctrl)
|
||||
deletionStateMock.EXPECT().Name().AnyTimes().Return(deletionstate.CName)
|
||||
diffSyncerMock := mock_headsync.NewMockDiffSyncer(ctrl)
|
||||
diffContainerMock := mock_ldiff.NewMockDiffContainer(ctrl)
|
||||
treeSyncerMock := mock_treesyncer.NewMockTreeSyncer(ctrl)
|
||||
headStorage := mock_headstorage.NewMockHeadStorage(ctrl)
|
||||
stateStorage := mock_statestorage.NewMockStateStorage(ctrl)
|
||||
kvMock := mock_kvinterfaces.NewMockKeyValueService(ctrl)
|
||||
anymock.ExpectComp(kvMock.EXPECT(), kvinterfaces.CName)
|
||||
defStore := mock_keyvaluestorage.NewMockStorage(ctrl)
|
||||
kvMock.EXPECT().DefaultStore().Return(defStore).AnyTimes()
|
||||
defStore.EXPECT().Id().Return("store").AnyTimes()
|
||||
storageMock.EXPECT().HeadStorage().AnyTimes().Return(headStorage)
|
||||
storageMock.EXPECT().StateStorage().AnyTimes().Return(stateStorage)
|
||||
treeSyncerMock.EXPECT().Name().AnyTimes().Return(treesyncer.CName)
|
||||
|
@ -106,6 +119,7 @@ func newHeadSyncFixture(t *testing.T) *headSyncFixture {
|
|||
a := &app.App{}
|
||||
a.Register(spaceState).
|
||||
Register(aclMock).
|
||||
Register(kvMock).
|
||||
Register(mockConfig{}).
|
||||
Register(configurationMock).
|
||||
Register(storageMock).
|
||||
|
@ -119,8 +133,11 @@ func newHeadSyncFixture(t *testing.T) *headSyncFixture {
|
|||
spaceState: spaceState,
|
||||
ctrl: ctrl,
|
||||
app: a,
|
||||
kvMock: kvMock,
|
||||
defStoreMock: defStore,
|
||||
configurationMock: configurationMock,
|
||||
storageMock: storageMock,
|
||||
diffContainerMock: diffContainerMock,
|
||||
peerManagerMock: peerManagerMock,
|
||||
credentialProviderMock: credentialProviderMock,
|
||||
treeManagerMock: treeManagerMock,
|
||||
|
@ -144,7 +161,7 @@ func (fx *headSyncFixture) init(t *testing.T) {
|
|||
fx.headStorage.EXPECT().AddObserver(gomock.Any())
|
||||
err := fx.headSync.Init(fx.app)
|
||||
require.NoError(t, err)
|
||||
fx.headSync.diff = fx.diffMock
|
||||
fx.headSync.diffContainer = fx.diffContainerMock
|
||||
}
|
||||
|
||||
func (fx *headSyncFixture) stop() {
|
||||
|
@ -161,14 +178,16 @@ func TestHeadSync(t *testing.T) {
|
|||
|
||||
headEntries := []headstorage.HeadsEntry{
|
||||
{
|
||||
Id: "id1",
|
||||
Heads: []string{"h1", "h2"},
|
||||
IsDerived: false,
|
||||
Id: "id1",
|
||||
Heads: []string{"h1", "h2"},
|
||||
CommonSnapshot: "id1",
|
||||
IsDerived: false,
|
||||
},
|
||||
{
|
||||
Id: "id2",
|
||||
Heads: []string{"h3", "h4"},
|
||||
IsDerived: false,
|
||||
Id: "id2",
|
||||
Heads: []string{"h3", "h4"},
|
||||
CommonSnapshot: "id2",
|
||||
IsDerived: false,
|
||||
},
|
||||
}
|
||||
fx.headStorage.EXPECT().IterateEntries(gomock.Any(), gomock.Any(), gomock.Any()).
|
||||
|
@ -183,7 +202,7 @@ func TestHeadSync(t *testing.T) {
|
|||
fx.aclMock.EXPECT().Id().AnyTimes().Return("aclId")
|
||||
fx.aclMock.EXPECT().Head().AnyTimes().Return(&list.AclRecord{Id: "headId"})
|
||||
|
||||
fx.diffMock.EXPECT().Set(ldiff.Element{
|
||||
fx.diffContainerMock.EXPECT().Set(ldiff.Element{
|
||||
Id: "id1",
|
||||
Head: "h1h2",
|
||||
}, ldiff.Element{
|
||||
|
@ -193,8 +212,11 @@ func TestHeadSync(t *testing.T) {
|
|||
Id: "aclId",
|
||||
Head: "headId",
|
||||
})
|
||||
fx.diffMock.EXPECT().Hash().Return("hash")
|
||||
fx.stateStorage.EXPECT().SetHash(gomock.Any(), "hash").Return(nil)
|
||||
fx.diffMock.EXPECT().Set([]ldiff.Element{})
|
||||
fx.diffContainerMock.EXPECT().NewDiff().AnyTimes().Return(fx.diffMock)
|
||||
fx.diffContainerMock.EXPECT().OldDiff().AnyTimes().Return(fx.diffMock)
|
||||
fx.diffMock.EXPECT().Hash().AnyTimes().Return("hash")
|
||||
fx.stateStorage.EXPECT().SetHash(gomock.Any(), "hash", "hash").Return(nil)
|
||||
fx.diffSyncerMock.EXPECT().Sync(gomock.Any()).Return(nil)
|
||||
fx.diffSyncerMock.EXPECT().Close()
|
||||
err := fx.headSync.Run(ctx)
|
||||
|
@ -210,14 +232,16 @@ func TestHeadSync(t *testing.T) {
|
|||
|
||||
headEntries := []headstorage.HeadsEntry{
|
||||
{
|
||||
Id: "id1",
|
||||
Heads: []string{"id1"},
|
||||
IsDerived: true,
|
||||
Id: "id1",
|
||||
Heads: []string{"id1"},
|
||||
CommonSnapshot: "id1",
|
||||
IsDerived: true,
|
||||
},
|
||||
{
|
||||
Id: "id2",
|
||||
Heads: []string{"h3", "h4"},
|
||||
IsDerived: false,
|
||||
Id: "id2",
|
||||
Heads: []string{"h3", "h4"},
|
||||
CommonSnapshot: "id2",
|
||||
IsDerived: false,
|
||||
},
|
||||
}
|
||||
fx.headStorage.EXPECT().IterateEntries(gomock.Any(), gomock.Any(), gomock.Any()).
|
||||
|
@ -232,15 +256,18 @@ func TestHeadSync(t *testing.T) {
|
|||
fx.aclMock.EXPECT().Id().AnyTimes().Return("aclId")
|
||||
fx.aclMock.EXPECT().Head().AnyTimes().Return(&list.AclRecord{Id: "headId"})
|
||||
|
||||
fx.diffMock.EXPECT().Set(ldiff.Element{
|
||||
fx.diffContainerMock.EXPECT().Set(ldiff.Element{
|
||||
Id: "id2",
|
||||
Head: "h3h4",
|
||||
}, ldiff.Element{
|
||||
Id: "aclId",
|
||||
Head: "headId",
|
||||
})
|
||||
fx.diffMock.EXPECT().Hash().Return("hash")
|
||||
fx.stateStorage.EXPECT().SetHash(gomock.Any(), "hash").Return(nil)
|
||||
fx.diffMock.EXPECT().Set([]ldiff.Element{})
|
||||
fx.diffContainerMock.EXPECT().NewDiff().AnyTimes().Return(fx.diffMock)
|
||||
fx.diffContainerMock.EXPECT().OldDiff().AnyTimes().Return(fx.diffMock)
|
||||
fx.diffMock.EXPECT().Hash().AnyTimes().Return("hash")
|
||||
fx.stateStorage.EXPECT().SetHash(gomock.Any(), "hash", "hash").Return(nil)
|
||||
fx.diffSyncerMock.EXPECT().Sync(gomock.Any()).Return(nil)
|
||||
fx.diffSyncerMock.EXPECT().Close()
|
||||
err := fx.headSync.Run(ctx)
|
||||
|
|
|
@@ -5,7 +5,6 @@
//
// mockgen -destination mock_headsync/mock_headsync.go github.com/anyproto/any-sync/commonspace/headsync DiffSyncer
//
// Package mock_headsync is a generated GoMock package.
package mock_headsync

@@ -1,7 +1,10 @@
package headsync

import (
    "bytes"
    "context"
    "encoding/hex"
    "math"

    "github.com/anyproto/any-sync/app/ldiff"
    "github.com/anyproto/any-sync/commonspace/spacesyncproto"
@@ -12,6 +15,7 @@ type Client interface {
}

type RemoteDiff interface {
    ldiff.RemoteTypeChecker
    ldiff.Remote
}

@@ -68,6 +72,39 @@ func (r *remote) Ranges(ctx context.Context, ranges []ldiff.Range, resBuf []ldif
    return
}

func (r *remote) DiffTypeCheck(ctx context.Context, diffContainer ldiff.DiffContainer) (needsSync bool, diff ldiff.Diff, err error) {
    req := &spacesyncproto.HeadSyncRequest{
        SpaceId:  r.spaceId,
        DiffType: spacesyncproto.DiffType_V2,
        Ranges:   []*spacesyncproto.HeadSyncRange{{From: 0, To: math.MaxUint64}},
    }
    resp, err := r.client.HeadSync(ctx, req)
    if err != nil {
        return
    }
    needsSync = true
    checkHash := func(diff ldiff.Diff) (bool, error) {
        hashB, err := hex.DecodeString(diff.Hash())
        if err != nil {
            return false, err
        }
        if len(resp.Results) != 0 && bytes.Equal(hashB, resp.Results[0].Hash) {
            return false, nil
        }
        return true, nil
    }
    r.diffType = resp.DiffType
    switch resp.DiffType {
    case spacesyncproto.DiffType_V2:
        diff = diffContainer.NewDiff()
        needsSync, err = checkHash(diff)
    default:
        diff = diffContainer.OldDiff()
        needsSync, err = checkHash(diff)
    }
    return
}

func HandleRangeRequest(ctx context.Context, d ldiff.Diff, req *spacesyncproto.HeadSyncRequest) (resp *spacesyncproto.HeadSyncResponse, err error) {
    ranges := make([]ldiff.Range, 0, len(req.Ranges))
    // basically we gather data applicable for both diffs
@@ -104,5 +141,6 @@ func HandleRangeRequest(ctx context.Context, d ldiff.Diff, req *spacesyncproto.H
            Count: uint32(rangeRes.Count),
        })
    }
    resp.DiffType = d.DiffType()
    return
}

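DiffTypeCheck lets the syncing side learn which diff algorithm the remote speaks and whether the top-level hashes already match before paying for a full range walk. Below is a hedged sketch of how a caller could wire it up; the diff syncer in this change does something similar, but the function and its names here are illustrative only.

```go
// Illustrative only: pick the diff the peer understands and skip the
// comparison entirely when the total hashes already agree.
func compareWithPeer(ctx context.Context, container ldiff.DiffContainer, rd RemoteDiff) (newIds, changedIds, removedIds []string, err error) {
    needsSync, dl, err := rd.DiffTypeCheck(ctx, container)
    if err != nil || !needsSync {
        return // transport error, or nothing to exchange
    }
    return dl.Diff(ctx, rd)
}
```
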
|
|
|
@ -2,6 +2,7 @@ package headsync
|
|||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
|
@ -9,46 +10,88 @@ import (
|
|||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/anyproto/any-sync/app/ldiff"
|
||||
"github.com/anyproto/any-sync/app/olddiff"
|
||||
"github.com/anyproto/any-sync/commonspace/spacesyncproto"
|
||||
)
|
||||
|
||||
func TestRemote(t *testing.T) {
|
||||
contLocal := ldiff.New(32, 256)
|
||||
contRemote := ldiff.New(32, 256)
|
||||
func benchmarkDifferentDiffs(t *testing.T, diffFactory func() ldiff.Diff, headLength int) {
|
||||
moduloValues := []int{1, 10, 100, 1000, 10000, 100000}
|
||||
totalElements := 100000
|
||||
|
||||
test := func(t *testing.T, ldLocal, ldRemote ldiff.Diff) {
|
||||
var (
|
||||
localEls []ldiff.Element
|
||||
remoteEls []ldiff.Element
|
||||
)
|
||||
for _, modVal := range moduloValues {
|
||||
t.Run(fmt.Sprintf("New_%d", totalElements/modVal), func(t *testing.T) {
|
||||
// Create a new diff instance for each test using the factory
|
||||
contLocal := diffFactory()
|
||||
contRemote := diffFactory()
|
||||
remClient := &mockClient{t: t, l: contRemote}
|
||||
|
||||
for i := 0; i < 100000; i++ {
|
||||
el := ldiff.Element{
|
||||
Id: fmt.Sprint(i),
|
||||
Head: fmt.Sprint(i),
|
||||
var (
|
||||
localEls []ldiff.Element
|
||||
remoteEls []ldiff.Element
|
||||
)
|
||||
|
||||
buf := make([]byte, headLength)
|
||||
_, _ = rand.Read(buf)
|
||||
|
||||
for i := 0; i < totalElements; i++ {
|
||||
el := ldiff.Element{
|
||||
Id: fmt.Sprint(i),
|
||||
Head: string(buf),
|
||||
}
|
||||
remoteEls = append(remoteEls, el)
|
||||
if i%modVal != 0 {
|
||||
localEls = append(localEls, el)
|
||||
}
|
||||
}
|
||||
remoteEls = append(remoteEls, el)
|
||||
if i%100 == 0 {
|
||||
localEls = append(localEls, el)
|
||||
}
|
||||
}
|
||||
ldLocal.Set(localEls...)
|
||||
ldRemote.Set(remoteEls...)
|
||||
|
||||
rd := NewRemoteDiff("1", &mockClient{l: ldRemote})
|
||||
newIds, changedIds, removedIds, err := ldLocal.Diff(context.Background(), rd)
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, newIds, 99000)
|
||||
assert.Len(t, changedIds, 0)
|
||||
assert.Len(t, removedIds, 0)
|
||||
contLocal.Set(localEls...)
|
||||
remClient.l.Set(remoteEls...)
|
||||
|
||||
rd := NewRemoteDiff("1", remClient)
|
||||
newIds, changedIds, removedIds, err := contLocal.Diff(context.Background(), rd)
|
||||
require.NoError(t, err)
|
||||
|
||||
expectedNewCount := totalElements / modVal
|
||||
assert.Len(t, newIds, expectedNewCount)
|
||||
assert.Len(t, changedIds, 0)
|
||||
assert.Len(t, removedIds, 0)
|
||||
|
||||
fmt.Printf("New count %d: total bytes sent: %d, %d\n", expectedNewCount, remClient.totalInSent, remClient.totalOutSent)
|
||||
})
|
||||
}
|
||||
test(t, contLocal, contRemote)
|
||||
}
|
||||
|
||||
func TestBenchRemoteWithDifferentCounts(t *testing.T) {
|
||||
t.Run("StandardLdiff", func(t *testing.T) {
|
||||
benchmarkDifferentDiffs(t, func() ldiff.Diff {
|
||||
return ldiff.New(32, 256)
|
||||
}, 32)
|
||||
})
|
||||
//old has higher head lengths because of hashes
|
||||
t.Run("OldLdiff", func(t *testing.T) {
|
||||
benchmarkDifferentDiffs(t, func() ldiff.Diff {
|
||||
return olddiff.New(32, 256)
|
||||
}, 100)
|
||||
})
|
||||
}
|
||||
|
||||
type mockClient struct {
|
||||
l ldiff.Diff
|
||||
l ldiff.Diff
|
||||
totalInSent int
|
||||
totalOutSent int
|
||||
t *testing.T
|
||||
}
|
||||
|
||||
func (m *mockClient) HeadSync(ctx context.Context, in *spacesyncproto.HeadSyncRequest) (*spacesyncproto.HeadSyncResponse, error) {
|
||||
return HandleRangeRequest(ctx, m.l, in)
|
||||
res, err := in.Marshal()
|
||||
require.NoError(m.t, err)
|
||||
m.totalInSent += len(res)
|
||||
resp, err := HandleRangeRequest(ctx, m.l, in)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
marsh, err := resp.Marshal()
|
||||
require.NoError(m.t, err)
|
||||
m.totalOutSent += len(marsh)
|
||||
return resp, nil
|
||||
}
|
||||
|
|
|
@@ -5,7 +5,6 @@
//
// mockgen -destination mock_statestorage/mock_statestorage.go github.com/anyproto/any-sync/commonspace/headsync/statestorage StateStorage
//
// Package mock_statestorage is a generated GoMock package.
package mock_statestorage

@@ -56,17 +55,17 @@ func (mr *MockStateStorageMockRecorder) GetState(arg0 any) *gomock.Call {
}

// SetHash mocks base method.
func (m *MockStateStorage) SetHash(arg0 context.Context, arg1 string) error {
func (m *MockStateStorage) SetHash(arg0 context.Context, arg1, arg2 string) error {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "SetHash", arg0, arg1)
    ret := m.ctrl.Call(m, "SetHash", arg0, arg1, arg2)
    ret0, _ := ret[0].(error)
    return ret0
}

// SetHash indicates an expected call of SetHash.
func (mr *MockStateStorageMockRecorder) SetHash(arg0, arg1 any) *gomock.Call {
func (mr *MockStateStorageMockRecorder) SetHash(arg0, arg1, arg2 any) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHash", reflect.TypeOf((*MockStateStorage)(nil).SetHash), arg0, arg1)
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHash", reflect.TypeOf((*MockStateStorage)(nil).SetHash), arg0, arg1, arg2)
}

// SetObserver mocks base method.

@@ -9,7 +9,8 @@ import (
)

type State struct {
    Hash        string
    OldHash     string
    NewHash     string
    AclId       string
    SettingsId  string
    SpaceId     string
@@ -17,20 +18,22 @@ type State struct {
}

type Observer interface {
    OnHashChange(hash string)
    OnHashChange(oldHash, newHash string)
}

type StateStorage interface {
    GetState(ctx context.Context) (State, error)
    SettingsId() string
    SetHash(ctx context.Context, hash string) error
    SetHash(ctx context.Context, oldHash, newHash string) error
    SetObserver(observer Observer)
}

const (
    stateCollectionKey = "state"
    idKey              = "id"
    hashKey            = "h"
    oldHashKey         = "oh"
    newHashKey         = "nh"
    legacyHashKey      = "h"
    headerKey          = "e"
    aclIdKey           = "a"
    settingsIdKey      = "s"
@@ -58,10 +61,10 @@ func (s *stateStorage) SetObserver(observer Observer) {
    s.observer = observer
}

func (s *stateStorage) SetHash(ctx context.Context, hash string) (err error) {
func (s *stateStorage) SetHash(ctx context.Context, oldHash, newHash string) (err error) {
    defer func() {
        if s.observer != nil && err == nil {
            s.observer.OnHashChange(hash)
            s.observer.OnHashChange(oldHash, newHash)
        }
    }()
    tx, err := s.stateColl.WriteTx(ctx)
@@ -69,7 +72,8 @@ func (s *stateStorage) SetHash(ctx context.Context, hash string) (err error) {
        return err
    }
    mod := query.ModifyFunc(func(a *anyenc.Arena, v *anyenc.Value) (result *anyenc.Value, modified bool, err error) {
        v.Set(hashKey, a.NewString(hash))
        v.Set(oldHashKey, a.NewString(oldHash))
        v.Set(newHashKey, a.NewString(newHash))
        return v, true, nil
    })
    _, err = s.stateColl.UpsertId(tx.Context(), s.spaceId, mod)
@@ -99,13 +103,22 @@ func New(ctx context.Context, spaceId string, store anystore.DB) (StateStorage,
    return storage, nil
}

func Create(ctx context.Context, state State, store anystore.DB) (StateStorage, error) {
    arena := &anyenc.Arena{}
    stateCollection, err := store.Collection(ctx, stateCollectionKey)
func Create(ctx context.Context, state State, store anystore.DB) (st StateStorage, err error) {
    tx, err := store.WriteTx(ctx)
    if err != nil {
        return nil, err
    }
    tx, err := stateCollection.WriteTx(ctx)
    storage, err := CreateTx(tx.Context(), state, store)
    if err != nil {
        tx.Rollback()
        return nil, err
    }
    return storage, tx.Commit()
}

func CreateTx(ctx context.Context, state State, store anystore.DB) (StateStorage, error) {
    arena := &anyenc.Arena{}
    stateCollection, err := store.Collection(ctx, stateCollectionKey)
    if err != nil {
        return nil, err
    }
@@ -115,9 +128,8 @@ func Create(ctx context.Context, state State, store anystore.DB) (StateStorage,
    doc.Set(settingsIdKey, arena.NewString(state.SettingsId))
    doc.Set(headerKey, arena.NewBinary(state.SpaceHeader))
    doc.Set(aclIdKey, arena.NewString(state.AclId))
    err = stateCollection.Insert(tx.Context(), doc)
    err = stateCollection.Insert(ctx, doc)
    if err != nil {
        tx.Rollback()
        return nil, err
    }
    return &stateStorage{
@@ -126,7 +138,7 @@ func Create(ctx context.Context, state State, store anystore.DB) (StateStorage,
        settingsId: state.SettingsId,
        stateColl:  stateCollection,
        arena:      arena,
    }, tx.Commit()
    }, nil
}

func (s *stateStorage) SettingsId() string {
@@ -134,11 +146,21 @@ func (s *stateStorage) SettingsId() string {
}

func (s *stateStorage) stateFromDoc(doc anystore.Doc) State {
    var (
        oldHash = doc.Value().GetString(oldHashKey)
        newHash = doc.Value().GetString(newHashKey)
    )
    // legacy hash is used for backward compatibility, which was due to a mistake in key names
    if oldHash == "" || newHash == "" {
        oldHash = doc.Value().GetString(legacyHashKey)
        newHash = oldHash
    }
    return State{
        SpaceId:     doc.Value().GetString(idKey),
        SettingsId:  doc.Value().GetString(settingsIdKey),
        AclId:       doc.Value().GetString(aclIdKey),
        Hash:        doc.Value().GetString(hashKey),
        OldHash:     oldHash,
        NewHash:     newHash,
        SpaceHeader: doc.Value().GetBytes(headerKey),
    }
}

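The state document now keeps two hashes under separate keys ("oh"/"nh") and falls back to the legacy "h" key when they are missing, so spaces written by older builds still report a hash after upgrading. A hedged sketch of the round trip follows; the helper function is illustrative, only SetHash and GetState come from the interface above.

```go
// Sketch: persist both hashes, then read them back. For documents written
// by older builds, GetState falls back to the single legacy hash key.
func updateHashes(ctx context.Context, st statestorage.StateStorage, oldHash, newHash string) (statestorage.State, error) {
    if err := st.SetHash(ctx, oldHash, newHash); err != nil {
        return statestorage.State{}, err
    }
    return st.GetState(ctx) // State.OldHash / State.NewHash are now populated
}
```
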
@@ -1,8 +1,13 @@
package headsync

import "strings"
import (
    "strings"
)

func concatStrings(strs []string) string {
    if len(strs) == 1 {
        return strs[0]
    }
    var (
        b        strings.Builder
        totalLen int
@@ -17,11 +22,3 @@ func concatStrings(strs []string) string {
    }
    return b.String()
}

func splitString(str string) (res []string) {
    const cidLen = 59
    for i := 0; i < len(str); i += cidLen {
        res = append(res, str[i:i+cidLen])
    }
    return
}

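Heads are packed into a single diff element by concatenating their CIDs; because every CID here is a fixed 59 characters, the packed string needs no separator. A small test-style sketch of that convention, assuming the headsync package context (the 59-character strings are fake placeholders and the test name is illustrative):

```go
// Sketch: two fake fixed-length "CIDs" packed by concatStrings can be cut
// apart again purely by offset, with no separator in between.
func TestConcatFixedLengthHeads(t *testing.T) {
    heads := []string{strings.Repeat("a", 59), strings.Repeat("b", 59)}
    packed := concatStrings(heads)
    require.Len(t, packed, 118)
    require.Equal(t, heads[0], packed[:59])
    require.Equal(t, heads[1], packed[59:])
}
```
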
@@ -5,7 +5,6 @@
//
// mockgen -destination mock_commonspace/mock_commonspace.go github.com/anyproto/any-sync/commonspace Space
//
// Package mock_commonspace is a generated GoMock package.
package mock_commonspace

@ -18,6 +17,7 @@ import (
|
|||
aclclient "github.com/anyproto/any-sync/commonspace/acl/aclclient"
|
||||
headsync "github.com/anyproto/any-sync/commonspace/headsync"
|
||||
syncacl "github.com/anyproto/any-sync/commonspace/object/acl/syncacl"
|
||||
kvinterfaces "github.com/anyproto/any-sync/commonspace/object/keyvalue/kvinterfaces"
|
||||
treesyncer "github.com/anyproto/any-sync/commonspace/object/treesyncer"
|
||||
objecttreebuilder "github.com/anyproto/any-sync/commonspace/objecttreebuilder"
|
||||
spacestorage "github.com/anyproto/any-sync/commonspace/spacestorage"
|
||||
|
@ -223,6 +223,20 @@ func (mr *MockSpaceMockRecorder) Init(arg0 any) *gomock.Call {
|
|||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Init", reflect.TypeOf((*MockSpace)(nil).Init), arg0)
|
||||
}
|
||||
|
||||
// KeyValue mocks base method.
|
||||
func (m *MockSpace) KeyValue() kvinterfaces.KeyValueService {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "KeyValue")
|
||||
ret0, _ := ret[0].(kvinterfaces.KeyValueService)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// KeyValue indicates an expected call of KeyValue.
|
||||
func (mr *MockSpaceMockRecorder) KeyValue() *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "KeyValue", reflect.TypeOf((*MockSpace)(nil).KeyValue))
|
||||
}
|
||||
|
||||
// Storage mocks base method.
|
||||
func (m *MockSpace) Storage() spacestorage.SpaceStorage {
|
||||
m.ctrl.T.Helper()
|
||||
|
|
|
@ -31,6 +31,7 @@ const (
|
|||
AclUserPermissions_Admin AclUserPermissions = 2
|
||||
AclUserPermissions_Writer AclUserPermissions = 3
|
||||
AclUserPermissions_Reader AclUserPermissions = 4
|
||||
AclUserPermissions_Guest AclUserPermissions = 5
|
||||
)
|
||||
|
||||
var AclUserPermissions_name = map[int32]string{
|
||||
|
@ -39,6 +40,7 @@ var AclUserPermissions_name = map[int32]string{
|
|||
2: "Admin",
|
||||
3: "Writer",
|
||||
4: "Reader",
|
||||
5: "Guest",
|
||||
}
|
||||
|
||||
var AclUserPermissions_value = map[string]int32{
|
||||
|
@ -47,6 +49,7 @@ var AclUserPermissions_value = map[string]int32{
|
|||
"Admin": 2,
|
||||
"Writer": 3,
|
||||
"Reader": 4,
|
||||
"Guest": 5,
|
||||
}
|
||||
|
||||
func (x AclUserPermissions) String() string {
|
||||
|
@ -1335,71 +1338,71 @@ func init() {
|
|||
}
|
||||
|
||||
var fileDescriptor_c8e9f754f34e929b = []byte{
|
||||
// 1014 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0x4f, 0x6f, 0x1b, 0x45,
|
||||
0x14, 0xdf, 0xb5, 0x93, 0x38, 0x7e, 0x4e, 0x52, 0x77, 0x0a, 0xed, 0x36, 0x05, 0xcb, 0x0c, 0x6a,
|
||||
0x65, 0x55, 0xa8, 0xa9, 0x8c, 0x5a, 0xa1, 0x0a, 0x94, 0x6e, 0x93, 0x0a, 0xa7, 0xa5, 0x50, 0x4d,
|
||||
0x45, 0x41, 0x48, 0x20, 0x4d, 0x66, 0x47, 0x65, 0x61, 0xff, 0x98, 0xd9, 0xb1, 0x91, 0xbf, 0x05,
|
||||
0x37, 0xbe, 0x08, 0x57, 0xee, 0x1c, 0x38, 0xe4, 0xc8, 0x11, 0x25, 0x37, 0x6e, 0x7c, 0x03, 0x34,
|
||||
0xb3, 0xeb, 0xdd, 0x9d, 0xf5, 0xda, 0x49, 0x0e, 0x1c, 0x92, 0xec, 0xbc, 0x7f, 0xf3, 0xde, 0xef,
|
||||
0xfd, 0xe6, 0xcd, 0x04, 0x3e, 0x66, 0x71, 0x18, 0xc6, 0x51, 0x32, 0xa6, 0x8c, 0xef, 0xc5, 0xc7,
|
||||
0x3f, 0x70, 0x26, 0xf7, 0x28, 0x0b, 0xd4, 0x8f, 0xe0, 0x2c, 0x16, 0xde, 0x58, 0xc4, 0x32, 0xde,
|
||||
0xd3, 0xbf, 0x93, 0x42, 0x7a, 0x4f, 0x0b, 0x50, 0x3b, 0x17, 0xe0, 0x7f, 0x1b, 0xd0, 0x72, 0x59,
|
||||
0x40, 0xe2, 0x58, 0xa2, 0x5d, 0xd8, 0xf4, 0x3d, 0x1e, 0x49, 0x5f, 0xce, 0x1c, 0xbb, 0x6f, 0x0f,
|
||||
0xb6, 0x48, 0xbe, 0x46, 0xef, 0x40, 0x3b, 0xa4, 0x89, 0xe4, 0xe2, 0x39, 0x9f, 0x39, 0x0d, 0xad,
|
||||
0x2c, 0x04, 0xc8, 0x81, 0x96, 0x4e, 0xe5, 0xc8, 0x73, 0x9a, 0x7d, 0x7b, 0xd0, 0x26, 0xf3, 0x25,
|
||||
0xba, 0x0b, 0x5d, 0x1e, 0x31, 0x31, 0x1b, 0x4b, 0xee, 0x11, 0x4e, 0x3d, 0xe5, 0xbe, 0xa6, 0xdd,
|
||||
0x17, 0xe4, 0x6a, 0x0f, 0xe9, 0x87, 0x3c, 0x91, 0x34, 0x1c, 0x3b, 0xeb, 0x7d, 0x7b, 0xd0, 0x24,
|
||||
0x85, 0x00, 0x7d, 0x00, 0x57, 0xe7, 0xd9, 0xbc, 0xf2, 0xdf, 0x44, 0x54, 0x4e, 0x04, 0x77, 0x36,
|
||||
0x74, 0xa8, 0x45, 0x05, 0xba, 0x03, 0x3b, 0x21, 0x97, 0xd4, 0xa3, 0x92, 0xbe, 0x9c, 0x1c, 0xab,
|
||||
0x5d, 0x5b, 0xda, 0xb4, 0x22, 0x45, 0x8f, 0xc0, 0xc9, 0xf3, 0x78, 0x31, 0x57, 0x09, 0x7f, 0xaa,
|
||||
0x3c, 0x36, 0xb5, 0xc7, 0x52, 0x3d, 0x7a, 0x08, 0xd7, 0x73, 0xdd, 0x17, 0x3f, 0x47, 0x5c, 0xcc,
|
||||
0x0d, 0x9c, 0xb6, 0xf6, 0x5c, 0xa2, 0xc5, 0xf7, 0xa1, 0xeb, 0xb2, 0xc0, 0x65, 0x2c, 0x9e, 0x44,
|
||||
0xf2, 0x28, 0x9a, 0xfa, 0x92, 0xab, 0xda, 0x7d, 0xfd, 0xa5, 0x36, 0x4e, 0xc1, 0x2f, 0x04, 0xf8,
|
||||
0x77, 0x1b, 0xde, 0x2e, 0x5c, 0x08, 0xff, 0x69, 0xc2, 0x13, 0xf9, 0x2c, 0xf6, 0x23, 0x55, 0x67,
|
||||
0x6a, 0x76, 0x64, 0x76, 0xae, 0x22, 0x2d, 0xec, 0x88, 0xee, 0xfb, 0x91, 0xa7, 0x9b, 0xd8, 0x26,
|
||||
0x15, 0x29, 0xfa, 0x08, 0x6e, 0x98, 0x9e, 0x05, 0xd6, 0x4d, 0x1d, 0x78, 0x99, 0x5a, 0xb1, 0x67,
|
||||
0x8e, 0x6d, 0xd6, 0xe1, 0x7c, 0x8d, 0xff, 0xb4, 0xe1, 0xc6, 0x42, 0xfe, 0x2e, 0x63, 0x7c, 0xbc,
|
||||
0x9a, 0x75, 0x03, 0xb8, 0x22, 0x52, 0xe3, 0x4a, 0xda, 0x55, 0x71, 0x2d, 0xcf, 0x9a, 0x4b, 0x78,
|
||||
0xb6, 0x0f, 0x9d, 0x31, 0x17, 0xa1, 0x9f, 0x24, 0x7e, 0x1c, 0x25, 0x3a, 0xd9, 0x9d, 0xe1, 0xbb,
|
||||
0xf7, 0x8a, 0x53, 0xe2, 0xb2, 0xe0, 0xcb, 0x84, 0x8b, 0x97, 0x85, 0x11, 0x29, 0x7b, 0xe0, 0x43,
|
||||
0x70, 0x16, 0xaa, 0x39, 0xe4, 0x2c, 0xf0, 0x23, 0x5e, 0x97, 0xb2, 0x5d, 0x9b, 0x32, 0x7e, 0x0c,
|
||||
0xd7, 0xab, 0x34, 0x20, 0x7c, 0x1a, 0xff, 0xc8, 0x6b, 0x9a, 0x65, 0xd7, 0x35, 0x0b, 0x7f, 0x0b,
|
||||
0xd7, 0x5c, 0x16, 0x3c, 0xad, 0xd6, 0xb7, 0x0a, 0xd1, 0x3a, 0x9c, 0x1a, 0xf5, 0x38, 0xe1, 0xef,
|
||||
0xe0, 0x56, 0x91, 0x60, 0x01, 0xc6, 0xc1, 0xf7, 0x34, 0x7a, 0xc3, 0x13, 0xb4, 0x0f, 0x2d, 0x96,
|
||||
0x7e, 0x3a, 0x76, 0xbf, 0x39, 0xe8, 0x0c, 0x6f, 0x9b, 0x10, 0x2e, 0x71, 0x24, 0x73, 0x2f, 0x3c,
|
||||
0x82, 0x9d, 0xc2, 0x2c, 0x71, 0x3d, 0x0f, 0x3d, 0x84, 0x36, 0xf5, 0x3c, 0x5f, 0xea, 0xbe, 0xa4,
|
||||
0x41, 0x9d, 0xda, 0xa0, 0xae, 0xe7, 0x91, 0xc2, 0x14, 0xff, 0x66, 0xc3, 0xb6, 0xa1, 0x5c, 0x89,
|
||||
0x41, 0xa5, 0xff, 0x8d, 0xcb, 0xf6, 0xdf, 0xa0, 0x7a, 0xd3, 0xa4, 0xfa, 0x65, 0x06, 0x1e, 0x7e,
|
||||
0x50, 0x73, 0x2a, 0x0e, 0x68, 0xc4, 0x78, 0xa0, 0xb6, 0x10, 0x66, 0xf3, 0xf3, 0x35, 0x9e, 0xc1,
|
||||
0xee, 0x72, 0x78, 0xff, 0xd7, 0xca, 0xf1, 0x3f, 0xb6, 0x9e, 0x5d, 0x59, 0x01, 0xd9, 0x8e, 0x8f,
|
||||
0xa1, 0x43, 0xd3, 0x64, 0x9e, 0xf3, 0xd9, 0xbc, 0x6f, 0x3d, 0x33, 0x6a, 0x95, 0xa4, 0xa4, 0xec,
|
||||
0x52, 0x33, 0xad, 0x1b, 0x97, 0x9e, 0xd6, 0xcd, 0x73, 0xa6, 0xf5, 0x7d, 0xb8, 0x56, 0xcc, 0xe3,
|
||||
0xa0, 0xd2, 0x9b, 0x3a, 0x15, 0x9e, 0x94, 0xe7, 0x34, 0xe1, 0x61, 0x3c, 0xe5, 0xa8, 0x07, 0x90,
|
||||
0xa1, 0xe9, 0x67, 0xbc, 0xdf, 0x22, 0x25, 0x09, 0x72, 0x61, 0x5b, 0x94, 0xc1, 0xd1, 0x85, 0x74,
|
||||
0x86, 0xb7, 0x4c, 0x34, 0x0c, 0xfc, 0x88, 0xe9, 0x81, 0x6f, 0xd6, 0xb0, 0x22, 0xdd, 0x1d, 0xff,
|
||||
0xda, 0x82, 0x2b, 0x2e, 0x0b, 0x0e, 0xe2, 0x48, 0xf2, 0x48, 0xbe, 0xa6, 0xc1, 0x84, 0xa3, 0x07,
|
||||
0xb0, 0x91, 0x8e, 0x05, 0xdd, 0xed, 0x85, 0xad, 0x8c, 0xf9, 0x32, 0xb2, 0x48, 0x66, 0x8c, 0x3e,
|
||||
0x85, 0x2d, 0xbf, 0x34, 0x73, 0xb2, 0x3c, 0xdf, 0x5b, 0xe1, 0x9c, 0x1a, 0x8e, 0x2c, 0x62, 0x38,
|
||||
0xa2, 0x43, 0xe8, 0x88, 0xe2, 0x42, 0xd2, 0x6d, 0xe8, 0x0c, 0xfb, 0xb5, 0x71, 0x4a, 0x17, 0xd7,
|
||||
0xc8, 0x22, 0x65, 0x37, 0xf4, 0x4c, 0xe1, 0x56, 0xba, 0x16, 0x74, 0x5f, 0x3a, 0x43, 0xbc, 0x2a,
|
||||
0x4e, 0x6a, 0x39, 0xb2, 0x88, 0xe9, 0x8a, 0x5e, 0x41, 0x77, 0x5c, 0x39, 0x15, 0xfa, 0x39, 0x71,
|
||||
0xd1, 0x09, 0x35, 0xb2, 0xc8, 0x42, 0x00, 0x74, 0x00, 0xdb, 0xb4, 0xcc, 0x04, 0xfd, 0xf4, 0x58,
|
||||
0x86, 0x76, 0x6a, 0xa2, 0x32, 0x33, 0x7c, 0x54, 0x10, 0x93, 0x1d, 0xad, 0x73, 0xd9, 0x91, 0x96,
|
||||
0x57, 0x3e, 0x6e, 0x2f, 0x60, 0x47, 0x18, 0x77, 0x8e, 0x7e, 0xa8, 0x74, 0x86, 0xef, 0xaf, 0xc2,
|
||||
0x2a, 0x33, 0x1d, 0x59, 0xa4, 0xe2, 0x8c, 0xbe, 0x86, 0xb7, 0x68, 0x0d, 0xd7, 0xf4, 0x1b, 0xe6,
|
||||
0x9c, 0x06, 0xe4, 0x65, 0xd6, 0x46, 0x40, 0xaf, 0xe1, 0x6a, 0x15, 0xc6, 0xc4, 0x01, 0x1d, 0xf6,
|
||||
0xce, 0x85, 0x1a, 0x91, 0x8c, 0x2c, 0xb2, 0x18, 0x02, 0x7d, 0x92, 0xcf, 0x1b, 0x75, 0x69, 0x38,
|
||||
0x1d, 0x1d, 0xf1, 0x66, 0x6d, 0x44, 0x65, 0xa0, 0xa8, 0x56, 0xb2, 0x2f, 0x51, 0x2d, 0x9d, 0xb5,
|
||||
0xce, 0xd6, 0xf9, 0x95, 0xa6, 0x96, 0x25, 0xaa, 0xa5, 0x82, 0x27, 0x2d, 0x58, 0x9f, 0xaa, 0x53,
|
||||
0x88, 0x9f, 0xea, 0x67, 0xf4, 0xa1, 0xba, 0x01, 0x1e, 0x01, 0xd0, 0xfc, 0x8c, 0x66, 0xd3, 0x70,
|
||||
0xd7, 0x0c, 0x5e, 0x3e, 0xc0, 0xa4, 0x64, 0x7d, 0xf7, 0x33, 0x40, 0x8b, 0x23, 0x18, 0x6d, 0xc2,
|
||||
0xda, 0xe7, 0x71, 0xc4, 0xbb, 0x16, 0x6a, 0xc3, 0xba, 0x7e, 0x4b, 0x76, 0x6d, 0xf5, 0xe9, 0x7a,
|
||||
0xa1, 0x1f, 0x75, 0x1b, 0x08, 0x60, 0xe3, 0x2b, 0xe1, 0x4b, 0x2e, 0xba, 0x4d, 0xf5, 0xad, 0xf8,
|
||||
0xc3, 0x45, 0x77, 0xed, 0xc9, 0xfe, 0x1f, 0xa7, 0x3d, 0xfb, 0xe4, 0xb4, 0x67, 0xff, 0x7d, 0xda,
|
||||
0xb3, 0x7f, 0x39, 0xeb, 0x59, 0x27, 0x67, 0x3d, 0xeb, 0xaf, 0xb3, 0x9e, 0xf5, 0xcd, 0xed, 0x0b,
|
||||
0xfd, 0xff, 0x70, 0xbc, 0xa1, 0xff, 0x7c, 0xf8, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xac, 0x5c,
|
||||
0x8a, 0x61, 0x6f, 0x0c, 0x00, 0x00,
|
||||
// 1020 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0x4f, 0x6f, 0xdc, 0x44,
|
||||
0x14, 0xb7, 0x77, 0x93, 0x6c, 0xf6, 0x39, 0x49, 0xdd, 0x29, 0xb4, 0x6e, 0x0a, 0xab, 0x65, 0x50,
|
||||
0xab, 0x55, 0x85, 0x9a, 0x6a, 0x51, 0x2b, 0x54, 0x81, 0x52, 0x37, 0xa9, 0xba, 0x69, 0x55, 0xa8,
|
||||
0xa6, 0xa2, 0x45, 0x48, 0x20, 0x39, 0xe3, 0x51, 0x31, 0xf8, 0xcf, 0x32, 0x9e, 0x5d, 0xb4, 0xdf,
|
||||
0x82, 0x1b, 0x5f, 0x84, 0x2b, 0x77, 0x0e, 0x1c, 0x7a, 0xe4, 0x88, 0x92, 0x1b, 0x37, 0xbe, 0x01,
|
||||
0x9a, 0xb1, 0xd7, 0xf6, 0x78, 0xbd, 0x9b, 0xe4, 0xd0, 0x43, 0x12, 0xcf, 0xfb, 0x37, 0xef, 0xfd,
|
||||
0xde, 0x6f, 0xde, 0x4c, 0xe0, 0x73, 0x9a, 0x44, 0x51, 0x12, 0xa7, 0x63, 0x8f, 0xb2, 0xbd, 0xe4,
|
||||
0xf8, 0x47, 0x46, 0xc5, 0x9e, 0x47, 0x43, 0xf9, 0xc3, 0x19, 0x4d, 0xb8, 0x3f, 0xe6, 0x89, 0x48,
|
||||
0xf6, 0xd4, 0xef, 0xb4, 0x94, 0xde, 0x51, 0x02, 0xd4, 0x2d, 0x04, 0xf8, 0xbf, 0x16, 0x74, 0x5c,
|
||||
0x1a, 0x92, 0x24, 0x11, 0x68, 0x17, 0x36, 0x03, 0x9f, 0xc5, 0x22, 0x10, 0x33, 0xc7, 0xec, 0x9b,
|
||||
0x83, 0x2d, 0x52, 0xac, 0xd1, 0x07, 0xd0, 0x8d, 0xbc, 0x54, 0x30, 0xfe, 0x8c, 0xcd, 0x9c, 0x96,
|
||||
0x52, 0x96, 0x02, 0xe4, 0x40, 0x47, 0xa5, 0x72, 0xe4, 0x3b, 0xed, 0xbe, 0x39, 0xe8, 0x92, 0xf9,
|
||||
0x12, 0xdd, 0x06, 0x9b, 0xc5, 0x94, 0xcf, 0xc6, 0x82, 0xf9, 0x84, 0x79, 0xbe, 0x74, 0x5f, 0x53,
|
||||
0xee, 0x0b, 0x72, 0xb9, 0x87, 0x08, 0x22, 0x96, 0x0a, 0x2f, 0x1a, 0x3b, 0xeb, 0x7d, 0x73, 0xd0,
|
||||
0x26, 0xa5, 0x00, 0x7d, 0x02, 0x97, 0xe7, 0xd9, 0xbc, 0x0c, 0xde, 0xc4, 0x9e, 0x98, 0x70, 0xe6,
|
||||
0x6c, 0xa8, 0x50, 0x8b, 0x0a, 0x74, 0x0b, 0x76, 0x22, 0x26, 0x3c, 0xdf, 0x13, 0xde, 0x8b, 0xc9,
|
||||
0xb1, 0xdc, 0xb5, 0xa3, 0x4c, 0x6b, 0x52, 0xf4, 0x00, 0x9c, 0x22, 0x8f, 0xe7, 0x73, 0x15, 0x0f,
|
||||
0xa6, 0xd2, 0x63, 0x53, 0x79, 0x2c, 0xd5, 0xa3, 0xfb, 0x70, 0xb5, 0xd0, 0x7d, 0xf5, 0x4b, 0xcc,
|
||||
0xf8, 0xdc, 0xc0, 0xe9, 0x2a, 0xcf, 0x25, 0x5a, 0x7c, 0x17, 0x6c, 0x97, 0x86, 0x2e, 0xa5, 0xc9,
|
||||
0x24, 0x16, 0x47, 0xf1, 0x34, 0x10, 0x4c, 0xd6, 0x1e, 0xa8, 0x2f, 0xb9, 0x71, 0x06, 0x7e, 0x29,
|
||||
0xc0, 0x7f, 0x98, 0xf0, 0x7e, 0xe9, 0x42, 0xd8, 0xcf, 0x13, 0x96, 0x8a, 0xa7, 0x49, 0x10, 0xcb,
|
||||
0x3a, 0x33, 0xb3, 0x23, 0xbd, 0x73, 0x35, 0x69, 0x69, 0x47, 0x54, 0xdf, 0x8f, 0x7c, 0xd5, 0xc4,
|
||||
0x2e, 0xa9, 0x49, 0xd1, 0x67, 0x70, 0x4d, 0xf7, 0x2c, 0xb1, 0x6e, 0xab, 0xc0, 0xcb, 0xd4, 0x92,
|
||||
0x3d, 0x73, 0x6c, 0xf3, 0x0e, 0x17, 0x6b, 0xfc, 0x97, 0x09, 0xd7, 0x16, 0xf2, 0x77, 0x29, 0x65,
|
||||
0xe3, 0xd5, 0xac, 0x1b, 0xc0, 0x25, 0x9e, 0x19, 0xd7, 0xd2, 0xae, 0x8b, 0x1b, 0x79, 0xd6, 0x5e,
|
||||
0xc2, 0xb3, 0x7d, 0xb0, 0xc6, 0x8c, 0x47, 0x41, 0x9a, 0x06, 0x49, 0x9c, 0xaa, 0x64, 0x77, 0x86,
|
||||
0x1f, 0xde, 0x29, 0x4f, 0x89, 0x4b, 0xc3, 0xaf, 0x53, 0xc6, 0x5f, 0x94, 0x46, 0xa4, 0xea, 0x81,
|
||||
0x0f, 0xc1, 0x59, 0xa8, 0xe6, 0x90, 0xd1, 0x30, 0x88, 0x59, 0x53, 0xca, 0x66, 0x63, 0xca, 0xf8,
|
||||
0x21, 0x5c, 0xad, 0xd3, 0x80, 0xb0, 0x69, 0xf2, 0x13, 0x6b, 0x68, 0x96, 0xd9, 0xd4, 0x2c, 0xfc,
|
||||
0x1d, 0x5c, 0x71, 0x69, 0xf8, 0xb8, 0x5e, 0xdf, 0x2a, 0x44, 0x9b, 0x70, 0x6a, 0x35, 0xe3, 0x84,
|
||||
0xbf, 0x87, 0x1b, 0x65, 0x82, 0x25, 0x18, 0x07, 0x3f, 0x78, 0xf1, 0x1b, 0x96, 0xa2, 0x7d, 0xe8,
|
||||
0xd0, 0xec, 0xd3, 0x31, 0xfb, 0xed, 0x81, 0x35, 0xbc, 0xa9, 0x43, 0xb8, 0xc4, 0x91, 0xcc, 0xbd,
|
||||
0xf0, 0x08, 0x76, 0x4a, 0xb3, 0xd4, 0xf5, 0x7d, 0x74, 0x1f, 0xba, 0x9e, 0xef, 0x07, 0x42, 0xf5,
|
||||
0x25, 0x0b, 0xea, 0x34, 0x06, 0x75, 0x7d, 0x9f, 0x94, 0xa6, 0xf8, 0x77, 0x13, 0xb6, 0x35, 0xe5,
|
||||
0x4a, 0x0c, 0x6a, 0xfd, 0x6f, 0x5d, 0xb4, 0xff, 0x1a, 0xd5, 0xdb, 0x3a, 0xd5, 0x2f, 0x32, 0xf0,
|
||||
0xf0, 0xbd, 0x86, 0x53, 0x71, 0xe0, 0xc5, 0x94, 0x85, 0x72, 0x0b, 0xae, 0x37, 0xbf, 0x58, 0xe3,
|
||||
0x19, 0xec, 0x2e, 0x87, 0xf7, 0x9d, 0x56, 0x8e, 0xff, 0x35, 0xd5, 0xec, 0xca, 0x0b, 0xc8, 0x77,
|
||||
0x7c, 0x08, 0x96, 0x97, 0x25, 0xf3, 0x8c, 0xcd, 0xe6, 0x7d, 0xeb, 0xe9, 0x51, 0xeb, 0x24, 0x25,
|
||||
0x55, 0x97, 0x86, 0x69, 0xdd, 0xba, 0xf0, 0xb4, 0x6e, 0x9f, 0x31, 0xad, 0xef, 0xc2, 0x95, 0x72,
|
||||
0x1e, 0x87, 0xb5, 0xde, 0x34, 0xa9, 0xf0, 0xa4, 0x3a, 0xa7, 0x09, 0x8b, 0x92, 0x29, 0x43, 0x3d,
|
||||
0x80, 0x1c, 0xcd, 0x20, 0xe7, 0xfd, 0x16, 0xa9, 0x48, 0x90, 0x0b, 0xdb, 0xbc, 0x0a, 0x8e, 0x2a,
|
||||
0xc4, 0x1a, 0xde, 0xd0, 0xd1, 0xd0, 0xf0, 0x23, 0xba, 0x07, 0xbe, 0xde, 0xc0, 0x8a, 0x6c, 0x77,
|
||||
0xfc, 0x5b, 0x07, 0x2e, 0xb9, 0x34, 0x3c, 0x48, 0x62, 0xc1, 0x62, 0xf1, 0xca, 0x0b, 0x27, 0x0c,
|
||||
0xdd, 0x83, 0x8d, 0x6c, 0x2c, 0xa8, 0x6e, 0x2f, 0x6c, 0xa5, 0xcd, 0x97, 0x91, 0x41, 0x72, 0x63,
|
||||
0xf4, 0x04, 0xb6, 0x82, 0xca, 0xcc, 0xc9, 0xf3, 0xfc, 0x68, 0x85, 0x73, 0x66, 0x38, 0x32, 0x88,
|
||||
0xe6, 0x88, 0x0e, 0xc1, 0xe2, 0xe5, 0x85, 0xa4, 0xda, 0x60, 0x0d, 0xfb, 0x8d, 0x71, 0x2a, 0x17,
|
||||
0xd7, 0xc8, 0x20, 0x55, 0x37, 0xf4, 0x54, 0xe2, 0x56, 0xb9, 0x16, 0x54, 0x5f, 0xac, 0x21, 0x5e,
|
||||
0x15, 0x27, 0xb3, 0x1c, 0x19, 0x44, 0x77, 0x45, 0x2f, 0xc1, 0x1e, 0xd7, 0x4e, 0x85, 0x7a, 0x4e,
|
||||
0x9c, 0x77, 0x42, 0x8d, 0x0c, 0xb2, 0x10, 0x00, 0x1d, 0xc0, 0xb6, 0x57, 0x65, 0x82, 0x7a, 0x7a,
|
||||
0x2c, 0x43, 0x3b, 0x33, 0x91, 0x99, 0x69, 0x3e, 0x32, 0x88, 0xce, 0x8e, 0xce, 0x99, 0xec, 0xc8,
|
||||
0xca, 0xab, 0x1e, 0xb7, 0xe7, 0xb0, 0xc3, 0xb5, 0x3b, 0x47, 0x3d, 0x54, 0xac, 0xe1, 0xc7, 0xab,
|
||||
0xb0, 0xca, 0x4d, 0x47, 0x06, 0xa9, 0x39, 0xa3, 0x6f, 0xe0, 0x3d, 0xaf, 0x81, 0x6b, 0xea, 0x0d,
|
||||
0x73, 0x46, 0x03, 0x8a, 0x32, 0x1b, 0x23, 0xa0, 0x57, 0x70, 0xb9, 0x0e, 0x63, 0xea, 0x80, 0x0a,
|
||||
0x7b, 0xeb, 0x5c, 0x8d, 0x48, 0x47, 0x06, 0x59, 0x0c, 0x81, 0xbe, 0x28, 0xe6, 0x8d, 0xbc, 0x34,
|
||||
0x1c, 0x4b, 0x45, 0xbc, 0xde, 0x18, 0x51, 0x1a, 0x48, 0xaa, 0x55, 0xec, 0x2b, 0x54, 0xcb, 0x66,
|
||||
0xad, 0xb3, 0x75, 0x76, 0xa5, 0x99, 0x65, 0x85, 0x6a, 0x99, 0xe0, 0x51, 0x07, 0xd6, 0xa7, 0xf2,
|
||||
0x14, 0xe2, 0xc7, 0xea, 0x19, 0x7d, 0x28, 0x6f, 0x80, 0x07, 0x00, 0x5e, 0x71, 0x46, 0xf3, 0x69,
|
||||
0xb8, 0xab, 0x07, 0xaf, 0x1e, 0x60, 0x52, 0xb1, 0xbe, 0xfd, 0x1a, 0xd0, 0xe2, 0x08, 0x46, 0x9b,
|
||||
0xb0, 0xf6, 0x65, 0x12, 0x33, 0xdb, 0x40, 0x5d, 0x58, 0x57, 0x6f, 0x49, 0xdb, 0x94, 0x9f, 0xae,
|
||||
0x1f, 0x05, 0xb1, 0xdd, 0x42, 0x00, 0x1b, 0xaf, 0x79, 0x20, 0x18, 0xb7, 0xdb, 0xf2, 0x5b, 0xf2,
|
||||
0x87, 0x71, 0x7b, 0x4d, 0x9a, 0x3c, 0x91, 0xc9, 0xda, 0xeb, 0x8f, 0xf6, 0xff, 0x3c, 0xe9, 0x99,
|
||||
0x6f, 0x4f, 0x7a, 0xe6, 0x3f, 0x27, 0x3d, 0xf3, 0xd7, 0xd3, 0x9e, 0xf1, 0xf6, 0xb4, 0x67, 0xfc,
|
||||
0x7d, 0xda, 0x33, 0xbe, 0xbd, 0x79, 0xae, 0x7f, 0x25, 0x8e, 0x37, 0xd4, 0x9f, 0x4f, 0xff, 0x0f,
|
||||
0x00, 0x00, 0xff, 0xff, 0xb7, 0x65, 0xd3, 0x8c, 0x7a, 0x0c, 0x00, 0x00,
|
||||
}
|
||||
|
||||
func (m *AclRoot) Marshal() (dAtA []byte, err error) {
|
||||
|
|
|
@@ -133,4 +133,5 @@ enum AclUserPermissions {
    Admin = 2;
    Writer = 3;
    Reader = 4;
    Guest = 5;
}

@@ -131,6 +131,20 @@ func (st *AclState) CurrentReadKeyId() string {
    return st.readKeyChanges[len(st.readKeyChanges)-1]
}

func (st *AclState) ReadKeyForAclId(id string) (string, error) {
    recIdx, ok := st.list.indexes[id]
    if !ok {
        return "", ErrNoSuchRecord
    }
    for i := len(st.readKeyChanges) - 1; i >= 0; i-- {
        recId := st.readKeyChanges[i]
        if recIdx >= st.list.indexes[recId] {
            return recId, nil
        }
    }
    return "", ErrNoSuchRecord
}

func (st *AclState) AccountKey() crypto.PrivKey {
    return st.key
}
@@ -151,6 +165,13 @@ func (st *AclState) CurrentMetadataKey() (crypto.PubKey, error) {
    return curKeys.MetadataPubKey, nil
}

func (st *AclState) FirstMetadataKey() (crypto.PrivKey, error) {
    if firstKey, ok := st.keys[st.id]; ok && firstKey.MetadataPrivKey != nil {
        return firstKey.MetadataPrivKey, nil
    }
    return nil, ErrNoMetadataKey
}

func (st *AclState) Keys() map[string]AclKeys {
    return st.keys
}
@@ -196,6 +217,10 @@ func (st *AclState) Invites() []crypto.PubKey {
    return invites
}

func (st *AclState) Key() crypto.PrivKey {
    return st.key
}

func (st *AclState) InviteIds() []string {
    var invites []string
    for invId := range st.inviteKeys {
@@ -664,6 +689,9 @@ func (st *AclState) applyRequestRemove(ch *aclrecordproto.AclAccountRequestRemov
    st.pendingRequests[mapKeyFromPubKey(record.Identity)] = record.Id
    pk := mapKeyFromPubKey(record.Identity)
    accSt, exists := st.accountStates[pk]
    if !accSt.Permissions.CanRequestRemove() {
        return ErrInsufficientPermissions
    }
    if !exists {
        return ErrNoSuchAccount
    }

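ReadKeyForAclId walks the read-key changes backwards to find the key that was current at a given ACL record, and FirstMetadataKey returns the metadata private key stored with the very first record, if one was stored. A hedged usage sketch; the function name and flow are illustrative, the methods and errors come from the list package:

```go
// Sketch: resolve the read key in force at a record and the space's first
// metadata private key.
func inspectAcl(st *list.AclState, recordId string) (readKeyId string, metaKey crypto.PrivKey, err error) {
    if readKeyId, err = st.ReadKeyForAclId(recordId); err != nil {
        return // list.ErrNoSuchRecord when the record is unknown
    }
    metaKey, err = st.FirstMetadataKey() // list.ErrNoMetadataKey when absent
    return
}
```
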
@ -1,8 +1,11 @@
|
|||
package list
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"testing"
|
||||
|
||||
"github.com/anyproto/any-sync/util/crypto"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
|
@ -68,3 +71,43 @@ func TestAclStateIsEmpty(t *testing.T) {
|
|||
require.True(t, st.IsEmpty())
|
||||
})
|
||||
}
|
||||
|
||||
func TestAclState_FirstMetadataKey(t *testing.T) {
|
||||
t.Run("returns first metadata key successfully", func(t *testing.T) {
|
||||
privKey, _, err := crypto.GenerateEd25519Key(rand.Reader)
|
||||
require.NoError(t, err)
|
||||
pubKey := privKey.GetPublic()
|
||||
readKey := crypto.NewAES()
|
||||
state := &AclState{
|
||||
id: "recordId",
|
||||
keys: map[string]AclKeys{
|
||||
"recordId": {
|
||||
ReadKey: readKey,
|
||||
MetadataPrivKey: privKey,
|
||||
MetadataPubKey: pubKey,
|
||||
},
|
||||
},
|
||||
}
|
||||
key, err := state.FirstMetadataKey()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, privKey, key)
|
||||
})
|
||||
t.Run("first metadata is nil", func(t *testing.T) {
|
||||
state := &AclState{
|
||||
id: "recordId",
|
||||
keys: map[string]AclKeys{
|
||||
"recordId": {
|
||||
ReadKey: crypto.NewAES(),
|
||||
},
|
||||
},
|
||||
}
|
||||
key, err := state.FirstMetadataKey()
|
||||
require.ErrorIs(t, err, ErrNoMetadataKey)
|
||||
require.Nil(t, key)
|
||||
})
|
||||
t.Run("returns error when no read key changes", func(t *testing.T) {
|
||||
state := &AclState{}
|
||||
_, err := state.FirstMetadataKey()
|
||||
require.ErrorIs(t, err, ErrNoMetadataKey)
|
||||
})
|
||||
}
|
||||
|
|
|
@@ -332,12 +332,18 @@ func (a *AclTestExecutor) Execute(cmd string) (err error) {
    getPerm := func(perm string) AclPermissions {
        var aclPerm aclrecordproto.AclUserPermissions
        switch perm {
        case "own":
            aclPerm = aclrecordproto.AclUserPermissions_Owner
        case "adm":
            aclPerm = aclrecordproto.AclUserPermissions_Admin
        case "rw":
            aclPerm = aclrecordproto.AclUserPermissions_Writer
        case "none":
            aclPerm = aclrecordproto.AclUserPermissions_None
        case "r":
            aclPerm = aclrecordproto.AclUserPermissions_Reader
        case "g":
            aclPerm = aclrecordproto.AclUserPermissions_Guest
        }
        return AclPermissions(aclPerm)
    }

@@ -118,6 +118,16 @@ func TestAclExecutor(t *testing.T) {
        {"p.batch::revoke:i1;revoke:i2", nil},
        {"f.join::i1", ErrNoSuchInvite},
        {"f.join::i2", ErrNoSuchInvite},
        // add stream guest user
        {"a.add::guest,g,guestm", nil},
        // guest can't request removal
        {"guest.request_remove::guest", ErrInsufficientPermissions},
        {"guest.remove::guest", ErrInsufficientPermissions},
        // can't change permission of existing guest user
        {"a.changes::guest,rw", ErrInsufficientPermissions},
        {"a.changes::guest,none", ErrInsufficientPermissions},
        // can't change permission of existing user to guest, should be only possible to create it with add
        {"a.changes::r,g", ErrInsufficientPermissions},
    }
    for _, cmd := range cmds {
        err := a.Execute(cmd.cmd)

@@ -5,7 +5,6 @@
//
// mockgen -destination mock_list/mock_list.go github.com/anyproto/any-sync/commonspace/object/acl/list AclList,Storage
//
// Package mock_list is a generated GoMock package.
package mock_list

@@ -69,6 +69,7 @@ type AclPermissions aclrecordproto.AclUserPermissions
const (
    AclPermissionsNone   = AclPermissions(aclrecordproto.AclUserPermissions_None)
    AclPermissionsReader = AclPermissions(aclrecordproto.AclUserPermissions_Reader)
    AclPermissionsGuest  = AclPermissions(aclrecordproto.AclUserPermissions_Guest) // like reader, but can't request removal and can't be upgraded to another permission
    AclPermissionsWriter = AclPermissions(aclrecordproto.AclUserPermissions_Writer)
    AclPermissionsAdmin  = AclPermissions(aclrecordproto.AclUserPermissions_Admin)
    AclPermissionsOwner  = AclPermissions(aclrecordproto.AclUserPermissions_Owner)
@@ -105,3 +106,12 @@ func (p AclPermissions) CanManageAccounts() bool {
        return false
    }
}

func (p AclPermissions) CanRequestRemove() bool {
    switch aclrecordproto.AclUserPermissions(p) {
    case aclrecordproto.AclUserPermissions_Guest:
        return false
    default:
        return true
    }
}

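CanRequestRemove is the predicate used to keep guest accounts from filing self-removal requests (see the executor test cases above and the permission-change validator further down). A hedged sketch of the kind of guard a caller performs; the function itself is illustrative:

```go
// Sketch: reject a self-removal request from an account whose permission
// level is not allowed to file one (currently only guests are blocked).
func validateRequestRemove(perm list.AclPermissions) error {
    if !perm.CanRequestRemove() {
        return list.ErrInsufficientPermissions
    }
    return nil
}
```
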
@ -61,6 +61,19 @@ type storage struct {
|
|||
}
|
||||
|
||||
func CreateStorage(ctx context.Context, root *consensusproto.RawRecordWithId, headStorage headstorage.HeadStorage, store anystore.DB) (Storage, error) {
|
||||
tx, err := store.WriteTx(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
storage, err := CreateStorageTx(tx.Context(), root, headStorage, store)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return nil, err
|
||||
}
|
||||
return storage, tx.Commit()
|
||||
}
|
||||
|
||||
func CreateStorageTx(ctx context.Context, root *consensusproto.RawRecordWithId, headStorage headstorage.HeadStorage, store anystore.DB) (Storage, error) {
|
||||
st := &storage{
|
||||
id: root.Id,
|
||||
store: store,
|
||||
|
@ -89,24 +102,18 @@ func CreateStorage(ctx context.Context, root *consensusproto.RawRecordWithId, he
|
|||
st.arena = &anyenc.Arena{}
|
||||
defer st.arena.Reset()
|
||||
doc := newStorageRecordValue(rec, st.arena)
|
||||
tx, err := st.store.WriteTx(ctx)
|
||||
err = st.recordsColl.Insert(ctx, doc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = st.recordsColl.Insert(tx.Context(), doc)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return nil, err
|
||||
}
|
||||
err = st.headStorage.UpdateEntryTx(tx.Context(), headstorage.HeadsUpdate{
|
||||
err = st.headStorage.UpdateEntryTx(ctx, headstorage.HeadsUpdate{
|
||||
Id: root.Id,
|
||||
Heads: []string{root.Id},
|
||||
})
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return nil, err
|
||||
}
|
||||
return st, tx.Commit()
|
||||
return st, nil
|
||||
}
|
||||
|
||||
func NewStorage(ctx context.Context, id string, headStorage headstorage.HeadStorage, store anystore.DB) (Storage, error) {
|
||||
|
@ -199,6 +206,13 @@ func (s *storage) AddAll(ctx context.Context, records []StorageRecord) error {
|
|||
if err != nil {
|
||||
return fmt.Errorf("failed to create write tx: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
_ = tx.Rollback()
|
||||
} else {
|
||||
err = tx.Commit()
|
||||
}
|
||||
}()
|
||||
vals := make([]*anyenc.Value, 0, len(records))
|
||||
for _, ch := range records {
|
||||
newVal := newStorageRecordValue(ch, arena)
|
||||
|
@ -206,20 +220,14 @@ func (s *storage) AddAll(ctx context.Context, records []StorageRecord) error {
|
|||
}
|
||||
err = s.recordsColl.Insert(tx.Context(), vals...)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return nil
|
||||
return err
|
||||
}
|
||||
head := records[len(records)-1].Id
|
||||
update := headstorage.HeadsUpdate{
|
||||
Id: s.id,
|
||||
Heads: []string{head},
|
||||
}
|
||||
err = s.headStorage.UpdateEntryTx(tx.Context(), update)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return err
|
||||
}
|
||||
return tx.Commit()
|
||||
return s.headStorage.UpdateEntryTx(tx.Context(), update)
|
||||
}
|
||||
|
||||
func (s *storage) Id() string {
|
||||
|
|
|
@@ -117,10 +117,34 @@ func (c *contentValidator) ValidatePermissionChange(ch *aclrecordproto.AclAccoun
    if err != nil {
        return err
    }
    _, exists := c.aclState.accountStates[mapKeyFromPubKey(chIdentity)]
    currentState, exists := c.aclState.accountStates[mapKeyFromPubKey(chIdentity)]
    if !exists {
        return ErrNoSuchAccount
    }
    if currentState.Permissions == AclPermissionsGuest {
        // it shouldn't be possible to change permission of guest user
        // it should be only possible to remove it with AccountRemove acl change
        return ErrInsufficientPermissions
    }
    if currentState.Permissions == AclPermissionsOwner {
        // it shouldn't be possible to change permission of owner
        return ErrInsufficientPermissions
    }
    if ch.Permissions == aclrecordproto.AclUserPermissions_Owner {
        // not supported
        // if we are going to support owner transfer, it should be done with a separate acl change so we can't have more than 1 owner at a time
        return ErrInsufficientPermissions
    }
    if ch.Permissions == aclrecordproto.AclUserPermissions_Guest && currentState.Permissions != AclPermissionsReader {
        // generally, it should be only possible to create guest user with AccountsAdd acl change
        // but in order to migrate the current guest users we allow to change permissions to guest from reader
        return ErrInsufficientPermissions
    }
    return
}

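Taken together these checks form a small transition policy: guests and the owner are frozen, nobody can be promoted to owner through a permission change, and the only way into guest is from reader (a migration path for existing guest-like accounts). A hedged data-style summary of those rules; the variable below is illustrative and only mirrors the conditions above:

```go
// Summary of ValidatePermissionChange as data: current permission, requested
// permission, and whether the validator lets the change through.
var permissionChangePolicy = []struct {
    from, to list.AclPermissions
    allowed  bool
}{
    {list.AclPermissionsReader, list.AclPermissionsWriter, true},
    {list.AclPermissionsReader, list.AclPermissionsGuest, true},  // migration path
    {list.AclPermissionsWriter, list.AclPermissionsGuest, false}, // guest only from reader
    {list.AclPermissionsGuest, list.AclPermissionsWriter, false}, // guests are frozen
    {list.AclPermissionsOwner, list.AclPermissionsAdmin, false},  // owner is frozen
    {list.AclPermissionsAdmin, list.AclPermissionsOwner, false},  // no owner transfer here
}
```
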
@@ -1,6 +1,7 @@
package syncacl

import (
    "github.com/anyproto/any-sync/commonspace/spacesyncproto"
    "github.com/anyproto/any-sync/commonspace/sync/objectsync/objectmessages"
    "github.com/anyproto/any-sync/consensus/consensusproto"
)
@@ -24,6 +25,10 @@ func (h *InnerHeadUpdate) MsgSize() uint64 {
    return size + uint64(len(h.head)) + uint64(len(h.root.Id)) + uint64(len(h.root.Payload))
}

func (h *InnerHeadUpdate) ObjectType() spacesyncproto.ObjectType {
    return spacesyncproto.ObjectType_Acl
}

func (h *InnerHeadUpdate) Prepare() error {
    logMsg := consensusproto.WrapHeadUpdate(&consensusproto.LogHeadUpdate{
        Head: h.head,

@@ -5,7 +5,6 @@
|
|||
//
|
||||
// mockgen -destination mock_syncacl/mock_syncacl.go github.com/anyproto/any-sync/commonspace/object/acl/syncacl SyncClient,SyncAcl
|
||||
//
|
||||
|
||||
// Package mock_syncacl is a generated GoMock package.
|
||||
package mock_syncacl
|
||||
|
||||
|
|
246
commonspace/object/keyvalue/keyvalue.go
Normal file
|
@@ -0,0 +1,246 @@
|
|||
package keyvalue
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
"github.com/anyproto/protobuf/proto"
|
||||
"go.uber.org/zap"
|
||||
"storj.io/drpc"
|
||||
|
||||
"github.com/anyproto/any-sync/accountservice"
|
||||
"github.com/anyproto/any-sync/app"
|
||||
"github.com/anyproto/any-sync/app/logger"
|
||||
"github.com/anyproto/any-sync/commonspace/object/acl/list"
|
||||
"github.com/anyproto/any-sync/commonspace/object/acl/syncacl"
|
||||
"github.com/anyproto/any-sync/commonspace/object/keyvalue/keyvaluestorage"
|
||||
"github.com/anyproto/any-sync/commonspace/object/keyvalue/keyvaluestorage/syncstorage"
|
||||
"github.com/anyproto/any-sync/commonspace/object/keyvalue/kvinterfaces"
|
||||
"github.com/anyproto/any-sync/commonspace/spacestate"
|
||||
"github.com/anyproto/any-sync/commonspace/spacestorage"
|
||||
"github.com/anyproto/any-sync/commonspace/spacesyncproto"
|
||||
"github.com/anyproto/any-sync/commonspace/sync"
|
||||
"github.com/anyproto/any-sync/commonspace/sync/objectsync/objectmessages"
|
||||
"github.com/anyproto/any-sync/net/peer"
|
||||
"github.com/anyproto/any-sync/net/rpc/rpcerr"
|
||||
"github.com/anyproto/any-sync/util/cidutil"
|
||||
)
|
||||
|
||||
var ErrUnexpectedMessageType = errors.New("unexpected message type")
|
||||
|
||||
var log = logger.NewNamed(kvinterfaces.CName)
|
||||
|
||||
type keyValueService struct {
|
||||
storageId string
|
||||
spaceId string
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
|
||||
limiter *concurrentLimiter
|
||||
defaultStore keyvaluestorage.Storage
|
||||
clientFactory spacesyncproto.ClientFactory
|
||||
}
|
||||
|
||||
func New() kvinterfaces.KeyValueService {
|
||||
return &keyValueService{}
|
||||
}
|
||||
|
||||
func (k *keyValueService) DefaultStore() keyvaluestorage.Storage {
|
||||
return k.defaultStore
|
||||
}
|
||||
|
||||
func (k *keyValueService) SyncWithPeer(p peer.Peer) (err error) {
|
||||
k.limiter.ScheduleRequest(k.ctx, p.Id(), func() {
|
||||
err = k.syncWithPeer(k.ctx, p)
|
||||
if err != nil {
|
||||
log.Error("failed to sync with peer", zap.String("peerId", p.Id()), zap.Error(err))
|
||||
}
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
func (k *keyValueService) syncWithPeer(ctx context.Context, p peer.Peer) (err error) {
|
||||
conn, err := p.AcquireDrpcConn(ctx)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer p.ReleaseDrpcConn(conn)
|
||||
var (
|
||||
client = k.clientFactory.Client(conn)
|
||||
rdiff = NewRemoteDiff(k.spaceId, client)
|
||||
diff = k.defaultStore.InnerStorage().Diff()
|
||||
)
|
||||
newIds, changedIds, theirChangedIds, removedIds, err := diff.CompareDiff(ctx, rdiff)
|
||||
err = rpcerr.Unwrap(err)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
innerStorage := k.defaultStore.InnerStorage()
|
||||
stream, err := client.StoreElements(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer stream.CloseSend()
|
||||
err = stream.Send(&spacesyncproto.StoreKeyValue{SpaceId: k.spaceId})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, id := range append(removedIds, changedIds...) {
|
||||
kv, err := innerStorage.GetKeyPeerId(ctx, id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = stream.Send(kv.Proto())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
for _, id := range append(theirChangedIds, newIds...) {
|
||||
kv := &spacesyncproto.StoreKeyValue{
|
||||
KeyPeerId: id,
|
||||
}
|
||||
err := stream.Send(kv)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
err = stream.Send(&spacesyncproto.StoreKeyValue{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var messages []*spacesyncproto.StoreKeyValue
|
||||
for {
|
||||
msg, err := stream.Recv()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if msg.KeyPeerId == "" {
|
||||
break
|
||||
}
|
||||
messages = append(messages, msg)
|
||||
}
|
||||
return k.defaultStore.SetRaw(ctx, messages...)
|
||||
}
|
||||
|
||||
func (k *keyValueService) HandleStoreDiffRequest(ctx context.Context, req *spacesyncproto.StoreDiffRequest) (resp *spacesyncproto.StoreDiffResponse, err error) {
|
||||
return HandleRangeRequest(ctx, k.defaultStore.InnerStorage().Diff(), req)
|
||||
}
|
||||
|
||||
func (k *keyValueService) HandleStoreElementsRequest(ctx context.Context, stream spacesyncproto.DRPCSpaceSync_StoreElementsStream) (err error) {
|
||||
var (
|
||||
messagesToSave []*spacesyncproto.StoreKeyValue
|
||||
messagesToSend []string
|
||||
)
|
||||
for {
|
||||
msg, err := stream.Recv()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if msg.KeyPeerId == "" {
|
||||
break
|
||||
}
|
||||
if msg.Value != nil {
|
||||
messagesToSave = append(messagesToSave, msg)
|
||||
} else {
|
||||
messagesToSend = append(messagesToSend, msg.KeyPeerId)
|
||||
}
|
||||
}
|
||||
innerStorage := k.defaultStore.InnerStorage()
|
||||
isError := false
|
||||
for _, msg := range messagesToSend {
|
||||
kv, err := innerStorage.GetKeyPeerId(ctx, msg)
|
||||
if err != nil {
|
||||
log.Warn("failed to get key value", zap.String("key", msg), zap.Error(err))
|
||||
continue
|
||||
}
|
||||
err = stream.Send(kv.Proto())
|
||||
if err != nil {
|
||||
log.Warn("failed to send key value", zap.String("key", msg), zap.Error(err))
|
||||
isError = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !isError {
|
||||
err = stream.Send(&spacesyncproto.StoreKeyValue{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return k.defaultStore.SetRaw(ctx, messagesToSave...)
|
||||
}
|
||||
|
||||
func (k *keyValueService) HandleMessage(ctx context.Context, headUpdate drpc.Message) (err error) {
|
||||
update, ok := headUpdate.(*objectmessages.HeadUpdate)
|
||||
if !ok {
|
||||
return ErrUnexpectedMessageType
|
||||
}
|
||||
keyValueMsg := &spacesyncproto.StoreKeyValues{}
|
||||
err = proto.Unmarshal(update.Bytes, keyValueMsg)
|
||||
if err != nil {
|
||||
objectmessages.FreeHeadUpdate(update)
|
||||
return err
|
||||
}
|
||||
objectmessages.FreeHeadUpdate(update)
|
||||
return k.defaultStore.SetRaw(ctx, keyValueMsg.KeyValues...)
|
||||
}
|
||||
|
||||
func (k *keyValueService) Init(a *app.App) (err error) {
|
||||
k.ctx, k.cancel = context.WithCancel(context.Background())
|
||||
spaceState := a.MustComponent(spacestate.CName).(*spacestate.SpaceState)
|
||||
k.spaceId = spaceState.SpaceId
|
||||
k.clientFactory = spacesyncproto.ClientFactoryFunc(spacesyncproto.NewDRPCSpaceSyncClient)
|
||||
k.limiter = newConcurrentLimiter()
|
||||
accountService := a.MustComponent(accountservice.CName).(accountservice.Service)
|
||||
aclList := a.MustComponent(syncacl.CName).(list.AclList)
|
||||
spaceStorage := a.MustComponent(spacestorage.CName).(spacestorage.SpaceStorage)
|
||||
syncService := a.MustComponent(sync.CName).(sync.SyncService)
|
||||
k.storageId, err = storageIdFromSpace(k.spaceId)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
indexer := a.Component(keyvaluestorage.IndexerCName).(keyvaluestorage.Indexer)
|
||||
if indexer == nil {
|
||||
indexer = keyvaluestorage.NoOpIndexer{}
|
||||
}
|
||||
syncClient := syncstorage.New(spaceState.SpaceId, syncService)
|
||||
k.defaultStore, err = keyvaluestorage.New(
|
||||
k.ctx,
|
||||
k.storageId,
|
||||
spaceStorage.AnyStore(),
|
||||
spaceStorage.HeadStorage(),
|
||||
accountService.Account(),
|
||||
syncClient,
|
||||
aclList,
|
||||
indexer)
|
||||
return
|
||||
}
|
||||
|
||||
func (k *keyValueService) Name() (name string) {
|
||||
return kvinterfaces.CName
|
||||
}
|
||||
|
||||
func (k *keyValueService) Run(ctx context.Context) (err error) {
|
||||
return k.defaultStore.Prepare()
|
||||
}
|
||||
|
||||
func (k *keyValueService) Close(ctx context.Context) (err error) {
|
||||
k.cancel()
|
||||
k.limiter.Close()
|
||||
return nil
|
||||
}
|
||||
|
||||
func storageIdFromSpace(spaceId string) (storageId string, err error) {
|
||||
header := &spacesyncproto.StorageHeader{
|
||||
SpaceId: spaceId,
|
||||
StorageName: "default",
|
||||
}
|
||||
data, err := proto.Marshal(header)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
cid, err := cidutil.NewCidFromBytes(data)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return cid, nil
|
||||
}
|
371
commonspace/object/keyvalue/keyvalue_test.go
Normal file
|
@@ -0,0 +1,371 @@
|
|||
package keyvalue
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
anystore "github.com/anyproto/any-store"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/anyproto/any-sync/commonspace/object/accountdata"
|
||||
"github.com/anyproto/any-sync/commonspace/object/acl/list"
|
||||
"github.com/anyproto/any-sync/commonspace/object/keyvalue/keyvaluestorage"
|
||||
"github.com/anyproto/any-sync/commonspace/object/keyvalue/keyvaluestorage/innerstorage"
|
||||
"github.com/anyproto/any-sync/commonspace/spacepayloads"
|
||||
"github.com/anyproto/any-sync/commonspace/spacestorage"
|
||||
"github.com/anyproto/any-sync/commonspace/spacesyncproto"
|
||||
"github.com/anyproto/any-sync/net/peer"
|
||||
"github.com/anyproto/any-sync/net/rpc/rpctest"
|
||||
"github.com/anyproto/any-sync/util/crypto"
|
||||
)
|
||||
|
||||
func TestKeyValueService(t *testing.T) {
|
||||
t.Run("different keys", func(t *testing.T) {
|
||||
fxClient, fxServer, serverPeer := prepareFixtures(t)
|
||||
fxClient.add(t, "key1", []byte("value1"))
|
||||
fxClient.add(t, "key2", []byte("value2"))
|
||||
fxServer.add(t, "key3", []byte("value3"))
|
||||
fxServer.add(t, "key4", []byte("value4"))
|
||||
err := fxClient.SyncWithPeer(serverPeer)
|
||||
require.NoError(t, err)
|
||||
fxClient.limiter.Close()
|
||||
fxClient.check(t, "key3", []byte("value3"))
|
||||
fxClient.check(t, "key4", []byte("value4"))
|
||||
fxServer.check(t, "key1", []byte("value1"))
|
||||
fxServer.check(t, "key2", []byte("value2"))
|
||||
})
|
||||
|
||||
t.Run("change same keys, different values", func(t *testing.T) {
|
||||
fxClient, fxServer, serverPeer := prepareFixtures(t)
|
||||
fxClient.add(t, "key1", []byte("value1"))
|
||||
fxServer.add(t, "key1", []byte("value2"))
|
||||
err := fxClient.SyncWithPeer(serverPeer)
|
||||
require.NoError(t, err)
|
||||
fxClient.limiter.Close()
|
||||
fxClient.check(t, "key1", []byte("value1"))
|
||||
fxClient.check(t, "key1", []byte("value2"))
|
||||
fxServer.check(t, "key1", []byte("value1"))
|
||||
fxServer.check(t, "key1", []byte("value2"))
|
||||
fxClient.add(t, "key1", []byte("value1-2"))
|
||||
fxServer.add(t, "key1", []byte("value2-2"))
|
||||
err = fxClient.SyncWithPeer(serverPeer)
|
||||
require.NoError(t, err)
|
||||
fxClient.limiter.Close()
|
||||
fxClient.check(t, "key1", []byte("value1-2"))
|
||||
fxClient.check(t, "key1", []byte("value2-2"))
|
||||
fxServer.check(t, "key1", []byte("value1-2"))
|
||||
fxServer.check(t, "key1", []byte("value2-2"))
|
||||
})
|
||||
|
||||
t.Run("random keys and values", func(t *testing.T) {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
diffEntries := 100
|
||||
overlappingEntries := 10
|
||||
fxClient, fxServer, serverPeer := prepareFixtures(t)
|
||||
numClientEntries := 5 + rand.Intn(diffEntries)
|
||||
numServerEntries := 5 + rand.Intn(diffEntries)
|
||||
allKeys := make(map[string]bool)
|
||||
for i := 0; i < numClientEntries; i++ {
|
||||
key := fmt.Sprintf("client-key-%d", i)
|
||||
value := []byte(fmt.Sprintf("client-value-%d", i))
|
||||
fxClient.add(t, key, value)
|
||||
allKeys[key] = true
|
||||
}
|
||||
for i := 0; i < numServerEntries; i++ {
|
||||
key := fmt.Sprintf("server-key-%d", i)
|
||||
value := []byte(fmt.Sprintf("server-value-%d", i))
|
||||
fxServer.add(t, key, value)
|
||||
allKeys[key] = true
|
||||
}
|
||||
numOverlappingKeys := 3 + rand.Intn(overlappingEntries)
|
||||
for i := 0; i < numOverlappingKeys; i++ {
|
||||
key := fmt.Sprintf("overlap-key-%d", i)
|
||||
clientValue := []byte(fmt.Sprintf("client-overlap-value-%d", i))
|
||||
serverValue := []byte(fmt.Sprintf("server-overlap-value-%d", i))
|
||||
fxClient.add(t, key, clientValue)
|
||||
fxServer.add(t, key, serverValue)
|
||||
allKeys[key] = true
|
||||
}
|
||||
err := fxClient.SyncWithPeer(serverPeer)
|
||||
require.NoError(t, err)
|
||||
fxClient.limiter.Close()
|
||||
|
||||
for key := range allKeys {
|
||||
if strings.HasPrefix(key, "client-key-") {
|
||||
i, _ := strconv.Atoi(strings.TrimPrefix(key, "client-key-"))
|
||||
value := []byte(fmt.Sprintf("client-value-%d", i))
|
||||
fxClient.check(t, key, value)
|
||||
fxServer.check(t, key, value)
|
||||
}
|
||||
if strings.HasPrefix(key, "server-key-") {
|
||||
i, _ := strconv.Atoi(strings.TrimPrefix(key, "server-key-"))
|
||||
value := []byte(fmt.Sprintf("server-value-%d", i))
|
||||
fxClient.check(t, key, value)
|
||||
fxServer.check(t, key, value)
|
||||
}
|
||||
}
|
||||
for i := 0; i < numOverlappingKeys; i++ {
|
||||
key := fmt.Sprintf("overlap-key-%d", i)
|
||||
clientValue := []byte(fmt.Sprintf("client-overlap-value-%d", i))
|
||||
serverValue := []byte(fmt.Sprintf("server-overlap-value-%d", i))
|
||||
|
||||
fxClient.check(t, key, clientValue)
|
||||
fxClient.check(t, key, serverValue)
|
||||
fxServer.check(t, key, clientValue)
|
||||
fxServer.check(t, key, serverValue)
|
||||
}
|
||||
foundClientKeys := make(map[string]bool)
|
||||
foundServerKeys := make(map[string]bool)
|
||||
err = fxClient.defaultStore.Iterate(context.Background(), func(decryptor keyvaluestorage.Decryptor, key string, values []innerstorage.KeyValue) (bool, error) {
|
||||
foundClientKeys[key] = true
|
||||
return true, nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
err = fxServer.defaultStore.Iterate(context.Background(), func(decryptor keyvaluestorage.Decryptor, key string, values []innerstorage.KeyValue) (bool, error) {
|
||||
foundServerKeys[key] = true
|
||||
return true, nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.True(t, mapEqual(allKeys, foundServerKeys), "expected the server to contain every key")
|
||||
require.True(t, mapEqual(foundClientKeys, foundServerKeys), "expected client and server to contain the same keys")
|
||||
})
|
||||
}
|
||||
|
||||
func TestKeyValueServiceIterate(t *testing.T) {
|
||||
t.Run("empty storage", func(t *testing.T) {
|
||||
fxClient, _, _ := prepareFixtures(t)
|
||||
var keys []string
|
||||
err := fxClient.defaultStore.Iterate(context.Background(), func(decryptor keyvaluestorage.Decryptor, key string, values []innerstorage.KeyValue) (bool, error) {
|
||||
keys = append(keys, key)
|
||||
return true, nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Empty(t, keys, "expected no keys in empty storage")
|
||||
})
|
||||
|
||||
t.Run("single key later value", func(t *testing.T) {
|
||||
fxClient, _, _ := prepareFixtures(t)
|
||||
err := fxClient.defaultStore.Set(context.Background(), "test-key", []byte("value1"))
|
||||
require.NoError(t, err)
|
||||
err = fxClient.defaultStore.Set(context.Background(), "test-key", []byte("value2"))
|
||||
require.NoError(t, err)
|
||||
var keys []string
|
||||
valueCount := 0
|
||||
err = fxClient.defaultStore.Iterate(context.Background(), func(decryptor keyvaluestorage.Decryptor, key string, values []innerstorage.KeyValue) (bool, error) {
|
||||
keys = append(keys, key)
|
||||
valueCount = len(values)
|
||||
|
||||
for _, kv := range values {
|
||||
val, err := decryptor(kv)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "value2", string(val))
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(keys), "expected one key")
|
||||
require.Equal(t, "test-key", keys[0], "expected key to be 'test-key'")
|
||||
require.Equal(t, 1, valueCount, "expected one value for key")
|
||||
})
|
||||
|
||||
t.Run("multiple keys", func(t *testing.T) {
|
||||
fxClient, _, _ := prepareFixtures(t)
|
||||
testKeys := []string{"key1", "key2", "key3"}
|
||||
for _, key := range testKeys {
|
||||
err := fxClient.defaultStore.Set(context.Background(), key, []byte("value-"+key))
|
||||
require.NoError(t, err)
|
||||
}
|
||||
var foundKeys []string
|
||||
err := fxClient.defaultStore.Iterate(context.Background(), func(decryptor keyvaluestorage.Decryptor, key string, values []innerstorage.KeyValue) (bool, error) {
|
||||
foundKeys = append(foundKeys, key)
|
||||
require.Equal(t, 1, len(values), "Expected one value for key: "+key)
|
||||
val, err := decryptor(values[0])
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "value-"+key, string(val), "Value doesn't match for key: "+key)
|
||||
|
||||
return true, nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
sort.Strings(foundKeys)
|
||||
sort.Strings(testKeys)
|
||||
require.Equal(t, testKeys, foundKeys, "Expected all keys to be found")
|
||||
})
|
||||
|
||||
t.Run("early termination", func(t *testing.T) {
|
||||
fxClient, _, _ := prepareFixtures(t)
|
||||
testKeys := []string{"key1", "key2", "key3", "key4", "key5"}
|
||||
for _, key := range testKeys {
|
||||
err := fxClient.defaultStore.Set(context.Background(), key, []byte("value-"+key))
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
var foundKeys []string
|
||||
err := fxClient.defaultStore.Iterate(context.Background(), func(decryptor keyvaluestorage.Decryptor, key string, values []innerstorage.KeyValue) (bool, error) {
|
||||
foundKeys = append(foundKeys, key)
|
||||
return len(foundKeys) < 2, nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, len(foundKeys), "expected to find exactly 2 keys before stopping")
|
||||
})
|
||||
|
||||
t.Run("error during iteration", func(t *testing.T) {
|
||||
fxClient, _, _ := prepareFixtures(t)
|
||||
|
||||
err := fxClient.defaultStore.Set(context.Background(), "test-key", []byte("test-value"))
|
||||
require.NoError(t, err)
|
||||
|
||||
expectedErr := context.Canceled
|
||||
err = fxClient.defaultStore.Iterate(context.Background(), func(decryptor keyvaluestorage.Decryptor, key string, values []innerstorage.KeyValue) (bool, error) {
|
||||
return false, expectedErr
|
||||
})
|
||||
require.Equal(t, expectedErr, err, "expected error to be propagated")
|
||||
})
|
||||
}
|
||||
|
||||
func prepareFixtures(t *testing.T) (fxClient *fixture, fxServer *fixture, serverPeer peer.Peer) {
|
||||
firstKeys, err := accountdata.NewRandom()
|
||||
require.NoError(t, err)
|
||||
secondKeys, err := accountdata.NewRandom()
|
||||
require.NoError(t, err)
|
||||
secondKeys.SignKey = firstKeys.SignKey
|
||||
payload := newStorageCreatePayload(t, firstKeys)
|
||||
fxClient = newFixture(t, firstKeys, payload)
|
||||
fxServer = newFixture(t, secondKeys, payload)
|
||||
serverConn, clientConn := rpctest.MultiConnPair(firstKeys.PeerId, secondKeys.PeerId)
|
||||
serverPeer, err = peer.NewPeer(serverConn, fxClient.server)
|
||||
require.NoError(t, err)
|
||||
_, err = peer.NewPeer(clientConn, fxServer.server)
|
||||
require.NoError(t, err)
|
||||
return
|
||||
}
|
||||
|
||||
func mapEqual[K comparable, V comparable](map1, map2 map[K]V) bool {
|
||||
if len(map1) != len(map2) {
|
||||
return false
|
||||
}
|
||||
for key, val1 := range map1 {
|
||||
if val2, ok := map2[key]; !ok || val1 != val2 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
var ctx = context.Background()
|
||||
|
||||
type noOpSyncClient struct{}
|
||||
|
||||
func (n noOpSyncClient) Broadcast(ctx context.Context, objectId string, keyValues ...innerstorage.KeyValue) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
type fixture struct {
|
||||
*keyValueService
|
||||
server *rpctest.TestServer
|
||||
}
|
||||
|
||||
func newFixture(t *testing.T, keys *accountdata.AccountKeys, spacePayload spacestorage.SpaceStorageCreatePayload) *fixture {
|
||||
storePath := filepath.Join(t.TempDir(), "store.db")
|
||||
anyStore, err := anystore.Open(ctx, storePath, nil)
|
||||
require.NoError(t, err)
|
||||
storage, err := spacestorage.Create(ctx, anyStore, spacePayload)
|
||||
require.NoError(t, err)
|
||||
aclStorage, err := storage.AclStorage()
|
||||
require.NoError(t, err)
|
||||
aclList, err := list.BuildAclListWithIdentity(keys, aclStorage, list.NoOpAcceptorVerifier{})
|
||||
require.NoError(t, err)
|
||||
storageId := "kv.storage"
|
||||
rpcHandler := rpctest.NewTestServer()
|
||||
defaultStorage, err := keyvaluestorage.New(ctx,
|
||||
storageId,
|
||||
anyStore,
|
||||
storage.HeadStorage(),
|
||||
keys,
|
||||
noOpSyncClient{},
|
||||
aclList,
|
||||
keyvaluestorage.NoOpIndexer{})
|
||||
require.NoError(t, err)
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
service := &keyValueService{
|
||||
spaceId: storage.Id(),
|
||||
storageId: storageId,
|
||||
limiter: newConcurrentLimiter(),
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
clientFactory: spacesyncproto.ClientFactoryFunc(spacesyncproto.NewDRPCSpaceSyncClient),
|
||||
defaultStore: defaultStorage,
|
||||
}
|
||||
require.NoError(t, spacesyncproto.DRPCRegisterSpaceSync(rpcHandler, &testServer{service: service, t: t}))
|
||||
return &fixture{
|
||||
keyValueService: service,
|
||||
server: rpcHandler,
|
||||
}
|
||||
}
|
||||
|
||||
func (fx *fixture) add(t *testing.T, key string, value []byte) {
|
||||
err := fx.defaultStore.Set(ctx, key, value)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func (fx *fixture) check(t *testing.T, key string, value []byte) (isFound bool) {
|
||||
err := fx.defaultStore.GetAll(ctx, key, func(decryptor keyvaluestorage.Decryptor, values []innerstorage.KeyValue) error {
|
||||
for _, v := range values {
|
||||
decryptedValue, err := decryptor(v)
|
||||
require.NoError(t, err)
|
||||
if bytes.Equal(value, decryptedValue) {
|
||||
isFound = true
|
||||
break
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
return
|
||||
}
|
||||
|
||||
func newStorageCreatePayload(t *testing.T, keys *accountdata.AccountKeys) spacestorage.SpaceStorageCreatePayload {
|
||||
masterKey, _, err := crypto.GenerateRandomEd25519KeyPair()
|
||||
require.NoError(t, err)
|
||||
metaKey, _, err := crypto.GenerateRandomEd25519KeyPair()
|
||||
require.NoError(t, err)
|
||||
readKey := crypto.NewAES()
|
||||
meta := []byte("account")
|
||||
payload := spacepayloads.SpaceCreatePayload{
|
||||
SigningKey: keys.SignKey,
|
||||
SpaceType: "space",
|
||||
ReplicationKey: 10,
|
||||
SpacePayload: nil,
|
||||
MasterKey: masterKey,
|
||||
ReadKey: readKey,
|
||||
MetadataKey: metaKey,
|
||||
Metadata: meta,
|
||||
}
|
||||
createSpace, err := spacepayloads.StoragePayloadForSpaceCreate(payload)
|
||||
require.NoError(t, err)
|
||||
return createSpace
|
||||
}
|
||||
|
||||
type testServer struct {
|
||||
spacesyncproto.DRPCSpaceSyncUnimplementedServer
|
||||
service *keyValueService
|
||||
t *testing.T
|
||||
}
|
||||
|
||||
func (t *testServer) StoreDiff(ctx context.Context, req *spacesyncproto.StoreDiffRequest) (*spacesyncproto.StoreDiffResponse, error) {
|
||||
return t.service.HandleStoreDiffRequest(ctx, req)
|
||||
}
|
||||
|
||||
func (t *testServer) StoreElements(stream spacesyncproto.DRPCSpaceSync_StoreElementsStream) error {
|
||||
msg, err := stream.Recv()
|
||||
require.NoError(t.t, err)
|
||||
require.NotEmpty(t.t, msg.SpaceId)
|
||||
return t.service.HandleStoreElementsRequest(ctx, stream)
|
||||
}
|
|
@@ -0,0 +1,92 @@
|
|||
package innerstorage
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/anyproto/any-store/anyenc"
|
||||
|
||||
"github.com/anyproto/any-sync/commonspace/spacesyncproto"
|
||||
"github.com/anyproto/any-sync/util/crypto"
|
||||
)
|
||||
|
||||
var ErrInvalidSignature = errors.New("invalid signature")
|
||||
|
||||
type KeyValue struct {
|
||||
KeyPeerId string
|
||||
ReadKeyId string
|
||||
Key string
|
||||
Value Value
|
||||
TimestampMilli int
|
||||
Identity string
|
||||
PeerId string
|
||||
AclId string
|
||||
}
|
||||
|
||||
type Value struct {
|
||||
Value []byte
|
||||
PeerSignature []byte
|
||||
IdentitySignature []byte
|
||||
}
|
||||
|
||||
func KeyValueFromProto(proto *spacesyncproto.StoreKeyValue, verify bool) (kv KeyValue, err error) {
|
||||
kv.KeyPeerId = proto.KeyPeerId
|
||||
kv.Value.Value = proto.Value
|
||||
kv.Value.PeerSignature = proto.PeerSignature
|
||||
kv.Value.IdentitySignature = proto.IdentitySignature
|
||||
innerValue := &spacesyncproto.StoreKeyInner{}
|
||||
if err = innerValue.Unmarshal(proto.Value); err != nil {
|
||||
return kv, err
|
||||
}
|
||||
kv.TimestampMilli = int(innerValue.TimestampMicro)
|
||||
identity, err := crypto.UnmarshalEd25519PublicKeyProto(innerValue.Identity)
|
||||
if err != nil {
|
||||
return kv, err
|
||||
}
|
||||
peerId, err := crypto.UnmarshalEd25519PublicKeyProto(innerValue.Peer)
|
||||
if err != nil {
|
||||
return kv, err
|
||||
}
|
||||
kv.Identity = identity.Account()
|
||||
kv.PeerId = peerId.PeerId()
|
||||
kv.Key = innerValue.Key
|
||||
kv.AclId = innerValue.AclHeadId
|
||||
// TODO: check that key-peerId is equal to key+peerId?
|
||||
if verify {
|
||||
if verify, _ = identity.Verify(proto.Value, proto.IdentitySignature); !verify {
|
||||
return kv, ErrInvalidSignature
|
||||
}
|
||||
if verify, _ = peerId.Verify(proto.Value, proto.PeerSignature); !verify {
|
||||
return kv, ErrInvalidSignature
|
||||
}
|
||||
}
|
||||
return kv, nil
|
||||
}
|
||||
|
||||
func (v Value) AnyEnc(a *anyenc.Arena) *anyenc.Value {
|
||||
obj := a.NewObject()
|
||||
obj.Set("v", a.NewBinary(v.Value))
|
||||
obj.Set("p", a.NewBinary(v.PeerSignature))
|
||||
obj.Set("i", a.NewBinary(v.IdentitySignature))
|
||||
return obj
|
||||
}
|
||||
|
||||
func (kv KeyValue) AnyEnc(a *anyenc.Arena) *anyenc.Value {
|
||||
obj := a.NewObject()
|
||||
obj.Set("id", a.NewString(kv.KeyPeerId))
|
||||
obj.Set("k", a.NewString(kv.Key))
|
||||
obj.Set("r", a.NewString(kv.ReadKeyId))
|
||||
obj.Set("v", kv.Value.AnyEnc(a))
|
||||
obj.Set("t", a.NewNumberInt(kv.TimestampMilli))
|
||||
obj.Set("i", a.NewString(kv.Identity))
|
||||
obj.Set("p", a.NewString(kv.PeerId))
|
||||
return obj
|
||||
}
|
||||
|
||||
func (kv KeyValue) Proto() *spacesyncproto.StoreKeyValue {
|
||||
return &spacesyncproto.StoreKeyValue{
|
||||
KeyPeerId: kv.KeyPeerId,
|
||||
Value: kv.Value.Value,
|
||||
PeerSignature: kv.Value.PeerSignature,
|
||||
IdentitySignature: kv.Value.IdentitySignature,
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,249 @@
|
|||
package innerstorage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"strings"
|
||||
|
||||
anystore "github.com/anyproto/any-store"
|
||||
"github.com/anyproto/any-store/anyenc"
|
||||
"github.com/anyproto/any-store/query"
|
||||
|
||||
"github.com/anyproto/any-sync/app/ldiff"
|
||||
"github.com/anyproto/any-sync/commonspace/headsync/headstorage"
|
||||
)
|
||||
|
||||
var (
|
||||
parserPool = &anyenc.ParserPool{}
|
||||
arenaPool = &anyenc.ArenaPool{}
|
||||
)
|
||||
|
||||
type KeyValueStorage interface {
|
||||
Set(ctx context.Context, keyValues ...KeyValue) (err error)
|
||||
Diff() ldiff.CompareDiff
|
||||
GetKeyPeerId(ctx context.Context, keyPeerId string) (keyValue KeyValue, err error)
|
||||
IterateValues(context.Context, func(kv KeyValue) (bool, error)) (err error)
|
||||
IteratePrefix(context.Context, string, func(kv KeyValue) error) (err error)
|
||||
}
|
||||
|
||||
type storage struct {
|
||||
diff ldiff.CompareDiff
|
||||
headStorage headstorage.HeadStorage
|
||||
collection anystore.Collection
|
||||
store anystore.DB
|
||||
storageName string
|
||||
}
|
||||
|
||||
func New(ctx context.Context, storageName string, headStorage headstorage.HeadStorage, store anystore.DB) (kv KeyValueStorage, err error) {
|
||||
collection, err := store.Collection(ctx, storageName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tx, err := store.WriteTx(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
_ = tx.Rollback()
|
||||
} else {
|
||||
err = tx.Commit()
|
||||
}
|
||||
}()
|
||||
storage := &storage{
|
||||
storageName: storageName,
|
||||
headStorage: headStorage,
|
||||
collection: collection,
|
||||
store: store,
|
||||
diff: ldiff.New(32, 256).(ldiff.CompareDiff),
|
||||
}
|
||||
iter, err := storage.collection.Find(nil).Iter(ctx)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
_ = iter.Close()
|
||||
}()
|
||||
var (
|
||||
doc anystore.Doc
|
||||
elements []ldiff.Element
|
||||
)
|
||||
for iter.Next() {
|
||||
if doc, err = iter.Doc(); err != nil {
|
||||
return
|
||||
}
|
||||
elements = append(elements, anyEncToElement(doc.Value()))
|
||||
}
|
||||
storage.diff.Set(elements...)
|
||||
hash := storage.diff.Hash()
|
||||
err = headStorage.UpdateEntryTx(tx.Context(), headstorage.HeadsUpdate{
|
||||
Id: storageName,
|
||||
Heads: []string{hash},
|
||||
})
|
||||
return storage, err
|
||||
}
|
||||
|
||||
func (s *storage) Diff() ldiff.CompareDiff {
|
||||
return s.diff
|
||||
}
|
||||
|
||||
func (s *storage) GetKeyPeerId(ctx context.Context, keyPeerId string) (value KeyValue, err error) {
|
||||
doc, err := s.collection.FindId(ctx, keyPeerId)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return s.keyValueFromDoc(doc), nil
|
||||
}
|
||||
|
||||
func (s *storage) IterateValues(ctx context.Context, iterFunc func(kv KeyValue) (bool, error)) (err error) {
|
||||
iter, err := s.collection.Find(nil).Iter(ctx)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
_ = iter.Close()
|
||||
}()
|
||||
var doc anystore.Doc
|
||||
for iter.Next() {
|
||||
if doc, err = iter.Doc(); err != nil {
|
||||
return
|
||||
}
|
||||
continueIteration, err := iterFunc(s.keyValueFromDoc(doc))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !continueIteration {
|
||||
break
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *storage) IteratePrefix(ctx context.Context, prefix string, iterFunc func(kv KeyValue) error) (err error) {
|
||||
filter := query.Key{Path: []string{"id"}, Filter: query.NewComp(query.CompOpGte, prefix)}
|
||||
qry := s.collection.Find(filter).Sort("id")
|
||||
iter, err := qry.Iter(ctx)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
_ = iter.Close()
|
||||
}()
|
||||
var doc anystore.Doc
|
||||
for iter.Next() {
|
||||
if doc, err = iter.Doc(); err != nil {
|
||||
return
|
||||
}
|
||||
if !strings.Contains(doc.Value().GetString("id"), prefix) {
|
||||
break
|
||||
}
|
||||
err := iterFunc(s.keyValueFromDoc(doc))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *storage) keyValueFromDoc(doc anystore.Doc) KeyValue {
|
||||
valueObj := doc.Value().GetObject("v")
|
||||
value := Value{
|
||||
Value: valueObj.Get("v").GetBytes(),
|
||||
PeerSignature: valueObj.Get("p").GetBytes(),
|
||||
IdentitySignature: valueObj.Get("i").GetBytes(),
|
||||
}
|
||||
return KeyValue{
|
||||
KeyPeerId: doc.Value().GetString("id"),
|
||||
ReadKeyId: doc.Value().GetString("r"),
|
||||
Value: value,
|
||||
TimestampMilli: doc.Value().GetInt("t"),
|
||||
Identity: doc.Value().GetString("i"),
|
||||
PeerId: doc.Value().GetString("p"),
|
||||
Key: doc.Value().GetString("k"),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *storage) init(ctx context.Context) (err error) {
|
||||
s.diff = ldiff.New(32, 256).(ldiff.CompareDiff)
|
||||
iter, err := s.collection.Find(nil).Iter(ctx)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
_ = iter.Close()
|
||||
}()
|
||||
var doc anystore.Doc
|
||||
var elements []ldiff.Element
|
||||
for iter.Next() {
|
||||
if doc, err = iter.Doc(); err != nil {
|
||||
return
|
||||
}
|
||||
elements = append(elements, anyEncToElement(doc.Value()))
|
||||
}
|
||||
s.diff.Set(elements...)
|
||||
return
|
||||
}
|
||||
|
||||
func (s *storage) Set(ctx context.Context, values ...KeyValue) (err error) {
|
||||
tx, err := s.collection.WriteTx(ctx)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
_ = tx.Rollback()
|
||||
} else {
|
||||
err = tx.Commit()
|
||||
}
|
||||
}()
|
||||
ctx = tx.Context()
|
||||
elements, err := s.updateValues(ctx, values...)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
s.diff.Set(elements...)
|
||||
err = s.headStorage.UpdateEntryTx(ctx, headstorage.HeadsUpdate{
|
||||
Id: s.storageName,
|
||||
Heads: []string{s.diff.Hash()},
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (s *storage) updateValues(ctx context.Context, values ...KeyValue) (elements []ldiff.Element, err error) {
|
||||
parser := parserPool.Get()
|
||||
defer parserPool.Put(parser)
|
||||
arena := arenaPool.Get()
|
||||
defer arenaPool.Put(arena)
|
||||
|
||||
elements = make([]ldiff.Element, 0, len(values))
|
||||
var doc anystore.Doc
|
||||
for _, value := range values {
|
||||
doc, err = s.collection.FindIdWithParser(ctx, parser, value.KeyPeerId)
|
||||
isNotFound := errors.Is(err, anystore.ErrDocNotFound)
|
||||
if err != nil && !isNotFound {
|
||||
return
|
||||
}
|
||||
if !isNotFound {
|
||||
if doc.Value().GetInt("t") >= value.TimestampMilli {
|
||||
continue
|
||||
}
|
||||
}
|
||||
arena.Reset()
|
||||
val := value.AnyEnc(arena)
|
||||
if err = s.collection.UpsertOne(ctx, val); err != nil {
|
||||
return
|
||||
}
|
||||
elements = append(elements, anyEncToElement(val))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func anyEncToElement(val *anyenc.Value) ldiff.Element {
|
||||
byteRepr := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(byteRepr, uint64(val.GetInt("t")))
|
||||
return ldiff.Element{
|
||||
Id: val.GetString("id"),
|
||||
Head: string(byteRepr),
|
||||
}
|
||||
}
|
|
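anyEncToElement stores the timestamp as a big-endian uint64 rendered as a string, so the byte-wise comparison performed later in this change (el.Head >= string(s.byteRepr) in SetRaw) agrees with numeric ordering. A small self-contained sketch of that property, illustrative only and not part of the diff:

package main

import (
	"encoding/binary"
	"fmt"
)

// headFor mirrors the Head encoding used by anyEncToElement above.
func headFor(ts uint64) string {
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, ts)
	return string(b)
}

func main() {
	older, newer := headFor(1700000000000), headFor(1700000000001)
	// Fixed-width big-endian encoding preserves numeric order under string comparison.
	fmt.Println(older < newer) // true
}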
@@ -0,0 +1,144 @@
|
|||
// Code generated by MockGen. DO NOT EDIT.
|
||||
// Source: github.com/anyproto/any-sync/commonspace/object/keyvalue/keyvaluestorage (interfaces: Storage)
|
||||
//
|
||||
// Generated by this command:
|
||||
//
|
||||
// mockgen -destination mock_keyvaluestorage/mock_keyvaluestorage.go github.com/anyproto/any-sync/commonspace/object/keyvalue/keyvaluestorage Storage
|
||||
//
|
||||
// Package mock_keyvaluestorage is a generated GoMock package.
|
||||
package mock_keyvaluestorage
|
||||
|
||||
import (
|
||||
context "context"
|
||||
reflect "reflect"
|
||||
|
||||
innerstorage "github.com/anyproto/any-sync/commonspace/object/keyvalue/keyvaluestorage/innerstorage"
|
||||
spacesyncproto "github.com/anyproto/any-sync/commonspace/spacesyncproto"
|
||||
gomock "go.uber.org/mock/gomock"
|
||||
)
|
||||
|
||||
// MockStorage is a mock of Storage interface.
|
||||
type MockStorage struct {
|
||||
ctrl *gomock.Controller
|
||||
recorder *MockStorageMockRecorder
|
||||
}
|
||||
|
||||
// MockStorageMockRecorder is the mock recorder for MockStorage.
|
||||
type MockStorageMockRecorder struct {
|
||||
mock *MockStorage
|
||||
}
|
||||
|
||||
// NewMockStorage creates a new mock instance.
|
||||
func NewMockStorage(ctrl *gomock.Controller) *MockStorage {
|
||||
mock := &MockStorage{ctrl: ctrl}
|
||||
mock.recorder = &MockStorageMockRecorder{mock}
|
||||
return mock
|
||||
}
|
||||
|
||||
// EXPECT returns an object that allows the caller to indicate expected use.
|
||||
func (m *MockStorage) EXPECT() *MockStorageMockRecorder {
|
||||
return m.recorder
|
||||
}
|
||||
|
||||
// GetAll mocks base method.
|
||||
func (m *MockStorage) GetAll(arg0 context.Context, arg1 string, arg2 func(func(innerstorage.KeyValue) ([]byte, error), []innerstorage.KeyValue) error) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetAll", arg0, arg1, arg2)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// GetAll indicates an expected call of GetAll.
|
||||
func (mr *MockStorageMockRecorder) GetAll(arg0, arg1, arg2 any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAll", reflect.TypeOf((*MockStorage)(nil).GetAll), arg0, arg1, arg2)
|
||||
}
|
||||
|
||||
// Id mocks base method.
|
||||
func (m *MockStorage) Id() string {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "Id")
|
||||
ret0, _ := ret[0].(string)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// Id indicates an expected call of Id.
|
||||
func (mr *MockStorageMockRecorder) Id() *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Id", reflect.TypeOf((*MockStorage)(nil).Id))
|
||||
}
|
||||
|
||||
// InnerStorage mocks base method.
|
||||
func (m *MockStorage) InnerStorage() innerstorage.KeyValueStorage {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "InnerStorage")
|
||||
ret0, _ := ret[0].(innerstorage.KeyValueStorage)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// InnerStorage indicates an expected call of InnerStorage.
|
||||
func (mr *MockStorageMockRecorder) InnerStorage() *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InnerStorage", reflect.TypeOf((*MockStorage)(nil).InnerStorage))
|
||||
}
|
||||
|
||||
// Iterate mocks base method.
|
||||
func (m *MockStorage) Iterate(arg0 context.Context, arg1 func(func(innerstorage.KeyValue) ([]byte, error), string, []innerstorage.KeyValue) (bool, error)) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "Iterate", arg0, arg1)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// Iterate indicates an expected call of Iterate.
|
||||
func (mr *MockStorageMockRecorder) Iterate(arg0, arg1 any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Iterate", reflect.TypeOf((*MockStorage)(nil).Iterate), arg0, arg1)
|
||||
}
|
||||
|
||||
// Prepare mocks base method.
|
||||
func (m *MockStorage) Prepare() error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "Prepare")
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// Prepare indicates an expected call of Prepare.
|
||||
func (mr *MockStorageMockRecorder) Prepare() *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Prepare", reflect.TypeOf((*MockStorage)(nil).Prepare))
|
||||
}
|
||||
|
||||
// Set mocks base method.
|
||||
func (m *MockStorage) Set(arg0 context.Context, arg1 string, arg2 []byte) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "Set", arg0, arg1, arg2)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// Set indicates an expected call of Set.
|
||||
func (mr *MockStorageMockRecorder) Set(arg0, arg1, arg2 any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Set", reflect.TypeOf((*MockStorage)(nil).Set), arg0, arg1, arg2)
|
||||
}
|
||||
|
||||
// SetRaw mocks base method.
|
||||
func (m *MockStorage) SetRaw(arg0 context.Context, arg1 ...*spacesyncproto.StoreKeyValue) error {
|
||||
m.ctrl.T.Helper()
|
||||
varargs := []any{arg0}
|
||||
for _, a := range arg1 {
|
||||
varargs = append(varargs, a)
|
||||
}
|
||||
ret := m.ctrl.Call(m, "SetRaw", varargs...)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// SetRaw indicates an expected call of SetRaw.
|
||||
func (mr *MockStorageMockRecorder) SetRaw(arg0 any, arg1 ...any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
varargs := append([]any{arg0}, arg1...)
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetRaw", reflect.TypeOf((*MockStorage)(nil).SetRaw), varargs...)
|
||||
}
|
368
commonspace/object/keyvalue/keyvaluestorage/storage.go
Normal file
|
@@ -0,0 +1,368 @@
|
|||
//go:generate mockgen -destination mock_keyvaluestorage/mock_keyvaluestorage.go github.com/anyproto/any-sync/commonspace/object/keyvalue/keyvaluestorage Storage
|
||||
package keyvaluestorage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
anystore "github.com/anyproto/any-store"
|
||||
"github.com/anyproto/protobuf/proto"
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/anyproto/any-sync/app"
|
||||
"github.com/anyproto/any-sync/app/logger"
|
||||
"github.com/anyproto/any-sync/commonspace/headsync/headstorage"
|
||||
"github.com/anyproto/any-sync/commonspace/object/accountdata"
|
||||
"github.com/anyproto/any-sync/commonspace/object/acl/list"
|
||||
"github.com/anyproto/any-sync/commonspace/object/keyvalue/keyvaluestorage/innerstorage"
|
||||
"github.com/anyproto/any-sync/commonspace/object/keyvalue/keyvaluestorage/syncstorage"
|
||||
"github.com/anyproto/any-sync/commonspace/spacesyncproto"
|
||||
"github.com/anyproto/any-sync/util/crypto"
|
||||
"github.com/anyproto/any-sync/util/slice"
|
||||
)
|
||||
|
||||
var log = logger.NewNamed("common.keyvalue.keyvaluestorage")
|
||||
|
||||
const IndexerCName = "common.keyvalue.indexer"
|
||||
|
||||
type Indexer interface {
|
||||
app.Component
|
||||
Index(decryptor Decryptor, keyValue ...innerstorage.KeyValue) error
|
||||
}
|
||||
|
||||
type Decryptor = func(kv innerstorage.KeyValue) (value []byte, err error)
|
||||
|
||||
type NoOpIndexer struct{}
|
||||
|
||||
func (n NoOpIndexer) Init(a *app.App) (err error) {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n NoOpIndexer) Name() (name string) {
|
||||
return IndexerCName
|
||||
}
|
||||
|
||||
func (n NoOpIndexer) Index(decryptor Decryptor, keyValue ...innerstorage.KeyValue) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
type Storage interface {
|
||||
Id() string
|
||||
Prepare() error
|
||||
Set(ctx context.Context, key string, value []byte) error
|
||||
SetRaw(ctx context.Context, keyValue ...*spacesyncproto.StoreKeyValue) error
|
||||
GetAll(ctx context.Context, key string, get func(decryptor Decryptor, values []innerstorage.KeyValue) error) error
|
||||
Iterate(ctx context.Context, f func(decryptor Decryptor, key string, values []innerstorage.KeyValue) (bool, error)) error
|
||||
InnerStorage() innerstorage.KeyValueStorage
|
||||
}
|
||||
|
||||
type storage struct {
|
||||
inner innerstorage.KeyValueStorage
|
||||
keys *accountdata.AccountKeys
|
||||
aclList list.AclList
|
||||
syncClient syncstorage.SyncClient
|
||||
indexer Indexer
|
||||
storageId string
|
||||
byteRepr []byte
|
||||
readKeys map[string]crypto.SymKey
|
||||
currentReadKey crypto.SymKey
|
||||
mx sync.Mutex
|
||||
}
|
||||
|
||||
func New(
|
||||
ctx context.Context,
|
||||
storageId string,
|
||||
store anystore.DB,
|
||||
headStorage headstorage.HeadStorage,
|
||||
keys *accountdata.AccountKeys,
|
||||
syncClient syncstorage.SyncClient,
|
||||
aclList list.AclList,
|
||||
indexer Indexer,
|
||||
) (Storage, error) {
|
||||
inner, err := innerstorage.New(ctx, storageId, headStorage, store)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s := &storage{
|
||||
inner: inner,
|
||||
keys: keys,
|
||||
storageId: storageId,
|
||||
aclList: aclList,
|
||||
indexer: indexer,
|
||||
syncClient: syncClient,
|
||||
byteRepr: make([]byte, 8),
|
||||
readKeys: make(map[string]crypto.SymKey),
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
func (s *storage) Prepare() error {
|
||||
s.aclList.RLock()
|
||||
defer s.aclList.RUnlock()
|
||||
return s.readKeysFromAclState(s.aclList.AclState())
|
||||
}
|
||||
|
||||
func (s *storage) Id() string {
|
||||
return s.storageId
|
||||
}
|
||||
|
||||
func (s *storage) Set(ctx context.Context, key string, value []byte) error {
|
||||
s.mx.Lock()
|
||||
defer s.mx.Unlock()
|
||||
s.aclList.RLock()
|
||||
headId := s.aclList.Head().Id
|
||||
state := s.aclList.AclState()
|
||||
if !s.aclList.AclState().Permissions(state.Identity()).CanWrite() {
|
||||
s.aclList.RUnlock()
|
||||
return list.ErrInsufficientPermissions
|
||||
}
|
||||
readKeyId := state.CurrentReadKeyId()
|
||||
err := s.readKeysFromAclState(state)
|
||||
if err != nil {
|
||||
s.aclList.RUnlock()
|
||||
return err
|
||||
}
|
||||
s.aclList.RUnlock()
|
||||
value, err = s.currentReadKey.Encrypt(value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
peerIdKey := s.keys.PeerKey
|
||||
identityKey := s.keys.SignKey
|
||||
protoPeerKey, err := peerIdKey.GetPublic().Marshall()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
protoIdentityKey, err := identityKey.GetPublic().Marshall()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
timestampMicro := time.Now().UnixMicro()
|
||||
inner := spacesyncproto.StoreKeyInner{
|
||||
Peer: protoPeerKey,
|
||||
Identity: protoIdentityKey,
|
||||
Value: value,
|
||||
TimestampMicro: timestampMicro,
|
||||
AclHeadId: headId,
|
||||
Key: key,
|
||||
}
|
||||
innerBytes, err := inner.Marshal()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
peerSig, err := peerIdKey.Sign(innerBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
identitySig, err := identityKey.Sign(innerBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
keyPeerId := key + "-" + peerIdKey.GetPublic().PeerId()
|
||||
keyValue := innerstorage.KeyValue{
|
||||
KeyPeerId: keyPeerId,
|
||||
Key: key,
|
||||
TimestampMilli: int(timestampMicro),
|
||||
Identity: identityKey.GetPublic().Account(),
|
||||
PeerId: peerIdKey.GetPublic().PeerId(),
|
||||
AclId: headId,
|
||||
ReadKeyId: readKeyId,
|
||||
Value: innerstorage.Value{
|
||||
Value: innerBytes,
|
||||
PeerSignature: peerSig,
|
||||
IdentitySignature: identitySig,
|
||||
},
|
||||
}
|
||||
err = s.inner.Set(ctx, keyValue)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
indexErr := s.indexer.Index(s.decrypt, keyValue)
|
||||
if indexErr != nil {
|
||||
log.Warn("failed to index for key", zap.String("key", key), zap.Error(indexErr))
|
||||
}
|
||||
sendErr := s.syncClient.Broadcast(ctx, s.storageId, keyValue)
|
||||
if sendErr != nil {
|
||||
log.Warn("failed to send key value", zap.String("key", key), zap.Error(sendErr))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *storage) SetRaw(ctx context.Context, keyValue ...*spacesyncproto.StoreKeyValue) (err error) {
|
||||
if len(keyValue) == 0 {
|
||||
return nil
|
||||
}
|
||||
s.mx.Lock()
|
||||
defer s.mx.Unlock()
|
||||
keyValues := make([]innerstorage.KeyValue, 0, len(keyValue))
|
||||
for _, kv := range keyValue {
|
||||
innerKv, err := innerstorage.KeyValueFromProto(kv, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
keyValues = append(keyValues, innerKv)
|
||||
}
|
||||
s.aclList.RLock()
|
||||
state := s.aclList.AclState()
|
||||
err = s.readKeysFromAclState(state)
|
||||
if err != nil {
|
||||
s.aclList.RUnlock()
|
||||
return err
|
||||
}
|
||||
for i := range keyValues {
|
||||
el, err := s.inner.Diff().Element(keyValues[i].KeyPeerId)
|
||||
if err == nil {
|
||||
binary.BigEndian.PutUint64(s.byteRepr, uint64(keyValues[i].TimestampMilli))
|
||||
if el.Head >= string(s.byteRepr) {
|
||||
keyValues[i].KeyPeerId = ""
|
||||
continue
|
||||
}
|
||||
}
|
||||
keyValues[i].ReadKeyId, err = state.ReadKeyForAclId(keyValues[i].AclId)
|
||||
if err != nil {
|
||||
keyValues[i].KeyPeerId = ""
|
||||
continue
|
||||
}
|
||||
}
|
||||
s.aclList.RUnlock()
|
||||
keyValues = slice.DiscardFromSlice(keyValues, func(value innerstorage.KeyValue) bool {
|
||||
return value.KeyPeerId == ""
|
||||
})
|
||||
if len(keyValues) == 0 {
|
||||
return nil
|
||||
}
|
||||
err = s.inner.Set(ctx, keyValues...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sendErr := s.syncClient.Broadcast(ctx, s.storageId, keyValues...)
|
||||
if sendErr != nil {
|
||||
log.Warn("failed to send key values", zap.Error(sendErr))
|
||||
}
|
||||
indexErr := s.indexer.Index(s.decrypt, keyValues...)
|
||||
if indexErr != nil {
|
||||
log.Warn("failed to index for keys", zap.Error(indexErr))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *storage) GetAll(ctx context.Context, key string, get func(decryptor Decryptor, values []innerstorage.KeyValue) error) (err error) {
|
||||
var values []innerstorage.KeyValue
|
||||
err = s.inner.IteratePrefix(ctx, key, func(kv innerstorage.KeyValue) error {
|
||||
bytes := make([]byte, len(kv.Value.Value))
|
||||
copy(bytes, kv.Value.Value)
|
||||
kv.Value.Value = bytes
|
||||
values = append(values, kv)
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.mx.Lock()
|
||||
defer s.mx.Unlock()
|
||||
return get(s.decrypt, values)
|
||||
}
|
||||
|
||||
func (s *storage) InnerStorage() innerstorage.KeyValueStorage {
|
||||
return s.inner
|
||||
}
|
||||
|
||||
func (s *storage) readKeysFromAclState(state *list.AclState) (err error) {
|
||||
if len(s.readKeys) == len(state.Keys()) {
|
||||
return nil
|
||||
}
|
||||
if state.AccountKey() == nil || !state.HadReadPermissions(state.AccountKey().GetPublic()) {
|
||||
return nil
|
||||
}
|
||||
for key, value := range state.Keys() {
|
||||
if _, exists := s.readKeys[key]; exists {
|
||||
continue
|
||||
}
|
||||
if value.ReadKey == nil {
|
||||
continue
|
||||
}
|
||||
treeKey, err := deriveKey(value.ReadKey, s.storageId)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.readKeys[key] = treeKey
|
||||
}
|
||||
curKey, err := state.CurrentReadKey()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if curKey == nil {
|
||||
return nil
|
||||
}
|
||||
s.currentReadKey, err = deriveKey(curKey, s.storageId)
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *storage) Iterate(ctx context.Context, f func(decryptor Decryptor, key string, values []innerstorage.KeyValue) (bool, error)) (err error) {
|
||||
s.mx.Lock()
|
||||
defer s.mx.Unlock()
|
||||
var (
|
||||
curKey = ""
|
||||
// TODO: reuse buffer
|
||||
values []innerstorage.KeyValue
|
||||
)
|
||||
err = s.inner.IterateValues(ctx, func(kv innerstorage.KeyValue) (bool, error) {
|
||||
if kv.Key != curKey {
|
||||
if curKey != "" {
|
||||
iter, err := f(s.decrypt, curKey, values)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if !iter {
|
||||
values = nil
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
curKey = kv.Key
|
||||
values = values[:0]
|
||||
}
|
||||
bytes := make([]byte, len(kv.Value.Value))
|
||||
copy(bytes, kv.Value.Value)
|
||||
kv.Value.Value = bytes
|
||||
values = append(values, kv)
|
||||
return true, nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(values) > 0 {
|
||||
_, err = f(s.decrypt, curKey, values)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *storage) decrypt(kv innerstorage.KeyValue) (value []byte, err error) {
|
||||
if kv.ReadKeyId == "" {
|
||||
return nil, fmt.Errorf("no read key id")
|
||||
}
|
||||
key := s.readKeys[kv.ReadKeyId]
|
||||
if key == nil {
|
||||
return nil, fmt.Errorf("no read key for %s", kv.ReadKeyId)
|
||||
}
|
||||
msg := &spacesyncproto.StoreKeyInner{}
|
||||
err = proto.Unmarshal(kv.Value.Value, msg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
value, err = key.Decrypt(msg.Value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return value, nil
|
||||
}
|
||||
|
||||
func deriveKey(key crypto.SymKey, id string) (crypto.SymKey, error) {
|
||||
raw, err := key.Raw()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return crypto.DeriveSymmetricKey(raw, fmt.Sprintf(crypto.AnysyncKeyValuePath, id))
|
||||
}
|
|
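For reference, a minimal consumer-side sketch of the Storage interface defined in this file; the helper name and the example key are assumptions, and error handling is trimmed to the essentials:

package example

import (
	"context"
	"fmt"

	"github.com/anyproto/any-sync/commonspace/object/keyvalue/keyvaluestorage"
	"github.com/anyproto/any-sync/commonspace/object/keyvalue/keyvaluestorage/innerstorage"
)

// writeAndRead is a hypothetical helper showing the Set/GetAll round trip:
// Set encrypts and broadcasts the value, GetAll hands back every stored version
// for the key together with a decryptor.
func writeAndRead(ctx context.Context, s keyvaluestorage.Storage) error {
	if err := s.Set(ctx, "title", []byte("hello")); err != nil {
		return err
	}
	return s.GetAll(ctx, "title", func(decryptor keyvaluestorage.Decryptor, values []innerstorage.KeyValue) error {
		for _, kv := range values {
			plain, err := decryptor(kv)
			if err != nil {
				return err
			}
			fmt.Printf("%s from %s: %s\n", kv.Key, kv.PeerId, plain)
		}
		return nil
	})
}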
@@ -0,0 +1,83 @@
|
|||
package syncstorage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/anyproto/any-sync/commonspace/object/keyvalue/keyvaluestorage/innerstorage"
|
||||
"github.com/anyproto/any-sync/commonspace/spacesyncproto"
|
||||
"github.com/anyproto/any-sync/commonspace/sync"
|
||||
"github.com/anyproto/any-sync/commonspace/sync/objectsync/objectmessages"
|
||||
)
|
||||
|
||||
type innerUpdate struct {
|
||||
prepared []byte
|
||||
keyValues []innerstorage.KeyValue
|
||||
}
|
||||
|
||||
func (i *innerUpdate) Marshall(data objectmessages.ObjectMeta) ([]byte, error) {
|
||||
if i.prepared != nil {
|
||||
return i.prepared, nil
|
||||
}
|
||||
return nil, fmt.Errorf("no prepared data")
|
||||
}
|
||||
|
||||
func (i *innerUpdate) Prepare() error {
|
||||
// TODO: Add peer to ignored peers list
|
||||
var (
|
||||
protoKeyValues []*spacesyncproto.StoreKeyValue
|
||||
err error
|
||||
)
|
||||
for _, kv := range i.keyValues {
|
||||
protoKeyValues = append(protoKeyValues, kv.Proto())
|
||||
}
|
||||
keyValues := &spacesyncproto.StoreKeyValues{KeyValues: protoKeyValues}
|
||||
i.prepared, err = keyValues.Marshal()
|
||||
return err
|
||||
}
|
||||
|
||||
func (i *innerUpdate) Heads() []string {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (i *innerUpdate) MsgSize() uint64 {
|
||||
return uint64(len(i.prepared))
|
||||
}
|
||||
|
||||
func (i *innerUpdate) ObjectType() spacesyncproto.ObjectType {
|
||||
return spacesyncproto.ObjectType_KeyValue
|
||||
}
|
||||
|
||||
type SyncClient interface {
|
||||
Broadcast(ctx context.Context, objectId string, keyValues ...innerstorage.KeyValue) error
|
||||
}
|
||||
|
||||
type syncClient struct {
|
||||
spaceId string
|
||||
syncService sync.SyncService
|
||||
}
|
||||
|
||||
func New(spaceId string, syncService sync.SyncService) SyncClient {
|
||||
return &syncClient{
|
||||
spaceId: spaceId,
|
||||
syncService: syncService,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *syncClient) Broadcast(ctx context.Context, objectId string, keyValue ...innerstorage.KeyValue) error {
|
||||
inner := &innerUpdate{
|
||||
keyValues: keyValue,
|
||||
}
|
||||
err := inner.Prepare()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
headUpdate := &objectmessages.HeadUpdate{
|
||||
Meta: objectmessages.ObjectMeta{
|
||||
ObjectId: objectId,
|
||||
SpaceId: s.spaceId,
|
||||
},
|
||||
Update: inner,
|
||||
}
|
||||
return s.syncService.BroadcastMessage(ctx, headUpdate)
|
||||
}
|
24
commonspace/object/keyvalue/kvinterfaces/interfaces.go
Normal file
|
@@ -0,0 +1,24 @@
|
|||
//go:generate mockgen -destination mock_kvinterfaces/mock_kvinterfaces.go github.com/anyproto/any-sync/commonspace/object/keyvalue/kvinterfaces KeyValueService
|
||||
package kvinterfaces
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"storj.io/drpc"
|
||||
|
||||
"github.com/anyproto/any-sync/app"
|
||||
"github.com/anyproto/any-sync/commonspace/object/keyvalue/keyvaluestorage"
|
||||
"github.com/anyproto/any-sync/commonspace/spacesyncproto"
|
||||
"github.com/anyproto/any-sync/net/peer"
|
||||
)
|
||||
|
||||
const CName = "common.object.keyvalue"
|
||||
|
||||
type KeyValueService interface {
|
||||
app.ComponentRunnable
|
||||
DefaultStore() keyvaluestorage.Storage
|
||||
HandleMessage(ctx context.Context, msg drpc.Message) (err error)
|
||||
SyncWithPeer(p peer.Peer) (err error)
|
||||
HandleStoreDiffRequest(ctx context.Context, req *spacesyncproto.StoreDiffRequest) (resp *spacesyncproto.StoreDiffResponse, err error)
|
||||
HandleStoreElementsRequest(ctx context.Context, stream spacesyncproto.DRPCSpaceSync_StoreElementsStream) (err error)
|
||||
}
|
|
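As a rough illustration, other components would typically resolve this service from the app container the same way keyvalue.go does in its Init; the helper below is hypothetical, not part of the change:

package example

import (
	"github.com/anyproto/any-sync/app"
	"github.com/anyproto/any-sync/commonspace/object/keyvalue/kvinterfaces"
)

// resolveKeyValue fetches the registered KeyValueService from the app container;
// callers can then use DefaultStore() for reads and writes.
func resolveKeyValue(a *app.App) kvinterfaces.KeyValueService {
	return a.MustComponent(kvinterfaces.CName).(kvinterfaces.KeyValueService)
}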
@@ -0,0 +1,171 @@
|
|||
// Code generated by MockGen. DO NOT EDIT.
|
||||
// Source: github.com/anyproto/any-sync/commonspace/object/keyvalue/kvinterfaces (interfaces: KeyValueService)
|
||||
//
|
||||
// Generated by this command:
|
||||
//
|
||||
// mockgen -destination mock_kvinterfaces/mock_kvinterfaces.go github.com/anyproto/any-sync/commonspace/object/keyvalue/kvinterfaces KeyValueService
|
||||
//
|
||||
// Package mock_kvinterfaces is a generated GoMock package.
|
||||
package mock_kvinterfaces
|
||||
|
||||
import (
|
||||
context "context"
|
||||
reflect "reflect"
|
||||
|
||||
app "github.com/anyproto/any-sync/app"
|
||||
keyvaluestorage "github.com/anyproto/any-sync/commonspace/object/keyvalue/keyvaluestorage"
|
||||
spacesyncproto "github.com/anyproto/any-sync/commonspace/spacesyncproto"
|
||||
peer "github.com/anyproto/any-sync/net/peer"
|
||||
gomock "go.uber.org/mock/gomock"
|
||||
drpc "storj.io/drpc"
|
||||
)
|
||||
|
||||
// MockKeyValueService is a mock of KeyValueService interface.
|
||||
type MockKeyValueService struct {
|
||||
ctrl *gomock.Controller
|
||||
recorder *MockKeyValueServiceMockRecorder
|
||||
}
|
||||
|
||||
// MockKeyValueServiceMockRecorder is the mock recorder for MockKeyValueService.
|
||||
type MockKeyValueServiceMockRecorder struct {
|
||||
mock *MockKeyValueService
|
||||
}
|
||||
|
||||
// NewMockKeyValueService creates a new mock instance.
|
||||
func NewMockKeyValueService(ctrl *gomock.Controller) *MockKeyValueService {
|
||||
mock := &MockKeyValueService{ctrl: ctrl}
|
||||
mock.recorder = &MockKeyValueServiceMockRecorder{mock}
|
||||
return mock
|
||||
}
|
||||
|
||||
// EXPECT returns an object that allows the caller to indicate expected use.
|
||||
func (m *MockKeyValueService) EXPECT() *MockKeyValueServiceMockRecorder {
|
||||
return m.recorder
|
||||
}
|
||||
|
||||
// Close mocks base method.
|
||||
func (m *MockKeyValueService) Close(arg0 context.Context) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "Close", arg0)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// Close indicates an expected call of Close.
|
||||
func (mr *MockKeyValueServiceMockRecorder) Close(arg0 any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockKeyValueService)(nil).Close), arg0)
|
||||
}
|
||||
|
||||
// DefaultStore mocks base method.
|
||||
func (m *MockKeyValueService) DefaultStore() keyvaluestorage.Storage {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "DefaultStore")
|
||||
ret0, _ := ret[0].(keyvaluestorage.Storage)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// DefaultStore indicates an expected call of DefaultStore.
|
||||
func (mr *MockKeyValueServiceMockRecorder) DefaultStore() *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DefaultStore", reflect.TypeOf((*MockKeyValueService)(nil).DefaultStore))
|
||||
}
|
||||
|
||||
// HandleMessage mocks base method.
|
||||
func (m *MockKeyValueService) HandleMessage(arg0 context.Context, arg1 drpc.Message) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "HandleMessage", arg0, arg1)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// HandleMessage indicates an expected call of HandleMessage.
|
||||
func (mr *MockKeyValueServiceMockRecorder) HandleMessage(arg0, arg1 any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HandleMessage", reflect.TypeOf((*MockKeyValueService)(nil).HandleMessage), arg0, arg1)
|
||||
}
|
||||
|
||||
// HandleStoreDiffRequest mocks base method.
|
||||
func (m *MockKeyValueService) HandleStoreDiffRequest(arg0 context.Context, arg1 *spacesyncproto.StoreDiffRequest) (*spacesyncproto.StoreDiffResponse, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "HandleStoreDiffRequest", arg0, arg1)
|
||||
ret0, _ := ret[0].(*spacesyncproto.StoreDiffResponse)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// HandleStoreDiffRequest indicates an expected call of HandleStoreDiffRequest.
|
||||
func (mr *MockKeyValueServiceMockRecorder) HandleStoreDiffRequest(arg0, arg1 any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HandleStoreDiffRequest", reflect.TypeOf((*MockKeyValueService)(nil).HandleStoreDiffRequest), arg0, arg1)
|
||||
}
|
||||
|
||||
// HandleStoreElementsRequest mocks base method.
|
||||
func (m *MockKeyValueService) HandleStoreElementsRequest(arg0 context.Context, arg1 spacesyncproto.DRPCSpaceSync_StoreElementsStream) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "HandleStoreElementsRequest", arg0, arg1)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// HandleStoreElementsRequest indicates an expected call of HandleStoreElementsRequest.
|
||||
func (mr *MockKeyValueServiceMockRecorder) HandleStoreElementsRequest(arg0, arg1 any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HandleStoreElementsRequest", reflect.TypeOf((*MockKeyValueService)(nil).HandleStoreElementsRequest), arg0, arg1)
|
||||
}
|
||||
|
||||
// Init mocks base method.
|
||||
func (m *MockKeyValueService) Init(arg0 *app.App) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "Init", arg0)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// Init indicates an expected call of Init.
|
||||
func (mr *MockKeyValueServiceMockRecorder) Init(arg0 any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Init", reflect.TypeOf((*MockKeyValueService)(nil).Init), arg0)
|
||||
}
|
||||
|
||||
// Name mocks base method.
|
||||
func (m *MockKeyValueService) Name() string {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "Name")
|
||||
ret0, _ := ret[0].(string)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// Name indicates an expected call of Name.
|
||||
func (mr *MockKeyValueServiceMockRecorder) Name() *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Name", reflect.TypeOf((*MockKeyValueService)(nil).Name))
|
||||
}
|
||||
|
||||
// Run mocks base method.
|
||||
func (m *MockKeyValueService) Run(arg0 context.Context) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "Run", arg0)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// Run indicates an expected call of Run.
|
||||
func (mr *MockKeyValueServiceMockRecorder) Run(arg0 any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Run", reflect.TypeOf((*MockKeyValueService)(nil).Run), arg0)
|
||||
}
|
||||
|
||||
// SyncWithPeer mocks base method.
|
||||
func (m *MockKeyValueService) SyncWithPeer(arg0 peer.Peer) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "SyncWithPeer", arg0)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// SyncWithPeer indicates an expected call of SyncWithPeer.
|
||||
func (mr *MockKeyValueServiceMockRecorder) SyncWithPeer(arg0 any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncWithPeer", reflect.TypeOf((*MockKeyValueService)(nil).SyncWithPeer), arg0)
|
||||
}
|
52 commonspace/object/keyvalue/limiter.go Normal file
@ -0,0 +1,52 @@
package keyvalue

import (
"context"
"sync"
)

type concurrentLimiter struct {
mu sync.Mutex
inProgress map[string]bool
wg sync.WaitGroup
}

func newConcurrentLimiter() *concurrentLimiter {
return &concurrentLimiter{
inProgress: make(map[string]bool),
}
}

func (cl *concurrentLimiter) ScheduleRequest(ctx context.Context, id string, action func()) bool {
cl.mu.Lock()
if cl.inProgress[id] {
cl.mu.Unlock()
return false
}

cl.inProgress[id] = true
cl.wg.Add(1)
cl.mu.Unlock()

go func() {
defer func() {
cl.mu.Lock()
delete(cl.inProgress, id)
cl.mu.Unlock()
cl.wg.Done()
}()

select {
case <-ctx.Done():
return
default:
action()
}
}()

return true
}

func (cl *concurrentLimiter) Close() {
cl.wg.Wait()
}
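A small usage sketch of the limiter above. ScheduleRequest returns false while an action for the same id is still in flight, so repeated triggers collapse into one running request; the peer id and syncWithPeer call are invented for illustration:

// Assumed wiring: deduplicate concurrent sync requests per peer id.
limiter := newConcurrentLimiter()
started := limiter.ScheduleRequest(ctx, "peerA", func() { syncWithPeer("peerA") }) // true, action scheduled
again := limiter.ScheduleRequest(ctx, "peerA", func() { syncWithPeer("peerA") })   // false while the first is running
_ = started
_ = again
limiter.Close() // waits for every scheduled action to finish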
106 commonspace/object/keyvalue/remotediff.go Normal file
@ -0,0 +1,106 @@
package keyvalue

import (
"context"

"github.com/anyproto/any-sync/app/ldiff"
"github.com/anyproto/any-sync/commonspace/spacesyncproto"
)

type Client interface {
StoreDiff(context.Context, *spacesyncproto.StoreDiffRequest) (*spacesyncproto.StoreDiffResponse, error)
}

type RemoteDiff interface {
ldiff.Remote
}

func NewRemoteDiff(spaceId string, client Client) RemoteDiff {
return &remote{
spaceId: spaceId,
client: client,
}
}

type remote struct {
spaceId string
client Client
}

func (r *remote) Ranges(ctx context.Context, ranges []ldiff.Range, resBuf []ldiff.RangeResult) (results []ldiff.RangeResult, err error) {
results = resBuf[:0]
pbRanges := make([]*spacesyncproto.HeadSyncRange, 0, len(ranges))
for _, rg := range ranges {
pbRanges = append(pbRanges, &spacesyncproto.HeadSyncRange{
From: rg.From,
To: rg.To,
Elements: rg.Elements,
Limit: uint32(rg.Limit),
})
}
req := &spacesyncproto.StoreDiffRequest{
SpaceId: r.spaceId,
Ranges: pbRanges,
}
resp, err := r.client.StoreDiff(ctx, req)
if err != nil {
return
}
for _, rr := range resp.Results {
var elms []ldiff.Element
if len(rr.Elements) > 0 {
elms = make([]ldiff.Element, 0, len(rr.Elements))
}
for _, e := range rr.Elements {
elms = append(elms, ldiff.Element{
Id: e.Id,
Head: e.Head,
})
}
results = append(results, ldiff.RangeResult{
Hash: rr.Hash,
Elements: elms,
Count: int(rr.Count),
})
}
return
}

func HandleRangeRequest(ctx context.Context, d ldiff.Diff, req *spacesyncproto.StoreDiffRequest) (resp *spacesyncproto.StoreDiffResponse, err error) {
ranges := make([]ldiff.Range, 0, len(req.Ranges))
// we gather range data in a form applicable to both diffs
for _, reqRange := range req.Ranges {
ranges = append(ranges, ldiff.Range{
From: reqRange.From,
To: reqRange.To,
Limit: int(reqRange.Limit),
Elements: reqRange.Elements,
})
}
res, err := d.Ranges(ctx, ranges, nil)
if err != nil {
return
}

resp = &spacesyncproto.StoreDiffResponse{
Results: make([]*spacesyncproto.HeadSyncResult, 0, len(res)),
}
for _, rangeRes := range res {
var elements []*spacesyncproto.HeadSyncResultElement
if len(rangeRes.Elements) > 0 {
elements = make([]*spacesyncproto.HeadSyncResultElement, 0, len(rangeRes.Elements))
for _, el := range rangeRes.Elements {
elements = append(elements, &spacesyncproto.HeadSyncResultElement{
Id: el.Id,
Head: el.Head,
})
}
}
resp.Results = append(resp.Results, &spacesyncproto.HeadSyncResult{
Hash: rangeRes.Hash,
Elements: elements,
Count: uint32(rangeRes.Count),
})
}
return
}
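Read together, Ranges is the client half and HandleRangeRequest the server half of the same exchange. A minimal in-process Client, handy in tests where no network peer exists, can close the loop; this type is illustrative only and not part of the change:

// loopbackClient serves StoreDiff locally by delegating to HandleRangeRequest.
type loopbackClient struct {
    diff ldiff.Diff
}

func (c *loopbackClient) StoreDiff(ctx context.Context, req *spacesyncproto.StoreDiffRequest) (*spacesyncproto.StoreDiffResponse, error) {
    return HandleRangeRequest(ctx, c.diff, req)
}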
@ -5,7 +5,6 @@
//
// mockgen -destination mock_objecttree/mock_objecttree.go github.com/anyproto/any-sync/commonspace/object/tree/objecttree ObjectTree,Storage
//

// Package mock_objecttree is a generated GoMock package.
package mock_objecttree

@ -374,11 +373,12 @@ func (mr *MockObjectTreeMockRecorder) SetFlusher(arg0 any) *gomock.Call {
}

// SnapshotPath mocks base method.
func (m *MockObjectTree) SnapshotPath() []string {
func (m *MockObjectTree) SnapshotPath() ([]string, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SnapshotPath")
ret0, _ := ret[0].([]string)
return ret0
ret1, _ := ret[1].(error)
return ret0, ret1
}

// SnapshotPath indicates an expected call of SnapshotPath.
@ -90,7 +90,7 @@ type ReadableObjectTree interface {
type ObjectTree interface {
ReadableObjectTree

SnapshotPath() []string
SnapshotPath() ([]string, error)
ChangesAfterCommonSnapshotLoader(snapshotPath, heads []string) (LoadIterator, error)

Storage() Storage

@ -144,7 +144,10 @@ func (ot *objectTree) rebuildFromStorage(theirHeads, theirSnapshotPath []string,
)
if theirHeads != nil {
// TODO: add error handling
ourPath = ot.SnapshotPath()
ourPath, err = ot.SnapshotPath()
if err != nil {
return fmt.Errorf("rebuild from storage: %w", err)
}
}
ot.tree, err = ot.treeBuilder.Build(treeBuilderOpts{
theirHeads: theirHeads,
@ -748,13 +751,12 @@ func (ot *objectTree) Delete() error {
return ot.storage.Delete(context.Background())
}

func (ot *objectTree) SnapshotPath() []string {
func (ot *objectTree) SnapshotPath() ([]string, error) {
if ot.isDeleted {
return nil
return nil, ErrDeleted
}
// TODO: Add error as return parameter
if ot.snapshotPathIsActual() {
return ot.snapshotPath
return ot.snapshotPath, nil
}

var path []string

@ -763,14 +765,13 @@ func (ot *objectTree) SnapshotPath() []string {
for currentSnapshotId != "" {
sn, err := ot.storage.Get(context.Background(), currentSnapshotId)
if err != nil {
// TODO: add error handling
panic(fmt.Sprintf("failed to get snapshot %s: %v", currentSnapshotId, err))
return nil, fmt.Errorf("failed to get snapshot %s: %w", currentSnapshotId, err)
}
path = append(path, currentSnapshotId)
currentSnapshotId = sn.SnapshotId
}
ot.snapshotPath = path
return path
return path, nil
}

func (ot *objectTree) ChangesAfterCommonSnapshotLoader(theirPath, theirHeads []string) (LoadIterator, error) {

@ -779,12 +780,16 @@ func (ot *objectTree) ChangesAfterCommonSnapshotLoader(theirPath, theirHeads []s
}
var (
needFullDocument = len(theirPath) == 0
ourPath = ot.SnapshotPath()
ourPath []string
// by default returning everything we have from start
commonSnapshot = ourPath[len(ourPath)-1]
commonSnapshot string
err error
)

ourPath, err = ot.SnapshotPath()
if err != nil {
return nil, fmt.Errorf("failed to get snapshot path: %w", err)
}
commonSnapshot = ourPath[len(ourPath)-1]
// if this is non-empty request
if !needFullDocument {
commonSnapshot, err = commonSnapshotForTwoPaths(ourPath, theirPath)
@ -1075,7 +1075,8 @@ func TestObjectTree(t *testing.T) {
_, err := objTree.AddRawChanges(context.Background(), payload)
require.NoError(t, err, "adding changes should be without error")

snapshotPath := objTree.SnapshotPath()
snapshotPath, err := objTree.SnapshotPath()
require.NoError(t, err)
assert.Equal(t, []string{"3", "0"}, snapshotPath)

assert.Equal(t, true, objTree.(*objectTree).snapshotPathIsActual())

@ -1857,7 +1858,9 @@ func TestObjectTree(t *testing.T) {
RawChanges: result.changes,
})
require.NoError(t, err)
iter, err := objTree.ChangesAfterCommonSnapshotLoader(otherTree.SnapshotPath(), otherTree.Heads())
snPath, err := otherTree.SnapshotPath()
require.NoError(t, err)
iter, err := objTree.ChangesAfterCommonSnapshotLoader(snPath, otherTree.Heads())
require.NoError(t, err)
for {
batch, err := iter.NextBatch(400)
@ -20,6 +20,6 @@ func (o objectTreeDebug) debugInfo(ot *objectTree, parser DescriptionParser) (di
di.TreeString = ot.tree.String()
di.TreeLen = ot.tree.Len()
di.Heads = ot.Heads()
di.SnapshotPath = ot.SnapshotPath()
di.SnapshotPath, _ = ot.SnapshotPath()
return
}
@ -77,6 +77,19 @@ type storage struct {
var StorageChangeBuilder = NewChangeBuilder

func CreateStorage(ctx context.Context, root *treechangeproto.RawTreeChangeWithId, headStorage headstorage.HeadStorage, store anystore.DB) (Storage, error) {
tx, err := store.WriteTx(ctx)
if err != nil {
return nil, err
}
storage, err := CreateStorageTx(tx.Context(), root, headStorage, store)
if err != nil {
tx.Rollback()
return nil, err
}
return storage, tx.Commit()
}

func CreateStorageTx(ctx context.Context, root *treechangeproto.RawTreeChangeWithId, headStorage headstorage.HeadStorage, store anystore.DB) (Storage, error) {
st := &storage{
id: root.Id,
store: store,

@ -107,29 +120,23 @@ func CreateStorage(ctx context.Context, root *treechangeproto.RawTreeChangeWithI
st.parser = &anyenc.Parser{}
defer st.arena.Reset()
doc := newStorageChangeValue(stChange, st.arena)
tx, err := st.store.WriteTx(ctx)
err = st.changesColl.Insert(ctx, doc)
if err != nil {
return nil, err
}
err = st.changesColl.Insert(tx.Context(), doc)
if err != nil {
tx.Rollback()
if errors.Is(err, anystore.ErrDocExists) {
return nil, treestorage.ErrTreeExists
}
return nil, err
}
err = st.headStorage.UpdateEntryTx(tx.Context(), headstorage.HeadsUpdate{
err = st.headStorage.UpdateEntryTx(ctx, headstorage.HeadsUpdate{
Id: root.Id,
Heads: []string{root.Id},
CommonSnapshot: &root.Id,
IsDerived: &unmarshalled.IsDerived,
})
if err != nil {
tx.Rollback()
return nil, err
}
return st, tx.Commit()
return st, nil
}

func NewStorage(ctx context.Context, id string, headStorage headstorage.HeadStorage, store anystore.DB) (Storage, error) {

@ -151,7 +158,7 @@ func NewStorage(ctx context.Context, id string, headStorage headstorage.HeadStor
st.changesColl = changesColl
st.arena = &anyenc.Arena{}
st.parser = &anyenc.Parser{}
st.root, err = st.Get(ctx, st.id)
st.root, err = st.getWithoutParser(ctx, st.id)
if err != nil {
if errors.Is(err, anystore.ErrDocNotFound) {
return nil, treestorage.ErrUnknownTreeId

@ -182,6 +189,7 @@ func (s *storage) Has(ctx context.Context, id string) (bool, error) {
}

func (s *storage) GetAfterOrder(ctx context.Context, orderId string, storageIter StorageIterator) error {
// this method can be called without having a lock on a tree, so don't reuse any non-thread-safe parts
filter := query.And{
query.Key{Path: []string{OrderKey}, Filter: query.NewComp(query.CompOpGte, orderId)},
query.Key{Path: []string{TreeKey}, Filter: query.NewComp(query.CompOpEq, s.id)},

@ -213,13 +221,19 @@ func (s *storage) AddAll(ctx context.Context, changes []StorageChange, heads []s
if err != nil {
return fmt.Errorf("failed to create write tx: %w", err)
}
defer func() {
if err != nil {
tx.Rollback()
} else {
err = tx.Commit()
}
}()
for _, ch := range changes {
ch.TreeId = s.id
newVal := newStorageChangeValue(ch, arena)
err = s.changesColl.Insert(tx.Context(), newVal)
arena.Reset()
if err != nil {
tx.Rollback()
return err
}
}

@ -228,12 +242,7 @@ func (s *storage) AddAll(ctx context.Context, changes []StorageChange, heads []s
Heads: heads,
CommonSnapshot: &commonSnapshot,
}
err = s.headStorage.UpdateEntryTx(tx.Context(), update)
if err != nil {
tx.Rollback()
return err
}
return tx.Commit()
return s.headStorage.UpdateEntryTx(tx.Context(), update)
}

func (s *storage) AddAllNoError(ctx context.Context, changes []StorageChange, heads []string, commonSnapshot string) error {

@ -243,13 +252,19 @@ func (s *storage) AddAllNoError(ctx context.Context, changes []StorageChange, he
if err != nil {
return fmt.Errorf("failed to create write tx: %w", err)
}
defer func() {
if err != nil {
tx.Rollback()
} else {
err = tx.Commit()
}
}()
for _, ch := range changes {
ch.TreeId = s.id
newVal := newStorageChangeValue(ch, arena)
err = s.changesColl.Insert(tx.Context(), newVal)
arena.Reset()
if err != nil && !errors.Is(err, anystore.ErrDocExists) {
tx.Rollback()
return err
}
}

@ -258,12 +273,7 @@ func (s *storage) AddAllNoError(ctx context.Context, changes []StorageChange, he
Heads: heads,
CommonSnapshot: &commonSnapshot,
}
err = s.headStorage.UpdateEntryTx(tx.Context(), update)
if err != nil {
tx.Rollback()
return err
}
return tx.Commit()
return s.headStorage.UpdateEntryTx(tx.Context(), update)
}

func (s *storage) Delete(ctx context.Context) error {
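AddAll and AddAllNoError now share the same shape: open a write transaction, defer a commit-or-rollback keyed on the named error, then perform the writes. The helper below is only a sketch of factoring that shape out, not code from this change; it uses just the WriteTx, Context, Commit and Rollback calls already shown above, and assumes fmt is imported:

// withWriteTx is a hypothetical helper illustrating the deferred commit/rollback pattern.
func withWriteTx(ctx context.Context, store anystore.DB, fn func(txCtx context.Context) error) (err error) {
    tx, err := store.WriteTx(ctx)
    if err != nil {
        return fmt.Errorf("failed to create write tx: %w", err)
    }
    defer func() {
        if err != nil {
            tx.Rollback()
        } else {
            err = tx.Commit()
        }
    }()
    return fn(tx.Context())
}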
@ -300,6 +310,15 @@ func (s *storage) CommonSnapshot(ctx context.Context) (string, error) {
return entry.CommonSnapshot, nil
}

func (s *storage) getWithoutParser(ctx context.Context, id string) (StorageChange, error) {
// root will be reused outside the lock, so we shouldn't use parser for it
doc, err := s.changesColl.FindId(ctx, id)
if err != nil {
return StorageChange{}, err
}
return s.changeFromDoc(doc), nil
}

func (s *storage) Get(ctx context.Context, id string) (StorageChange, error) {
doc, err := s.changesColl.FindIdWithParser(ctx, s.parser, id)
if err != nil {
@ -6,7 +6,6 @@ import (
"fmt"

anystore "github.com/anyproto/any-store"
"go.uber.org/zap"

"github.com/anyproto/any-sync/commonspace/headsync/headstorage"
"github.com/anyproto/any-sync/commonspace/object/acl/list"

@ -55,10 +54,6 @@ func NewTreeMigrator(keyStorage crypto.KeyStorage, aclList list.AclList) *TreeMi
}

func (tm *TreeMigrator) MigrateTreeStorage(ctx context.Context, storage treeStorage, headStorage headstorage.HeadStorage, store anystore.DB) error {
var (
usedDfs bool
loadFailed bool
)
rootChange, err := storage.Root()
if err != nil {
return err

@ -78,8 +73,7 @@ func (tm *TreeMigrator) MigrateTreeStorage(ctx context.Context, storage treeStor
return fmt.Errorf("migration: failed to get all changes: %w", err)
}
} else {
usedDfs = true
loadFailed = tm.dfs(ctx, heads, rootChange.Id)
tm.dfs(ctx, heads, rootChange.Id)
}
newStorage, err := CreateStorage(ctx, rootChange, headStorage, store)
if err != nil && !errors.Is(err, treestorage.ErrTreeExists) {

@ -107,20 +101,7 @@ func (tm *TreeMigrator) MigrateTreeStorage(ctx context.Context, storage treeStor
return fmt.Errorf("migration: failed to add raw changes: %w", err)
}
if !slice.UnsortedEquals(res.Heads, heads) {
returnErr := fmt.Errorf("migration: heads mismatch: %v, %v != %v", rootChange.Id, res.Heads, heads)
if loadFailed {
log.Error("tree is corrupted", zap.String("id", storage.Id()), zap.Error(returnErr))
return nil
}
if usedDfs {
return returnErr
}
tm.allChanges = nil
if tm.dfs(ctx, heads, rootChange.Id) {
log.Error("tree is corrupted", zap.String("id", storage.Id()), zap.Error(returnErr))
return nil
}
return returnErr
log.Errorf("migration: heads mismatch: %v, %v != %v", rootChange.Id, res.Heads, heads)
}
return nil
}
@ -4,6 +4,7 @@ import (
"slices"

"github.com/anyproto/any-sync/commonspace/object/tree/treechangeproto"
"github.com/anyproto/any-sync/commonspace/spacesyncproto"
"github.com/anyproto/any-sync/commonspace/sync/objectsync/objectmessages"
)

@ -21,6 +22,10 @@ func (h *InnerHeadUpdate) MsgSize() (size uint64) {
return uint64(len(h.prepared))
}

func (h *InnerHeadUpdate) ObjectType() spacesyncproto.ObjectType {
return spacesyncproto.ObjectType_Tree
}

func (h *InnerHeadUpdate) Prepare() error {
treeMsg := treechangeproto.WrapHeadUpdate(&treechangeproto.TreeHeadUpdate{
Heads: h.heads,
|
@ -5,7 +5,6 @@
//
// mockgen -destination mock_synctree/mock_synctree.go github.com/anyproto/any-sync/commonspace/object/tree/synctree SyncTree,HeadNotifiable,SyncClient,RequestFactory
//

// Package mock_synctree is a generated GoMock package.
package mock_synctree

@ -467,11 +466,12 @@ func (mr *MockSyncTreeMockRecorder) SetListener(arg0 any) *gomock.Call {
}

// SnapshotPath mocks base method.
func (m *MockSyncTree) SnapshotPath() []string {
func (m *MockSyncTree) SnapshotPath() ([]string, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SnapshotPath")
ret0, _ := ret[0].([]string)
return ret0
ret1, _ := ret[1].(error)
return ret0, ret1
}

// SnapshotPath indicates an expected call of SnapshotPath.

@ -651,11 +651,12 @@ func (mr *MockSyncClientMockRecorder) Broadcast(arg0, arg1 any) *gomock.Call {
}

// CreateFullSyncRequest mocks base method.
func (m *MockSyncClient) CreateFullSyncRequest(arg0 string, arg1 objecttree.ObjectTree) *objectmessages.Request {
func (m *MockSyncClient) CreateFullSyncRequest(arg0 string, arg1 objecttree.ObjectTree) (*objectmessages.Request, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateFullSyncRequest", arg0, arg1)
ret0, _ := ret[0].(*objectmessages.Request)
return ret0
ret1, _ := ret[1].(error)
return ret0, ret1
}

// CreateFullSyncRequest indicates an expected call of CreateFullSyncRequest.

@ -760,11 +761,12 @@ func (m *MockRequestFactory) EXPECT() *MockRequestFactoryMockRecorder {
}

// CreateFullSyncRequest mocks base method.
func (m *MockRequestFactory) CreateFullSyncRequest(arg0 string, arg1 objecttree.ObjectTree) *objectmessages.Request {
func (m *MockRequestFactory) CreateFullSyncRequest(arg0 string, arg1 objecttree.ObjectTree) (*objectmessages.Request, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateFullSyncRequest", arg0, arg1)
ret0, _ := ret[0].(*objectmessages.Request)
return ret0
ret1, _ := ret[1].(error)
return ret0, ret1
}

// CreateFullSyncRequest indicates an expected call of CreateFullSyncRequest.
|
@ -12,7 +12,7 @@ const batchSize = 1024 * 1024
type RequestFactory interface {
CreateHeadUpdate(t objecttree.ObjectTree, ignoredPeer string, added []*treechangeproto.RawTreeChangeWithId) (headUpdate *objectmessages.HeadUpdate, err error)
CreateNewTreeRequest(peerId, objectId string) *objectmessages.Request
CreateFullSyncRequest(peerId string, t objecttree.ObjectTree) *objectmessages.Request
CreateFullSyncRequest(peerId string, t objecttree.ObjectTree) (*objectmessages.Request, error)
CreateResponseProducer(t objecttree.ObjectTree, theirHeads, theirSnapshotPath []string) (response.ResponseProducer, error)
}

@ -29,6 +29,10 @@ func (r *requestFactory) CreateHeadUpdate(t objecttree.ObjectTree, ignoredPeer s
if ignoredPeer != "" {
broadcastOpts.EmptyPeers = []string{ignoredPeer}
}
snapshotPath, err := t.SnapshotPath()
if err != nil {
return
}
headUpdate = &objectmessages.HeadUpdate{
Meta: objectmessages.ObjectMeta{
ObjectId: t.Id(),

@ -38,7 +42,7 @@ func (r *requestFactory) CreateHeadUpdate(t objecttree.ObjectTree, ignoredPeer s
opts: broadcastOpts,
heads: t.Heads(),
changes: added,
snapshotPath: t.SnapshotPath(),
snapshotPath: snapshotPath,
root: t.Header(),
},
}

@ -50,8 +54,12 @@ func (r *requestFactory) CreateNewTreeRequest(peerId, objectId string) *objectme
return NewRequest(peerId, r.spaceId, objectId, nil, nil, nil)
}

func (r *requestFactory) CreateFullSyncRequest(peerId string, t objecttree.ObjectTree) *objectmessages.Request {
return NewRequest(peerId, r.spaceId, t.Id(), t.Heads(), t.SnapshotPath(), t.Header())
func (r *requestFactory) CreateFullSyncRequest(peerId string, t objecttree.ObjectTree) (*objectmessages.Request, error) {
path, err := t.SnapshotPath()
if err != nil {
return nil, err
}
return NewRequest(peerId, r.spaceId, t.Id(), t.Heads(), path, t.Header()), nil
}

func (r *requestFactory) CreateResponseProducer(t objecttree.ObjectTree, theirHeads, theirSnapshotPath []string) (response.ResponseProducer, error) {
|
@ -5,7 +5,6 @@
//
// mockgen -destination mock_response/mock_response.go github.com/anyproto/any-sync/commonspace/object/tree/synctree/response ResponseProducer
//

// Package mock_response is a generated GoMock package.
package mock_response

@ -40,11 +39,12 @@ func (m *MockResponseProducer) EXPECT() *MockResponseProducerMockRecorder {
}

// EmptyResponse mocks base method.
func (m *MockResponseProducer) EmptyResponse() *response.Response {
func (m *MockResponseProducer) EmptyResponse() (*response.Response, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "EmptyResponse")
ret0, _ := ret[0].(*response.Response)
return ret0
ret1, _ := ret[1].(error)
return ret0, ret1
}

// EmptyResponse indicates an expected call of EmptyResponse.

@ -7,7 +7,7 @@ import (

type ResponseProducer interface {
NewResponse(batchSize int) (*Response, error)
EmptyResponse() *Response
EmptyResponse() (*Response, error)
}

type responseProducer struct {

@ -45,14 +45,18 @@ func (r *responseProducer) NewResponse(batchSize int) (*Response, error) {
}, nil
}

func (r *responseProducer) EmptyResponse() *Response {
func (r *responseProducer) EmptyResponse() (*Response, error) {
headsCopy := make([]string, len(r.tree.Heads()))
copy(headsCopy, r.tree.Heads())
snapshotPath, err := r.tree.SnapshotPath()
if err != nil {
return nil, err
}
return &Response{
Heads: headsCopy,
SpaceId: r.spaceId,
ObjectId: r.objectId,
Root: r.tree.Header(),
SnapshotPath: r.tree.SnapshotPath(),
}
SnapshotPath: snapshotPath,
}, nil
}
|
@ -41,6 +41,13 @@ func NewSyncHandler(tree SyncTree, syncClient SyncClient, spaceId string) syncde
}

func (s *syncHandler) HandleHeadUpdate(ctx context.Context, statusUpdater syncstatus.StatusUpdater, headUpdate drpc.Message) (req syncdeps.Request, err error) {
var objectRequest *objectmessages.Request
defer func() {
// we mitigate the problem of a nil value being wrapped in an interface
if err == nil && objectRequest != nil {
req = objectRequest
}
}()
update, ok := headUpdate.(*objectmessages.HeadUpdate)
if !ok {
return nil, ErrUnexpectedResponseType

@ -73,7 +80,8 @@ func (s *syncHandler) HandleHeadUpdate(ctx context.Context, statusUpdater syncst
return nil, nil
}
statusUpdater.HeadsApply(peerId, update.ObjectId(), contentUpdate.Heads, false)
return s.syncClient.CreateFullSyncRequest(peerId, s.tree), nil
objectRequest, err = s.syncClient.CreateFullSyncRequest(peerId, s.tree)
return
}
rawChangesPayload := objecttree.RawChangesPayload{
NewHeads: contentUpdate.Heads,

@ -85,7 +93,8 @@ func (s *syncHandler) HandleHeadUpdate(ctx context.Context, statusUpdater syncst
return nil, err
}
if !slice.UnsortedEquals(res.Heads, contentUpdate.Heads) {
return s.syncClient.CreateFullSyncRequest(peerId, s.tree), nil
objectRequest, err = s.syncClient.CreateFullSyncRequest(peerId, s.tree)
return
}
return nil, nil
}

@ -119,10 +128,17 @@ func (s *syncHandler) HandleStreamRequest(ctx context.Context, rq syncdeps.Reque
var returnReq syncdeps.Request
if slice.UnsortedEquals(curHeads, request.Heads) || slice.ContainsSorted(request.Heads, curHeads) {
if len(curHeads) != len(request.Heads) {
returnReq = s.syncClient.CreateFullSyncRequest(rq.PeerId(), s.tree)
returnReq, err = s.syncClient.CreateFullSyncRequest(rq.PeerId(), s.tree)
if err != nil {
s.tree.Unlock()
return nil, err
}
}
resp := producer.EmptyResponse()
resp, err := producer.EmptyResponse()
s.tree.Unlock()
if err != nil {
return nil, err
}
protoResp, err := resp.ProtoMessage()
if err != nil {
return nil, err

@ -130,7 +146,11 @@ func (s *syncHandler) HandleStreamRequest(ctx context.Context, rq syncdeps.Reque
return returnReq, send(protoResp)
} else {
if len(request.Heads) != 0 {
returnReq = s.syncClient.CreateFullSyncRequest(rq.PeerId(), s.tree)
returnReq, err = s.syncClient.CreateFullSyncRequest(rq.PeerId(), s.tree)
if err != nil {
s.tree.Unlock()
return nil, err
}
}
s.tree.Unlock()
}
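The objectRequest indirection in HandleHeadUpdate exists because a nil *objectmessages.Request stored directly into the syncdeps.Request interface would compare as non-nil. A tiny standalone illustration of that Go behaviour (names here are generic, not taken from the codebase):

package main

import "fmt"

type request interface{ PeerId() string }

type objectRequest struct{}

func (o *objectRequest) PeerId() string { return "" }

func newRequest() *objectRequest { return nil }

func main() {
    var req request = newRequest() // interface now wraps a typed nil pointer
    fmt.Println(req == nil)        // prints false, which callers may misread as "have a request"
}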
|
@ -107,7 +107,7 @@ func TestSyncHandler_HeadUpdate(t *testing.T) {
returnReq := &objectmessages.Request{
Bytes: []byte("abcd"),
}
fx.client.EXPECT().CreateFullSyncRequest("peerId", fx.tree).Return(returnReq)
fx.client.EXPECT().CreateFullSyncRequest("peerId", fx.tree).Return(returnReq, nil)
req, err := fx.syncHandler.HandleHeadUpdate(ctx, fx.syncStatus, headUpdate)
require.NoError(t, err)
require.Equal(t, returnReq, req)

@ -174,7 +174,7 @@ func TestSyncHandler_HeadUpdate(t *testing.T) {
returnReq := &objectmessages.Request{
Bytes: []byte("abcd"),
}
fx.client.EXPECT().CreateFullSyncRequest("peerId", fx.tree).Return(returnReq)
fx.client.EXPECT().CreateFullSyncRequest("peerId", fx.tree).Return(returnReq, nil)
req, err := fx.syncHandler.HandleHeadUpdate(ctx, fx.syncStatus, headUpdate)
require.NoError(t, err)
require.Equal(t, returnReq, req)

@ -205,7 +205,7 @@ func TestSyncHandler_HandleStreamRequest(t *testing.T) {
returnReq := &objectmessages.Request{
Bytes: []byte("abcde"),
}
fx.client.EXPECT().CreateFullSyncRequest("peerId", fx.tree).Return(returnReq)
fx.client.EXPECT().CreateFullSyncRequest("peerId", fx.tree).Return(returnReq, nil)
fx.tree.EXPECT().Heads().Return([]string{"curHead"})
resp := &response.Response{
Heads: heads,

@ -287,8 +287,8 @@ func TestSyncHandler_HandleStreamRequest(t *testing.T) {
returnReq := &objectmessages.Request{
Bytes: []byte("abcde"),
}
fx.client.EXPECT().CreateFullSyncRequest("peerId", fx.tree).Return(returnReq)
producer.EXPECT().EmptyResponse().Return(resp)
fx.client.EXPECT().CreateFullSyncRequest("peerId", fx.tree).Return(returnReq, nil)
producer.EXPECT().EmptyResponse().Return(resp, nil)
ctx = peer.CtxWithPeerId(ctx, "peerId")
callCount := 0
req, err := fx.syncHandler.HandleStreamRequest(ctx, request, testUpdater{}, func(resp proto.Message) error {

@ -322,8 +322,8 @@ func TestSyncHandler_HandleStreamRequest(t *testing.T) {
returnReq := &objectmessages.Request{
Bytes: []byte("abcde"),
}
fx.client.EXPECT().CreateFullSyncRequest("peerId", fx.tree).Return(returnReq)
producer.EXPECT().EmptyResponse().Return(resp)
fx.client.EXPECT().CreateFullSyncRequest("peerId", fx.tree).Return(returnReq, nil)
producer.EXPECT().EmptyResponse().Return(resp, nil)
ctx = peer.CtxWithPeerId(ctx, "peerId")
callCount := 0
req, err := fx.syncHandler.HandleStreamRequest(ctx, request, testUpdater{}, func(resp proto.Message) error {
|
@ -53,12 +53,14 @@ type SyncTree interface {
type syncTree struct {
syncdeps.ObjectSyncHandler
objecttree.ObjectTree
syncClient SyncClient
syncStatus syncstatus.StatusUpdater
listener updatelistener.UpdateListener
onClose func(id string)
isClosed bool
isDeleted bool
syncClient SyncClient
syncStatus syncstatus.StatusUpdater
listener updatelistener.UpdateListener
statsCollector *TreeStatsCollector
onClose func(id string)
isClosed bool
isDeleted bool
buildTime time.Duration
}

var log = logger.NewNamed("common.commonspace.synctree")

@ -81,6 +83,7 @@ type BuildDeps struct {
PeerGetter ResponsiblePeersGetter
BuildObjectTree objecttree.BuildObjectTreeFunc
ValidateObjectTree objecttree.ValidatorFunc
StatsCollector *TreeStatsCollector
}

var newTreeGetter = func(deps BuildDeps, treeId string) treeGetter {

@ -112,17 +115,20 @@ func PutSyncTree(ctx context.Context, payload treestorage.TreeStorageCreatePaylo
}

func buildSyncTree(ctx context.Context, peerId string, deps BuildDeps) (t SyncTree, err error) {
buildStart := time.Now()
objTree, err := deps.BuildObjectTree(deps.TreeStorage, deps.AclList)
if err != nil {
return
}
syncClient := deps.SyncClient
syncTree := &syncTree{
ObjectTree: objTree,
syncClient: syncClient,
onClose: deps.OnClose,
listener: deps.Listener,
syncStatus: deps.SyncStatus,
ObjectTree: objTree,
syncClient: syncClient,
onClose: deps.OnClose,
listener: deps.Listener,
syncStatus: deps.SyncStatus,
statsCollector: deps.StatsCollector,
buildTime: time.Since(buildStart),
}
syncHandler := NewSyncHandler(syncTree, syncClient, deps.SpaceId)
syncTree.ObjectSyncHandler = syncHandler

@ -146,6 +152,9 @@ func buildSyncTree(ctx context.Context, peerId string, deps BuildDeps) (t SyncTr
deps.SyncStatus.ObjectReceive(peerId, syncTree.Id(), syncTree.Heads())
}
}
if syncTree.statsCollector != nil {
syncTree.statsCollector.Register(syncTree)
}
return
}

@ -285,6 +294,11 @@ func (s *syncTree) Close() (err error) {
}

func (s *syncTree) close() (err error) {
defer func() {
if s.statsCollector != nil {
s.statsCollector.Unregister(s)
}
}()
defer s.Unlock()
defer func() {
log.Debug("closed sync tree", zap.Error(err), zap.String("id", s.Id()))

@ -311,7 +325,10 @@ func (s *syncTree) checkAlive() (err error) {
func (s *syncTree) SyncWithPeer(ctx context.Context, p peer.Peer) (err error) {
s.Lock()
defer s.Unlock()
req := s.syncClient.CreateFullSyncRequest(p.Id(), s)
req, err := s.syncClient.CreateFullSyncRequest(p.Id(), s)
if err != nil {
return
}
return s.syncClient.QueueRequest(ctx, req)
}
|
58 commonspace/object/tree/synctree/treestats.go Normal file
@ -0,0 +1,58 @@
package synctree

import (
"sync"
)

type TreeStatsCollector struct {
trees map[string]*syncTree
mutex sync.Mutex
spaceId string
}

func NewTreeStatsCollector(spaceId string) *TreeStatsCollector {
return &TreeStatsCollector{
trees: make(map[string]*syncTree),
spaceId: spaceId,
}
}

func (t *TreeStatsCollector) Register(tree *syncTree) {
t.mutex.Lock()
defer t.mutex.Unlock()
t.trees[tree.Id()] = tree
}

func (t *TreeStatsCollector) Collect() []TreeStats {
t.mutex.Lock()
defer t.mutex.Unlock()
stats := make([]TreeStats, 0, len(t.trees))
for _, tree := range t.trees {
tree.Lock()
stats = append(stats, TreeStats{
TreeLen: tree.Len(),
SnapshotCounter: tree.Root().SnapshotCounter,
Heads: tree.Heads(),
ObjectId: tree.Id(),
SpaceId: t.spaceId,
BuildTimeMillis: int(tree.buildTime.Milliseconds()),
})
tree.Unlock()
}
return stats
}

func (t *TreeStatsCollector) Unregister(tree SyncTree) {
t.mutex.Lock()
defer t.mutex.Unlock()
delete(t.trees, tree.Id())
}

type TreeStats struct {
TreeLen int `json:"tree_len"`
SnapshotCounter int `json:"snapshot_counter"`
Heads []string `json:"heads"`
ObjectId string `json:"object_id"`
SpaceId string `json:"space_id"`
BuildTimeMillis int `json:"build_time_millis"`
}
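Collect returns a plain snapshot that can be logged or exported by whoever owns the collector. The helper below is only an illustration of reading those stats from inside the same package; the threshold, the function itself and the fmt-based logging are assumptions, not part of the change:

// Hypothetical consumer of the collector's output.
func logSlowTrees(c *TreeStatsCollector, thresholdMillis int) {
    for _, st := range c.Collect() {
        if st.BuildTimeMillis > thresholdMillis {
            fmt.Printf("slow tree %s (space %s): built in %dms, len=%d\n",
                st.ObjectId, st.SpaceId, st.BuildTimeMillis, st.TreeLen)
        }
    }
}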
|
@ -5,7 +5,6 @@
//
// mockgen -destination mock_updatelistener/mock_updatelistener.go github.com/anyproto/any-sync/commonspace/object/tree/synctree/updatelistener UpdateListener
//

// Package mock_updatelistener is a generated GoMock package.
package mock_updatelistener

@ -5,7 +5,6 @@
//
// mockgen -destination mock_treemanager/mock_treemanager.go github.com/anyproto/any-sync/commonspace/object/treemanager TreeManager
//

// Package mock_treemanager is a generated GoMock package.
package mock_treemanager

@ -5,7 +5,6 @@
//
// mockgen -destination mock_treesyncer/mock_treesyncer.go github.com/anyproto/any-sync/commonspace/object/treesyncer TreeSyncer
//

// Package mock_treesyncer is a generated GoMock package.
package mock_treesyncer

@ -5,7 +5,6 @@
//
// mockgen -destination mock_objectmanager/mock_objectmanager.go github.com/anyproto/any-sync/commonspace/objectmanager ObjectManager
//

// Package mock_objectmanager is a generated GoMock package.
package mock_objectmanager

@ -5,7 +5,6 @@
//
// mockgen -destination mock_objecttreebuilder/mock_objecttreebuilder.go github.com/anyproto/any-sync/commonspace/objecttreebuilder TreeBuilder
//

// Package mock_objecttreebuilder is a generated GoMock package.
package mock_objecttreebuilder
|
8 commonspace/objecttreebuilder/stat.go Normal file
@ -0,0 +1,8 @@
package objecttreebuilder

import "github.com/anyproto/any-sync/commonspace/object/tree/synctree"

type debugStat struct {
TreeStats []synctree.TreeStats `json:"tree_stats"`
SpaceId string `json:"space_id"`
}
|
@ -9,6 +9,7 @@ import (
"go.uber.org/zap"

"github.com/anyproto/any-sync/app"
"github.com/anyproto/any-sync/app/debugstat"
"github.com/anyproto/any-sync/app/logger"
"github.com/anyproto/any-sync/commonspace/object/acl/list"
"github.com/anyproto/any-sync/commonspace/object/acl/syncacl"

@ -69,14 +70,47 @@ type treeBuilder struct {

log logger.CtxLogger
builder objecttree.BuildObjectTreeFunc
treeStats *synctree.TreeStatsCollector
debugStat debugstat.StatService
spaceId string
aclList list.AclList
treesUsed *atomic.Int32
isClosed *atomic.Bool
}

func (t *treeBuilder) ProvideStat() any {
return debugStat{
TreeStats: t.treeStats.Collect(),
SpaceId: t.spaceId,
}
}

func (t *treeBuilder) StatId() string {
return t.spaceId
}

func (t *treeBuilder) StatType() string {
return CName
}

func (t *treeBuilder) Run(ctx context.Context) (err error) {
t.debugStat.AddProvider(t)
return
}

func (t *treeBuilder) Close(ctx context.Context) (err error) {
t.debugStat.RemoveProvider(t)
return
}

func (t *treeBuilder) Init(a *app.App) (err error) {
state := a.MustComponent(spacestate.CName).(*spacestate.SpaceState)
comp, ok := a.Component(debugstat.CName).(debugstat.StatService)
if !ok {
comp = debugstat.NewNoOp()
}
t.treeStats = synctree.NewTreeStatsCollector(state.SpaceId)
t.debugStat = comp
t.spaceId = state.SpaceId
t.isClosed = state.SpaceIsClosed
t.treesUsed = state.TreesUsed

@ -119,6 +153,7 @@ func (t *treeBuilder) BuildTree(ctx context.Context, id string, opts BuildTreeOp
PeerGetter: t.peerManager,
BuildObjectTree: treeBuilder,
ValidateObjectTree: opts.TreeValidator,
StatsCollector: t.treeStats,
}
t.treesUsed.Add(1)
t.log.Debug("incrementing counter", zap.String("id", id), zap.Int32("trees", t.treesUsed.Load()))
|
@ -5,7 +5,6 @@
//
// mockgen -destination mock_peermanager/mock_peermanager.go github.com/anyproto/any-sync/commonspace/peermanager PeerManager
//

// Package mock_peermanager is a generated GoMock package.
package mock_peermanager

@ -5,7 +5,6 @@
//
// mockgen -destination mock_settingsstate/mock_settingsstate.go github.com/anyproto/any-sync/commonspace/settings/settingsstate StateBuilder,ChangeFactory
//

// Package mock_settingsstate is a generated GoMock package.
package mock_settingsstate
|
@ -13,8 +13,10 @@ import (
"github.com/anyproto/any-sync/app"
"github.com/anyproto/any-sync/commonspace/acl/aclclient"
"github.com/anyproto/any-sync/commonspace/headsync"
"github.com/anyproto/any-sync/commonspace/headsync/headstorage"
"github.com/anyproto/any-sync/commonspace/object/acl/list"
"github.com/anyproto/any-sync/commonspace/object/acl/syncacl"
"github.com/anyproto/any-sync/commonspace/object/keyvalue/kvinterfaces"
"github.com/anyproto/any-sync/commonspace/object/treesyncer"
"github.com/anyproto/any-sync/commonspace/objecttreebuilder"
"github.com/anyproto/any-sync/commonspace/peermanager"

@ -27,35 +29,8 @@ import (
"github.com/anyproto/any-sync/commonspace/syncstatus"
"github.com/anyproto/any-sync/net/peer"
"github.com/anyproto/any-sync/net/streampool"
"github.com/anyproto/any-sync/util/crypto"
)

type SpaceCreatePayload struct {
// SigningKey is the signing key of the owner
SigningKey crypto.PrivKey
// SpaceType is an arbitrary string
SpaceType string
// ReplicationKey is a key which is to be used to determine the node where the space should be held
ReplicationKey uint64
// SpacePayload is an arbitrary payload related to space type
SpacePayload []byte
// MasterKey is the master key of the owner
MasterKey crypto.PrivKey
// ReadKey is the first read key of space
ReadKey crypto.SymKey
// MetadataKey is the first metadata key of space
MetadataKey crypto.PrivKey
// Metadata is the metadata of the owner
Metadata []byte
}

type SpaceDerivePayload struct {
SigningKey crypto.PrivKey
MasterKey crypto.PrivKey
SpaceType string
SpacePayload []byte
}

type SpaceDescription struct {
SpaceHeader *spacesyncproto.RawSpaceHeaderWithId
AclId string

@ -82,6 +57,7 @@ type Space interface {
AclClient() aclclient.AclSpaceClient
SyncStatus() syncstatus.StatusUpdater
Storage() spacestorage.SpaceStorage
KeyValue() kvinterfaces.KeyValueService

DeleteTree(ctx context.Context, id string) (err error)
GetNodePeers(ctx context.Context) (peer []peer.Peer, err error)

@ -109,6 +85,7 @@ type space struct {
settings settings.Settings
storage spacestorage.SpaceStorage
aclClient aclclient.AclSpaceClient
keyValue kvinterfaces.KeyValueService
aclList list.AclList
creationTime time.Time
}

@ -145,8 +122,17 @@ func (s *space) StoredIds() []string {
return s.headSync.ExternalIds()
}

func (s *space) DebugAllHeads() []headsync.TreeHeads {
return s.headSync.DebugAllHeads()
func (s *space) DebugAllHeads() (heads []headsync.TreeHeads) {
s.storage.HeadStorage().IterateEntries(context.Background(), headstorage.IterOpts{}, func(entry headstorage.HeadsEntry) (bool, error) {
if entry.CommonSnapshot != "" {
heads = append(heads, headsync.TreeHeads{
Id: entry.Id,
Heads: entry.Heads,
})
}
return true, nil
})
return heads
}

func (s *space) DeleteTree(ctx context.Context, id string) (err error) {

@ -211,6 +197,7 @@ func (s *space) Init(ctx context.Context) (err error) {
s.streamPool = s.app.MustComponent(streampool.CName).(streampool.StreamPool)
s.treeSyncer = s.app.MustComponent(treesyncer.CName).(treesyncer.TreeSyncer)
s.aclClient = s.app.MustComponent(aclclient.CName).(aclclient.AclSpaceClient)
s.keyValue = s.app.MustComponent(kvinterfaces.CName).(kvinterfaces.KeyValueService)
return
}

@ -218,6 +205,10 @@ func (s *space) SyncStatus() syncstatus.StatusUpdater {
return s.syncStatus
}

func (s *space) KeyValue() kvinterfaces.KeyValueService {
return s.keyValue
}

func (s *space) Storage() spacestorage.SpaceStorage {
return s.storage
}
|
@ -1,9 +1,9 @@
package commonspace
package spacepayloads

import (
"crypto/rand"
"errors"
"hash/fnv"
"math/rand"
"strconv"
"strings"
"time"

@ -21,6 +21,32 @@ import (
"github.com/anyproto/any-sync/util/crypto"
)

type SpaceCreatePayload struct {
// SigningKey is the signing key of the owner
SigningKey crypto.PrivKey
// SpaceType is an arbitrary string
SpaceType string
// ReplicationKey is a key which is to be used to determine the node where the space should be held
ReplicationKey uint64
// SpacePayload is an arbitrary payload related to space type
SpacePayload []byte
// MasterKey is the master key of the owner
MasterKey crypto.PrivKey
// ReadKey is the first read key of space
ReadKey crypto.SymKey
// MetadataKey is the first metadata key of space
MetadataKey crypto.PrivKey
// Metadata is the metadata of the owner
Metadata []byte
}

type SpaceDerivePayload struct {
SigningKey crypto.PrivKey
MasterKey crypto.PrivKey
SpaceType string
SpacePayload []byte
}

const (
SpaceReserved = "any-sync.space"
)

@ -113,7 +139,7 @@ func StoragePayloadForSpaceCreate(payload SpaceCreatePayload) (storagePayload sp
return
}

func storagePayloadForSpaceDerive(payload SpaceDerivePayload) (storagePayload spacestorage.SpaceStorageCreatePayload, err error) {
func StoragePayloadForSpaceDerive(payload SpaceDerivePayload) (storagePayload spacestorage.SpaceStorageCreatePayload, err error) {
// marshalling keys
identity, err := payload.SigningKey.GetPublic().Marshall()
if err != nil {

@ -192,7 +218,7 @@ func storagePayloadForSpaceDerive(payload SpaceDerivePayload) (storagePayload sp
return
}

func validateSpaceStorageCreatePayload(payload spacestorage.SpaceStorageCreatePayload) (err error) {
func ValidateSpaceStorageCreatePayload(payload spacestorage.SpaceStorageCreatePayload) (err error) {
err = ValidateSpaceHeader(payload.SpaceHeaderWithId, nil)
if err != nil {
return

@ -328,3 +354,7 @@ func validateCreateSpaceSettingsPayload(rawWithId *treechangeproto.RawTreeChange

return
}

func NewSpaceId(id string, repKey uint64) string {
return strings.Join([]string{id, strconv.FormatUint(repKey, 36)}, ".")
}
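For reference, NewSpaceId appends the replication key encoded in base 36 after a dot; the values below are purely illustrative:

// Illustrative values only.
id := NewSpaceId("bafyreiabc", 123) // "bafyreiabc.3f" (123 in base 36 is "3f")
_ = id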
|
@ -1,4 +1,4 @@
package commonspace
package spacepayloads

import (
"fmt"

@ -7,6 +7,9 @@ import (
"testing"
"time"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

"github.com/anyproto/any-sync/commonspace/object/accountdata"
"github.com/anyproto/any-sync/commonspace/object/acl/aclrecordproto"
"github.com/anyproto/any-sync/commonspace/object/tree/objecttree"

@ -16,8 +19,6 @@ import (
"github.com/anyproto/any-sync/consensus/consensusproto"
"github.com/anyproto/any-sync/util/cidutil"
"github.com/anyproto/any-sync/util/crypto"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func TestSuccessHeaderPayloadForSpaceCreate(t *testing.T) {

@ -438,7 +439,7 @@ func TestSuccessSameIds(t *testing.T) {
SpaceHeaderWithId: rawHeaderWithId,
SpaceSettingsWithId: rawSettingsPayload,
}
err = validateSpaceStorageCreatePayload(spacePayload)
err = ValidateSpaceStorageCreatePayload(spacePayload)
require.NoError(t, err)
}

@ -455,7 +456,7 @@ func TestFailWithAclWrongSpaceId(t *testing.T) {
SpaceHeaderWithId: rawHeaderWithId,
SpaceSettingsWithId: rawSettingsPayload,
}
err = validateSpaceStorageCreatePayload(spacePayload)
err = ValidateSpaceStorageCreatePayload(spacePayload)
assert.EqualErrorf(t, err, spacestorage.ErrIncorrectSpaceHeader.Error(), "Error should be: %v, got: %v", spacestorage.ErrIncorrectSpaceHeader, err)
}

@ -472,7 +473,7 @@ func TestFailWithSettingsWrongSpaceId(t *testing.T) {
SpaceHeaderWithId: rawHeaderWithId,
SpaceSettingsWithId: rawSettingsPayload,
}
err = validateSpaceStorageCreatePayload(spacePayload)
err = ValidateSpaceStorageCreatePayload(spacePayload)
assert.EqualErrorf(t, err, spacestorage.ErrIncorrectSpaceHeader.Error(), "Error should be: %v, got: %v", spacestorage.ErrIncorrectSpaceHeader, err)
}

@ -489,7 +490,7 @@ func TestFailWithWrongAclHeadIdInSettingsPayload(t *testing.T) {
SpaceHeaderWithId: rawHeaderWithId,
SpaceSettingsWithId: rawSettingsPayload,
}
err = validateSpaceStorageCreatePayload(spacePayload)
err = ValidateSpaceStorageCreatePayload(spacePayload)
assert.EqualErrorf(t, err, spacestorage.ErrIncorrectSpaceHeader.Error(), "Error should be: %v, got: %v", spacestorage.ErrIncorrectSpaceHeader, err)
}
|
@ -93,6 +93,16 @@ type RpcServer struct {
sync.Mutex
}

func (r *RpcServer) StoreDiff(ctx2 context.Context, request *spacesyncproto.StoreDiffRequest) (*spacesyncproto.StoreDiffResponse, error) {
//TODO implement me
panic("implement me")
}

func (r *RpcServer) StoreElements(stream spacesyncproto.DRPCSpaceSync_StoreElementsStream) error {
//TODO implement me
panic("implement me")
}

func NewRpcServer() *RpcServer {
return &RpcServer{
spaces: make(map[string]Space),
|
@ -13,7 +13,10 @@ import (
|
|||
|
||||
"github.com/anyproto/any-sync/commonspace/acl/aclclient"
|
||||
"github.com/anyproto/any-sync/commonspace/deletionmanager"
|
||||
"github.com/anyproto/any-sync/commonspace/object/keyvalue"
|
||||
"github.com/anyproto/any-sync/commonspace/object/keyvalue/keyvaluestorage"
|
||||
"github.com/anyproto/any-sync/commonspace/object/treesyncer"
|
||||
"github.com/anyproto/any-sync/commonspace/spacepayloads"
|
||||
"github.com/anyproto/any-sync/commonspace/sync"
|
||||
"github.com/anyproto/any-sync/commonspace/sync/objectsync"
|
||||
"github.com/anyproto/any-sync/net"
|
||||
|
@ -58,16 +61,18 @@ type ctxKey int
|
|||
const AddSpaceCtxKey ctxKey = 0
|
||||
|
||||
type SpaceService interface {
|
||||
DeriveSpace(ctx context.Context, payload SpaceDerivePayload) (string, error)
|
||||
DeriveId(ctx context.Context, payload SpaceDerivePayload) (string, error)
|
||||
CreateSpace(ctx context.Context, payload SpaceCreatePayload) (string, error)
|
||||
DeriveSpace(ctx context.Context, payload spacepayloads.SpaceDerivePayload) (string, error)
|
||||
DeriveId(ctx context.Context, payload spacepayloads.SpaceDerivePayload) (string, error)
|
||||
CreateSpace(ctx context.Context, payload spacepayloads.SpaceCreatePayload) (string, error)
|
||||
NewSpace(ctx context.Context, id string, deps Deps) (sp Space, err error)
|
||||
app.Component
|
||||
}
|
||||
|
||||
type Deps struct {
|
||||
SyncStatus syncstatus.StatusUpdater
|
||||
TreeSyncer treesyncer.TreeSyncer
|
||||
SyncStatus syncstatus.StatusUpdater
|
||||
TreeSyncer treesyncer.TreeSyncer
|
||||
AccountService accountservice.Service
|
||||
Indexer keyvaluestorage.Indexer
|
||||
}
|
||||
|
||||
type spaceService struct {
|
||||
|
@ -100,8 +105,8 @@ func (s *spaceService) Name() (name string) {
|
|||
return CName
|
||||
}
|
||||
|
||||
func (s *spaceService) CreateSpace(ctx context.Context, payload SpaceCreatePayload) (id string, err error) {
|
||||
storageCreate, err := StoragePayloadForSpaceCreate(payload)
|
||||
func (s *spaceService) CreateSpace(ctx context.Context, payload spacepayloads.SpaceCreatePayload) (id string, err error) {
|
||||
storageCreate, err := spacepayloads.StoragePayloadForSpaceCreate(payload)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
@ -116,8 +121,8 @@ func (s *spaceService) CreateSpace(ctx context.Context, payload SpaceCreatePaylo
|
|||
return store.Id(), store.Close(ctx)
|
||||
}
|
||||
|
||||
func (s *spaceService) DeriveId(ctx context.Context, payload SpaceDerivePayload) (id string, err error) {
|
||||
storageCreate, err := storagePayloadForSpaceDerive(payload)
|
||||
func (s *spaceService) DeriveId(ctx context.Context, payload spacepayloads.SpaceDerivePayload) (id string, err error) {
|
||||
storageCreate, err := spacepayloads.StoragePayloadForSpaceDerive(payload)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
@ -125,8 +130,8 @@ func (s *spaceService) DeriveId(ctx context.Context, payload SpaceDerivePayload)
|
|||
return
|
||||
}
|
||||
|
||||
func (s *spaceService) DeriveSpace(ctx context.Context, payload SpaceDerivePayload) (id string, err error) {
|
||||
storageCreate, err := storagePayloadForSpaceDerive(payload)
|
||||
func (s *spaceService) DeriveSpace(ctx context.Context, payload spacepayloads.SpaceDerivePayload) (id string, err error) {
|
||||
storageCreate, err := spacepayloads.StoragePayloadForSpaceDerive(payload)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
@ -176,13 +181,22 @@ func (s *spaceService) NewSpace(ctx context.Context, id string, deps Deps) (Spac
|
|||
return nil, err
|
||||
}
|
||||
spaceApp := s.app.ChildApp()
|
||||
if deps.AccountService != nil {
|
||||
spaceApp.Register(deps.AccountService)
|
||||
}
|
||||
var keyValueIndexer keyvaluestorage.Indexer = keyvaluestorage.NoOpIndexer{}
|
||||
if deps.Indexer != nil {
|
||||
keyValueIndexer = deps.Indexer
|
||||
}
|
||||
spaceApp.Register(state).
|
||||
Register(deps.SyncStatus).
|
||||
Register(peerManager).
|
||||
Register(st).
|
||||
Register(keyValueIndexer).
|
||||
Register(objectsync.New()).
|
||||
Register(sync.NewSyncService()).
|
||||
Register(syncacl.New()).
|
||||
Register(keyvalue.New()).
|
||||
Register(deletionstate.New()).
|
||||
Register(deletionmanager.New()).
|
||||
Register(settings.New()).
|
||||
|
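A small sketch of how a caller might fill the extended Deps, relying on the registration logic above: AccountService and Indexer are optional, and when left nil the space keeps the parent app's account service and falls back to keyvaluestorage.NoOpIndexer. The helper name and the use of the no-op status updater are illustrative choices, not part of this commit:

package example

import (
	"context"

	"github.com/anyproto/any-sync/commonspace"
	"github.com/anyproto/any-sync/commonspace/object/treesyncer"
	"github.com/anyproto/any-sync/commonspace/syncstatus"
)

// newSpaceWithDefaults builds a space with only the mandatory dependencies set;
// the omitted AccountService/Indexer take the defaults handled in NewSpace above.
func newSpaceWithDefaults(ctx context.Context, svc commonspace.SpaceService, id string, ts treesyncer.TreeSyncer) (commonspace.Space, error) {
	return svc.NewSpace(ctx, id, commonspace.Deps{
		SyncStatus: syncstatus.NewNoOpSyncStatus(),
		TreeSyncer: ts,
		// AccountService: nil, // reuse the parent app's account service
		// Indexer:        nil, // falls back to keyvaluestorage.NoOpIndexer{}
	})
}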
@ -300,7 +314,7 @@ func (s *spaceService) spacePullWithPeer(ctx context.Context, p peer.Peer, id st
|
|||
}
|
||||
|
||||
func (s *spaceService) createSpaceStorage(ctx context.Context, payload spacestorage.SpaceStorageCreatePayload) (spacestorage.SpaceStorage, error) {
|
||||
err := validateSpaceStorageCreatePayload(payload)
|
||||
err := spacepayloads.ValidateSpaceStorageCreatePayload(payload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
@ -190,7 +190,7 @@ func (s *spaceMigrator) migrateHash(ctx context.Context, oldStorage oldstorage.S
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return newStorage.StateStorage().SetHash(ctx, spaceHash)
|
||||
return newStorage.StateStorage().SetHash(ctx, spaceHash, spaceHash)
|
||||
}
|
||||
|
||||
func (s *spaceMigrator) checkMigrated(ctx context.Context, id string) (bool, spacestorage.SpaceStorage) {
|
||||
|
|
|
@ -5,7 +5,6 @@
|
|||
//
|
||||
// mockgen -destination mock_spacestorage/mock_spacestorage.go github.com/anyproto/any-sync/commonspace/spacestorage SpaceStorage
|
||||
//
|
||||
|
||||
// Package mock_spacestorage is a generated GoMock package.
|
||||
package mock_spacestorage
|
||||
|
||||
|
|
|
@ -52,7 +52,7 @@ type SpaceStorageProvider interface {
|
|||
CreateSpaceStorage(ctx context.Context, payload SpaceStorageCreatePayload) (SpaceStorage, error)
|
||||
}
|
||||
|
||||
func Create(ctx context.Context, store anystore.DB, payload SpaceStorageCreatePayload) (SpaceStorage, error) {
|
||||
func Create(ctx context.Context, store anystore.DB, payload SpaceStorageCreatePayload) (st SpaceStorage, err error) {
|
||||
spaceId := payload.SpaceHeaderWithId.Id
|
||||
state := statestorage.State{
|
||||
AclId: payload.AclWithId.Id,
|
||||
|
@ -60,7 +60,18 @@ func Create(ctx context.Context, store anystore.DB, payload SpaceStorageCreatePa
|
|||
SpaceId: payload.SpaceHeaderWithId.Id,
|
||||
SpaceHeader: payload.SpaceHeaderWithId.RawHeader,
|
||||
}
|
||||
changesColl, err := store.Collection(ctx, objecttree.CollName)
|
||||
tx, err := store.WriteTx(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
_ = tx.Rollback()
|
||||
} else {
|
||||
err = tx.Commit()
|
||||
}
|
||||
}()
|
||||
changesColl, err := store.Collection(tx.Context(), objecttree.CollName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -68,27 +79,27 @@ func Create(ctx context.Context, store anystore.DB, payload SpaceStorageCreatePa
|
|||
Fields: []string{objecttree.TreeKey, objecttree.OrderKey},
|
||||
Unique: true,
|
||||
}
|
||||
err = changesColl.EnsureIndex(ctx, orderIdx)
|
||||
err = changesColl.EnsureIndex(tx.Context(), orderIdx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// TODO: put it in one transaction
|
||||
stateStorage, err := statestorage.Create(ctx, state, store)
|
||||
stateStorage, err := statestorage.CreateTx(tx.Context(), state, store)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
headStorage, err := headstorage.New(ctx, store)
|
||||
headStorage, err := headstorage.New(tx.Context(), store)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
aclStorage, err := list.CreateStorage(ctx, &consensusproto.RawRecordWithId{
|
||||
aclStorage, err := list.CreateStorageTx(tx.Context(), &consensusproto.RawRecordWithId{
|
||||
Payload: payload.AclWithId.Payload,
|
||||
Id: payload.AclWithId.Id,
|
||||
}, headStorage, store)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, err = objecttree.CreateStorage(ctx, &treechangeproto.RawTreeChangeWithId{
|
||||
_, err = objecttree.CreateStorageTx(tx.Context(), &treechangeproto.RawTreeChangeWithId{
|
||||
RawChange: payload.SpaceSettingsWithId.RawChange,
|
||||
Id: payload.SpaceSettingsWithId.Id,
|
||||
}, headStorage, store)
|
||||
|
|
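The rewritten Create above replaces the old per-call writes (and the former "put it in one transaction" TODO) with a single anystore write transaction: every collection operation runs on tx.Context(), and the deferred block rolls everything back on error or commits on success. A condensed sketch of that pattern on its own, assuming an anystore.DB handle; the helper name is illustrative:

package example

import (
	"context"

	anystore "github.com/anyproto/any-store"
)

// withWriteTx runs fn inside one write transaction: any error rolls the whole
// write back, otherwise the deferred Commit makes it durable in a single step.
func withWriteTx(ctx context.Context, store anystore.DB, fn func(txCtx context.Context) error) (err error) {
	tx, err := store.WriteTx(ctx)
	if err != nil {
		return err
	}
	defer func() {
		if err != nil {
			_ = tx.Rollback()
		} else {
			err = tx.Commit()
		}
	}()
	// operations joined to the transaction must use tx.Context(), as Create does above
	return fn(tx.Context())
}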
53
commonspace/spacestorage_test.go
Normal file
|
@ -0,0 +1,53 @@
|
|||
package commonspace
|
||||
|
||||
import (
|
||||
"context"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
anystore "github.com/anyproto/any-store"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/anyproto/any-sync/commonspace/object/accountdata"
|
||||
"github.com/anyproto/any-sync/commonspace/spacepayloads"
|
||||
"github.com/anyproto/any-sync/commonspace/spacestorage"
|
||||
"github.com/anyproto/any-sync/util/crypto"
|
||||
)
|
||||
|
||||
func newStorageCreatePayload(t *testing.T) spacestorage.SpaceStorageCreatePayload {
|
||||
keys, err := accountdata.NewRandom()
|
||||
require.NoError(t, err)
|
||||
masterKey, _, err := crypto.GenerateRandomEd25519KeyPair()
|
||||
require.NoError(t, err)
|
||||
metaKey, _, err := crypto.GenerateRandomEd25519KeyPair()
|
||||
require.NoError(t, err)
|
||||
readKey := crypto.NewAES()
|
||||
meta := []byte("account")
|
||||
payload := spacepayloads.SpaceCreatePayload{
|
||||
SigningKey: keys.SignKey,
|
||||
SpaceType: "space",
|
||||
ReplicationKey: 10,
|
||||
SpacePayload: nil,
|
||||
MasterKey: masterKey,
|
||||
ReadKey: readKey,
|
||||
MetadataKey: metaKey,
|
||||
Metadata: meta,
|
||||
}
|
||||
createSpace, err := spacepayloads.StoragePayloadForSpaceCreate(payload)
|
||||
require.NoError(t, err)
|
||||
return createSpace
|
||||
}
|
||||
|
||||
var ctx = context.Background()
|
||||
|
||||
func TestCreateSpaceStorageFailed_EmptyStorage(t *testing.T) {
|
||||
payload := newStorageCreatePayload(t)
|
||||
store, err := anystore.Open(ctx, filepath.Join(t.TempDir(), "store.db"), nil)
|
||||
require.NoError(t, err)
|
||||
payload.SpaceSettingsWithId.RawChange = nil
|
||||
_, err = spacestorage.Create(ctx, store, payload)
|
||||
require.Error(t, err)
|
||||
collNames, err := store.GetCollectionNames(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Empty(t, collNames)
|
||||
}
|
|
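For contrast with the failure case above, a possible happy-path test (not part of this commit) would assert that a valid payload leaves committed collections behind. It reuses the imports and the newStorageCreatePayload helper from the new test file:

// TestCreateSpaceStorageSucceeds_Sketch is a hypothetical counterpart to the
// failure test: with a valid payload the transaction commits and the store
// is no longer empty.
func TestCreateSpaceStorageSucceeds_Sketch(t *testing.T) {
	payload := newStorageCreatePayload(t)
	store, err := anystore.Open(ctx, filepath.Join(t.TempDir(), "store.db"), nil)
	require.NoError(t, err)
	_, err = spacestorage.Create(ctx, store, payload)
	require.NoError(t, err)
	collNames, err := store.GetCollectionNames(ctx)
	require.NoError(t, err)
	require.NotEmpty(t, collNames)
}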
@ -5,7 +5,6 @@
|
|||
//
|
||||
// mockgen -destination mock_spacesyncproto/mock_spacesyncproto.go github.com/anyproto/any-sync/commonspace/spacesyncproto DRPCSpaceSyncClient
|
||||
//
|
||||
|
||||
// Package mock_spacesyncproto is a generated GoMock package.
|
||||
package mock_spacesyncproto
|
||||
|
||||
|
@ -174,3 +173,33 @@ func (mr *MockDRPCSpaceSyncClientMockRecorder) SpacePush(arg0, arg1 any) *gomock
|
|||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SpacePush", reflect.TypeOf((*MockDRPCSpaceSyncClient)(nil).SpacePush), arg0, arg1)
|
||||
}
|
||||
|
||||
// StoreDiff mocks base method.
|
||||
func (m *MockDRPCSpaceSyncClient) StoreDiff(arg0 context.Context, arg1 *spacesyncproto.StoreDiffRequest) (*spacesyncproto.StoreDiffResponse, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "StoreDiff", arg0, arg1)
|
||||
ret0, _ := ret[0].(*spacesyncproto.StoreDiffResponse)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// StoreDiff indicates an expected call of StoreDiff.
|
||||
func (mr *MockDRPCSpaceSyncClientMockRecorder) StoreDiff(arg0, arg1 any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StoreDiff", reflect.TypeOf((*MockDRPCSpaceSyncClient)(nil).StoreDiff), arg0, arg1)
|
||||
}
|
||||
|
||||
// StoreElements mocks base method.
|
||||
func (m *MockDRPCSpaceSyncClient) StoreElements(arg0 context.Context) (spacesyncproto.DRPCSpaceSync_StoreElementsClient, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "StoreElements", arg0)
|
||||
ret0, _ := ret[0].(spacesyncproto.DRPCSpaceSync_StoreElementsClient)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// StoreElements indicates an expected call of StoreElements.
|
||||
func (mr *MockDRPCSpaceSyncClientMockRecorder) StoreElements(arg0 any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StoreElements", reflect.TypeOf((*MockDRPCSpaceSyncClient)(nil).StoreElements), arg0)
|
||||
}
|
||||
|
|
|
@ -20,6 +20,10 @@ enum ErrCodes {
|
|||
service SpaceSync {
|
||||
// HeadSync compares all objects and their hashes in a space
|
||||
rpc HeadSync(HeadSyncRequest) returns (HeadSyncResponse);
|
||||
// StoreDiff compares key-value store elements and their hashes in a space
|
||||
rpc StoreDiff(StoreDiffRequest) returns (StoreDiffResponse);
|
||||
// StoreElements exchanges elements between peers
|
||||
rpc StoreElements(stream StoreKeyValue) returns (stream StoreKeyValue);
|
||||
// SpacePush sends new space to the node
|
||||
rpc SpacePush(SpacePushRequest) returns (SpacePushResponse);
|
||||
// SpacePull gets space from the remote peer
|
||||
|
@ -63,7 +67,7 @@ message HeadSyncResultElement {
|
|||
message HeadSyncRequest {
|
||||
string spaceId = 1;
|
||||
repeated HeadSyncRange ranges = 2;
|
||||
DiffType diffType = 3 [deprecated=true];
|
||||
DiffType diffType = 3;
|
||||
}
|
||||
|
||||
// HeadSyncResponse is a response for HeadSync
|
||||
|
@ -79,6 +83,7 @@ message ObjectSyncMessage {
|
|||
string replyId = 3;
|
||||
bytes payload = 4;
|
||||
string objectId = 5;
|
||||
ObjectType objectType = 6;
|
||||
}
|
||||
|
||||
// SpacePushRequest is a request to add space on a node containing only one acl record
|
||||
|
@ -144,6 +149,12 @@ message ObjectDelete {
|
|||
string id = 1;
|
||||
}
|
||||
|
||||
// StoreHeader is a header for a store
|
||||
message StoreHeader {
|
||||
string spaceId = 1;
|
||||
string storageName = 2;
|
||||
}
|
||||
|
||||
// SpaceDelete is a message containing deleter peer id
|
||||
message SpaceDelete {
|
||||
string deleterPeerId = 1;
|
||||
|
@ -196,8 +207,51 @@ message AclGetRecordsResponse {
|
|||
repeated bytes records = 1;
|
||||
}
|
||||
|
||||
message StoreDiffRequest {
|
||||
string spaceId = 1;
|
||||
repeated HeadSyncRange ranges = 2;
|
||||
}
|
||||
|
||||
message StoreDiffResponse {
|
||||
repeated HeadSyncResult results = 1;
|
||||
}
|
||||
|
||||
message StoreKeyValue {
|
||||
string keyPeerId = 1;
|
||||
bytes value = 2;
|
||||
bytes identitySignature = 3;
|
||||
bytes peerSignature = 4;
|
||||
string spaceId = 5;
|
||||
}
|
||||
|
||||
message StoreKeyValues {
|
||||
repeated StoreKeyValue keyValues = 1;
|
||||
}
|
||||
|
||||
message StoreKeyInner {
|
||||
bytes peer = 1;
|
||||
bytes identity = 2;
|
||||
bytes value = 3;
|
||||
int64 timestampMicro = 4;
|
||||
string aclHeadId = 5;
|
||||
string key = 6;
|
||||
}
|
||||
|
||||
message StorageHeader {
|
||||
string spaceId = 1;
|
||||
string storageName = 2;
|
||||
}
|
||||
|
||||
// DiffType is a type of diff
|
||||
enum DiffType {
|
||||
Initial = 0;
|
||||
Precalculated = 1;
|
||||
}
|
||||
V1 = 1;
|
||||
V2 = 2;
|
||||
}
|
||||
|
||||
// ObjectType is a type of object
|
||||
enum ObjectType {
|
||||
Tree = 0;
|
||||
Acl = 1;
|
||||
KeyValue = 2;
|
||||
}
|
||||
|
|
File diff suppressed because it is too large
|
@ -41,6 +41,8 @@ type DRPCSpaceSyncClient interface {
|
|||
DRPCConn() drpc.Conn
|
||||
|
||||
HeadSync(ctx context.Context, in *HeadSyncRequest) (*HeadSyncResponse, error)
|
||||
StoreDiff(ctx context.Context, in *StoreDiffRequest) (*StoreDiffResponse, error)
|
||||
StoreElements(ctx context.Context) (DRPCSpaceSync_StoreElementsClient, error)
|
||||
SpacePush(ctx context.Context, in *SpacePushRequest) (*SpacePushResponse, error)
|
||||
SpacePull(ctx context.Context, in *SpacePullRequest) (*SpacePullResponse, error)
|
||||
ObjectSyncStream(ctx context.Context) (DRPCSpaceSync_ObjectSyncStreamClient, error)
|
||||
|
@ -69,6 +71,54 @@ func (c *drpcSpaceSyncClient) HeadSync(ctx context.Context, in *HeadSyncRequest)
|
|||
return out, nil
|
||||
}
|
||||
|
||||
func (c *drpcSpaceSyncClient) StoreDiff(ctx context.Context, in *StoreDiffRequest) (*StoreDiffResponse, error) {
|
||||
out := new(StoreDiffResponse)
|
||||
err := c.cc.Invoke(ctx, "/spacesync.SpaceSync/StoreDiff", drpcEncoding_File_commonspace_spacesyncproto_protos_spacesync_proto{}, in, out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *drpcSpaceSyncClient) StoreElements(ctx context.Context) (DRPCSpaceSync_StoreElementsClient, error) {
|
||||
stream, err := c.cc.NewStream(ctx, "/spacesync.SpaceSync/StoreElements", drpcEncoding_File_commonspace_spacesyncproto_protos_spacesync_proto{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x := &drpcSpaceSync_StoreElementsClient{stream}
|
||||
return x, nil
|
||||
}
|
||||
|
||||
type DRPCSpaceSync_StoreElementsClient interface {
|
||||
drpc.Stream
|
||||
Send(*StoreKeyValue) error
|
||||
Recv() (*StoreKeyValue, error)
|
||||
}
|
||||
|
||||
type drpcSpaceSync_StoreElementsClient struct {
|
||||
drpc.Stream
|
||||
}
|
||||
|
||||
func (x *drpcSpaceSync_StoreElementsClient) GetStream() drpc.Stream {
|
||||
return x.Stream
|
||||
}
|
||||
|
||||
func (x *drpcSpaceSync_StoreElementsClient) Send(m *StoreKeyValue) error {
|
||||
return x.MsgSend(m, drpcEncoding_File_commonspace_spacesyncproto_protos_spacesync_proto{})
|
||||
}
|
||||
|
||||
func (x *drpcSpaceSync_StoreElementsClient) Recv() (*StoreKeyValue, error) {
|
||||
m := new(StoreKeyValue)
|
||||
if err := x.MsgRecv(m, drpcEncoding_File_commonspace_spacesyncproto_protos_spacesync_proto{}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (x *drpcSpaceSync_StoreElementsClient) RecvMsg(m *StoreKeyValue) error {
|
||||
return x.MsgRecv(m, drpcEncoding_File_commonspace_spacesyncproto_protos_spacesync_proto{})
|
||||
}
|
||||
|
||||
func (c *drpcSpaceSyncClient) SpacePush(ctx context.Context, in *SpacePushRequest) (*SpacePushResponse, error) {
|
||||
out := new(SpacePushResponse)
|
||||
err := c.cc.Invoke(ctx, "/spacesync.SpaceSync/SpacePush", drpcEncoding_File_commonspace_spacesyncproto_protos_spacesync_proto{}, in, out)
|
||||
|
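Given the DRPCSpaceSync_StoreElementsClient interface above (Send/Recv of *StoreKeyValue over a drpc.Stream), a caller drives the new bidirectional stream roughly as below. This is a sketch: the StoreKeyValue field handling is left to the caller, and treating io.EOF from Recv as normal stream termination is an assumption about the drpc transport, not something this diff states:

package example

import (
	"context"
	"errors"
	"io"

	"github.com/anyproto/any-sync/commonspace/spacesyncproto"
)

// pushAndDrain sends a batch of key-value elements to the peer, closes the send
// side, then reads whatever elements the peer streams back until it finishes.
func pushAndDrain(ctx context.Context, client spacesyncproto.DRPCSpaceSyncClient, batch []*spacesyncproto.StoreKeyValue) (received []*spacesyncproto.StoreKeyValue, err error) {
	stream, err := client.StoreElements(ctx)
	if err != nil {
		return nil, err
	}
	for _, kv := range batch {
		if err = stream.Send(kv); err != nil {
			return nil, err
		}
	}
	if err = stream.CloseSend(); err != nil {
		return nil, err
	}
	for {
		kv, err := stream.Recv()
		if err != nil {
			if errors.Is(err, io.EOF) {
				return received, nil
			}
			return nil, err
		}
		received = append(received, kv)
	}
}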
@ -195,6 +245,8 @@ func (c *drpcSpaceSyncClient) AclGetRecords(ctx context.Context, in *AclGetRecor
|
|||
|
||||
type DRPCSpaceSyncServer interface {
|
||||
HeadSync(context.Context, *HeadSyncRequest) (*HeadSyncResponse, error)
|
||||
StoreDiff(context.Context, *StoreDiffRequest) (*StoreDiffResponse, error)
|
||||
StoreElements(DRPCSpaceSync_StoreElementsStream) error
|
||||
SpacePush(context.Context, *SpacePushRequest) (*SpacePushResponse, error)
|
||||
SpacePull(context.Context, *SpacePullRequest) (*SpacePullResponse, error)
|
||||
ObjectSyncStream(DRPCSpaceSync_ObjectSyncStreamStream) error
|
||||
|
@ -210,6 +262,14 @@ func (s *DRPCSpaceSyncUnimplementedServer) HeadSync(context.Context, *HeadSyncRe
|
|||
return nil, drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented)
|
||||
}
|
||||
|
||||
func (s *DRPCSpaceSyncUnimplementedServer) StoreDiff(context.Context, *StoreDiffRequest) (*StoreDiffResponse, error) {
|
||||
return nil, drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented)
|
||||
}
|
||||
|
||||
func (s *DRPCSpaceSyncUnimplementedServer) StoreElements(DRPCSpaceSync_StoreElementsStream) error {
|
||||
return drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented)
|
||||
}
|
||||
|
||||
func (s *DRPCSpaceSyncUnimplementedServer) SpacePush(context.Context, *SpacePushRequest) (*SpacePushResponse, error) {
|
||||
return nil, drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented)
|
||||
}
|
||||
|
@ -240,7 +300,7 @@ func (s *DRPCSpaceSyncUnimplementedServer) AclGetRecords(context.Context, *AclGe
|
|||
|
||||
type DRPCSpaceSyncDescription struct{}
|
||||
|
||||
func (DRPCSpaceSyncDescription) NumMethods() int { return 8 }
|
||||
func (DRPCSpaceSyncDescription) NumMethods() int { return 10 }
|
||||
|
||||
func (DRPCSpaceSyncDescription) Method(n int) (string, drpc.Encoding, drpc.Receiver, interface{}, bool) {
|
||||
switch n {
|
||||
|
@ -254,6 +314,23 @@ func (DRPCSpaceSyncDescription) Method(n int) (string, drpc.Encoding, drpc.Recei
|
|||
)
|
||||
}, DRPCSpaceSyncServer.HeadSync, true
|
||||
case 1:
|
||||
return "/spacesync.SpaceSync/StoreDiff", drpcEncoding_File_commonspace_spacesyncproto_protos_spacesync_proto{},
|
||||
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
|
||||
return srv.(DRPCSpaceSyncServer).
|
||||
StoreDiff(
|
||||
ctx,
|
||||
in1.(*StoreDiffRequest),
|
||||
)
|
||||
}, DRPCSpaceSyncServer.StoreDiff, true
|
||||
case 2:
|
||||
return "/spacesync.SpaceSync/StoreElements", drpcEncoding_File_commonspace_spacesyncproto_protos_spacesync_proto{},
|
||||
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
|
||||
return nil, srv.(DRPCSpaceSyncServer).
|
||||
StoreElements(
|
||||
&drpcSpaceSync_StoreElementsStream{in1.(drpc.Stream)},
|
||||
)
|
||||
}, DRPCSpaceSyncServer.StoreElements, true
|
||||
case 3:
|
||||
return "/spacesync.SpaceSync/SpacePush", drpcEncoding_File_commonspace_spacesyncproto_protos_spacesync_proto{},
|
||||
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
|
||||
return srv.(DRPCSpaceSyncServer).
|
||||
|
@ -262,7 +339,7 @@ func (DRPCSpaceSyncDescription) Method(n int) (string, drpc.Encoding, drpc.Recei
|
|||
in1.(*SpacePushRequest),
|
||||
)
|
||||
}, DRPCSpaceSyncServer.SpacePush, true
|
||||
case 2:
|
||||
case 4:
|
||||
return "/spacesync.SpaceSync/SpacePull", drpcEncoding_File_commonspace_spacesyncproto_protos_spacesync_proto{},
|
||||
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
|
||||
return srv.(DRPCSpaceSyncServer).
|
||||
|
@ -271,7 +348,7 @@ func (DRPCSpaceSyncDescription) Method(n int) (string, drpc.Encoding, drpc.Recei
|
|||
in1.(*SpacePullRequest),
|
||||
)
|
||||
}, DRPCSpaceSyncServer.SpacePull, true
|
||||
case 3:
|
||||
case 5:
|
||||
return "/spacesync.SpaceSync/ObjectSyncStream", drpcEncoding_File_commonspace_spacesyncproto_protos_spacesync_proto{},
|
||||
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
|
||||
return nil, srv.(DRPCSpaceSyncServer).
|
||||
|
@ -279,7 +356,7 @@ func (DRPCSpaceSyncDescription) Method(n int) (string, drpc.Encoding, drpc.Recei
|
|||
&drpcSpaceSync_ObjectSyncStreamStream{in1.(drpc.Stream)},
|
||||
)
|
||||
}, DRPCSpaceSyncServer.ObjectSyncStream, true
|
||||
case 4:
|
||||
case 6:
|
||||
return "/spacesync.SpaceSync/ObjectSync", drpcEncoding_File_commonspace_spacesyncproto_protos_spacesync_proto{},
|
||||
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
|
||||
return srv.(DRPCSpaceSyncServer).
|
||||
|
@ -288,7 +365,7 @@ func (DRPCSpaceSyncDescription) Method(n int) (string, drpc.Encoding, drpc.Recei
|
|||
in1.(*ObjectSyncMessage),
|
||||
)
|
||||
}, DRPCSpaceSyncServer.ObjectSync, true
|
||||
case 5:
|
||||
case 7:
|
||||
return "/spacesync.SpaceSync/ObjectSyncRequestStream", drpcEncoding_File_commonspace_spacesyncproto_protos_spacesync_proto{},
|
||||
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
|
||||
return nil, srv.(DRPCSpaceSyncServer).
|
||||
|
@ -297,7 +374,7 @@ func (DRPCSpaceSyncDescription) Method(n int) (string, drpc.Encoding, drpc.Recei
|
|||
&drpcSpaceSync_ObjectSyncRequestStreamStream{in2.(drpc.Stream)},
|
||||
)
|
||||
}, DRPCSpaceSyncServer.ObjectSyncRequestStream, true
|
||||
case 6:
|
||||
case 8:
|
||||
return "/spacesync.SpaceSync/AclAddRecord", drpcEncoding_File_commonspace_spacesyncproto_protos_spacesync_proto{},
|
||||
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
|
||||
return srv.(DRPCSpaceSyncServer).
|
||||
|
@ -306,7 +383,7 @@ func (DRPCSpaceSyncDescription) Method(n int) (string, drpc.Encoding, drpc.Recei
|
|||
in1.(*AclAddRecordRequest),
|
||||
)
|
||||
}, DRPCSpaceSyncServer.AclAddRecord, true
|
||||
case 7:
|
||||
case 9:
|
||||
return "/spacesync.SpaceSync/AclGetRecords", drpcEncoding_File_commonspace_spacesyncproto_protos_spacesync_proto{},
|
||||
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
|
||||
return srv.(DRPCSpaceSyncServer).
|
||||
|
@ -340,6 +417,48 @@ func (x *drpcSpaceSync_HeadSyncStream) SendAndClose(m *HeadSyncResponse) error {
|
|||
return x.CloseSend()
|
||||
}
|
||||
|
||||
type DRPCSpaceSync_StoreDiffStream interface {
|
||||
drpc.Stream
|
||||
SendAndClose(*StoreDiffResponse) error
|
||||
}
|
||||
|
||||
type drpcSpaceSync_StoreDiffStream struct {
|
||||
drpc.Stream
|
||||
}
|
||||
|
||||
func (x *drpcSpaceSync_StoreDiffStream) SendAndClose(m *StoreDiffResponse) error {
|
||||
if err := x.MsgSend(m, drpcEncoding_File_commonspace_spacesyncproto_protos_spacesync_proto{}); err != nil {
|
||||
return err
|
||||
}
|
||||
return x.CloseSend()
|
||||
}
|
||||
|
||||
type DRPCSpaceSync_StoreElementsStream interface {
|
||||
drpc.Stream
|
||||
Send(*StoreKeyValue) error
|
||||
Recv() (*StoreKeyValue, error)
|
||||
}
|
||||
|
||||
type drpcSpaceSync_StoreElementsStream struct {
|
||||
drpc.Stream
|
||||
}
|
||||
|
||||
func (x *drpcSpaceSync_StoreElementsStream) Send(m *StoreKeyValue) error {
|
||||
return x.MsgSend(m, drpcEncoding_File_commonspace_spacesyncproto_protos_spacesync_proto{})
|
||||
}
|
||||
|
||||
func (x *drpcSpaceSync_StoreElementsStream) Recv() (*StoreKeyValue, error) {
|
||||
m := new(StoreKeyValue)
|
||||
if err := x.MsgRecv(m, drpcEncoding_File_commonspace_spacesyncproto_protos_spacesync_proto{}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (x *drpcSpaceSync_StoreElementsStream) RecvMsg(m *StoreKeyValue) error {
|
||||
return x.MsgRecv(m, drpcEncoding_File_commonspace_spacesyncproto_protos_spacesync_proto{})
|
||||
}
|
||||
|
||||
type DRPCSpaceSync_SpacePushStream interface {
|
||||
drpc.Stream
|
||||
SendAndClose(*SpacePushResponse) error
|
||||
|
|
|
@ -2,12 +2,14 @@ package commonspace
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
anystore "github.com/anyproto/any-store"
|
||||
"github.com/anyproto/go-chash"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/zap"
|
||||
|
@ -26,6 +28,7 @@ import (
|
|||
"github.com/anyproto/any-sync/commonspace/object/treesyncer"
|
||||
"github.com/anyproto/any-sync/commonspace/objecttreebuilder"
|
||||
"github.com/anyproto/any-sync/commonspace/peermanager"
|
||||
"github.com/anyproto/any-sync/commonspace/spacepayloads"
|
||||
"github.com/anyproto/any-sync/commonspace/spacestorage"
|
||||
"github.com/anyproto/any-sync/commonspace/spacesyncproto"
|
||||
"github.com/anyproto/any-sync/commonspace/sync/objectsync/objectmessages"
|
||||
|
@ -751,7 +754,7 @@ func newMultiPeerFixture(t *testing.T, peerNum int) *multiPeerFixture {
|
|||
require.NoError(t, err)
|
||||
readKey := crypto.NewAES()
|
||||
meta := []byte("account")
|
||||
payload := SpaceCreatePayload{
|
||||
payload := spacepayloads.SpaceCreatePayload{
|
||||
SigningKey: keys.SignKey,
|
||||
SpaceType: "space",
|
||||
ReplicationKey: 10,
|
||||
|
@ -761,7 +764,7 @@ func newMultiPeerFixture(t *testing.T, peerNum int) *multiPeerFixture {
|
|||
MetadataKey: metaKey,
|
||||
Metadata: meta,
|
||||
}
|
||||
createSpace, err := StoragePayloadForSpaceCreate(payload)
|
||||
createSpace, err := spacepayloads.StoragePayloadForSpaceCreate(payload)
|
||||
require.NoError(t, err)
|
||||
executor := list.NewExternalKeysAclExecutor(createSpace.SpaceHeaderWithId.Id, keys, meta, createSpace.AclWithId)
|
||||
cmds := []string{
|
||||
|
@ -802,6 +805,9 @@ func newMultiPeerFixture(t *testing.T, peerNum int) *multiPeerFixture {
|
|||
err := listStorage.AddAll(ctx, []list.StorageRecord{
|
||||
{RawRecord: rec.Payload, Id: rec.Id, PrevId: prevRec, Order: i + 1, ChangeSize: len(rec.Payload)},
|
||||
})
|
||||
if errors.Is(err, anystore.ErrDocExists) {
|
||||
continue
|
||||
}
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
@ -829,7 +835,7 @@ func Test_Sync(t *testing.T) {
|
|||
require.NoError(t, err)
|
||||
state, err := sp.Storage().StateStorage().GetState(context.Background())
|
||||
require.NoError(t, err)
|
||||
hashes = append(hashes, state.Hash)
|
||||
hashes = append(hashes, state.NewHash)
|
||||
}
|
||||
for i := 1; i < len(hashes); i++ {
|
||||
require.Equal(t, hashes[0], hashes[i])
|
||||
|
|
|
@ -39,6 +39,7 @@ type InnerHeadUpdate interface {
|
|||
Prepare() error
|
||||
Heads() []string
|
||||
MsgSize() uint64
|
||||
ObjectType() spacesyncproto.ObjectType
|
||||
}
|
||||
|
||||
type ObjectMeta struct {
|
||||
|
@ -48,10 +49,11 @@ type ObjectMeta struct {
|
|||
}
|
||||
|
||||
type HeadUpdate struct {
|
||||
Meta ObjectMeta
|
||||
Bytes []byte
|
||||
Update InnerHeadUpdate
|
||||
msg *spacesyncproto.ObjectSyncMessage
|
||||
Meta ObjectMeta
|
||||
Bytes []byte
|
||||
Update InnerHeadUpdate
|
||||
objectType spacesyncproto.ObjectType
|
||||
msg *spacesyncproto.ObjectSyncMessage
|
||||
}
|
||||
|
||||
func (h *HeadUpdate) MsgSize() uint64 {
|
||||
|
@ -84,6 +86,7 @@ func (h *HeadUpdate) SetProtoMessage(message proto.Message) error {
|
|||
h.Bytes = msg.GetPayload()
|
||||
h.Meta.SpaceId = msg.SpaceId
|
||||
h.Meta.ObjectId = msg.ObjectId
|
||||
h.objectType = msg.GetObjectType()
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -94,14 +97,19 @@ func (h *HeadUpdate) ProtoMessage() (proto.Message, error) {
|
|||
return nil, err
|
||||
}
|
||||
return &spacesyncproto.ObjectSyncMessage{
|
||||
SpaceId: h.Meta.SpaceId,
|
||||
Payload: payload,
|
||||
ObjectId: h.Meta.ObjectId,
|
||||
SpaceId: h.Meta.SpaceId,
|
||||
Payload: payload,
|
||||
ObjectId: h.Meta.ObjectId,
|
||||
ObjectType: h.Update.ObjectType(),
|
||||
}, nil
|
||||
}
|
||||
return NewMessage(), nil
|
||||
}
|
||||
|
||||
func (h *HeadUpdate) ObjectType() spacesyncproto.ObjectType {
|
||||
return h.objectType
|
||||
}
|
||||
|
||||
func (h *HeadUpdate) SpaceId() string {
|
||||
return h.Meta.SpaceId
|
||||
}
|
||||
|
@ -116,9 +124,10 @@ func (h *HeadUpdate) ObjectId() string {
|
|||
|
||||
func (h *HeadUpdate) Copy() drpc.Message {
|
||||
return &HeadUpdate{
|
||||
Meta: h.Meta,
|
||||
Bytes: h.Bytes,
|
||||
Update: h.Update,
|
||||
msg: h.msg,
|
||||
Meta: h.Meta,
|
||||
Bytes: h.Bytes,
|
||||
Update: h.Update,
|
||||
msg: h.msg,
|
||||
objectType: h.objectType,
|
||||
}
|
||||
}
|
||||
|
|
|
@ -10,6 +10,8 @@ import (
|
|||
"go.uber.org/mock/gomock"
|
||||
|
||||
"github.com/anyproto/any-sync/app"
|
||||
"github.com/anyproto/any-sync/commonspace/object/keyvalue/kvinterfaces"
|
||||
"github.com/anyproto/any-sync/commonspace/object/keyvalue/kvinterfaces/mock_kvinterfaces"
|
||||
"github.com/anyproto/any-sync/commonspace/object/tree/synctree"
|
||||
"github.com/anyproto/any-sync/commonspace/object/tree/treechangeproto"
|
||||
"github.com/anyproto/any-sync/commonspace/object/treemanager"
|
||||
|
@ -159,6 +161,7 @@ func TestObjectSync_ApplyRequest(t *testing.T) {
|
|||
type fixture struct {
|
||||
*objectSync
|
||||
objectManager *mock_objectmanager.MockObjectManager
|
||||
keyValue *mock_kvinterfaces.MockKeyValueService
|
||||
pool *mock_pool.MockService
|
||||
a *app.App
|
||||
ctrl *gomock.Controller
|
||||
|
@ -171,13 +174,16 @@ func newFixture(t *testing.T) *fixture {
|
|||
fx.ctrl = gomock.NewController(t)
|
||||
fx.objectManager = mock_objectmanager.NewMockObjectManager(fx.ctrl)
|
||||
fx.pool = mock_pool.NewMockService(fx.ctrl)
|
||||
fx.keyValue = mock_kvinterfaces.NewMockKeyValueService(fx.ctrl)
|
||||
anymock.ExpectComp(fx.objectManager.EXPECT(), treemanager.CName)
|
||||
anymock.ExpectComp(fx.pool.EXPECT(), pool.CName)
|
||||
anymock.ExpectComp(fx.keyValue.EXPECT(), kvinterfaces.CName)
|
||||
fx.objectSync = &objectSync{}
|
||||
spaceState := &spacestate.SpaceState{SpaceId: "spaceId"}
|
||||
fx.a.Register(fx.objectManager).
|
||||
Register(spaceState).
|
||||
Register(fx.pool).
|
||||
Register(fx.keyValue).
|
||||
Register(syncstatus.NewNoOpSyncStatus()).
|
||||
Register(fx.objectSync)
|
||||
require.NoError(t, fx.a.Start(context.Background()))
|
||||
|
|
|
@ -11,6 +11,7 @@ import (
|
|||
|
||||
"github.com/anyproto/any-sync/app"
|
||||
"github.com/anyproto/any-sync/app/logger"
|
||||
"github.com/anyproto/any-sync/commonspace/object/keyvalue/kvinterfaces"
|
||||
"github.com/anyproto/any-sync/commonspace/object/tree/synctree"
|
||||
"github.com/anyproto/any-sync/commonspace/object/tree/treechangeproto"
|
||||
"github.com/anyproto/any-sync/commonspace/object/treemanager"
|
||||
|
@ -30,10 +31,11 @@ var ErrUnexpectedHeadUpdateType = errors.New("unexpected head update type")
|
|||
var log = logger.NewNamed(syncdeps.CName)
|
||||
|
||||
type objectSync struct {
|
||||
spaceId string
|
||||
pool pool.Service
|
||||
manager objectmanager.ObjectManager
|
||||
status syncstatus.StatusUpdater
|
||||
spaceId string
|
||||
pool pool.Service
|
||||
manager objectmanager.ObjectManager
|
||||
status syncstatus.StatusUpdater
|
||||
keyValue kvinterfaces.KeyValueService
|
||||
}
|
||||
|
||||
func New() syncdeps.SyncHandler {
|
||||
|
@ -43,6 +45,7 @@ func New() syncdeps.SyncHandler {
|
|||
func (o *objectSync) Init(a *app.App) (err error) {
|
||||
o.manager = a.MustComponent(treemanager.CName).(objectmanager.ObjectManager)
|
||||
o.pool = a.MustComponent(pool.CName).(pool.Service)
|
||||
o.keyValue = a.MustComponent(kvinterfaces.CName).(kvinterfaces.KeyValueService)
|
||||
o.status = a.MustComponent(syncstatus.CName).(syncstatus.StatusUpdater)
|
||||
o.spaceId = a.MustComponent(spacestate.CName).(*spacestate.SpaceState).SpaceId
|
||||
return
|
||||
|
@ -57,6 +60,9 @@ func (o *objectSync) HandleHeadUpdate(ctx context.Context, headUpdate drpc.Messa
|
|||
if !ok {
|
||||
return nil, ErrUnexpectedHeadUpdateType
|
||||
}
|
||||
if update.ObjectType() == spacesyncproto.ObjectType_KeyValue {
|
||||
return nil, o.keyValue.HandleMessage(ctx, update)
|
||||
}
|
||||
peerId, err := peer.CtxPeerId(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
|
@ -386,7 +386,12 @@ func (r *testRequest) MsgSize() uint64 {
|
|||
}
|
||||
|
||||
type testMessage struct {
|
||||
objectId string
|
||||
objectId string
|
||||
objectType spacesyncproto.ObjectType
|
||||
}
|
||||
|
||||
func (t *testMessage) ObjectType() spacesyncproto.ObjectType {
|
||||
return t.objectType
|
||||
}
|
||||
|
||||
func (t *testMessage) ObjectId() string {
|
||||
|
|
|
@ -1,6 +1,9 @@
|
|||
package syncdeps
|
||||
|
||||
import "github.com/anyproto/any-sync/commonspace/spacesyncproto"
|
||||
|
||||
type Message interface {
|
||||
ObjectId() string
|
||||
MsgSize() uint64
|
||||
ObjectType() spacesyncproto.ObjectType
|
||||
}
|
||||
|
|
|
@ -5,7 +5,6 @@
|
|||
//
|
||||
// mockgen -destination mock_syncdeps/mock_syncdeps.go github.com/anyproto/any-sync/commonspace/sync/syncdeps ObjectSyncHandler,RequestSender,ResponseCollector
|
||||
//
|
||||
|
||||
// Package mock_syncdeps is a generated GoMock package.
|
||||
package mock_syncdeps
|
||||
|
||||
|
|
|
@ -5,7 +5,6 @@
|
|||
//
|
||||
// mockgen -destination mock_syncstatus/mock_syncstatus.go github.com/anyproto/any-sync/commonspace/syncstatus StatusUpdater
|
||||
//
|
||||
|
||||
// Package mock_syncstatus is a generated GoMock package.
|
||||
package mock_syncstatus
|
||||
|
||||
|
|
|
@ -5,7 +5,6 @@
|
|||
//
|
||||
// mockgen -destination mock_consensusclient/mock_consensusclient.go github.com/anyproto/any-sync/consensus/consensusclient Service
|
||||
//
|
||||
|
||||
// Package mock_consensusclient is a generated GoMock package.
|
||||
package mock_consensusclient
|
||||
|
||||
|
|
10
go.mod
|
@ -6,7 +6,7 @@ toolchain go1.23.5
|
|||
|
||||
require (
|
||||
filippo.io/edwards25519 v1.1.0
|
||||
github.com/anyproto/any-store v0.1.8
|
||||
github.com/anyproto/any-store v0.1.11
|
||||
github.com/anyproto/go-chash v0.1.0
|
||||
github.com/anyproto/go-slip10 v1.0.0
|
||||
github.com/anyproto/go-slip21 v1.0.0
|
||||
|
@ -21,7 +21,7 @@ require (
|
|||
github.com/google/uuid v1.6.0
|
||||
github.com/hashicorp/yamux v0.1.2
|
||||
github.com/huandu/skiplist v1.2.1
|
||||
github.com/ipfs/boxo v0.29.0
|
||||
github.com/ipfs/boxo v0.29.1
|
||||
github.com/ipfs/go-block-format v0.2.0
|
||||
github.com/ipfs/go-cid v0.5.0
|
||||
github.com/ipfs/go-ipld-format v0.6.0
|
||||
|
@ -30,7 +30,7 @@ require (
|
|||
github.com/multiformats/go-multibase v0.2.0
|
||||
github.com/multiformats/go-multihash v0.2.3
|
||||
github.com/prometheus/client_golang v1.21.1
|
||||
github.com/quic-go/quic-go v0.50.0
|
||||
github.com/quic-go/quic-go v0.50.1
|
||||
github.com/stretchr/testify v1.10.0
|
||||
github.com/tyler-smith/go-bip39 v1.1.0
|
||||
github.com/zeebo/blake3 v0.2.4
|
||||
|
@ -38,7 +38,7 @@ require (
|
|||
go.uber.org/mock v0.5.0
|
||||
go.uber.org/zap v1.27.0
|
||||
golang.org/x/crypto v0.36.0
|
||||
golang.org/x/exp v0.0.0-20250218142911-aa4b98e5adaa
|
||||
golang.org/x/exp v0.0.0-20250305212735-054e65f0b394
|
||||
golang.org/x/net v0.37.0
|
||||
golang.org/x/sys v0.31.0
|
||||
golang.org/x/time v0.11.0
|
||||
|
@ -116,6 +116,6 @@ require (
|
|||
modernc.org/libc v1.61.13 // indirect
|
||||
modernc.org/mathutil v1.7.1 // indirect
|
||||
modernc.org/memory v1.8.2 // indirect
|
||||
modernc.org/sqlite v1.36.0 // indirect
|
||||
modernc.org/sqlite v1.36.1 // indirect
|
||||
zombiezen.com/go/sqlite v1.4.0 // indirect
|
||||
)
|
||||
|
|
24
go.sum
|
@ -6,8 +6,8 @@ github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAE
|
|||
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
|
||||
github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b h1:mimo19zliBX/vSQ6PWWSL9lK8qwHozUj03+zLoEB8O0=
|
||||
github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs=
|
||||
github.com/anyproto/any-store v0.1.8 h1:/bxUVq6sBTwYkmPL2g1xUAWNb3axF+zPhP2dvdEBH68=
|
||||
github.com/anyproto/any-store v0.1.8/go.mod h1:GpnVhcGm5aUQtOwCnKeTt4jsWgVXZ773WbQVLFdeCFo=
|
||||
github.com/anyproto/any-store v0.1.11 h1:xoaDVF8FJEI6V37fMw/R3ptBCLHj0kYiImwWxC1Ryu8=
|
||||
github.com/anyproto/any-store v0.1.11/go.mod h1:X3UkQ2zLATYNED3gFhY2VcdfDOeJvpEQ0PmDO90A9Yo=
|
||||
github.com/anyproto/go-chash v0.1.0 h1:I9meTPjXFRfXZHRJzjOHC/XF7Q5vzysKkiT/grsogXY=
|
||||
github.com/anyproto/go-chash v0.1.0/go.mod h1:0UjNQi3PDazP0fINpFYu6VKhuna+W/V+1vpXHAfNgLY=
|
||||
github.com/anyproto/go-slip10 v1.0.0 h1:uAEtSuudR3jJBOfkOXf3bErxVoxbuKwdoJN55M1i6IA=
|
||||
|
@ -77,8 +77,8 @@ github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiD
|
|||
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
|
||||
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/gammazero/chanqueue v1.0.0 h1:FER/sMailGFA3DDvFooEkipAMU+3c9Bg3bheloPSz6o=
|
||||
github.com/gammazero/chanqueue v1.0.0/go.mod h1:fMwpwEiuUgpab0sH4VHiVcEoji1pSi+EIzeG4TPeKPc=
|
||||
github.com/gammazero/chanqueue v1.1.0 h1:yiwtloc1azhgGLFo2gMloJtQvkYD936Ai7tBfa+rYJw=
|
||||
github.com/gammazero/chanqueue v1.1.0/go.mod h1:fMwpwEiuUgpab0sH4VHiVcEoji1pSi+EIzeG4TPeKPc=
|
||||
github.com/gammazero/deque v1.0.0 h1:LTmimT8H7bXkkCy6gZX7zNLtkbz4NdS2z8LZuor3j34=
|
||||
github.com/gammazero/deque v1.0.0/go.mod h1:iflpYvtGfM3U8S8j+sZEKIak3SAKYpA5/SQewgfXDKo=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
|
@ -120,8 +120,8 @@ github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
|
|||
github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
|
||||
github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs=
|
||||
github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0=
|
||||
github.com/ipfs/boxo v0.29.0 h1:clzd7PglUcE+Ufq1KucS3aKID7pzGVaSgcdRsW395t4=
|
||||
github.com/ipfs/boxo v0.29.0/go.mod h1:c3R52nMlgMsN1tADffYcogKoVRsX1RJE1TMYSpJ4uVs=
|
||||
github.com/ipfs/boxo v0.29.1 h1:z61ZT4YDfTHLjXTsu/+3wvJ8aJlExthDSOCpx6Nh8xc=
|
||||
github.com/ipfs/boxo v0.29.1/go.mod h1:MkDJStXiJS9U99cbAijHdcmwNfVn5DKYBmQCOgjY2NU=
|
||||
github.com/ipfs/go-bitfield v1.1.0 h1:fh7FIo8bSwaJEh6DdTWbCeZ1eqOaOkKFI74SCnsWbGA=
|
||||
github.com/ipfs/go-bitfield v1.1.0/go.mod h1:paqf1wjq/D2BBmzfTVFlJQ9IlFOZpg422HL0HqsGWHU=
|
||||
github.com/ipfs/go-block-format v0.2.0 h1:ZqrkxBA2ICbDRbK8KJs/u0O3dlp6gmAuuXUJNiW1Ycs=
|
||||
|
@ -293,8 +293,8 @@ github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0leargg
|
|||
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
|
||||
github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
|
||||
github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
|
||||
github.com/quic-go/quic-go v0.50.0 h1:3H/ld1pa3CYhkcc20TPIyG1bNsdhn9qZBGN3b9/UyUo=
|
||||
github.com/quic-go/quic-go v0.50.0/go.mod h1:Vim6OmUvlYdwBhXP9ZVrtGmCMWa3wEqhq3NgYrI8b4E=
|
||||
github.com/quic-go/quic-go v0.50.1 h1:unsgjFIUqW8a2oopkY7YNONpV1gYND6Nt9hnt1PN94Q=
|
||||
github.com/quic-go/quic-go v0.50.1/go.mod h1:Vim6OmUvlYdwBhXP9ZVrtGmCMWa3wEqhq3NgYrI8b4E=
|
||||
github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 h1:4WFk6u3sOT6pLa1kQ50ZVdm8BQFgJNA117cepZxtLIg=
|
||||
github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66/go.mod h1:Vp72IJajgeOL6ddqrAhmp7IM9zbTcgkQxD/YdxrVwMw=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
|
||||
|
@ -379,8 +379,8 @@ golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPh
|
|||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
|
||||
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
|
||||
golang.org/x/exp v0.0.0-20250218142911-aa4b98e5adaa h1:t2QcU6V556bFjYgu4L6C+6VrCPyJZ+eyRsABUPs1mz4=
|
||||
golang.org/x/exp v0.0.0-20250218142911-aa4b98e5adaa/go.mod h1:BHOTPb3L19zxehTsLoJXVaTktb06DFgmdW6Wb9s8jqk=
|
||||
golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 h1:nDVHiLt8aIbd/VzvPWN6kSOPE7+F/fNFDSXLVYkE/Iw=
|
||||
golang.org/x/exp v0.0.0-20250305212735-054e65f0b394/go.mod h1:sIifuuw/Yco/y6yb6+bDNfyeQ/MdPUy/hKEMYQV17cM=
|
||||
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/image v0.21.0 h1:c5qV36ajHpdj4Qi0GnE0jUc/yuo33OLFaa0d+crTD5s=
|
||||
golang.org/x/image v0.21.0/go.mod h1:vUbsLavqK/W303ZroQQVKQ+Af3Yl6Uz1Ppu5J/cLz78=
|
||||
|
@ -475,8 +475,8 @@ modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8=
|
|||
modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
|
||||
modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
|
||||
modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
|
||||
modernc.org/sqlite v1.36.0 h1:EQXNRn4nIS+gfsKeUTymHIz1waxuv5BzU7558dHSfH8=
|
||||
modernc.org/sqlite v1.36.0/go.mod h1:7MPwH7Z6bREicF9ZVUR78P1IKuxfZ8mRIDHD0iD+8TU=
|
||||
modernc.org/sqlite v1.36.1 h1:bDa8BJUH4lg6EGkLbahKe/8QqoF8p9gArSc6fTqYhyQ=
|
||||
modernc.org/sqlite v1.36.1/go.mod h1:7MPwH7Z6bREicF9ZVUR78P1IKuxfZ8mRIDHD0iD+8TU=
|
||||
modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
|
||||
modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
|
||||
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
|
||||
|
|
|
@ -5,7 +5,6 @@
|
|||
//
|
||||
// mockgen -destination=mock/mock_nameserviceclient.go -package=mock_nameserviceclient github.com/anyproto/any-sync/nameservice/nameserviceclient AnyNsClientService
|
||||
//
|
||||
|
||||
// Package mock_nameserviceclient is a generated GoMock package.
|
||||
package mock_nameserviceclient
|
||||
|
||||
|
|
|
@ -5,7 +5,6 @@
|
|||
//
|
||||
// mockgen -destination mock_peer/mock_peer.go github.com/anyproto/any-sync/net/peer Peer
|
||||
//
|
||||
|
||||
// Package mock_peer is a generated GoMock package.
|
||||
package mock_peer
|
||||
|
||||
|
|
Some files were not shown because too many files have changed in this diff