diff --git a/accountservice/mock_accountservice/mock_accountservice.go b/accountservice/mock_accountservice/mock_accountservice.go index f3a0e72f..632a9cb7 100644 --- a/accountservice/mock_accountservice/mock_accountservice.go +++ b/accountservice/mock_accountservice/mock_accountservice.go @@ -5,7 +5,6 @@ // // mockgen -destination mock_accountservice/mock_accountservice.go github.com/anyproto/any-sync/accountservice Service // - // Package mock_accountservice is a generated GoMock package. package mock_accountservice diff --git a/acl/mock_acl/mock_acl.go b/acl/mock_acl/mock_acl.go index 14db24ca..807e66d0 100644 --- a/acl/mock_acl/mock_acl.go +++ b/acl/mock_acl/mock_acl.go @@ -5,7 +5,6 @@ // // mockgen -destination mock_acl/mock_acl.go github.com/anyproto/any-sync/acl AclService // - // Package mock_acl is a generated GoMock package. package mock_acl diff --git a/app/debugstat/mock_debugstat/mock_debugstat.go b/app/debugstat/mock_debugstat/mock_debugstat.go index f89757ae..12c7701c 100644 --- a/app/debugstat/mock_debugstat/mock_debugstat.go +++ b/app/debugstat/mock_debugstat/mock_debugstat.go @@ -5,7 +5,6 @@ // // mockgen -destination mock_debugstat/mock_debugstat.go github.com/anyproto/any-sync/app/debugstat StatService // - // Package mock_debugstat is a generated GoMock package. package mock_debugstat diff --git a/app/ldiff/diff.go b/app/ldiff/diff.go index 589004c8..83b4c214 100644 --- a/app/ldiff/diff.go +++ b/app/ldiff/diff.go @@ -1,7 +1,7 @@ // Package ldiff provides a container of elements with fixed id and changeable content. // Diff can calculate the difference with another diff container (you can make it remote) with minimum hops and traffic. // -//go:generate mockgen -destination mock_ldiff/mock_ldiff.go github.com/anyproto/any-sync/app/ldiff Diff,Remote +//go:generate mockgen -destination mock_ldiff/mock_ldiff.go github.com/anyproto/any-sync/app/ldiff Diff,Remote,DiffContainer package ldiff import ( @@ -15,6 +15,8 @@ import ( "github.com/cespare/xxhash" "github.com/huandu/skiplist" "github.com/zeebo/blake3" + + "github.com/anyproto/any-sync/commonspace/spacesyncproto" ) // Diff contains elements and can compare it with Remote diff @@ -36,6 +38,13 @@ type Diff interface { Hash() string // Len returns count of elements in the diff Len() int + // DiffType returns the diff type (diff logic and parameters) + DiffType() spacesyncproto.DiffType +} + +type CompareDiff interface { + CompareDiff(ctx context.Context, dl Remote) (newIds, ourChangedIds, theirChangedIds, removedIds []string, err error) + Diff } // New creates precalculated Diff container @@ -141,6 +150,10 @@ func (d *diff) Compare(lhs, rhs interface{}) int { } } +func (d *diff) DiffType() spacesyncproto.DiffType { + return spacesyncproto.DiffType_V2 +} + // CalcScore implements skiplist interface func (d *diff) CalcScore(key interface{}) float64 { return 0 @@ -237,11 +250,10 @@ func (d *diff) getRange(r Range) (rr RangeResult) { if rng != nil { rr.Hash = rng.hash rr.Count = rng.elements - if !r.Elements && rng.isDivided { + if !r.Elements { return } } - el := d.sl.Find(&element{hash: r.From}) rr.Elements = make([]Element, 0, d.divideFactor) for el != nil && el.Key().(*element).hash <= r.To { @@ -266,17 +278,18 @@ func (d *diff) Ranges(ctx context.Context, ranges []Range, resBuf []RangeResult) } type diffCtx struct { - newIds, changedIds, removedIds []string + newIds, changedIds, theirChangedIds, removedIds []string toSend, prepare []Range myRes, otherRes []RangeResult + compareFunc func(dctx *diffCtx, my, other []Element) 
} var errMismatched = errors.New("query and results mismatched") // Diff makes diff with remote container func (d *diff) Diff(ctx context.Context, dl Remote) (newIds, changedIds, removedIds []string, err error) { - dctx := &diffCtx{} + dctx := &diffCtx{compareFunc: d.compareElementsEqual} dctx.toSend = append(dctx.toSend, Range{ From: 0, To: math.MaxUint64, @@ -307,6 +320,38 @@ func (d *diff) Diff(ctx context.Context, dl Remote) (newIds, changedIds, removed return dctx.newIds, dctx.changedIds, dctx.removedIds, nil } +func (d *diff) CompareDiff(ctx context.Context, dl Remote) (newIds, ourChangedIds, theirChangedIds, removedIds []string, err error) { + dctx := &diffCtx{compareFunc: d.compareElementsGreater} + dctx.toSend = append(dctx.toSend, Range{ + From: 0, + To: math.MaxUint64, + }) + for len(dctx.toSend) > 0 { + select { + case <-ctx.Done(): + err = ctx.Err() + return + default: + } + if dctx.otherRes, err = dl.Ranges(ctx, dctx.toSend, dctx.otherRes); err != nil { + return + } + if dctx.myRes, err = d.Ranges(ctx, dctx.toSend, dctx.myRes); err != nil { + return + } + if len(dctx.otherRes) != len(dctx.toSend) || len(dctx.myRes) != len(dctx.toSend) { + err = errMismatched + return + } + for i, r := range dctx.toSend { + d.compareResults(dctx, r, dctx.myRes[i], dctx.otherRes[i]) + } + dctx.toSend, dctx.prepare = dctx.prepare, dctx.toSend + dctx.prepare = dctx.prepare[:0] + } + return dctx.newIds, dctx.changedIds, dctx.theirChangedIds, dctx.removedIds, nil +} + func (d *diff) compareResults(dctx *diffCtx, r Range, myRes, otherRes RangeResult) { // both hash equals - do nothing if bytes.Equal(myRes.Hash, otherRes.Hash) { @@ -316,15 +361,14 @@ func (d *diff) compareResults(dctx *diffCtx, r Range, myRes, otherRes RangeResul // other has elements if len(otherRes.Elements) == otherRes.Count { if len(myRes.Elements) == myRes.Count { - d.compareElements(dctx, myRes.Elements, otherRes.Elements) + dctx.compareFunc(dctx, myRes.Elements, otherRes.Elements) } else { r.Elements = true - d.compareElements(dctx, d.getRange(r).Elements, otherRes.Elements) + dctx.compareFunc(dctx, d.getRange(r).Elements, otherRes.Elements) } return } - // request all elements from other, because we don't have enough - if len(myRes.Elements) == myRes.Count { + if otherRes.Count <= d.compareThreshold && len(otherRes.Elements) == 0 || len(myRes.Elements) == myRes.Count { r.Elements = true dctx.prepare = append(dctx.prepare, r) return @@ -336,7 +380,7 @@ func (d *diff) compareResults(dctx *diffCtx, r Range, myRes, otherRes RangeResul return } -func (d *diff) compareElements(dctx *diffCtx, my, other []Element) { +func (d *diff) compareElementsEqual(dctx *diffCtx, my, other []Element) { find := func(list []Element, targetEl Element) (has, eq bool) { for _, el := range list { if el.Id == targetEl.Id { @@ -364,3 +408,40 @@ func (d *diff) compareElements(dctx *diffCtx, my, other []Element) { } } } + +func (d *diff) compareElementsGreater(dctx *diffCtx, my, other []Element) { + find := func(list []Element, targetEl Element) (has, equal, greater bool) { + for _, el := range list { + if el.Id == targetEl.Id { + if el.Head == targetEl.Head { + return true, true, false + } + return true, false, el.Head > targetEl.Head + } + } + return false, false, false + } + + for _, el := range my { + has, eq, theirGreater := find(other, el) + if !has { + dctx.removedIds = append(dctx.removedIds, el.Id) + continue + } else { + if eq { + continue + } + if theirGreater { + dctx.theirChangedIds = append(dctx.theirChangedIds, el.Id) + } else { + 
dctx.changedIds = append(dctx.changedIds, el.Id) + } + } + } + + for _, el := range other { + if has, _, _ := find(my, el); !has { + dctx.newIds = append(dctx.newIds, el.Id) + } + } +} diff --git a/app/ldiff/diff_test.go b/app/ldiff/diff_test.go index d572647f..9e651526 100644 --- a/app/ldiff/diff_test.go +++ b/app/ldiff/diff_test.go @@ -139,6 +139,35 @@ func TestDiff_Diff(t *testing.T) { assert.Len(t, changedIds, length/2) assert.Len(t, removedIds, 0) }) + t.Run("compare diff", func(t *testing.T) { + d1 := New(16, 128).(CompareDiff) + d2 := New(16, 128) + + length := 10000 + for i := 0; i < length; i++ { + id := fmt.Sprint(i) + head := "a" + uuid.NewString() + d1.Set(Element{ + Id: id, + Head: head, + }) + } + for i := 0; i < length; i++ { + id := fmt.Sprint(i) + head := "b" + uuid.NewString() + d2.Set(Element{ + Id: id, + Head: head, + }) + } + + newIds, changedIds, theirChangedIds, removedIds, err := d1.CompareDiff(ctx, d2) + require.NoError(t, err) + assert.Len(t, newIds, 0) + assert.Len(t, changedIds, 0) + assert.Len(t, theirChangedIds, length) + assert.Len(t, removedIds, 0) + }) t.Run("empty", func(t *testing.T) { d1 := New(16, 16) d2 := New(16, 16) diff --git a/app/ldiff/diffcontainer.go b/app/ldiff/diffcontainer.go new file mode 100644 index 00000000..7b434f11 --- /dev/null +++ b/app/ldiff/diffcontainer.go @@ -0,0 +1,65 @@ +package ldiff + +import ( + "context" + "encoding/hex" + + "github.com/zeebo/blake3" +) + +type RemoteTypeChecker interface { + DiffTypeCheck(ctx context.Context, diffContainer DiffContainer) (needsSync bool, diff Diff, err error) +} + +type DiffContainer interface { + DiffTypeCheck(ctx context.Context, typeChecker RemoteTypeChecker) (needsSync bool, diff Diff, err error) + OldDiff() Diff + NewDiff() Diff + Set(elements ...Element) + RemoveId(id string) error +} + +type diffContainer struct { + newDiff Diff + oldDiff Diff +} + +func (d *diffContainer) NewDiff() Diff { + return d.newDiff +} + +func (d *diffContainer) OldDiff() Diff { + return d.oldDiff +} + +func (d *diffContainer) Set(elements ...Element) { + hasher := hashersPool.Get().(*blake3.Hasher) + defer hashersPool.Put(hasher) + for _, el := range elements { + hasher.Reset() + hasher.WriteString(el.Head) + stringHash := hex.EncodeToString(hasher.Sum(nil)) + d.newDiff.Set(Element{ + Id: el.Id, + Head: stringHash, + }) + } + d.oldDiff.Set(elements...) +} + +func (d *diffContainer) RemoveId(id string) error { + _ = d.newDiff.RemoveId(id) + _ = d.oldDiff.RemoveId(id) + return nil +} + +func (d *diffContainer) DiffTypeCheck(ctx context.Context, typeChecker RemoteTypeChecker) (needsSync bool, diff Diff, err error) { + return typeChecker.DiffTypeCheck(ctx, d) +} + +func NewDiffContainer(new, old Diff) DiffContainer { + return &diffContainer{ + newDiff: new, + oldDiff: old, + } +} diff --git a/app/ldiff/mock_ldiff/mock_ldiff.go b/app/ldiff/mock_ldiff/mock_ldiff.go index a5d2c786..4e828725 100644 --- a/app/ldiff/mock_ldiff/mock_ldiff.go +++ b/app/ldiff/mock_ldiff/mock_ldiff.go @@ -1,11 +1,10 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/anyproto/any-sync/app/ldiff (interfaces: Diff,Remote) +// Source: github.com/anyproto/any-sync/app/ldiff (interfaces: Diff,Remote,DiffContainer) // // Generated by this command: // -// mockgen -destination mock_ldiff/mock_ldiff.go github.com/anyproto/any-sync/app/ldiff Diff,Remote +// mockgen -destination mock_ldiff/mock_ldiff.go github.com/anyproto/any-sync/app/ldiff Diff,Remote,DiffContainer // - // Package mock_ldiff is a generated GoMock package. 
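A minimal usage sketch of the CompareDiff and DiffContainer APIs added above, assuming the caller already has a ctx and an ldiff.Remote and that imports for context, ldiff and olddiff are in place; the constructor parameters mirror the headSync.Init wiring later in this change, and compareWithPeer itself is only illustrative:

func compareWithPeer(ctx context.Context, remote ldiff.Remote) error {
	// Combine the V2 diff (which hashes heads) with the legacy V1 diff,
	// the same way headSync.Init does via ldiff.NewDiffContainer.
	container := ldiff.NewDiffContainer(ldiff.New(32, 256), olddiff.New(32, 256))
	container.Set(ldiff.Element{Id: "objectId", Head: "head1head2"})

	// CompareDiff is implemented by the new diff; the type assertion mirrors
	// the one used in diff_test.go.
	cmp := container.NewDiff().(ldiff.CompareDiff)
	newIds, ourChangedIds, theirChangedIds, removedIds, err := cmp.CompareDiff(ctx, remote)
	if err != nil {
		return err
	}
	// theirChangedIds collects ids whose remote head compares greater than ours,
	// ourChangedIds the opposite; newIds/removedIds keep the plain Diff semantics.
	_, _, _, _ = newIds, ourChangedIds, theirChangedIds, removedIds
	return nil
}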
package mock_ldiff @@ -14,6 +13,7 @@ import ( reflect "reflect" ldiff "github.com/anyproto/any-sync/app/ldiff" + spacesyncproto "github.com/anyproto/any-sync/commonspace/spacesyncproto" gomock "go.uber.org/mock/gomock" ) @@ -57,6 +57,20 @@ func (mr *MockDiffMockRecorder) Diff(arg0, arg1 any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Diff", reflect.TypeOf((*MockDiff)(nil).Diff), arg0, arg1) } +// DiffType mocks base method. +func (m *MockDiff) DiffType() spacesyncproto.DiffType { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DiffType") + ret0, _ := ret[0].(spacesyncproto.DiffType) + return ret0 +} + +// DiffType indicates an expected call of DiffType. +func (mr *MockDiffMockRecorder) DiffType() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DiffType", reflect.TypeOf((*MockDiff)(nil).DiffType)) +} + // Element mocks base method. func (m *MockDiff) Element(arg0 string) (ldiff.Element, error) { m.ctrl.T.Helper() @@ -210,3 +224,100 @@ func (mr *MockRemoteMockRecorder) Ranges(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Ranges", reflect.TypeOf((*MockRemote)(nil).Ranges), arg0, arg1, arg2) } + +// MockDiffContainer is a mock of DiffContainer interface. +type MockDiffContainer struct { + ctrl *gomock.Controller + recorder *MockDiffContainerMockRecorder +} + +// MockDiffContainerMockRecorder is the mock recorder for MockDiffContainer. +type MockDiffContainerMockRecorder struct { + mock *MockDiffContainer +} + +// NewMockDiffContainer creates a new mock instance. +func NewMockDiffContainer(ctrl *gomock.Controller) *MockDiffContainer { + mock := &MockDiffContainer{ctrl: ctrl} + mock.recorder = &MockDiffContainerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockDiffContainer) EXPECT() *MockDiffContainerMockRecorder { + return m.recorder +} + +// DiffTypeCheck mocks base method. +func (m *MockDiffContainer) DiffTypeCheck(arg0 context.Context, arg1 ldiff.RemoteTypeChecker) (bool, ldiff.Diff, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DiffTypeCheck", arg0, arg1) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(ldiff.Diff) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// DiffTypeCheck indicates an expected call of DiffTypeCheck. +func (mr *MockDiffContainerMockRecorder) DiffTypeCheck(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DiffTypeCheck", reflect.TypeOf((*MockDiffContainer)(nil).DiffTypeCheck), arg0, arg1) +} + +// NewDiff mocks base method. +func (m *MockDiffContainer) NewDiff() ldiff.Diff { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewDiff") + ret0, _ := ret[0].(ldiff.Diff) + return ret0 +} + +// NewDiff indicates an expected call of NewDiff. +func (mr *MockDiffContainerMockRecorder) NewDiff() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewDiff", reflect.TypeOf((*MockDiffContainer)(nil).NewDiff)) +} + +// OldDiff mocks base method. +func (m *MockDiffContainer) OldDiff() ldiff.Diff { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "OldDiff") + ret0, _ := ret[0].(ldiff.Diff) + return ret0 +} + +// OldDiff indicates an expected call of OldDiff. 
+func (mr *MockDiffContainerMockRecorder) OldDiff() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OldDiff", reflect.TypeOf((*MockDiffContainer)(nil).OldDiff)) +} + +// RemoveId mocks base method. +func (m *MockDiffContainer) RemoveId(arg0 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RemoveId", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// RemoveId indicates an expected call of RemoveId. +func (mr *MockDiffContainerMockRecorder) RemoveId(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveId", reflect.TypeOf((*MockDiffContainer)(nil).RemoveId), arg0) +} + +// Set mocks base method. +func (m *MockDiffContainer) Set(arg0 ...ldiff.Element) { + m.ctrl.T.Helper() + varargs := []any{} + for _, a := range arg0 { + varargs = append(varargs, a) + } + m.ctrl.Call(m, "Set", varargs...) +} + +// Set indicates an expected call of Set. +func (mr *MockDiffContainerMockRecorder) Set(arg0 ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Set", reflect.TypeOf((*MockDiffContainer)(nil).Set), arg0...) +} diff --git a/app/olddiff/diff.go b/app/olddiff/diff.go new file mode 100644 index 00000000..11f1bb22 --- /dev/null +++ b/app/olddiff/diff.go @@ -0,0 +1,322 @@ +package olddiff + +import ( + "bytes" + "context" + "encoding/hex" + "errors" + "math" + "sync" + + "github.com/cespare/xxhash" + "github.com/huandu/skiplist" + "github.com/zeebo/blake3" + + "github.com/anyproto/any-sync/app/ldiff" + "github.com/anyproto/any-sync/commonspace/spacesyncproto" +) + +// New creates precalculated Diff container +// +// divideFactor - means how many hashes you want to ask for once +// +// it must be 2 or greater +// normal value usually between 4 and 64 +// +// compareThreshold - means the maximum count of elements remote diff will send directly +// +// if elements under range will be more - remote diff will send only hash +// it must be 1 or greater +// normal value between 8 and 64 +// +// Less threshold and divideFactor - less traffic but more requests +func New(divideFactor, compareThreshold int) ldiff.Diff { + return newDiff(divideFactor, compareThreshold) +} + +func newDiff(divideFactor, compareThreshold int) ldiff.Diff { + if divideFactor < 2 { + divideFactor = 2 + } + if compareThreshold < 1 { + compareThreshold = 1 + } + d := &diff{ + divideFactor: divideFactor, + compareThreshold: compareThreshold, + } + d.sl = skiplist.New(d) + d.ranges = newHashRanges(divideFactor, compareThreshold, d.sl) + d.ranges.dirty[d.ranges.topRange] = struct{}{} + d.ranges.recalculateHashes() + return d +} + +var hashersPool = &sync.Pool{ + New: func() any { + return blake3.New() + }, +} + +var ErrElementNotFound = errors.New("ldiff: element not found") + +type element struct { + ldiff.Element + hash uint64 +} + +// Diff contains elements and can compare it with Remote diff +type diff struct { + sl *skiplist.SkipList + divideFactor int + compareThreshold int + ranges *hashRanges + mu sync.RWMutex +} + +// Compare implements skiplist interface +func (d *diff) Compare(lhs, rhs interface{}) int { + lhe := lhs.(*element) + rhe := rhs.(*element) + if lhe.Id == rhe.Id { + return 0 + } + if lhe.hash > rhe.hash { + return 1 + } else if lhe.hash < rhe.hash { + return -1 + } + if lhe.Id > rhe.Id { + return 1 + } else { + return -1 + } +} + +// CalcScore implements skiplist interface +func (d *diff) CalcScore(key interface{}) float64 { + return 0 +} + +// 
Set adds or update element in container +func (d *diff) Set(elements ...ldiff.Element) { + d.mu.Lock() + defer d.mu.Unlock() + for _, e := range elements { + hash := xxhash.Sum64([]byte(e.Id)) + el := &element{Element: e, hash: hash} + d.sl.Remove(el) + d.sl.Set(el, nil) + d.ranges.addElement(hash) + } + d.ranges.recalculateHashes() +} + +func (d *diff) Ids() (ids []string) { + d.mu.RLock() + defer d.mu.RUnlock() + + ids = make([]string, 0, d.sl.Len()) + + cur := d.sl.Front() + for cur != nil { + el := cur.Key().(*element).Element + ids = append(ids, el.Id) + cur = cur.Next() + } + return +} + +func (d *diff) Len() int { + d.mu.RLock() + defer d.mu.RUnlock() + return d.sl.Len() +} + +func (d *diff) DiffType() spacesyncproto.DiffType { + return spacesyncproto.DiffType_V1 +} + +func (d *diff) Elements() (elements []ldiff.Element) { + d.mu.RLock() + defer d.mu.RUnlock() + + elements = make([]ldiff.Element, 0, d.sl.Len()) + + cur := d.sl.Front() + for cur != nil { + el := cur.Key().(*element).Element + elements = append(elements, el) + cur = cur.Next() + } + return +} + +func (d *diff) Element(id string) (ldiff.Element, error) { + d.mu.RLock() + defer d.mu.RUnlock() + el := d.sl.Get(&element{Element: ldiff.Element{Id: id}, hash: xxhash.Sum64([]byte(id))}) + if el == nil { + return ldiff.Element{}, ErrElementNotFound + } + if e, ok := el.Key().(*element); ok { + return e.Element, nil + } + return ldiff.Element{}, ErrElementNotFound +} + +func (d *diff) Hash() string { + d.mu.RLock() + defer d.mu.RUnlock() + return hex.EncodeToString(d.ranges.hash()) +} + +// RemoveId removes element by id +func (d *diff) RemoveId(id string) error { + d.mu.Lock() + defer d.mu.Unlock() + hash := xxhash.Sum64([]byte(id)) + el := &element{Element: ldiff.Element{ + Id: id, + }, hash: hash} + if d.sl.Remove(el) == nil { + return ErrElementNotFound + } + d.ranges.removeElement(hash) + d.ranges.recalculateHashes() + return nil +} + +func (d *diff) getRange(r ldiff.Range) (rr ldiff.RangeResult) { + rng := d.ranges.getRange(r.From, r.To) + // if we have the division for this range + if rng != nil { + rr.Hash = rng.hash + rr.Count = rng.elements + if !r.Elements && rng.isDivided { + return + } + } + + el := d.sl.Find(&element{hash: r.From}) + rr.Elements = make([]ldiff.Element, 0, d.divideFactor) + for el != nil && el.Key().(*element).hash <= r.To { + elem := el.Key().(*element).Element + el = el.Next() + rr.Elements = append(rr.Elements, elem) + } + rr.Count = len(rr.Elements) + return +} + +// Ranges calculates given ranges and return results +func (d *diff) Ranges(ctx context.Context, ranges []ldiff.Range, resBuf []ldiff.RangeResult) (results []ldiff.RangeResult, err error) { + d.mu.RLock() + defer d.mu.RUnlock() + + results = resBuf[:0] + for _, r := range ranges { + results = append(results, d.getRange(r)) + } + return +} + +type diffCtx struct { + newIds, changedIds, removedIds []string + + toSend, prepare []ldiff.Range + myRes, otherRes []ldiff.RangeResult +} + +var errMismatched = errors.New("query and results mismatched") + +// Diff makes diff with remote container +func (d *diff) Diff(ctx context.Context, dl ldiff.Remote) (newIds, changedIds, removedIds []string, err error) { + dctx := &diffCtx{} + dctx.toSend = append(dctx.toSend, ldiff.Range{ + From: 0, + To: math.MaxUint64, + }) + for len(dctx.toSend) > 0 { + select { + case <-ctx.Done(): + err = ctx.Err() + return + default: + } + if dctx.otherRes, err = dl.Ranges(ctx, dctx.toSend, dctx.otherRes); err != nil { + return + } + if dctx.myRes, err = 
d.Ranges(ctx, dctx.toSend, dctx.myRes); err != nil { + return + } + if len(dctx.otherRes) != len(dctx.toSend) || len(dctx.myRes) != len(dctx.toSend) { + err = errMismatched + return + } + for i, r := range dctx.toSend { + d.compareResults(dctx, r, dctx.myRes[i], dctx.otherRes[i]) + } + dctx.toSend, dctx.prepare = dctx.prepare, dctx.toSend + dctx.prepare = dctx.prepare[:0] + } + return dctx.newIds, dctx.changedIds, dctx.removedIds, nil +} + +func (d *diff) compareResults(dctx *diffCtx, r ldiff.Range, myRes, otherRes ldiff.RangeResult) { + // both hash equals - do nothing + if bytes.Equal(myRes.Hash, otherRes.Hash) { + return + } + + // other has elements + if len(otherRes.Elements) == otherRes.Count { + if len(myRes.Elements) == myRes.Count { + d.compareElements(dctx, myRes.Elements, otherRes.Elements) + } else { + r.Elements = true + d.compareElements(dctx, d.getRange(r).Elements, otherRes.Elements) + } + return + } + // request all elements from other, because we don't have enough + if len(myRes.Elements) == myRes.Count { + r.Elements = true + dctx.prepare = append(dctx.prepare, r) + return + } + rangeTuples := genTupleRanges(r.From, r.To, d.divideFactor) + for _, tuple := range rangeTuples { + dctx.prepare = append(dctx.prepare, ldiff.Range{From: tuple.from, To: tuple.to}) + } + return +} + +func (d *diff) compareElements(dctx *diffCtx, my, other []ldiff.Element) { + find := func(list []ldiff.Element, targetEl ldiff.Element) (has, eq bool) { + for _, el := range list { + if el.Id == targetEl.Id { + return true, el.Head == targetEl.Head + } + } + return false, false + } + + for _, el := range my { + has, eq := find(other, el) + if !has { + dctx.removedIds = append(dctx.removedIds, el.Id) + continue + } else { + if !eq { + dctx.changedIds = append(dctx.changedIds, el.Id) + } + } + } + + for _, el := range other { + if has, _ := find(my, el); !has { + dctx.newIds = append(dctx.newIds, el.Id) + } + } +} diff --git a/app/olddiff/diff_test.go b/app/olddiff/diff_test.go new file mode 100644 index 00000000..bdc5c367 --- /dev/null +++ b/app/olddiff/diff_test.go @@ -0,0 +1,428 @@ +package olddiff + +import ( + "context" + "fmt" + "math" + "sort" + "testing" + + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/exp/slices" + + "github.com/anyproto/any-sync/app/ldiff" +) + +func TestDiff_fillRange(t *testing.T) { + d := New(4, 4).(*diff) + for i := 0; i < 10; i++ { + el := ldiff.Element{ + Id: fmt.Sprint(i), + Head: fmt.Sprint("h", i), + } + d.Set(el) + } + t.Log(d.sl.Len()) + + t.Run("elements", func(t *testing.T) { + r := ldiff.Range{From: 0, To: math.MaxUint64} + res := d.getRange(r) + assert.NotNil(t, res.Hash) + assert.Equal(t, res.Count, 10) + }) +} + +func TestDiff_Diff(t *testing.T) { + ctx := context.Background() + t.Run("basic", func(t *testing.T) { + d1 := New(16, 16) + d2 := New(16, 16) + for i := 0; i < 1000; i++ { + id := fmt.Sprint(i) + head := uuid.NewString() + d1.Set(ldiff.Element{ + Id: id, + Head: head, + }) + d2.Set(ldiff.Element{ + Id: id, + Head: head, + }) + } + + newIds, changedIds, removedIds, err := d1.Diff(ctx, d2) + require.NoError(t, err) + assert.Len(t, newIds, 0) + assert.Len(t, changedIds, 0) + assert.Len(t, removedIds, 0) + + d2.Set(ldiff.Element{ + Id: "newD1", + Head: "newD1", + }) + d2.Set(ldiff.Element{ + Id: "1", + Head: "changed", + }) + require.NoError(t, d2.RemoveId("0")) + + newIds, changedIds, removedIds, err = d1.Diff(ctx, d2) + require.NoError(t, err) + assert.Len(t, newIds, 1) + 
assert.Len(t, changedIds, 1) + assert.Len(t, removedIds, 1) + }) + t.Run("complex", func(t *testing.T) { + d1 := New(16, 128) + d2 := New(16, 128) + length := 10000 + for i := 0; i < length; i++ { + id := fmt.Sprint(i) + head := uuid.NewString() + d1.Set(ldiff.Element{ + Id: id, + Head: head, + }) + } + + newIds, changedIds, removedIds, err := d1.Diff(ctx, d2) + require.NoError(t, err) + assert.Len(t, newIds, 0) + assert.Len(t, changedIds, 0) + assert.Len(t, removedIds, length) + + for i := 0; i < length; i++ { + id := fmt.Sprint(i) + head := uuid.NewString() + d2.Set(ldiff.Element{ + Id: id, + Head: head, + }) + } + + newIds, changedIds, removedIds, err = d1.Diff(ctx, d2) + require.NoError(t, err) + assert.Len(t, newIds, 0) + assert.Len(t, changedIds, length) + assert.Len(t, removedIds, 0) + + for i := 0; i < length; i++ { + id := fmt.Sprint(i) + head := uuid.NewString() + d2.Set(ldiff.Element{ + Id: id, + Head: head, + }) + } + + res, err := d1.Ranges( + context.Background(), + []ldiff.Range{{From: 0, To: math.MaxUint64, Elements: true}}, + nil) + require.NoError(t, err) + require.Len(t, res, 1) + for i, el := range res[0].Elements { + if i < length/2 { + continue + } + id := el.Id + head := el.Head + d2.Set(ldiff.Element{ + Id: id, + Head: head, + }) + } + + newIds, changedIds, removedIds, err = d1.Diff(ctx, d2) + require.NoError(t, err) + assert.Len(t, newIds, 0) + assert.Len(t, changedIds, length/2) + assert.Len(t, removedIds, 0) + }) + t.Run("empty", func(t *testing.T) { + d1 := New(16, 16) + d2 := New(16, 16) + newIds, changedIds, removedIds, err := d1.Diff(ctx, d2) + require.NoError(t, err) + assert.Len(t, newIds, 0) + assert.Len(t, changedIds, 0) + assert.Len(t, removedIds, 0) + }) + t.Run("one empty", func(t *testing.T) { + d1 := New(4, 4) + d2 := New(4, 4) + length := 10000 + for i := 0; i < length; i++ { + d2.Set(ldiff.Element{ + Id: fmt.Sprint(i), + Head: uuid.NewString(), + }) + } + newIds, changedIds, removedIds, err := d1.Diff(ctx, d2) + require.NoError(t, err) + assert.Len(t, newIds, length) + assert.Len(t, changedIds, 0) + assert.Len(t, removedIds, 0) + }) + t.Run("not intersecting", func(t *testing.T) { + d1 := New(16, 16) + d2 := New(16, 16) + length := 10000 + for i := 0; i < length; i++ { + d1.Set(ldiff.Element{ + Id: fmt.Sprint(i), + Head: uuid.NewString(), + }) + } + for i := length; i < length*2; i++ { + d2.Set(ldiff.Element{ + Id: fmt.Sprint(i), + Head: uuid.NewString(), + }) + } + newIds, changedIds, removedIds, err := d1.Diff(ctx, d2) + require.NoError(t, err) + assert.Len(t, newIds, length) + assert.Len(t, changedIds, 0) + assert.Len(t, removedIds, length) + }) + t.Run("context cancel", func(t *testing.T) { + d1 := New(4, 4) + d2 := New(4, 4) + for i := 0; i < 10; i++ { + d2.Set(ldiff.Element{ + Id: fmt.Sprint(i), + Head: uuid.NewString(), + }) + } + var cancel func() + ctx, cancel = context.WithCancel(ctx) + cancel() + _, _, _, err := d1.Diff(ctx, d2) + assert.ErrorIs(t, err, context.Canceled) + }) +} + +func BenchmarkDiff_Ranges(b *testing.B) { + d := New(16, 16) + for i := 0; i < 10000; i++ { + id := fmt.Sprint(i) + head := uuid.NewString() + d.Set(ldiff.Element{ + Id: id, + Head: head, + }) + } + ctx := context.Background() + b.ResetTimer() + b.ReportAllocs() + var resBuf []ldiff.RangeResult + var ranges = []ldiff.Range{{From: 0, To: math.MaxUint64}} + for i := 0; i < b.N; i++ { + d.Ranges(ctx, ranges, resBuf) + resBuf = resBuf[:0] + } +} + +func TestDiff_Hash(t *testing.T) { + d := New(16, 16) + h1 := d.Hash() + assert.NotEmpty(t, h1) + 
d.Set(ldiff.Element{Id: "1"}) + h2 := d.Hash() + assert.NotEmpty(t, h2) + assert.NotEqual(t, h1, h2) +} + +func TestDiff_Element(t *testing.T) { + d := New(16, 16) + for i := 0; i < 10; i++ { + d.Set(ldiff.Element{Id: fmt.Sprint("id", i), Head: fmt.Sprint("head", i)}) + } + _, err := d.Element("not found") + assert.Equal(t, ErrElementNotFound, err) + + el, err := d.Element("id5") + require.NoError(t, err) + assert.Equal(t, "head5", el.Head) + + d.Set(ldiff.Element{"id5", "otherHead"}) + el, err = d.Element("id5") + require.NoError(t, err) + assert.Equal(t, "otherHead", el.Head) +} + +func TestDiff_Ids(t *testing.T) { + d := New(16, 16) + var ids []string + for i := 0; i < 10; i++ { + id := fmt.Sprint("id", i) + d.Set(ldiff.Element{Id: id, Head: fmt.Sprint("head", i)}) + ids = append(ids, id) + } + gotIds := d.Ids() + sort.Strings(gotIds) + assert.Equal(t, ids, gotIds) + assert.Equal(t, len(ids), d.Len()) +} + +func TestDiff_Elements(t *testing.T) { + d := New(16, 16) + var els []ldiff.Element + for i := 0; i < 10; i++ { + id := fmt.Sprint("id", i) + el := ldiff.Element{Id: id, Head: fmt.Sprint("head", i)} + d.Set(el) + els = append(els, el) + } + gotEls := d.Elements() + sort.Slice(gotEls, func(i, j int) bool { + return gotEls[i].Id < gotEls[j].Id + }) + assert.Equal(t, els, gotEls) +} + +func TestRangesAddRemove(t *testing.T) { + length := 10000 + divideFactor := 4 + compareThreshold := 4 + addTwice := func() string { + d := New(divideFactor, compareThreshold) + var els []ldiff.Element + for i := 0; i < length; i++ { + if i < length/20 { + continue + } + els = append(els, ldiff.Element{ + Id: fmt.Sprint(i), + Head: fmt.Sprint("h", i), + }) + } + d.Set(els...) + els = els[:0] + for i := 0; i < length/20; i++ { + els = append(els, ldiff.Element{ + Id: fmt.Sprint(i), + Head: fmt.Sprint("h", i), + }) + } + d.Set(els...) + return d.Hash() + } + addOnce := func() string { + d := New(divideFactor, compareThreshold) + var els []ldiff.Element + for i := 0; i < length; i++ { + els = append(els, ldiff.Element{ + Id: fmt.Sprint(i), + Head: fmt.Sprint("h", i), + }) + } + d.Set(els...) + return d.Hash() + } + addRemove := func() string { + d := New(divideFactor, compareThreshold) + var els []ldiff.Element + for i := 0; i < length; i++ { + els = append(els, ldiff.Element{ + Id: fmt.Sprint(i), + Head: fmt.Sprint("h", i), + }) + } + d.Set(els...) + for i := 0; i < length/20; i++ { + err := d.RemoveId(fmt.Sprint(i)) + require.NoError(t, err) + } + els = els[:0] + for i := 0; i < length/20; i++ { + els = append(els, ldiff.Element{ + Id: fmt.Sprint(i), + Head: fmt.Sprint("h", i), + }) + } + d.Set(els...) + return d.Hash() + } + require.Equal(t, addTwice(), addOnce(), addRemove()) +} + +func printBestParams() { + numTests := 10 + length := 100000 + calcParams := func(divideFactor, compareThreshold, length int) (total, maxLevel, avgLevel, zeroEls int) { + d := New(divideFactor, compareThreshold).(*diff) + var els []ldiff.Element + for i := 0; i < length; i++ { + els = append(els, ldiff.Element{ + Id: uuid.NewString(), + Head: uuid.NewString(), + }) + } + d.Set(els...) 
+ for _, rng := range d.ranges.ranges { + if rng.elements == 0 { + zeroEls++ + } + if rng.level > maxLevel { + maxLevel = rng.level + } + avgLevel += rng.level + } + total = len(d.ranges.ranges) + avgLevel = avgLevel / total + return + } + type result struct { + divFactor, compThreshold, numRanges, maxLevel, avgLevel, zeroEls int + } + sf := func(i, j result) int { + if i.numRanges < j.numRanges { + return -1 + } else if i.numRanges == j.numRanges { + return 0 + } else { + return 1 + } + } + var results []result + for divFactor := 0; divFactor < 6; divFactor++ { + df := 1 << divFactor + for compThreshold := 0; compThreshold < 10; compThreshold++ { + ct := 1 << compThreshold + fmt.Println("starting, df:", df, "ct:", ct) + var rngs []result + for i := 0; i < numTests; i++ { + total, maxLevel, avgLevel, zeroEls := calcParams(df, ct, length) + rngs = append(rngs, result{ + divFactor: df, + compThreshold: ct, + numRanges: total, + maxLevel: maxLevel, + avgLevel: avgLevel, + zeroEls: zeroEls, + }) + } + slices.SortFunc(rngs, sf) + ranges := rngs[len(rngs)/2] + results = append(results, ranges) + } + } + slices.SortFunc(results, sf) + fmt.Println(results) + // 100000 - [{16 512 273 2 1 0} {4 512 341 4 3 0} {2 512 511 8 7 0} {1 512 511 8 7 0} + // {8 256 585 3 2 0} {8 512 585 3 2 0} {1 256 1023 9 8 0} {2 256 1023 9 8 0} + // {32 256 1057 2 1 0} {32 512 1057 2 1 0} {32 128 1089 3 1 0} {4 256 1365 5 4 0} + // {4 128 1369 6 4 0} {2 128 2049 11 9 0} {1 128 2049 11 9 0} {1 64 4157 12 10 0} + // {2 64 4159 12 10 0} {16 128 4369 3 2 0} {16 64 4369 3 2 0} {16 256 4369 3 2 0} + // {8 64 4681 4 3 0} {8 128 4681 4 3 0} {4 64 5461 6 5 0} {4 32 6389 7 5 0} + // {8 32 6505 5 4 17} {16 32 8033 4 3 374} {2 32 8619 13 11 0} {1 32 8621 13 11 0} + // {2 16 17837 15 12 0} {1 16 17847 15 12 0} {4 16 21081 8 6 22} {32 64 33825 3 2 1578} + // {32 32 33825 3 2 1559} {32 16 33825 3 2 1518} {8 16 35881 5 4 1313} {16 16 66737 4 3 13022}] + // 1000000 - [{8 256 11753 5 4 0}] + // 1000000 - [{16 128 69905 4 3 0}] + // 1000000 - [{32 256 33825 3 2 0}] +} diff --git a/app/olddiff/hashrange.go b/app/olddiff/hashrange.go new file mode 100644 index 00000000..cfc58976 --- /dev/null +++ b/app/olddiff/hashrange.go @@ -0,0 +1,223 @@ +package olddiff + +import ( + "math" + + "github.com/huandu/skiplist" + "github.com/zeebo/blake3" + "golang.org/x/exp/slices" +) + +type hashRange struct { + from, to uint64 + parent *hashRange + isDivided bool + elements int + level int + hash []byte +} + +type rangeTuple struct { + from, to uint64 +} + +type hashRanges struct { + ranges map[rangeTuple]*hashRange + topRange *hashRange + sl *skiplist.SkipList + dirty map[*hashRange]struct{} + divideFactor int + compareThreshold int +} + +func newHashRanges(divideFactor, compareThreshold int, sl *skiplist.SkipList) *hashRanges { + h := &hashRanges{ + ranges: make(map[rangeTuple]*hashRange), + dirty: make(map[*hashRange]struct{}), + divideFactor: divideFactor, + compareThreshold: compareThreshold, + sl: sl, + } + h.topRange = &hashRange{ + from: 0, + to: math.MaxUint64, + isDivided: true, + level: 0, + } + h.ranges[rangeTuple{from: 0, to: math.MaxUint64}] = h.topRange + h.makeBottomRanges(h.topRange) + return h +} + +func (h *hashRanges) hash() []byte { + return h.topRange.hash +} + +func (h *hashRanges) addElement(elHash uint64) { + rng := h.topRange + rng.elements++ + for rng.isDivided { + rng = h.getBottomRange(rng, elHash) + rng.elements++ + } + h.dirty[rng] = struct{}{} + if rng.elements > h.compareThreshold { + rng.isDivided = true + 
h.makeBottomRanges(rng) + } + if rng.parent != nil { + if _, ok := h.dirty[rng.parent]; ok { + delete(h.dirty, rng.parent) + } + } +} + +func (h *hashRanges) removeElement(elHash uint64) { + rng := h.topRange + rng.elements-- + for rng.isDivided { + rng = h.getBottomRange(rng, elHash) + rng.elements-- + } + parent := rng.parent + if parent.elements <= h.compareThreshold && parent != h.topRange { + ranges := genTupleRanges(parent.from, parent.to, h.divideFactor) + for _, tuple := range ranges { + child := h.ranges[tuple] + delete(h.ranges, tuple) + delete(h.dirty, child) + } + parent.isDivided = false + h.dirty[parent] = struct{}{} + } else { + h.dirty[rng] = struct{}{} + } +} + +func (h *hashRanges) recalculateHashes() { + for len(h.dirty) > 0 { + var slDirty []*hashRange + for rng := range h.dirty { + slDirty = append(slDirty, rng) + } + slices.SortFunc(slDirty, func(a, b *hashRange) int { + if a.level < b.level { + return -1 + } else if a.level > b.level { + return 1 + } else { + return 0 + } + }) + for _, rng := range slDirty { + if rng.isDivided { + rng.hash = h.calcDividedHash(rng) + } else { + rng.hash, rng.elements = h.calcElementsHash(rng.from, rng.to) + } + delete(h.dirty, rng) + if rng.parent != nil { + h.dirty[rng.parent] = struct{}{} + } + } + } +} + +func (h *hashRanges) getRange(from, to uint64) *hashRange { + return h.ranges[rangeTuple{from: from, to: to}] +} + +func (h *hashRanges) getBottomRange(rng *hashRange, elHash uint64) *hashRange { + df := uint64(h.divideFactor) + perRange := (rng.to - rng.from) / df + align := ((rng.to-rng.from)%df + 1) % df + if align == 0 { + perRange++ + } + bucket := (elHash - rng.from) / perRange + tuple := rangeTuple{from: rng.from + bucket*perRange, to: rng.from - 1 + (bucket+1)*perRange} + if bucket == df-1 { + tuple.to += align + } + return h.ranges[tuple] +} + +func (h *hashRanges) makeBottomRanges(rng *hashRange) { + ranges := genTupleRanges(rng.from, rng.to, h.divideFactor) + for _, tuple := range ranges { + newRange := h.makeRange(tuple, rng) + h.ranges[tuple] = newRange + if newRange.elements > h.compareThreshold { + if _, ok := h.dirty[rng]; ok { + delete(h.dirty, rng) + } + h.dirty[newRange] = struct{}{} + newRange.isDivided = true + h.makeBottomRanges(newRange) + } + } +} + +func (h *hashRanges) makeRange(tuple rangeTuple, parent *hashRange) *hashRange { + newRange := &hashRange{ + from: tuple.from, + to: tuple.to, + parent: parent, + } + hash, els := h.calcElementsHash(tuple.from, tuple.to) + newRange.hash = hash + newRange.level = parent.level + 1 + newRange.elements = els + return newRange +} + +func (h *hashRanges) calcDividedHash(rng *hashRange) (hash []byte) { + hasher := hashersPool.Get().(*blake3.Hasher) + defer hashersPool.Put(hasher) + hasher.Reset() + ranges := genTupleRanges(rng.from, rng.to, h.divideFactor) + for _, tuple := range ranges { + child := h.ranges[tuple] + hasher.Write(child.hash) + } + hash = hasher.Sum(nil) + return +} + +func genTupleRanges(from, to uint64, divideFactor int) (prepare []rangeTuple) { + df := uint64(divideFactor) + perRange := (to - from) / df + align := ((to-from)%df + 1) % df + if align == 0 { + perRange++ + } + var j = from + for i := 0; i < divideFactor; i++ { + if i == divideFactor-1 { + perRange += align + } + prepare = append(prepare, rangeTuple{from: j, to: j + perRange - 1}) + j += perRange + } + return +} + +func (h *hashRanges) calcElementsHash(from, to uint64) (hash []byte, els int) { + hasher := hashersPool.Get().(*blake3.Hasher) + defer hashersPool.Put(hasher) + 
hasher.Reset() + + el := h.sl.Find(&element{hash: from}) + for el != nil && el.Key().(*element).hash <= to { + elem := el.Key().(*element).Element + el = el.Next() + + hasher.WriteString(elem.Id) + hasher.WriteString(elem.Head) + els++ + } + if els != 0 { + hash = hasher.Sum(nil) + } + return +} diff --git a/commonspace/acl/aclclient/mock_aclclient/mock_aclclient.go b/commonspace/acl/aclclient/mock_aclclient/mock_aclclient.go index 3994a66d..6cd1b279 100644 --- a/commonspace/acl/aclclient/mock_aclclient/mock_aclclient.go +++ b/commonspace/acl/aclclient/mock_aclclient/mock_aclclient.go @@ -5,7 +5,6 @@ // // mockgen -destination mock_aclclient/mock_aclclient.go github.com/anyproto/any-sync/commonspace/acl/aclclient AclJoiningClient,AclSpaceClient // - // Package mock_aclclient is a generated GoMock package. package mock_aclclient diff --git a/commonspace/credentialprovider/mock_credentialprovider/mock_credentialprovider.go b/commonspace/credentialprovider/mock_credentialprovider/mock_credentialprovider.go index 18d2417b..00c3f139 100644 --- a/commonspace/credentialprovider/mock_credentialprovider/mock_credentialprovider.go +++ b/commonspace/credentialprovider/mock_credentialprovider/mock_credentialprovider.go @@ -5,7 +5,6 @@ // // mockgen -destination mock_credentialprovider/mock_credentialprovider.go github.com/anyproto/any-sync/commonspace/credentialprovider CredentialProvider // - // Package mock_credentialprovider is a generated GoMock package. package mock_credentialprovider diff --git a/commonspace/deletion_test.go b/commonspace/deletion_test.go index b74d5798..add216a5 100644 --- a/commonspace/deletion_test.go +++ b/commonspace/deletion_test.go @@ -11,6 +11,7 @@ import ( "github.com/anyproto/any-sync/commonspace/object/accountdata" "github.com/anyproto/any-sync/commonspace/object/tree/objecttree" + "github.com/anyproto/any-sync/commonspace/spacepayloads" "github.com/anyproto/any-sync/commonspace/spacestorage" "github.com/anyproto/any-sync/commonspace/syncstatus" "github.com/anyproto/any-sync/util/crypto" @@ -47,7 +48,7 @@ func TestSpaceDeleteIdsMarkDeleted(t *testing.T) { totalObjs := 1000 // creating space - sp, err := fx.spaceService.CreateSpace(ctx, SpaceCreatePayload{ + sp, err := fx.spaceService.CreateSpace(ctx, spacepayloads.SpaceCreatePayload{ SigningKey: acc.SignKey, SpaceType: "type", ReadKey: rk, @@ -140,7 +141,7 @@ func TestSpaceDeleteIds(t *testing.T) { totalObjs := 1000 // creating space - sp, err := fx.spaceService.CreateSpace(ctx, SpaceCreatePayload{ + sp, err := fx.spaceService.CreateSpace(ctx, spacepayloads.SpaceCreatePayload{ SigningKey: acc.SignKey, SpaceType: "type", ReadKey: rk, diff --git a/commonspace/deletionmanager/mock_deletionmanager/mock_deletionmanager.go b/commonspace/deletionmanager/mock_deletionmanager/mock_deletionmanager.go index 5033bfd6..a5a4c57e 100644 --- a/commonspace/deletionmanager/mock_deletionmanager/mock_deletionmanager.go +++ b/commonspace/deletionmanager/mock_deletionmanager/mock_deletionmanager.go @@ -5,7 +5,6 @@ // // mockgen -destination mock_deletionmanager/mock_deletionmanager.go github.com/anyproto/any-sync/commonspace/deletionmanager DeletionManager,Deleter // - // Package mock_deletionmanager is a generated GoMock package. 
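The bucket arithmetic in genTupleRanges (and the matching lookup in getBottomRange) in app/olddiff/hashrange.go splits a [from, to] interval into divideFactor consecutive buckets, with the last bucket absorbing the alignment remainder. A small illustrative check, assuming it sits inside package olddiff next to the functions it exercises (it is not part of this change):

func TestGenTupleRanges_Illustration(t *testing.T) {
	// 16 values split evenly into 4 buckets of 4.
	require.Equal(t, []rangeTuple{{0, 3}, {4, 7}, {8, 11}, {12, 15}}, genTupleRanges(0, 15, 4))
	// 10 values: three buckets of 2, the last bucket takes the remaining 4.
	require.Equal(t, []rangeTuple{{0, 1}, {2, 3}, {4, 5}, {6, 9}}, genTupleRanges(0, 9, 4))
}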
package mock_deletionmanager diff --git a/commonspace/deletionstate/mock_deletionstate/mock_deletionstate.go b/commonspace/deletionstate/mock_deletionstate/mock_deletionstate.go index 38eb81a0..1faf94a7 100644 --- a/commonspace/deletionstate/mock_deletionstate/mock_deletionstate.go +++ b/commonspace/deletionstate/mock_deletionstate/mock_deletionstate.go @@ -5,7 +5,6 @@ // // mockgen -destination mock_deletionstate/mock_deletionstate.go github.com/anyproto/any-sync/commonspace/deletionstate ObjectDeletionState // - // Package mock_deletionstate is a generated GoMock package. package mock_deletionstate diff --git a/commonspace/headsync/diffsyncer.go b/commonspace/headsync/diffsyncer.go index 9838e2c2..845646f4 100644 --- a/commonspace/headsync/diffsyncer.go +++ b/commonspace/headsync/diffsyncer.go @@ -8,6 +8,7 @@ import ( "github.com/quic-go/quic-go" "github.com/anyproto/any-sync/commonspace/headsync/headstorage" + "github.com/anyproto/any-sync/commonspace/object/keyvalue/kvinterfaces" "github.com/anyproto/any-sync/commonspace/object/treesyncer" "github.com/anyproto/any-sync/net/rpc/rpcerr" @@ -36,12 +37,13 @@ const logPeriodSecs = 200 func newDiffSyncer(hs *headSync) DiffSyncer { return &diffSyncer{ - diff: hs.diff, + diffContainer: hs.diffContainer, spaceId: hs.spaceId, storage: hs.storage, peerManager: hs.peerManager, clientFactory: spacesyncproto.ClientFactoryFunc(spacesyncproto.NewDRPCSpaceSyncClient), credentialProvider: hs.credentialProvider, + keyValue: hs.keyValue, log: newSyncLogger(hs.log, logPeriodSecs), deletionState: hs.deletionState, syncAcl: hs.syncAcl, @@ -51,7 +53,7 @@ func newDiffSyncer(hs *headSync) DiffSyncer { type diffSyncer struct { spaceId string - diff ldiff.Diff + diffContainer ldiff.DiffContainer peerManager peermanager.PeerManager headUpdater *headUpdater treeManager treemanager.TreeManager @@ -63,6 +65,7 @@ type diffSyncer struct { cancel context.CancelFunc deletionState deletionstate.ObjectDeletionState credentialProvider credentialprovider.CredentialProvider + keyValue kvinterfaces.KeyValueService syncAcl syncacl.SyncAcl } @@ -82,7 +85,7 @@ func (d *diffSyncer) OnUpdate(headsUpdate headstorage.HeadsUpdate) { func (d *diffSyncer) updateHeads(update headstorage.HeadsUpdate) { if update.DeletedStatus != nil { - _ = d.diff.RemoveId(update.Id) + _ = d.diffContainer.RemoveId(update.Id) } else { if d.deletionState.Exists(update.Id) { return @@ -90,13 +93,22 @@ func (d *diffSyncer) updateHeads(update headstorage.HeadsUpdate) { if update.IsDerived != nil && *update.IsDerived && len(update.Heads) == 1 && update.Heads[0] == update.Id { return } - d.diff.Set(ldiff.Element{ - Id: update.Id, - Head: concatStrings(update.Heads), - }) + if update.Id == d.keyValue.DefaultStore().Id() { + d.diffContainer.NewDiff().Set(ldiff.Element{ + Id: update.Id, + Head: concatStrings(update.Heads), + }) + } else { + d.diffContainer.Set(ldiff.Element{ + Id: update.Id, + Head: concatStrings(update.Heads), + }) + } } // probably we should somehow batch the updates - err := d.storage.StateStorage().SetHash(d.ctx, d.diff.Hash()) + oldHash := d.diffContainer.OldDiff().Hash() + newHash := d.diffContainer.NewDiff().Hash() + err := d.storage.StateStorage().SetHash(d.ctx, oldHash, newHash) if err != nil { d.log.Warn("can't write space hash", zap.Error(err)) } @@ -117,13 +129,13 @@ func (d *diffSyncer) Sync(ctx context.Context) error { d.log.DebugCtx(ctx, "start diffsync", zap.Strings("peerIds", peerIds)) for _, p := range peers { if err = d.syncWithPeer(peer.CtxWithPeerAddr(ctx, p.Id()), p); err != 
nil { - if !errors.Is(err, &quic.IdleTimeoutError{}) && !errors.Is(err, context.DeadlineExceeded) { + var idleTimeoutErr *quic.IdleTimeoutError + if !errors.As(err, &idleTimeoutErr) && !errors.Is(err, context.DeadlineExceeded) { d.log.ErrorCtx(ctx, "can't sync with peer", zap.String("peer", p.Id()), zap.Error(err)) } } } d.log.DebugCtx(ctx, "diff done", zap.String("spaceId", d.spaceId), zap.Duration("dur", time.Since(st))) - d.peerManager.KeepAlive(ctx) return nil } @@ -150,28 +162,49 @@ func (d *diffSyncer) syncWithPeer(ctx context.Context, p peer.Peer) (err error) syncAclId = d.syncAcl.Id() newIds, changedIds, removedIds []string ) - - newIds, changedIds, removedIds, err = d.diff.Diff(ctx, rdiff) + storageId := d.keyValue.DefaultStore().Id() + needsSync, diff, err := d.diffContainer.DiffTypeCheck(ctx, rdiff) err = rpcerr.Unwrap(err) if err != nil { return d.onDiffError(ctx, p, cl, err) } + if needsSync { + newIds, changedIds, removedIds, err = diff.Diff(ctx, rdiff) + err = rpcerr.Unwrap(err) + if err != nil { + return d.onDiffError(ctx, p, cl, err) + } + } totalLen := len(newIds) + len(changedIds) + len(removedIds) // not syncing ids which were removed through settings document missingIds := d.deletionState.Filter(newIds) existingIds := append(d.deletionState.Filter(removedIds), d.deletionState.Filter(changedIds)...) - - prevExistingLen := len(existingIds) + var ( + isStorage = false + isAcl = false + ) existingIds = slice.DiscardFromSlice(existingIds, func(s string) bool { - return s == syncAclId + if s == storageId { + isStorage = true + return true + } + if s == syncAclId { + isAcl = true + return true + } + return false }) - // if we removed acl head from the list - if len(existingIds) < prevExistingLen { + if isAcl { if syncErr := d.syncAcl.SyncWithPeer(ctx, p); syncErr != nil { log.Warn("failed to send acl sync message to peer", zap.String("aclId", syncAclId)) } } + if isStorage { + if err = d.keyValue.SyncWithPeer(p); err != nil { + log.Warn("failed to send storage sync message to peer", zap.String("storageId", storageId)) + } + } // treeSyncer should not get acl id, that's why we filter existing ids before err = d.treeSyncer.SyncAll(ctx, p, existingIds, missingIds) diff --git a/commonspace/headsync/diffsyncer_test.go b/commonspace/headsync/diffsyncer_test.go index fd499a36..e9915670 100644 --- a/commonspace/headsync/diffsyncer_test.go +++ b/commonspace/headsync/diffsyncer_test.go @@ -80,6 +80,7 @@ func TestDiffSyncer(t *testing.T) { fx.peerManagerMock.EXPECT(). GetResponsiblePeers(gomock.Any()). Return([]peer.Peer{mPeer}, nil) + fx.diffContainerMock.EXPECT().DiffTypeCheck(gomock.Any(), gomock.Any()).Return(true, fx.diffMock, nil) fx.diffMock.EXPECT(). Diff(gomock.Any(), gomock.Eq(NewRemoteDiff(fx.spaceState.SpaceId, fx.clientMock))). Return([]string{"new"}, []string{"changed"}, nil, nil) @@ -103,6 +104,7 @@ func TestDiffSyncer(t *testing.T) { fx.peerManagerMock.EXPECT(). GetResponsiblePeers(gomock.Any()). Return([]peer.Peer{mPeer}, nil) + fx.diffContainerMock.EXPECT().DiffTypeCheck(gomock.Any(), gomock.Any()).Return(true, fx.diffMock, nil) fx.diffMock.EXPECT(). Diff(gomock.Any(), gomock.Eq(remDiff)). 
Return([]string{"new"}, []string{"changed"}, nil, nil) @@ -116,6 +118,31 @@ func TestDiffSyncer(t *testing.T) { require.NoError(t, fx.diffSyncer.Sync(ctx)) }) + t.Run("diff syncer sync, store changed", func(t *testing.T) { + fx := newHeadSyncFixture(t) + fx.initDiffSyncer(t) + defer fx.stop() + mPeer := rpctest.MockPeer{} + remDiff := NewRemoteDiff(fx.spaceState.SpaceId, fx.clientMock) + fx.treeSyncerMock.EXPECT().ShouldSync(gomock.Any()).Return(true) + fx.aclMock.EXPECT().Id().AnyTimes().Return("aclId") + fx.peerManagerMock.EXPECT(). + GetResponsiblePeers(gomock.Any()). + Return([]peer.Peer{mPeer}, nil) + fx.diffContainerMock.EXPECT().DiffTypeCheck(gomock.Any(), gomock.Any()).Return(true, fx.diffMock, nil) + fx.diffMock.EXPECT(). + Diff(gomock.Any(), gomock.Eq(remDiff)). + Return([]string{"new"}, []string{"changed"}, nil, nil) + fx.deletionStateMock.EXPECT().Filter([]string{"new"}).Return([]string{"new"}).Times(1) + fx.deletionStateMock.EXPECT().Filter([]string{"changed"}).Return([]string{"changed", "store"}).Times(1) + fx.deletionStateMock.EXPECT().Filter(nil).Return(nil).Times(1) + fx.treeSyncerMock.EXPECT().SyncAll(gomock.Any(), mPeer, []string{"changed"}, []string{"new"}).Return(nil) + fx.kvMock.EXPECT().SyncWithPeer(mPeer).Return(nil) + fx.peerManagerMock.EXPECT().KeepAlive(gomock.Any()) + + require.NoError(t, fx.diffSyncer.Sync(ctx)) + }) + t.Run("diff syncer sync conf error", func(t *testing.T) { fx := newHeadSyncFixture(t) fx.initDiffSyncer(t) @@ -133,9 +160,12 @@ func TestDiffSyncer(t *testing.T) { fx.initDiffSyncer(t) defer fx.stop() deletedId := "id" - fx.diffMock.EXPECT().RemoveId(deletedId).Return(nil) - fx.diffMock.EXPECT().Hash().Return("hash") - fx.stateStorage.EXPECT().SetHash(gomock.Any(), "hash").Return(nil) + fx.diffContainerMock.EXPECT().RemoveId(deletedId).Return(nil) + fx.diffContainerMock.EXPECT().NewDiff().Return(fx.diffMock) + fx.diffContainerMock.EXPECT().OldDiff().Return(fx.diffMock) + fx.diffMock.EXPECT().Hash().AnyTimes().Return("hash") + fx.stateStorage.EXPECT().SetHash(gomock.Any(), "hash", "hash").Return(nil) + upd := headstorage.DeletedStatusDeleted fx.diffSyncer.updateHeads(headstorage.HeadsUpdate{ Id: "id", @@ -150,11 +180,14 @@ func TestDiffSyncer(t *testing.T) { updatedId := "id" fx.diffMock.EXPECT().Hash().Return("hash") fx.deletionStateMock.EXPECT().Exists(updatedId).Return(false) - fx.diffMock.EXPECT().Set(ldiff.Element{ + fx.diffContainerMock.EXPECT().Set(ldiff.Element{ Id: updatedId, Head: "head", }) - fx.stateStorage.EXPECT().SetHash(gomock.Any(), "hash").Return(nil) + fx.diffContainerMock.EXPECT().NewDiff().Return(fx.diffMock) + fx.diffContainerMock.EXPECT().OldDiff().Return(fx.diffMock) + fx.diffMock.EXPECT().Hash().AnyTimes().Return("hash") + fx.stateStorage.EXPECT().SetHash(gomock.Any(), "hash", "hash").Return(nil) fx.diffSyncer.updateHeads(headstorage.HeadsUpdate{ Id: "id", Heads: []string{"head"}, @@ -180,6 +213,7 @@ func TestDiffSyncer(t *testing.T) { fx.peerManagerMock.EXPECT(). GetResponsiblePeers(gomock.Any()). Return([]peer.Peer{rpctest.MockPeer{}}, nil) + fx.diffContainerMock.EXPECT().DiffTypeCheck(gomock.Any(), gomock.Any()).Return(true, fx.diffMock, nil) fx.diffMock.EXPECT(). Diff(gomock.Any(), gomock.Eq(remDiff)). Return(nil, nil, nil, spacesyncproto.ErrSpaceMissing) @@ -219,6 +253,8 @@ func TestDiffSyncer(t *testing.T) { fx.peerManagerMock.EXPECT(). GetResponsiblePeers(gomock.Any()). 
Return([]peer.Peer{rpctest.MockPeer{}}, nil) + + fx.diffContainerMock.EXPECT().DiffTypeCheck(gomock.Any(), gomock.Any()).Return(true, fx.diffMock, nil) fx.diffMock.EXPECT(). Diff(gomock.Any(), gomock.Eq(remDiff)). Return(nil, nil, nil, spacesyncproto.ErrUnexpected) @@ -232,15 +268,12 @@ func TestDiffSyncer(t *testing.T) { fx.initDiffSyncer(t) defer fx.stop() mPeer := rpctest.MockPeer{} - remDiff := NewRemoteDiff(fx.spaceState.SpaceId, fx.clientMock) fx.treeSyncerMock.EXPECT().ShouldSync(gomock.Any()).Return(true) fx.aclMock.EXPECT().Id().AnyTimes().Return("aclId") fx.peerManagerMock.EXPECT(). GetResponsiblePeers(gomock.Any()). Return([]peer.Peer{mPeer}, nil) - fx.diffMock.EXPECT(). - Diff(gomock.Any(), gomock.Eq(remDiff)). - Return(nil, nil, nil, spacesyncproto.ErrSpaceIsDeleted) + fx.diffContainerMock.EXPECT().DiffTypeCheck(gomock.Any(), gomock.Any()).Return(true, fx.diffMock, spacesyncproto.ErrSpaceIsDeleted) fx.peerManagerMock.EXPECT().KeepAlive(gomock.Any()) require.NoError(t, fx.diffSyncer.Sync(ctx)) diff --git a/commonspace/headsync/headstorage/mock_headstorage/mock_headstorage.go b/commonspace/headsync/headstorage/mock_headstorage/mock_headstorage.go index ab1d1b82..71cdec39 100644 --- a/commonspace/headsync/headstorage/mock_headstorage/mock_headstorage.go +++ b/commonspace/headsync/headstorage/mock_headstorage/mock_headstorage.go @@ -5,7 +5,6 @@ // // mockgen -destination mock_headstorage/mock_headstorage.go github.com/anyproto/any-sync/commonspace/headsync/headstorage HeadStorage // - // Package mock_headstorage is a generated GoMock package. package mock_headstorage diff --git a/commonspace/headsync/headsync.go b/commonspace/headsync/headsync.go index 0030ac92..6c701c6b 100644 --- a/commonspace/headsync/headsync.go +++ b/commonspace/headsync/headsync.go @@ -10,11 +10,13 @@ import ( "github.com/anyproto/any-sync/app" "github.com/anyproto/any-sync/app/ldiff" "github.com/anyproto/any-sync/app/logger" + "github.com/anyproto/any-sync/app/olddiff" "github.com/anyproto/any-sync/commonspace/config" "github.com/anyproto/any-sync/commonspace/credentialprovider" "github.com/anyproto/any-sync/commonspace/deletionstate" "github.com/anyproto/any-sync/commonspace/headsync/headstorage" "github.com/anyproto/any-sync/commonspace/object/acl/syncacl" + "github.com/anyproto/any-sync/commonspace/object/keyvalue/kvinterfaces" "github.com/anyproto/any-sync/commonspace/object/treesyncer" "github.com/anyproto/any-sync/commonspace/peermanager" "github.com/anyproto/any-sync/commonspace/spacestate" @@ -37,7 +39,6 @@ type TreeHeads struct { type HeadSync interface { app.ComponentRunnable ExternalIds() []string - DebugAllHeads() (res []TreeHeads) AllIds() []string HandleRangeRequest(ctx context.Context, req *spacesyncproto.HeadSyncRequest) (resp *spacesyncproto.HeadSyncResponse, err error) } @@ -49,7 +50,7 @@ type headSync struct { periodicSync periodicsync.PeriodicSync storage spacestorage.SpaceStorage - diff ldiff.Diff + diffContainer ldiff.DiffContainer log logger.CtxLogger syncer DiffSyncer configuration nodeconf.NodeConf @@ -58,6 +59,7 @@ type headSync struct { credentialProvider credentialprovider.CredentialProvider deletionState deletionstate.ObjectDeletionState syncAcl syncacl.SyncAcl + keyValue kvinterfaces.KeyValueService } func New() HeadSync { @@ -75,11 +77,12 @@ func (h *headSync) Init(a *app.App) (err error) { h.configuration = a.MustComponent(nodeconf.CName).(nodeconf.NodeConf) h.log = log.With(zap.String("spaceId", h.spaceId)) h.storage = 
a.MustComponent(spacestorage.CName).(spacestorage.SpaceStorage) - h.diff = ldiff.New(32, 256) + h.diffContainer = ldiff.NewDiffContainer(ldiff.New(32, 256), olddiff.New(32, 256)) h.peerManager = a.MustComponent(peermanager.CName).(peermanager.PeerManager) h.credentialProvider = a.MustComponent(credentialprovider.CName).(credentialprovider.CredentialProvider) h.treeSyncer = a.MustComponent(treesyncer.CName).(treesyncer.TreeSyncer) h.deletionState = a.MustComponent(deletionstate.CName).(deletionstate.ObjectDeletionState) + h.keyValue = a.MustComponent(kvinterfaces.CName).(kvinterfaces.KeyValueService) h.syncer = createDiffSyncer(h) sync := func(ctx context.Context) (err error) { return h.syncer.Sync(ctx) @@ -103,39 +106,26 @@ func (h *headSync) Run(ctx context.Context) (err error) { } func (h *headSync) HandleRangeRequest(ctx context.Context, req *spacesyncproto.HeadSyncRequest) (resp *spacesyncproto.HeadSyncResponse, err error) { - resp, err = HandleRangeRequest(ctx, h.diff, req) - if err != nil { - return + if req.DiffType == spacesyncproto.DiffType_V2 { + return HandleRangeRequest(ctx, h.diffContainer.NewDiff(), req) + } else { + return HandleRangeRequest(ctx, h.diffContainer.OldDiff(), req) } - // this is done to fix the problem with compatibility with old clients - resp.DiffType = spacesyncproto.DiffType_Precalculated - return } func (h *headSync) AllIds() []string { - return h.diff.Ids() + return h.diffContainer.NewDiff().Ids() } func (h *headSync) ExternalIds() []string { settingsId := h.storage.StateStorage().SettingsId() aclId := h.syncAcl.Id() + keyValueId := h.keyValue.DefaultStore().Id() return slice.DiscardFromSlice(h.AllIds(), func(id string) bool { - return id == settingsId || id == aclId + return id == settingsId || id == aclId || id == keyValueId }) } -func (h *headSync) DebugAllHeads() (res []TreeHeads) { - els := h.diff.Elements() - for _, el := range els { - idHead := TreeHeads{ - Id: el.Id, - Heads: splitString(el.Head), - } - res = append(res, idHead) - } - return -} - func (h *headSync) Close(ctx context.Context) (err error) { h.syncer.Close() h.periodicSync.Close() @@ -144,14 +134,23 @@ func (h *headSync) Close(ctx context.Context) (err error) { func (h *headSync) fillDiff(ctx context.Context) error { var els = make([]ldiff.Element, 0, 100) + var aclOrStorage []ldiff.Element err := h.storage.HeadStorage().IterateEntries(ctx, headstorage.IterOpts{}, func(entry headstorage.HeadsEntry) (bool, error) { if entry.IsDerived && entry.Heads[0] == entry.Id { return true, nil } - els = append(els, ldiff.Element{ - Id: entry.Id, - Head: concatStrings(entry.Heads), - }) + if entry.CommonSnapshot != "" { + els = append(els, ldiff.Element{ + Id: entry.Id, + Head: concatStrings(entry.Heads), + }) + } else { + // this whole stuff is done to prevent storage hash from being set to old diff + aclOrStorage = append(aclOrStorage, ldiff.Element{ + Id: entry.Id, + Head: concatStrings(entry.Heads), + }) + } return true, nil }) if err != nil { @@ -162,8 +161,12 @@ func (h *headSync) fillDiff(ctx context.Context) error { Head: h.syncAcl.Head().Id, }) log.Debug("setting acl", zap.String("aclId", h.syncAcl.Id()), zap.String("headId", h.syncAcl.Head().Id)) - h.diff.Set(els...) - if err := h.storage.StateStorage().SetHash(ctx, h.diff.Hash()); err != nil { + h.diffContainer.Set(els...) + // acl will be set twice to the diff but it doesn't matter + h.diffContainer.NewDiff().Set(aclOrStorage...) 
+ oldHash := h.diffContainer.OldDiff().Hash() + newHash := h.diffContainer.NewDiff().Hash() + if err := h.storage.StateStorage().SetHash(ctx, oldHash, newHash); err != nil { h.log.Error("can't write space hash", zap.Error(err)) return err } diff --git a/commonspace/headsync/headsync_test.go b/commonspace/headsync/headsync_test.go index 731d3b21..3ec4ee72 100644 --- a/commonspace/headsync/headsync_test.go +++ b/commonspace/headsync/headsync_test.go @@ -22,6 +22,9 @@ import ( "github.com/anyproto/any-sync/commonspace/object/acl/list" "github.com/anyproto/any-sync/commonspace/object/acl/syncacl" "github.com/anyproto/any-sync/commonspace/object/acl/syncacl/mock_syncacl" + "github.com/anyproto/any-sync/commonspace/object/keyvalue/keyvaluestorage/mock_keyvaluestorage" + "github.com/anyproto/any-sync/commonspace/object/keyvalue/kvinterfaces" + "github.com/anyproto/any-sync/commonspace/object/keyvalue/kvinterfaces/mock_kvinterfaces" "github.com/anyproto/any-sync/commonspace/object/treemanager" "github.com/anyproto/any-sync/commonspace/object/treemanager/mock_treemanager" "github.com/anyproto/any-sync/commonspace/object/treesyncer" @@ -34,6 +37,7 @@ import ( "github.com/anyproto/any-sync/commonspace/spacesyncproto/mock_spacesyncproto" "github.com/anyproto/any-sync/nodeconf" "github.com/anyproto/any-sync/nodeconf/mock_nodeconf" + "github.com/anyproto/any-sync/testutil/anymock" ) type mockConfig struct { @@ -57,6 +61,8 @@ type headSyncFixture struct { app *app.App configurationMock *mock_nodeconf.MockService + kvMock *mock_kvinterfaces.MockKeyValueService + defStoreMock *mock_keyvaluestorage.MockStorage storageMock *mock_spacestorage.MockSpaceStorage peerManagerMock *mock_peermanager.MockPeerManager credentialProviderMock *mock_credentialprovider.MockCredentialProvider @@ -65,6 +71,7 @@ type headSyncFixture struct { diffSyncerMock *mock_headsync.MockDiffSyncer treeSyncerMock *mock_treesyncer.MockTreeSyncer diffMock *mock_ldiff.MockDiff + diffContainerMock *mock_ldiff.MockDiffContainer clientMock *mock_spacesyncproto.MockDRPCSpaceSyncClient aclMock *mock_syncacl.MockSyncAcl headStorage *mock_headstorage.MockHeadStorage @@ -91,9 +98,15 @@ func newHeadSyncFixture(t *testing.T) *headSyncFixture { deletionStateMock := mock_deletionstate.NewMockObjectDeletionState(ctrl) deletionStateMock.EXPECT().Name().AnyTimes().Return(deletionstate.CName) diffSyncerMock := mock_headsync.NewMockDiffSyncer(ctrl) + diffContainerMock := mock_ldiff.NewMockDiffContainer(ctrl) treeSyncerMock := mock_treesyncer.NewMockTreeSyncer(ctrl) headStorage := mock_headstorage.NewMockHeadStorage(ctrl) stateStorage := mock_statestorage.NewMockStateStorage(ctrl) + kvMock := mock_kvinterfaces.NewMockKeyValueService(ctrl) + anymock.ExpectComp(kvMock.EXPECT(), kvinterfaces.CName) + defStore := mock_keyvaluestorage.NewMockStorage(ctrl) + kvMock.EXPECT().DefaultStore().Return(defStore).AnyTimes() + defStore.EXPECT().Id().Return("store").AnyTimes() storageMock.EXPECT().HeadStorage().AnyTimes().Return(headStorage) storageMock.EXPECT().StateStorage().AnyTimes().Return(stateStorage) treeSyncerMock.EXPECT().Name().AnyTimes().Return(treesyncer.CName) @@ -106,6 +119,7 @@ func newHeadSyncFixture(t *testing.T) *headSyncFixture { a := &app.App{} a.Register(spaceState). Register(aclMock). + Register(kvMock). Register(mockConfig{}). Register(configurationMock). Register(storageMock). 
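Reviewer note on the fillDiff hunk above: head entries that carry a CommonSnapshot are fed to the whole container (old and new diff), while entries without one (the acl record and the key-value store) go only to the V2 diff, so the legacy hash is not affected by them. Below is a simplified sketch of that routing, not the shipped code; it assumes DiffContainer.Set targets both diffs while NewDiff().Set touches only the V2 one, and it inlines strings.Join in place of the package-local concatStrings helper.

package headsyncsketch

import (
	"context"
	"strings"

	"github.com/anyproto/any-sync/app/ldiff"
	"github.com/anyproto/any-sync/commonspace/headsync/headstorage"
)

// fillDiff sketches the routing: trees with a common snapshot stay visible to
// old and new clients, everything else only contributes to the V2 hash.
func fillDiff(ctx context.Context, container ldiff.DiffContainer, hs headstorage.HeadStorage) error {
	var both, newOnly []ldiff.Element
	err := hs.IterateEntries(ctx, headstorage.IterOpts{}, func(e headstorage.HeadsEntry) (bool, error) {
		if e.IsDerived && e.Heads[0] == e.Id {
			return true, nil // derived object that was never changed: skip
		}
		el := ldiff.Element{Id: e.Id, Head: strings.Join(e.Heads, "")} // same result as concatStrings
		if e.CommonSnapshot != "" {
			both = append(both, el)
		} else {
			newOnly = append(newOnly, el)
		}
		return true, nil
	})
	if err != nil {
		return err
	}
	container.Set(both...)              // applied to OldDiff and NewDiff
	container.NewDiff().Set(newOnly...) // applied to the V2 diff only
	return nil
}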
@@ -119,8 +133,11 @@ func newHeadSyncFixture(t *testing.T) *headSyncFixture { spaceState: spaceState, ctrl: ctrl, app: a, + kvMock: kvMock, + defStoreMock: defStore, configurationMock: configurationMock, storageMock: storageMock, + diffContainerMock: diffContainerMock, peerManagerMock: peerManagerMock, credentialProviderMock: credentialProviderMock, treeManagerMock: treeManagerMock, @@ -144,7 +161,7 @@ func (fx *headSyncFixture) init(t *testing.T) { fx.headStorage.EXPECT().AddObserver(gomock.Any()) err := fx.headSync.Init(fx.app) require.NoError(t, err) - fx.headSync.diff = fx.diffMock + fx.headSync.diffContainer = fx.diffContainerMock } func (fx *headSyncFixture) stop() { @@ -161,14 +178,16 @@ func TestHeadSync(t *testing.T) { headEntries := []headstorage.HeadsEntry{ { - Id: "id1", - Heads: []string{"h1", "h2"}, - IsDerived: false, + Id: "id1", + Heads: []string{"h1", "h2"}, + CommonSnapshot: "id1", + IsDerived: false, }, { - Id: "id2", - Heads: []string{"h3", "h4"}, - IsDerived: false, + Id: "id2", + Heads: []string{"h3", "h4"}, + CommonSnapshot: "id2", + IsDerived: false, }, } fx.headStorage.EXPECT().IterateEntries(gomock.Any(), gomock.Any(), gomock.Any()). @@ -183,7 +202,7 @@ func TestHeadSync(t *testing.T) { fx.aclMock.EXPECT().Id().AnyTimes().Return("aclId") fx.aclMock.EXPECT().Head().AnyTimes().Return(&list.AclRecord{Id: "headId"}) - fx.diffMock.EXPECT().Set(ldiff.Element{ + fx.diffContainerMock.EXPECT().Set(ldiff.Element{ Id: "id1", Head: "h1h2", }, ldiff.Element{ @@ -193,8 +212,11 @@ func TestHeadSync(t *testing.T) { Id: "aclId", Head: "headId", }) - fx.diffMock.EXPECT().Hash().Return("hash") - fx.stateStorage.EXPECT().SetHash(gomock.Any(), "hash").Return(nil) + fx.diffMock.EXPECT().Set([]ldiff.Element{}) + fx.diffContainerMock.EXPECT().NewDiff().AnyTimes().Return(fx.diffMock) + fx.diffContainerMock.EXPECT().OldDiff().AnyTimes().Return(fx.diffMock) + fx.diffMock.EXPECT().Hash().AnyTimes().Return("hash") + fx.stateStorage.EXPECT().SetHash(gomock.Any(), "hash", "hash").Return(nil) fx.diffSyncerMock.EXPECT().Sync(gomock.Any()).Return(nil) fx.diffSyncerMock.EXPECT().Close() err := fx.headSync.Run(ctx) @@ -210,14 +232,16 @@ func TestHeadSync(t *testing.T) { headEntries := []headstorage.HeadsEntry{ { - Id: "id1", - Heads: []string{"id1"}, - IsDerived: true, + Id: "id1", + Heads: []string{"id1"}, + CommonSnapshot: "id1", + IsDerived: true, }, { - Id: "id2", - Heads: []string{"h3", "h4"}, - IsDerived: false, + Id: "id2", + Heads: []string{"h3", "h4"}, + CommonSnapshot: "id2", + IsDerived: false, }, } fx.headStorage.EXPECT().IterateEntries(gomock.Any(), gomock.Any(), gomock.Any()). 
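The diffsyncer tests earlier in this patch expect a DiffTypeCheck call before any Diff, and the remotediff.go hunk further below adds that method on the remote. The following sketch shows how the two calls compose, using the remote-side signature shown in that hunk; the production syncer may route the call through the container instead, and the helper name here is illustrative only.

package diffsyncsketch

import (
	"context"

	"github.com/anyproto/any-sync/app/ldiff"
	"github.com/anyproto/any-sync/commonspace/headsync"
)

// syncWithRemote: DiffTypeCheck sends one full-range request, learns which
// diff variant the peer speaks and compares top-level hashes; Diff then does
// the usual range walk against that variant.
func syncWithRemote(ctx context.Context, container ldiff.DiffContainer, rd headsync.RemoteDiff) (newIds, changedIds, removedIds []string, err error) {
	needsSync, diff, err := rd.DiffTypeCheck(ctx, container)
	if err != nil {
		return nil, nil, nil, err
	}
	if !needsSync {
		// Hashes already match, nothing to exchange.
		return nil, nil, nil, nil
	}
	return diff.Diff(ctx, rd)
}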
@@ -232,15 +256,18 @@ func TestHeadSync(t *testing.T) { fx.aclMock.EXPECT().Id().AnyTimes().Return("aclId") fx.aclMock.EXPECT().Head().AnyTimes().Return(&list.AclRecord{Id: "headId"}) - fx.diffMock.EXPECT().Set(ldiff.Element{ + fx.diffContainerMock.EXPECT().Set(ldiff.Element{ Id: "id2", Head: "h3h4", }, ldiff.Element{ Id: "aclId", Head: "headId", }) - fx.diffMock.EXPECT().Hash().Return("hash") - fx.stateStorage.EXPECT().SetHash(gomock.Any(), "hash").Return(nil) + fx.diffMock.EXPECT().Set([]ldiff.Element{}) + fx.diffContainerMock.EXPECT().NewDiff().AnyTimes().Return(fx.diffMock) + fx.diffContainerMock.EXPECT().OldDiff().AnyTimes().Return(fx.diffMock) + fx.diffMock.EXPECT().Hash().AnyTimes().Return("hash") + fx.stateStorage.EXPECT().SetHash(gomock.Any(), "hash", "hash").Return(nil) fx.diffSyncerMock.EXPECT().Sync(gomock.Any()).Return(nil) fx.diffSyncerMock.EXPECT().Close() err := fx.headSync.Run(ctx) diff --git a/commonspace/headsync/mock_headsync/mock_headsync.go b/commonspace/headsync/mock_headsync/mock_headsync.go index 5b1d8c81..83d5c812 100644 --- a/commonspace/headsync/mock_headsync/mock_headsync.go +++ b/commonspace/headsync/mock_headsync/mock_headsync.go @@ -5,7 +5,6 @@ // // mockgen -destination mock_headsync/mock_headsync.go github.com/anyproto/any-sync/commonspace/headsync DiffSyncer // - // Package mock_headsync is a generated GoMock package. package mock_headsync diff --git a/commonspace/headsync/remotediff.go b/commonspace/headsync/remotediff.go index 8c70006e..f50708bf 100644 --- a/commonspace/headsync/remotediff.go +++ b/commonspace/headsync/remotediff.go @@ -1,7 +1,10 @@ package headsync import ( + "bytes" "context" + "encoding/hex" + "math" "github.com/anyproto/any-sync/app/ldiff" "github.com/anyproto/any-sync/commonspace/spacesyncproto" @@ -12,6 +15,7 @@ type Client interface { } type RemoteDiff interface { + ldiff.RemoteTypeChecker ldiff.Remote } @@ -68,6 +72,39 @@ func (r *remote) Ranges(ctx context.Context, ranges []ldiff.Range, resBuf []ldif return } +func (r *remote) DiffTypeCheck(ctx context.Context, diffContainer ldiff.DiffContainer) (needsSync bool, diff ldiff.Diff, err error) { + req := &spacesyncproto.HeadSyncRequest{ + SpaceId: r.spaceId, + DiffType: spacesyncproto.DiffType_V2, + Ranges: []*spacesyncproto.HeadSyncRange{{From: 0, To: math.MaxUint64}}, + } + resp, err := r.client.HeadSync(ctx, req) + if err != nil { + return + } + needsSync = true + checkHash := func(diff ldiff.Diff) (bool, error) { + hashB, err := hex.DecodeString(diff.Hash()) + if err != nil { + return false, err + } + if len(resp.Results) != 0 && bytes.Equal(hashB, resp.Results[0].Hash) { + return false, nil + } + return true, nil + } + r.diffType = resp.DiffType + switch resp.DiffType { + case spacesyncproto.DiffType_V2: + diff = diffContainer.NewDiff() + needsSync, err = checkHash(diff) + default: + diff = diffContainer.OldDiff() + needsSync, err = checkHash(diff) + } + return +} + func HandleRangeRequest(ctx context.Context, d ldiff.Diff, req *spacesyncproto.HeadSyncRequest) (resp *spacesyncproto.HeadSyncResponse, err error) { ranges := make([]ldiff.Range, 0, len(req.Ranges)) // basically we gather data applicable for both diffs @@ -104,5 +141,6 @@ func HandleRangeRequest(ctx context.Context, d ldiff.Diff, req *spacesyncproto.H Count: uint32(rangeRes.Count), }) } + resp.DiffType = d.DiffType() return } diff --git a/commonspace/headsync/remotediff_test.go b/commonspace/headsync/remotediff_test.go index 24ff0bb6..a9a6c65b 100644 --- a/commonspace/headsync/remotediff_test.go +++ 
b/commonspace/headsync/remotediff_test.go @@ -2,6 +2,7 @@ package headsync import ( "context" + "crypto/rand" "fmt" "testing" @@ -9,46 +10,88 @@ import ( "github.com/stretchr/testify/require" "github.com/anyproto/any-sync/app/ldiff" + "github.com/anyproto/any-sync/app/olddiff" "github.com/anyproto/any-sync/commonspace/spacesyncproto" ) -func TestRemote(t *testing.T) { - contLocal := ldiff.New(32, 256) - contRemote := ldiff.New(32, 256) +func benchmarkDifferentDiffs(t *testing.T, diffFactory func() ldiff.Diff, headLength int) { + moduloValues := []int{1, 10, 100, 1000, 10000, 100000} + totalElements := 100000 - test := func(t *testing.T, ldLocal, ldRemote ldiff.Diff) { - var ( - localEls []ldiff.Element - remoteEls []ldiff.Element - ) + for _, modVal := range moduloValues { + t.Run(fmt.Sprintf("New_%d", totalElements/modVal), func(t *testing.T) { + // Create a new diff instance for each test using the factory + contLocal := diffFactory() + contRemote := diffFactory() + remClient := &mockClient{t: t, l: contRemote} - for i := 0; i < 100000; i++ { - el := ldiff.Element{ - Id: fmt.Sprint(i), - Head: fmt.Sprint(i), + var ( + localEls []ldiff.Element + remoteEls []ldiff.Element + ) + + buf := make([]byte, headLength) + _, _ = rand.Read(buf) + + for i := 0; i < totalElements; i++ { + el := ldiff.Element{ + Id: fmt.Sprint(i), + Head: string(buf), + } + remoteEls = append(remoteEls, el) + if i%modVal != 0 { + localEls = append(localEls, el) + } } - remoteEls = append(remoteEls, el) - if i%100 == 0 { - localEls = append(localEls, el) - } - } - ldLocal.Set(localEls...) - ldRemote.Set(remoteEls...) - rd := NewRemoteDiff("1", &mockClient{l: ldRemote}) - newIds, changedIds, removedIds, err := ldLocal.Diff(context.Background(), rd) - require.NoError(t, err) - assert.Len(t, newIds, 99000) - assert.Len(t, changedIds, 0) - assert.Len(t, removedIds, 0) + contLocal.Set(localEls...) + remClient.l.Set(remoteEls...) 
+ + rd := NewRemoteDiff("1", remClient) + newIds, changedIds, removedIds, err := contLocal.Diff(context.Background(), rd) + require.NoError(t, err) + + expectedNewCount := totalElements / modVal + assert.Len(t, newIds, expectedNewCount) + assert.Len(t, changedIds, 0) + assert.Len(t, removedIds, 0) + + fmt.Printf("New count %d: total bytes sent: %d, %d\n", expectedNewCount, remClient.totalInSent, remClient.totalOutSent) + }) } - test(t, contLocal, contRemote) +} + +func TestBenchRemoteWithDifferentCounts(t *testing.T) { + t.Run("StandardLdiff", func(t *testing.T) { + benchmarkDifferentDiffs(t, func() ldiff.Diff { + return ldiff.New(32, 256) + }, 32) + }) + //old has higher head lengths because of hashes + t.Run("OldLdiff", func(t *testing.T) { + benchmarkDifferentDiffs(t, func() ldiff.Diff { + return olddiff.New(32, 256) + }, 100) + }) } type mockClient struct { - l ldiff.Diff + l ldiff.Diff + totalInSent int + totalOutSent int + t *testing.T } func (m *mockClient) HeadSync(ctx context.Context, in *spacesyncproto.HeadSyncRequest) (*spacesyncproto.HeadSyncResponse, error) { - return HandleRangeRequest(ctx, m.l, in) + res, err := in.Marshal() + require.NoError(m.t, err) + m.totalInSent += len(res) + resp, err := HandleRangeRequest(ctx, m.l, in) + if err != nil { + return nil, err + } + marsh, err := resp.Marshal() + require.NoError(m.t, err) + m.totalOutSent += len(marsh) + return resp, nil } diff --git a/commonspace/headsync/statestorage/mock_statestorage/mock_statestorage.go b/commonspace/headsync/statestorage/mock_statestorage/mock_statestorage.go index 02d32758..139b73a3 100644 --- a/commonspace/headsync/statestorage/mock_statestorage/mock_statestorage.go +++ b/commonspace/headsync/statestorage/mock_statestorage/mock_statestorage.go @@ -5,7 +5,6 @@ // // mockgen -destination mock_statestorage/mock_statestorage.go github.com/anyproto/any-sync/commonspace/headsync/statestorage StateStorage // - // Package mock_statestorage is a generated GoMock package. package mock_statestorage @@ -56,17 +55,17 @@ func (mr *MockStateStorageMockRecorder) GetState(arg0 any) *gomock.Call { } // SetHash mocks base method. -func (m *MockStateStorage) SetHash(arg0 context.Context, arg1 string) error { +func (m *MockStateStorage) SetHash(arg0 context.Context, arg1, arg2 string) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetHash", arg0, arg1) + ret := m.ctrl.Call(m, "SetHash", arg0, arg1, arg2) ret0, _ := ret[0].(error) return ret0 } // SetHash indicates an expected call of SetHash. -func (mr *MockStateStorageMockRecorder) SetHash(arg0, arg1 any) *gomock.Call { +func (mr *MockStateStorageMockRecorder) SetHash(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHash", reflect.TypeOf((*MockStateStorage)(nil).SetHash), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHash", reflect.TypeOf((*MockStateStorage)(nil).SetHash), arg0, arg1, arg2) } // SetObserver mocks base method. 
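The two-argument SetHash mock above and the statestorage.go diff right below replace the single stored hash with an old/new pair while keeping the legacy "h" key readable. A small sketch of the read-side fallback follows; the get parameter stands in for doc.Value().GetString, and this is an illustration of the logic in stateFromDoc, not a schema definition.

package statestoragesketch

// hashesFromDoc sketches the fallback: new writes store the pair under
// "oh"/"nh", spaces written before this change only have the legacy "h" key,
// so both hashes then resolve to that single value.
func hashesFromDoc(get func(key string) string) (oldHash, newHash string) {
	oldHash = get("oh") // oldHashKey
	newHash = get("nh") // newHashKey
	if oldHash == "" || newHash == "" {
		legacy := get("h") // legacyHashKey written by earlier builds
		oldHash, newHash = legacy, legacy
	}
	return oldHash, newHash
}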
diff --git a/commonspace/headsync/statestorage/statestorage.go b/commonspace/headsync/statestorage/statestorage.go index 24a16c61..23eda96b 100644 --- a/commonspace/headsync/statestorage/statestorage.go +++ b/commonspace/headsync/statestorage/statestorage.go @@ -9,7 +9,8 @@ import ( ) type State struct { - Hash string + OldHash string + NewHash string AclId string SettingsId string SpaceId string @@ -17,20 +18,22 @@ type State struct { } type Observer interface { - OnHashChange(hash string) + OnHashChange(oldHash, newHash string) } type StateStorage interface { GetState(ctx context.Context) (State, error) SettingsId() string - SetHash(ctx context.Context, hash string) error + SetHash(ctx context.Context, oldHash, newHash string) error SetObserver(observer Observer) } const ( stateCollectionKey = "state" idKey = "id" - hashKey = "h" + oldHashKey = "oh" + newHashKey = "nh" + legacyHashKey = "h" headerKey = "e" aclIdKey = "a" settingsIdKey = "s" @@ -58,10 +61,10 @@ func (s *stateStorage) SetObserver(observer Observer) { s.observer = observer } -func (s *stateStorage) SetHash(ctx context.Context, hash string) (err error) { +func (s *stateStorage) SetHash(ctx context.Context, oldHash, newHash string) (err error) { defer func() { if s.observer != nil && err == nil { - s.observer.OnHashChange(hash) + s.observer.OnHashChange(oldHash, newHash) } }() tx, err := s.stateColl.WriteTx(ctx) @@ -69,7 +72,8 @@ func (s *stateStorage) SetHash(ctx context.Context, hash string) (err error) { return err } mod := query.ModifyFunc(func(a *anyenc.Arena, v *anyenc.Value) (result *anyenc.Value, modified bool, err error) { - v.Set(hashKey, a.NewString(hash)) + v.Set(oldHashKey, a.NewString(oldHash)) + v.Set(newHashKey, a.NewString(newHash)) return v, true, nil }) _, err = s.stateColl.UpsertId(tx.Context(), s.spaceId, mod) @@ -99,13 +103,22 @@ func New(ctx context.Context, spaceId string, store anystore.DB) (StateStorage, return storage, nil } -func Create(ctx context.Context, state State, store anystore.DB) (StateStorage, error) { - arena := &anyenc.Arena{} - stateCollection, err := store.Collection(ctx, stateCollectionKey) +func Create(ctx context.Context, state State, store anystore.DB) (st StateStorage, err error) { + tx, err := store.WriteTx(ctx) if err != nil { return nil, err } - tx, err := stateCollection.WriteTx(ctx) + storage, err := CreateTx(tx.Context(), state, store) + if err != nil { + tx.Rollback() + return nil, err + } + return storage, tx.Commit() +} + +func CreateTx(ctx context.Context, state State, store anystore.DB) (StateStorage, error) { + arena := &anyenc.Arena{} + stateCollection, err := store.Collection(ctx, stateCollectionKey) if err != nil { return nil, err } @@ -115,9 +128,8 @@ func Create(ctx context.Context, state State, store anystore.DB) (StateStorage, doc.Set(settingsIdKey, arena.NewString(state.SettingsId)) doc.Set(headerKey, arena.NewBinary(state.SpaceHeader)) doc.Set(aclIdKey, arena.NewString(state.AclId)) - err = stateCollection.Insert(tx.Context(), doc) + err = stateCollection.Insert(ctx, doc) if err != nil { - tx.Rollback() return nil, err } return &stateStorage{ @@ -126,7 +138,7 @@ func Create(ctx context.Context, state State, store anystore.DB) (StateStorage, settingsId: state.SettingsId, stateColl: stateCollection, arena: arena, - }, tx.Commit() + }, nil } func (s *stateStorage) SettingsId() string { @@ -134,11 +146,21 @@ func (s *stateStorage) SettingsId() string { } func (s *stateStorage) stateFromDoc(doc anystore.Doc) State { + var ( + oldHash = 
doc.Value().GetString(oldHashKey) + newHash = doc.Value().GetString(newHashKey) + ) + // legacy hash is used for backward compatibility, which was due to a mistake in key names + if oldHash == "" || newHash == "" { + oldHash = doc.Value().GetString(legacyHashKey) + newHash = oldHash + } return State{ SpaceId: doc.Value().GetString(idKey), SettingsId: doc.Value().GetString(settingsIdKey), AclId: doc.Value().GetString(aclIdKey), - Hash: doc.Value().GetString(hashKey), + OldHash: oldHash, + NewHash: newHash, SpaceHeader: doc.Value().GetBytes(headerKey), } } diff --git a/commonspace/headsync/util.go b/commonspace/headsync/util.go index 549a77f2..d8267ed2 100644 --- a/commonspace/headsync/util.go +++ b/commonspace/headsync/util.go @@ -1,8 +1,13 @@ package headsync -import "strings" +import ( + "strings" +) func concatStrings(strs []string) string { + if len(strs) == 1 { + return strs[0] + } var ( b strings.Builder totalLen int @@ -17,11 +22,3 @@ func concatStrings(strs []string) string { } return b.String() } - -func splitString(str string) (res []string) { - const cidLen = 59 - for i := 0; i < len(str); i += cidLen { - res = append(res, str[i:i+cidLen]) - } - return -} diff --git a/commonspace/mock_commonspace/mock_commonspace.go b/commonspace/mock_commonspace/mock_commonspace.go index ba446d9f..652ccdc0 100644 --- a/commonspace/mock_commonspace/mock_commonspace.go +++ b/commonspace/mock_commonspace/mock_commonspace.go @@ -5,7 +5,6 @@ // // mockgen -destination mock_commonspace/mock_commonspace.go github.com/anyproto/any-sync/commonspace Space // - // Package mock_commonspace is a generated GoMock package. package mock_commonspace @@ -18,6 +17,7 @@ import ( aclclient "github.com/anyproto/any-sync/commonspace/acl/aclclient" headsync "github.com/anyproto/any-sync/commonspace/headsync" syncacl "github.com/anyproto/any-sync/commonspace/object/acl/syncacl" + kvinterfaces "github.com/anyproto/any-sync/commonspace/object/keyvalue/kvinterfaces" treesyncer "github.com/anyproto/any-sync/commonspace/object/treesyncer" objecttreebuilder "github.com/anyproto/any-sync/commonspace/objecttreebuilder" spacestorage "github.com/anyproto/any-sync/commonspace/spacestorage" @@ -223,6 +223,20 @@ func (mr *MockSpaceMockRecorder) Init(arg0 any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Init", reflect.TypeOf((*MockSpace)(nil).Init), arg0) } +// KeyValue mocks base method. +func (m *MockSpace) KeyValue() kvinterfaces.KeyValueService { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "KeyValue") + ret0, _ := ret[0].(kvinterfaces.KeyValueService) + return ret0 +} + +// KeyValue indicates an expected call of KeyValue. +func (mr *MockSpaceMockRecorder) KeyValue() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "KeyValue", reflect.TypeOf((*MockSpace)(nil).KeyValue)) +} + // Storage mocks base method. 
func (m *MockSpace) Storage() spacestorage.SpaceStorage { m.ctrl.T.Helper() diff --git a/commonspace/object/acl/aclrecordproto/aclrecord.pb.go b/commonspace/object/acl/aclrecordproto/aclrecord.pb.go index 9a12af9b..d81b2684 100644 --- a/commonspace/object/acl/aclrecordproto/aclrecord.pb.go +++ b/commonspace/object/acl/aclrecordproto/aclrecord.pb.go @@ -31,6 +31,7 @@ const ( AclUserPermissions_Admin AclUserPermissions = 2 AclUserPermissions_Writer AclUserPermissions = 3 AclUserPermissions_Reader AclUserPermissions = 4 + AclUserPermissions_Guest AclUserPermissions = 5 ) var AclUserPermissions_name = map[int32]string{ @@ -39,6 +40,7 @@ var AclUserPermissions_name = map[int32]string{ 2: "Admin", 3: "Writer", 4: "Reader", + 5: "Guest", } var AclUserPermissions_value = map[string]int32{ @@ -47,6 +49,7 @@ var AclUserPermissions_value = map[string]int32{ "Admin": 2, "Writer": 3, "Reader": 4, + "Guest": 5, } func (x AclUserPermissions) String() string { @@ -1335,71 +1338,71 @@ func init() { } var fileDescriptor_c8e9f754f34e929b = []byte{ - // 1014 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0x4f, 0x6f, 0x1b, 0x45, - 0x14, 0xdf, 0xb5, 0x93, 0x38, 0x7e, 0x4e, 0x52, 0x77, 0x0a, 0xed, 0x36, 0x05, 0xcb, 0x0c, 0x6a, - 0x65, 0x55, 0xa8, 0xa9, 0x8c, 0x5a, 0xa1, 0x0a, 0x94, 0x6e, 0x93, 0x0a, 0xa7, 0xa5, 0x50, 0x4d, - 0x45, 0x41, 0x48, 0x20, 0x4d, 0x66, 0x47, 0x65, 0x61, 0xff, 0x98, 0xd9, 0xb1, 0x91, 0xbf, 0x05, - 0x37, 0xbe, 0x08, 0x57, 0xee, 0x1c, 0x38, 0xe4, 0xc8, 0x11, 0x25, 0x37, 0x6e, 0x7c, 0x03, 0x34, - 0xb3, 0xeb, 0xdd, 0x9d, 0xf5, 0xda, 0x49, 0x0e, 0x1c, 0x92, 0xec, 0xbc, 0x7f, 0xf3, 0xde, 0xef, - 0xfd, 0xe6, 0xcd, 0x04, 0x3e, 0x66, 0x71, 0x18, 0xc6, 0x51, 0x32, 0xa6, 0x8c, 0xef, 0xc5, 0xc7, - 0x3f, 0x70, 0x26, 0xf7, 0x28, 0x0b, 0xd4, 0x8f, 0xe0, 0x2c, 0x16, 0xde, 0x58, 0xc4, 0x32, 0xde, - 0xd3, 0xbf, 0x93, 0x42, 0x7a, 0x4f, 0x0b, 0x50, 0x3b, 0x17, 0xe0, 0x7f, 0x1b, 0xd0, 0x72, 0x59, - 0x40, 0xe2, 0x58, 0xa2, 0x5d, 0xd8, 0xf4, 0x3d, 0x1e, 0x49, 0x5f, 0xce, 0x1c, 0xbb, 0x6f, 0x0f, - 0xb6, 0x48, 0xbe, 0x46, 0xef, 0x40, 0x3b, 0xa4, 0x89, 0xe4, 0xe2, 0x39, 0x9f, 0x39, 0x0d, 0xad, - 0x2c, 0x04, 0xc8, 0x81, 0x96, 0x4e, 0xe5, 0xc8, 0x73, 0x9a, 0x7d, 0x7b, 0xd0, 0x26, 0xf3, 0x25, - 0xba, 0x0b, 0x5d, 0x1e, 0x31, 0x31, 0x1b, 0x4b, 0xee, 0x11, 0x4e, 0x3d, 0xe5, 0xbe, 0xa6, 0xdd, - 0x17, 0xe4, 0x6a, 0x0f, 0xe9, 0x87, 0x3c, 0x91, 0x34, 0x1c, 0x3b, 0xeb, 0x7d, 0x7b, 0xd0, 0x24, - 0x85, 0x00, 0x7d, 0x00, 0x57, 0xe7, 0xd9, 0xbc, 0xf2, 0xdf, 0x44, 0x54, 0x4e, 0x04, 0x77, 0x36, - 0x74, 0xa8, 0x45, 0x05, 0xba, 0x03, 0x3b, 0x21, 0x97, 0xd4, 0xa3, 0x92, 0xbe, 0x9c, 0x1c, 0xab, - 0x5d, 0x5b, 0xda, 0xb4, 0x22, 0x45, 0x8f, 0xc0, 0xc9, 0xf3, 0x78, 0x31, 0x57, 0x09, 0x7f, 0xaa, - 0x3c, 0x36, 0xb5, 0xc7, 0x52, 0x3d, 0x7a, 0x08, 0xd7, 0x73, 0xdd, 0x17, 0x3f, 0x47, 0x5c, 0xcc, - 0x0d, 0x9c, 0xb6, 0xf6, 0x5c, 0xa2, 0xc5, 0xf7, 0xa1, 0xeb, 0xb2, 0xc0, 0x65, 0x2c, 0x9e, 0x44, - 0xf2, 0x28, 0x9a, 0xfa, 0x92, 0xab, 0xda, 0x7d, 0xfd, 0xa5, 0x36, 0x4e, 0xc1, 0x2f, 0x04, 0xf8, - 0x77, 0x1b, 0xde, 0x2e, 0x5c, 0x08, 0xff, 0x69, 0xc2, 0x13, 0xf9, 0x2c, 0xf6, 0x23, 0x55, 0x67, - 0x6a, 0x76, 0x64, 0x76, 0xae, 0x22, 0x2d, 0xec, 0x88, 0xee, 0xfb, 0x91, 0xa7, 0x9b, 0xd8, 0x26, - 0x15, 0x29, 0xfa, 0x08, 0x6e, 0x98, 0x9e, 0x05, 0xd6, 0x4d, 0x1d, 0x78, 0x99, 0x5a, 0xb1, 0x67, - 0x8e, 0x6d, 0xd6, 0xe1, 0x7c, 0x8d, 0xff, 0xb4, 0xe1, 0xc6, 0x42, 0xfe, 0x2e, 0x63, 0x7c, 0xbc, - 0x9a, 0x75, 0x03, 0xb8, 0x22, 0x52, 0xe3, 0x4a, 0xda, 0x55, 0x71, 0x2d, 0xcf, 0x9a, 0x4b, 0x78, - 0xb6, 
0x0f, 0x9d, 0x31, 0x17, 0xa1, 0x9f, 0x24, 0x7e, 0x1c, 0x25, 0x3a, 0xd9, 0x9d, 0xe1, 0xbb, - 0xf7, 0x8a, 0x53, 0xe2, 0xb2, 0xe0, 0xcb, 0x84, 0x8b, 0x97, 0x85, 0x11, 0x29, 0x7b, 0xe0, 0x43, - 0x70, 0x16, 0xaa, 0x39, 0xe4, 0x2c, 0xf0, 0x23, 0x5e, 0x97, 0xb2, 0x5d, 0x9b, 0x32, 0x7e, 0x0c, - 0xd7, 0xab, 0x34, 0x20, 0x7c, 0x1a, 0xff, 0xc8, 0x6b, 0x9a, 0x65, 0xd7, 0x35, 0x0b, 0x7f, 0x0b, - 0xd7, 0x5c, 0x16, 0x3c, 0xad, 0xd6, 0xb7, 0x0a, 0xd1, 0x3a, 0x9c, 0x1a, 0xf5, 0x38, 0xe1, 0xef, - 0xe0, 0x56, 0x91, 0x60, 0x01, 0xc6, 0xc1, 0xf7, 0x34, 0x7a, 0xc3, 0x13, 0xb4, 0x0f, 0x2d, 0x96, - 0x7e, 0x3a, 0x76, 0xbf, 0x39, 0xe8, 0x0c, 0x6f, 0x9b, 0x10, 0x2e, 0x71, 0x24, 0x73, 0x2f, 0x3c, - 0x82, 0x9d, 0xc2, 0x2c, 0x71, 0x3d, 0x0f, 0x3d, 0x84, 0x36, 0xf5, 0x3c, 0x5f, 0xea, 0xbe, 0xa4, - 0x41, 0x9d, 0xda, 0xa0, 0xae, 0xe7, 0x91, 0xc2, 0x14, 0xff, 0x66, 0xc3, 0xb6, 0xa1, 0x5c, 0x89, - 0x41, 0xa5, 0xff, 0x8d, 0xcb, 0xf6, 0xdf, 0xa0, 0x7a, 0xd3, 0xa4, 0xfa, 0x65, 0x06, 0x1e, 0x7e, - 0x50, 0x73, 0x2a, 0x0e, 0x68, 0xc4, 0x78, 0xa0, 0xb6, 0x10, 0x66, 0xf3, 0xf3, 0x35, 0x9e, 0xc1, - 0xee, 0x72, 0x78, 0xff, 0xd7, 0xca, 0xf1, 0x3f, 0xb6, 0x9e, 0x5d, 0x59, 0x01, 0xd9, 0x8e, 0x8f, - 0xa1, 0x43, 0xd3, 0x64, 0x9e, 0xf3, 0xd9, 0xbc, 0x6f, 0x3d, 0x33, 0x6a, 0x95, 0xa4, 0xa4, 0xec, - 0x52, 0x33, 0xad, 0x1b, 0x97, 0x9e, 0xd6, 0xcd, 0x73, 0xa6, 0xf5, 0x7d, 0xb8, 0x56, 0xcc, 0xe3, - 0xa0, 0xd2, 0x9b, 0x3a, 0x15, 0x9e, 0x94, 0xe7, 0x34, 0xe1, 0x61, 0x3c, 0xe5, 0xa8, 0x07, 0x90, - 0xa1, 0xe9, 0x67, 0xbc, 0xdf, 0x22, 0x25, 0x09, 0x72, 0x61, 0x5b, 0x94, 0xc1, 0xd1, 0x85, 0x74, - 0x86, 0xb7, 0x4c, 0x34, 0x0c, 0xfc, 0x88, 0xe9, 0x81, 0x6f, 0xd6, 0xb0, 0x22, 0xdd, 0x1d, 0xff, - 0xda, 0x82, 0x2b, 0x2e, 0x0b, 0x0e, 0xe2, 0x48, 0xf2, 0x48, 0xbe, 0xa6, 0xc1, 0x84, 0xa3, 0x07, - 0xb0, 0x91, 0x8e, 0x05, 0xdd, 0xed, 0x85, 0xad, 0x8c, 0xf9, 0x32, 0xb2, 0x48, 0x66, 0x8c, 0x3e, - 0x85, 0x2d, 0xbf, 0x34, 0x73, 0xb2, 0x3c, 0xdf, 0x5b, 0xe1, 0x9c, 0x1a, 0x8e, 0x2c, 0x62, 0x38, - 0xa2, 0x43, 0xe8, 0x88, 0xe2, 0x42, 0xd2, 0x6d, 0xe8, 0x0c, 0xfb, 0xb5, 0x71, 0x4a, 0x17, 0xd7, - 0xc8, 0x22, 0x65, 0x37, 0xf4, 0x4c, 0xe1, 0x56, 0xba, 0x16, 0x74, 0x5f, 0x3a, 0x43, 0xbc, 0x2a, - 0x4e, 0x6a, 0x39, 0xb2, 0x88, 0xe9, 0x8a, 0x5e, 0x41, 0x77, 0x5c, 0x39, 0x15, 0xfa, 0x39, 0x71, - 0xd1, 0x09, 0x35, 0xb2, 0xc8, 0x42, 0x00, 0x74, 0x00, 0xdb, 0xb4, 0xcc, 0x04, 0xfd, 0xf4, 0x58, - 0x86, 0x76, 0x6a, 0xa2, 0x32, 0x33, 0x7c, 0x54, 0x10, 0x93, 0x1d, 0xad, 0x73, 0xd9, 0x91, 0x96, - 0x57, 0x3e, 0x6e, 0x2f, 0x60, 0x47, 0x18, 0x77, 0x8e, 0x7e, 0xa8, 0x74, 0x86, 0xef, 0xaf, 0xc2, - 0x2a, 0x33, 0x1d, 0x59, 0xa4, 0xe2, 0x8c, 0xbe, 0x86, 0xb7, 0x68, 0x0d, 0xd7, 0xf4, 0x1b, 0xe6, - 0x9c, 0x06, 0xe4, 0x65, 0xd6, 0x46, 0x40, 0xaf, 0xe1, 0x6a, 0x15, 0xc6, 0xc4, 0x01, 0x1d, 0xf6, - 0xce, 0x85, 0x1a, 0x91, 0x8c, 0x2c, 0xb2, 0x18, 0x02, 0x7d, 0x92, 0xcf, 0x1b, 0x75, 0x69, 0x38, - 0x1d, 0x1d, 0xf1, 0x66, 0x6d, 0x44, 0x65, 0xa0, 0xa8, 0x56, 0xb2, 0x2f, 0x51, 0x2d, 0x9d, 0xb5, - 0xce, 0xd6, 0xf9, 0x95, 0xa6, 0x96, 0x25, 0xaa, 0xa5, 0x82, 0x27, 0x2d, 0x58, 0x9f, 0xaa, 0x53, - 0x88, 0x9f, 0xea, 0x67, 0xf4, 0xa1, 0xba, 0x01, 0x1e, 0x01, 0xd0, 0xfc, 0x8c, 0x66, 0xd3, 0x70, - 0xd7, 0x0c, 0x5e, 0x3e, 0xc0, 0xa4, 0x64, 0x7d, 0xf7, 0x33, 0x40, 0x8b, 0x23, 0x18, 0x6d, 0xc2, - 0xda, 0xe7, 0x71, 0xc4, 0xbb, 0x16, 0x6a, 0xc3, 0xba, 0x7e, 0x4b, 0x76, 0x6d, 0xf5, 0xe9, 0x7a, - 0xa1, 0x1f, 0x75, 0x1b, 0x08, 0x60, 0xe3, 0x2b, 0xe1, 0x4b, 0x2e, 0xba, 0x4d, 0xf5, 0xad, 0xf8, - 0xc3, 0x45, 0x77, 0xed, 0xc9, 0xfe, 0x1f, 0xa7, 0x3d, 0xfb, 0xe4, 0xb4, 0x67, 0xff, 0x7d, 0xda, - 0xb3, 0x7f, 0x39, 0xeb, 0x59, 
0x27, 0x67, 0x3d, 0xeb, 0xaf, 0xb3, 0x9e, 0xf5, 0xcd, 0xed, 0x0b, - 0xfd, 0xff, 0x70, 0xbc, 0xa1, 0xff, 0x7c, 0xf8, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xac, 0x5c, - 0x8a, 0x61, 0x6f, 0x0c, 0x00, 0x00, + // 1020 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0x4f, 0x6f, 0xdc, 0x44, + 0x14, 0xb7, 0x77, 0x93, 0x6c, 0xf6, 0x39, 0x49, 0xdd, 0x29, 0xb4, 0x6e, 0x0a, 0xab, 0x65, 0x50, + 0xab, 0x55, 0x85, 0x9a, 0x6a, 0x51, 0x2b, 0x54, 0x81, 0x52, 0x37, 0xa9, 0xba, 0x69, 0x55, 0xa8, + 0xa6, 0xa2, 0x45, 0x48, 0x20, 0x39, 0xe3, 0x51, 0x31, 0xf8, 0xcf, 0x32, 0x9e, 0x5d, 0xb4, 0xdf, + 0x82, 0x1b, 0x5f, 0x84, 0x2b, 0x77, 0x0e, 0x1c, 0x7a, 0xe4, 0x88, 0x92, 0x1b, 0x37, 0xbe, 0x01, + 0x9a, 0xb1, 0xd7, 0xf6, 0x78, 0xbd, 0x9b, 0xe4, 0xd0, 0x43, 0x12, 0xcf, 0xfb, 0x37, 0xef, 0xfd, + 0xde, 0x6f, 0xde, 0x4c, 0xe0, 0x73, 0x9a, 0x44, 0x51, 0x12, 0xa7, 0x63, 0x8f, 0xb2, 0xbd, 0xe4, + 0xf8, 0x47, 0x46, 0xc5, 0x9e, 0x47, 0x43, 0xf9, 0xc3, 0x19, 0x4d, 0xb8, 0x3f, 0xe6, 0x89, 0x48, + 0xf6, 0xd4, 0xef, 0xb4, 0x94, 0xde, 0x51, 0x02, 0xd4, 0x2d, 0x04, 0xf8, 0xbf, 0x16, 0x74, 0x5c, + 0x1a, 0x92, 0x24, 0x11, 0x68, 0x17, 0x36, 0x03, 0x9f, 0xc5, 0x22, 0x10, 0x33, 0xc7, 0xec, 0x9b, + 0x83, 0x2d, 0x52, 0xac, 0xd1, 0x07, 0xd0, 0x8d, 0xbc, 0x54, 0x30, 0xfe, 0x8c, 0xcd, 0x9c, 0x96, + 0x52, 0x96, 0x02, 0xe4, 0x40, 0x47, 0xa5, 0x72, 0xe4, 0x3b, 0xed, 0xbe, 0x39, 0xe8, 0x92, 0xf9, + 0x12, 0xdd, 0x06, 0x9b, 0xc5, 0x94, 0xcf, 0xc6, 0x82, 0xf9, 0x84, 0x79, 0xbe, 0x74, 0x5f, 0x53, + 0xee, 0x0b, 0x72, 0xb9, 0x87, 0x08, 0x22, 0x96, 0x0a, 0x2f, 0x1a, 0x3b, 0xeb, 0x7d, 0x73, 0xd0, + 0x26, 0xa5, 0x00, 0x7d, 0x02, 0x97, 0xe7, 0xd9, 0xbc, 0x0c, 0xde, 0xc4, 0x9e, 0x98, 0x70, 0xe6, + 0x6c, 0xa8, 0x50, 0x8b, 0x0a, 0x74, 0x0b, 0x76, 0x22, 0x26, 0x3c, 0xdf, 0x13, 0xde, 0x8b, 0xc9, + 0xb1, 0xdc, 0xb5, 0xa3, 0x4c, 0x6b, 0x52, 0xf4, 0x00, 0x9c, 0x22, 0x8f, 0xe7, 0x73, 0x15, 0x0f, + 0xa6, 0xd2, 0x63, 0x53, 0x79, 0x2c, 0xd5, 0xa3, 0xfb, 0x70, 0xb5, 0xd0, 0x7d, 0xf5, 0x4b, 0xcc, + 0xf8, 0xdc, 0xc0, 0xe9, 0x2a, 0xcf, 0x25, 0x5a, 0x7c, 0x17, 0x6c, 0x97, 0x86, 0x2e, 0xa5, 0xc9, + 0x24, 0x16, 0x47, 0xf1, 0x34, 0x10, 0x4c, 0xd6, 0x1e, 0xa8, 0x2f, 0xb9, 0x71, 0x06, 0x7e, 0x29, + 0xc0, 0x7f, 0x98, 0xf0, 0x7e, 0xe9, 0x42, 0xd8, 0xcf, 0x13, 0x96, 0x8a, 0xa7, 0x49, 0x10, 0xcb, + 0x3a, 0x33, 0xb3, 0x23, 0xbd, 0x73, 0x35, 0x69, 0x69, 0x47, 0x54, 0xdf, 0x8f, 0x7c, 0xd5, 0xc4, + 0x2e, 0xa9, 0x49, 0xd1, 0x67, 0x70, 0x4d, 0xf7, 0x2c, 0xb1, 0x6e, 0xab, 0xc0, 0xcb, 0xd4, 0x92, + 0x3d, 0x73, 0x6c, 0xf3, 0x0e, 0x17, 0x6b, 0xfc, 0x97, 0x09, 0xd7, 0x16, 0xf2, 0x77, 0x29, 0x65, + 0xe3, 0xd5, 0xac, 0x1b, 0xc0, 0x25, 0x9e, 0x19, 0xd7, 0xd2, 0xae, 0x8b, 0x1b, 0x79, 0xd6, 0x5e, + 0xc2, 0xb3, 0x7d, 0xb0, 0xc6, 0x8c, 0x47, 0x41, 0x9a, 0x06, 0x49, 0x9c, 0xaa, 0x64, 0x77, 0x86, + 0x1f, 0xde, 0x29, 0x4f, 0x89, 0x4b, 0xc3, 0xaf, 0x53, 0xc6, 0x5f, 0x94, 0x46, 0xa4, 0xea, 0x81, + 0x0f, 0xc1, 0x59, 0xa8, 0xe6, 0x90, 0xd1, 0x30, 0x88, 0x59, 0x53, 0xca, 0x66, 0x63, 0xca, 0xf8, + 0x21, 0x5c, 0xad, 0xd3, 0x80, 0xb0, 0x69, 0xf2, 0x13, 0x6b, 0x68, 0x96, 0xd9, 0xd4, 0x2c, 0xfc, + 0x1d, 0x5c, 0x71, 0x69, 0xf8, 0xb8, 0x5e, 0xdf, 0x2a, 0x44, 0x9b, 0x70, 0x6a, 0x35, 0xe3, 0x84, + 0xbf, 0x87, 0x1b, 0x65, 0x82, 0x25, 0x18, 0x07, 0x3f, 0x78, 0xf1, 0x1b, 0x96, 0xa2, 0x7d, 0xe8, + 0xd0, 0xec, 0xd3, 0x31, 0xfb, 0xed, 0x81, 0x35, 0xbc, 0xa9, 0x43, 0xb8, 0xc4, 0x91, 0xcc, 0xbd, + 0xf0, 0x08, 0x76, 0x4a, 0xb3, 0xd4, 0xf5, 0x7d, 0x74, 0x1f, 0xba, 0x9e, 0xef, 0x07, 0x42, 0xf5, + 0x25, 0x0b, 0xea, 0x34, 0x06, 0x75, 0x7d, 0x9f, 0x94, 0xa6, 0xf8, 
0x77, 0x13, 0xb6, 0x35, 0xe5, + 0x4a, 0x0c, 0x6a, 0xfd, 0x6f, 0x5d, 0xb4, 0xff, 0x1a, 0xd5, 0xdb, 0x3a, 0xd5, 0x2f, 0x32, 0xf0, + 0xf0, 0xbd, 0x86, 0x53, 0x71, 0xe0, 0xc5, 0x94, 0x85, 0x72, 0x0b, 0xae, 0x37, 0xbf, 0x58, 0xe3, + 0x19, 0xec, 0x2e, 0x87, 0xf7, 0x9d, 0x56, 0x8e, 0xff, 0x35, 0xd5, 0xec, 0xca, 0x0b, 0xc8, 0x77, + 0x7c, 0x08, 0x96, 0x97, 0x25, 0xf3, 0x8c, 0xcd, 0xe6, 0x7d, 0xeb, 0xe9, 0x51, 0xeb, 0x24, 0x25, + 0x55, 0x97, 0x86, 0x69, 0xdd, 0xba, 0xf0, 0xb4, 0x6e, 0x9f, 0x31, 0xad, 0xef, 0xc2, 0x95, 0x72, + 0x1e, 0x87, 0xb5, 0xde, 0x34, 0xa9, 0xf0, 0xa4, 0x3a, 0xa7, 0x09, 0x8b, 0x92, 0x29, 0x43, 0x3d, + 0x80, 0x1c, 0xcd, 0x20, 0xe7, 0xfd, 0x16, 0xa9, 0x48, 0x90, 0x0b, 0xdb, 0xbc, 0x0a, 0x8e, 0x2a, + 0xc4, 0x1a, 0xde, 0xd0, 0xd1, 0xd0, 0xf0, 0x23, 0xba, 0x07, 0xbe, 0xde, 0xc0, 0x8a, 0x6c, 0x77, + 0xfc, 0x5b, 0x07, 0x2e, 0xb9, 0x34, 0x3c, 0x48, 0x62, 0xc1, 0x62, 0xf1, 0xca, 0x0b, 0x27, 0x0c, + 0xdd, 0x83, 0x8d, 0x6c, 0x2c, 0xa8, 0x6e, 0x2f, 0x6c, 0xa5, 0xcd, 0x97, 0x91, 0x41, 0x72, 0x63, + 0xf4, 0x04, 0xb6, 0x82, 0xca, 0xcc, 0xc9, 0xf3, 0xfc, 0x68, 0x85, 0x73, 0x66, 0x38, 0x32, 0x88, + 0xe6, 0x88, 0x0e, 0xc1, 0xe2, 0xe5, 0x85, 0xa4, 0xda, 0x60, 0x0d, 0xfb, 0x8d, 0x71, 0x2a, 0x17, + 0xd7, 0xc8, 0x20, 0x55, 0x37, 0xf4, 0x54, 0xe2, 0x56, 0xb9, 0x16, 0x54, 0x5f, 0xac, 0x21, 0x5e, + 0x15, 0x27, 0xb3, 0x1c, 0x19, 0x44, 0x77, 0x45, 0x2f, 0xc1, 0x1e, 0xd7, 0x4e, 0x85, 0x7a, 0x4e, + 0x9c, 0x77, 0x42, 0x8d, 0x0c, 0xb2, 0x10, 0x00, 0x1d, 0xc0, 0xb6, 0x57, 0x65, 0x82, 0x7a, 0x7a, + 0x2c, 0x43, 0x3b, 0x33, 0x91, 0x99, 0x69, 0x3e, 0x32, 0x88, 0xce, 0x8e, 0xce, 0x99, 0xec, 0xc8, + 0xca, 0xab, 0x1e, 0xb7, 0xe7, 0xb0, 0xc3, 0xb5, 0x3b, 0x47, 0x3d, 0x54, 0xac, 0xe1, 0xc7, 0xab, + 0xb0, 0xca, 0x4d, 0x47, 0x06, 0xa9, 0x39, 0xa3, 0x6f, 0xe0, 0x3d, 0xaf, 0x81, 0x6b, 0xea, 0x0d, + 0x73, 0x46, 0x03, 0x8a, 0x32, 0x1b, 0x23, 0xa0, 0x57, 0x70, 0xb9, 0x0e, 0x63, 0xea, 0x80, 0x0a, + 0x7b, 0xeb, 0x5c, 0x8d, 0x48, 0x47, 0x06, 0x59, 0x0c, 0x81, 0xbe, 0x28, 0xe6, 0x8d, 0xbc, 0x34, + 0x1c, 0x4b, 0x45, 0xbc, 0xde, 0x18, 0x51, 0x1a, 0x48, 0xaa, 0x55, 0xec, 0x2b, 0x54, 0xcb, 0x66, + 0xad, 0xb3, 0x75, 0x76, 0xa5, 0x99, 0x65, 0x85, 0x6a, 0x99, 0xe0, 0x51, 0x07, 0xd6, 0xa7, 0xf2, + 0x14, 0xe2, 0xc7, 0xea, 0x19, 0x7d, 0x28, 0x6f, 0x80, 0x07, 0x00, 0x5e, 0x71, 0x46, 0xf3, 0x69, + 0xb8, 0xab, 0x07, 0xaf, 0x1e, 0x60, 0x52, 0xb1, 0xbe, 0xfd, 0x1a, 0xd0, 0xe2, 0x08, 0x46, 0x9b, + 0xb0, 0xf6, 0x65, 0x12, 0x33, 0xdb, 0x40, 0x5d, 0x58, 0x57, 0x6f, 0x49, 0xdb, 0x94, 0x9f, 0xae, + 0x1f, 0x05, 0xb1, 0xdd, 0x42, 0x00, 0x1b, 0xaf, 0x79, 0x20, 0x18, 0xb7, 0xdb, 0xf2, 0x5b, 0xf2, + 0x87, 0x71, 0x7b, 0x4d, 0x9a, 0x3c, 0x91, 0xc9, 0xda, 0xeb, 0x8f, 0xf6, 0xff, 0x3c, 0xe9, 0x99, + 0x6f, 0x4f, 0x7a, 0xe6, 0x3f, 0x27, 0x3d, 0xf3, 0xd7, 0xd3, 0x9e, 0xf1, 0xf6, 0xb4, 0x67, 0xfc, + 0x7d, 0xda, 0x33, 0xbe, 0xbd, 0x79, 0xae, 0x7f, 0x25, 0x8e, 0x37, 0xd4, 0x9f, 0x4f, 0xff, 0x0f, + 0x00, 0x00, 0xff, 0xff, 0xb7, 0x65, 0xd3, 0x8c, 0x7a, 0x0c, 0x00, 0x00, } func (m *AclRoot) Marshal() (dAtA []byte, err error) { diff --git a/commonspace/object/acl/aclrecordproto/protos/aclrecord.proto b/commonspace/object/acl/aclrecordproto/protos/aclrecord.proto index a9614f15..721b9c81 100644 --- a/commonspace/object/acl/aclrecordproto/protos/aclrecord.proto +++ b/commonspace/object/acl/aclrecordproto/protos/aclrecord.proto @@ -133,4 +133,5 @@ enum AclUserPermissions { Admin = 2; Writer = 3; Reader = 4; + Guest = 5; } diff --git a/commonspace/object/acl/list/aclstate.go b/commonspace/object/acl/list/aclstate.go index 86f86416..2697c781 100644 --- 
a/commonspace/object/acl/list/aclstate.go +++ b/commonspace/object/acl/list/aclstate.go @@ -131,6 +131,20 @@ func (st *AclState) CurrentReadKeyId() string { return st.readKeyChanges[len(st.readKeyChanges)-1] } +func (st *AclState) ReadKeyForAclId(id string) (string, error) { + recIdx, ok := st.list.indexes[id] + if !ok { + return "", ErrNoSuchRecord + } + for i := len(st.readKeyChanges) - 1; i >= 0; i-- { + recId := st.readKeyChanges[i] + if recIdx >= st.list.indexes[recId] { + return recId, nil + } + } + return "", ErrNoSuchRecord +} + func (st *AclState) AccountKey() crypto.PrivKey { return st.key } @@ -151,6 +165,13 @@ func (st *AclState) CurrentMetadataKey() (crypto.PubKey, error) { return curKeys.MetadataPubKey, nil } +func (st *AclState) FirstMetadataKey() (crypto.PrivKey, error) { + if firstKey, ok := st.keys[st.id]; ok && firstKey.MetadataPrivKey != nil { + return firstKey.MetadataPrivKey, nil + } + return nil, ErrNoMetadataKey +} + func (st *AclState) Keys() map[string]AclKeys { return st.keys } @@ -196,6 +217,10 @@ func (st *AclState) Invites() []crypto.PubKey { return invites } +func (st *AclState) Key() crypto.PrivKey { + return st.key +} + func (st *AclState) InviteIds() []string { var invites []string for invId := range st.inviteKeys { @@ -664,6 +689,9 @@ func (st *AclState) applyRequestRemove(ch *aclrecordproto.AclAccountRequestRemov st.pendingRequests[mapKeyFromPubKey(record.Identity)] = record.Id pk := mapKeyFromPubKey(record.Identity) accSt, exists := st.accountStates[pk] + if !accSt.Permissions.CanRequestRemove() { + return ErrInsufficientPermissions + } if !exists { return ErrNoSuchAccount } diff --git a/commonspace/object/acl/list/aclstate_test.go b/commonspace/object/acl/list/aclstate_test.go index cae40dc8..2cc178f8 100644 --- a/commonspace/object/acl/list/aclstate_test.go +++ b/commonspace/object/acl/list/aclstate_test.go @@ -1,8 +1,11 @@ package list import ( + "crypto/rand" "testing" + "github.com/anyproto/any-sync/util/crypto" + "github.com/stretchr/testify/require" ) @@ -68,3 +71,43 @@ func TestAclStateIsEmpty(t *testing.T) { require.True(t, st.IsEmpty()) }) } + +func TestAclState_FirstMetadataKey(t *testing.T) { + t.Run("returns first metadata key successfully", func(t *testing.T) { + privKey, _, err := crypto.GenerateEd25519Key(rand.Reader) + require.NoError(t, err) + pubKey := privKey.GetPublic() + readKey := crypto.NewAES() + state := &AclState{ + id: "recordId", + keys: map[string]AclKeys{ + "recordId": { + ReadKey: readKey, + MetadataPrivKey: privKey, + MetadataPubKey: pubKey, + }, + }, + } + key, err := state.FirstMetadataKey() + require.NoError(t, err) + require.Equal(t, privKey, key) + }) + t.Run("first metadata is nil", func(t *testing.T) { + state := &AclState{ + id: "recordId", + keys: map[string]AclKeys{ + "recordId": { + ReadKey: crypto.NewAES(), + }, + }, + } + key, err := state.FirstMetadataKey() + require.ErrorIs(t, err, ErrNoMetadataKey) + require.Nil(t, key) + }) + t.Run("returns error when no read key changes", func(t *testing.T) { + state := &AclState{} + _, err := state.FirstMetadataKey() + require.ErrorIs(t, err, ErrNoMetadataKey) + }) +} diff --git a/commonspace/object/acl/list/acltestsuite.go b/commonspace/object/acl/list/acltestsuite.go index 95825c59..be70e750 100644 --- a/commonspace/object/acl/list/acltestsuite.go +++ b/commonspace/object/acl/list/acltestsuite.go @@ -332,12 +332,18 @@ func (a *AclTestExecutor) Execute(cmd string) (err error) { getPerm := func(perm string) AclPermissions { var aclPerm aclrecordproto.AclUserPermissions 
switch perm { + case "own": + aclPerm = aclrecordproto.AclUserPermissions_Owner case "adm": aclPerm = aclrecordproto.AclUserPermissions_Admin case "rw": aclPerm = aclrecordproto.AclUserPermissions_Writer + case "none": + aclPerm = aclrecordproto.AclUserPermissions_None case "r": aclPerm = aclrecordproto.AclUserPermissions_Reader + case "g": + aclPerm = aclrecordproto.AclUserPermissions_Guest } return AclPermissions(aclPerm) } diff --git a/commonspace/object/acl/list/acltestsuite_test.go b/commonspace/object/acl/list/acltestsuite_test.go index 77e75943..5375b73b 100644 --- a/commonspace/object/acl/list/acltestsuite_test.go +++ b/commonspace/object/acl/list/acltestsuite_test.go @@ -118,6 +118,16 @@ func TestAclExecutor(t *testing.T) { {"p.batch::revoke:i1;revoke:i2", nil}, {"f.join::i1", ErrNoSuchInvite}, {"f.join::i2", ErrNoSuchInvite}, + // add stream guest user + {"a.add::guest,g,guestm", nil}, + // guest can't request removal + {"guest.request_remove::guest", ErrInsufficientPermissions}, + {"guest.remove::guest", ErrInsufficientPermissions}, + // can't change permission of existing guest user + {"a.changes::guest,rw", ErrInsufficientPermissions}, + {"a.changes::guest,none", ErrInsufficientPermissions}, + // can't change permission of existing user to guest, should be only possible to create it with add + {"a.changes::r,g", ErrInsufficientPermissions}, } for _, cmd := range cmds { err := a.Execute(cmd.cmd) diff --git a/commonspace/object/acl/list/mock_list/mock_list.go b/commonspace/object/acl/list/mock_list/mock_list.go index f5c7785d..91365261 100644 --- a/commonspace/object/acl/list/mock_list/mock_list.go +++ b/commonspace/object/acl/list/mock_list/mock_list.go @@ -5,7 +5,6 @@ // // mockgen -destination mock_list/mock_list.go github.com/anyproto/any-sync/commonspace/object/acl/list AclList,Storage // - // Package mock_list is a generated GoMock package. 
package mock_list diff --git a/commonspace/object/acl/list/models.go b/commonspace/object/acl/list/models.go index 6b3d92a6..acb14740 100644 --- a/commonspace/object/acl/list/models.go +++ b/commonspace/object/acl/list/models.go @@ -69,6 +69,7 @@ type AclPermissions aclrecordproto.AclUserPermissions const ( AclPermissionsNone = AclPermissions(aclrecordproto.AclUserPermissions_None) AclPermissionsReader = AclPermissions(aclrecordproto.AclUserPermissions_Reader) + AclPermissionsGuest = AclPermissions(aclrecordproto.AclUserPermissions_Guest) // like reader, but can't request removal and can't be upgraded to another permission AclPermissionsWriter = AclPermissions(aclrecordproto.AclUserPermissions_Writer) AclPermissionsAdmin = AclPermissions(aclrecordproto.AclUserPermissions_Admin) AclPermissionsOwner = AclPermissions(aclrecordproto.AclUserPermissions_Owner) @@ -105,3 +106,12 @@ func (p AclPermissions) CanManageAccounts() bool { return false } } + +func (p AclPermissions) CanRequestRemove() bool { + switch aclrecordproto.AclUserPermissions(p) { + case aclrecordproto.AclUserPermissions_Guest: + return false + default: + return true + } +} diff --git a/commonspace/object/acl/list/storage.go b/commonspace/object/acl/list/storage.go index 4c96d24c..9168bbe4 100644 --- a/commonspace/object/acl/list/storage.go +++ b/commonspace/object/acl/list/storage.go @@ -61,6 +61,19 @@ type storage struct { } func CreateStorage(ctx context.Context, root *consensusproto.RawRecordWithId, headStorage headstorage.HeadStorage, store anystore.DB) (Storage, error) { + tx, err := store.WriteTx(ctx) + if err != nil { + return nil, err + } + storage, err := CreateStorageTx(tx.Context(), root, headStorage, store) + if err != nil { + tx.Rollback() + return nil, err + } + return storage, tx.Commit() +} + +func CreateStorageTx(ctx context.Context, root *consensusproto.RawRecordWithId, headStorage headstorage.HeadStorage, store anystore.DB) (Storage, error) { st := &storage{ id: root.Id, store: store, @@ -89,24 +102,18 @@ func CreateStorage(ctx context.Context, root *consensusproto.RawRecordWithId, he st.arena = &anyenc.Arena{} defer st.arena.Reset() doc := newStorageRecordValue(rec, st.arena) - tx, err := st.store.WriteTx(ctx) + err = st.recordsColl.Insert(ctx, doc) if err != nil { return nil, err } - err = st.recordsColl.Insert(tx.Context(), doc) - if err != nil { - tx.Rollback() - return nil, err - } - err = st.headStorage.UpdateEntryTx(tx.Context(), headstorage.HeadsUpdate{ + err = st.headStorage.UpdateEntryTx(ctx, headstorage.HeadsUpdate{ Id: root.Id, Heads: []string{root.Id}, }) if err != nil { - tx.Rollback() return nil, err } - return st, tx.Commit() + return st, nil } func NewStorage(ctx context.Context, id string, headStorage headstorage.HeadStorage, store anystore.DB) (Storage, error) { @@ -199,6 +206,13 @@ func (s *storage) AddAll(ctx context.Context, records []StorageRecord) error { if err != nil { return fmt.Errorf("failed to create write tx: %w", err) } + defer func() { + if err != nil { + _ = tx.Rollback() + } else { + err = tx.Commit() + } + }() vals := make([]*anyenc.Value, 0, len(records)) for _, ch := range records { newVal := newStorageRecordValue(ch, arena) @@ -206,20 +220,14 @@ func (s *storage) AddAll(ctx context.Context, records []StorageRecord) error { } err = s.recordsColl.Insert(tx.Context(), vals...) 
if err != nil { - tx.Rollback() - return nil + return err } head := records[len(records)-1].Id update := headstorage.HeadsUpdate{ Id: s.id, Heads: []string{head}, } - err = s.headStorage.UpdateEntryTx(tx.Context(), update) - if err != nil { - tx.Rollback() - return err - } - return tx.Commit() + return s.headStorage.UpdateEntryTx(tx.Context(), update) } func (s *storage) Id() string { diff --git a/commonspace/object/acl/list/validator.go b/commonspace/object/acl/list/validator.go index ddd4ef50..4410200e 100644 --- a/commonspace/object/acl/list/validator.go +++ b/commonspace/object/acl/list/validator.go @@ -117,10 +117,34 @@ func (c *contentValidator) ValidatePermissionChange(ch *aclrecordproto.AclAccoun if err != nil { return err } - _, exists := c.aclState.accountStates[mapKeyFromPubKey(chIdentity)] + currentState, exists := c.aclState.accountStates[mapKeyFromPubKey(chIdentity)] if !exists { return ErrNoSuchAccount } + + if currentState.Permissions == AclPermissionsGuest { + // it shouldn't be possible to change permission of guest user + // it should be only possible to remove it with AccountRemove acl change + return ErrInsufficientPermissions + } + + if currentState.Permissions == AclPermissionsOwner { + // it shouldn't be possible to change permission of owner + return ErrInsufficientPermissions + } + + if ch.Permissions == aclrecordproto.AclUserPermissions_Owner { + // not supported + // if we are going to support owner transfer, it should be done with a separate acl change so we can't have more than 1 owner at a time + return ErrInsufficientPermissions + } + + if ch.Permissions == aclrecordproto.AclUserPermissions_Guest && currentState.Permissions != AclPermissionsReader { + // generally, it should be only possible to create guest user with AccountsAdd acl change + // but in order to migrate the current guest users we allow to change permissions to guest from reader + return ErrInsufficientPermissions + } + return } diff --git a/commonspace/object/acl/syncacl/headupdate.go b/commonspace/object/acl/syncacl/headupdate.go index a7bf2ed2..0cd189a5 100644 --- a/commonspace/object/acl/syncacl/headupdate.go +++ b/commonspace/object/acl/syncacl/headupdate.go @@ -1,6 +1,7 @@ package syncacl import ( + "github.com/anyproto/any-sync/commonspace/spacesyncproto" "github.com/anyproto/any-sync/commonspace/sync/objectsync/objectmessages" "github.com/anyproto/any-sync/consensus/consensusproto" ) @@ -24,6 +25,10 @@ func (h *InnerHeadUpdate) MsgSize() uint64 { return size + uint64(len(h.head)) + uint64(len(h.root.Id)) + uint64(len(h.root.Payload)) } +func (h *InnerHeadUpdate) ObjectType() spacesyncproto.ObjectType { + return spacesyncproto.ObjectType_Acl +} + func (h *InnerHeadUpdate) Prepare() error { logMsg := consensusproto.WrapHeadUpdate(&consensusproto.LogHeadUpdate{ Head: h.head, diff --git a/commonspace/object/acl/syncacl/mock_syncacl/mock_syncacl.go b/commonspace/object/acl/syncacl/mock_syncacl/mock_syncacl.go index 9f7a5f24..3a301180 100644 --- a/commonspace/object/acl/syncacl/mock_syncacl/mock_syncacl.go +++ b/commonspace/object/acl/syncacl/mock_syncacl/mock_syncacl.go @@ -5,7 +5,6 @@ // // mockgen -destination mock_syncacl/mock_syncacl.go github.com/anyproto/any-sync/commonspace/object/acl/syncacl SyncClient,SyncAcl // - // Package mock_syncacl is a generated GoMock package. 
package mock_syncacl diff --git a/commonspace/object/keyvalue/keyvalue.go b/commonspace/object/keyvalue/keyvalue.go new file mode 100644 index 00000000..eab795de --- /dev/null +++ b/commonspace/object/keyvalue/keyvalue.go @@ -0,0 +1,246 @@ +package keyvalue + +import ( + "context" + "errors" + + "github.com/anyproto/protobuf/proto" + "go.uber.org/zap" + "storj.io/drpc" + + "github.com/anyproto/any-sync/accountservice" + "github.com/anyproto/any-sync/app" + "github.com/anyproto/any-sync/app/logger" + "github.com/anyproto/any-sync/commonspace/object/acl/list" + "github.com/anyproto/any-sync/commonspace/object/acl/syncacl" + "github.com/anyproto/any-sync/commonspace/object/keyvalue/keyvaluestorage" + "github.com/anyproto/any-sync/commonspace/object/keyvalue/keyvaluestorage/syncstorage" + "github.com/anyproto/any-sync/commonspace/object/keyvalue/kvinterfaces" + "github.com/anyproto/any-sync/commonspace/spacestate" + "github.com/anyproto/any-sync/commonspace/spacestorage" + "github.com/anyproto/any-sync/commonspace/spacesyncproto" + "github.com/anyproto/any-sync/commonspace/sync" + "github.com/anyproto/any-sync/commonspace/sync/objectsync/objectmessages" + "github.com/anyproto/any-sync/net/peer" + "github.com/anyproto/any-sync/net/rpc/rpcerr" + "github.com/anyproto/any-sync/util/cidutil" +) + +var ErrUnexpectedMessageType = errors.New("unexpected message type") + +var log = logger.NewNamed(kvinterfaces.CName) + +type keyValueService struct { + storageId string + spaceId string + ctx context.Context + cancel context.CancelFunc + + limiter *concurrentLimiter + defaultStore keyvaluestorage.Storage + clientFactory spacesyncproto.ClientFactory +} + +func New() kvinterfaces.KeyValueService { + return &keyValueService{} +} + +func (k *keyValueService) DefaultStore() keyvaluestorage.Storage { + return k.defaultStore +} + +func (k *keyValueService) SyncWithPeer(p peer.Peer) (err error) { + k.limiter.ScheduleRequest(k.ctx, p.Id(), func() { + err = k.syncWithPeer(k.ctx, p) + if err != nil { + log.Error("failed to sync with peer", zap.String("peerId", p.Id()), zap.Error(err)) + } + }) + return nil +} + +func (k *keyValueService) syncWithPeer(ctx context.Context, p peer.Peer) (err error) { + conn, err := p.AcquireDrpcConn(ctx) + if err != nil { + return + } + defer p.ReleaseDrpcConn(conn) + var ( + client = k.clientFactory.Client(conn) + rdiff = NewRemoteDiff(k.spaceId, client) + diff = k.defaultStore.InnerStorage().Diff() + ) + newIds, changedIds, theirChangedIds, removedIds, err := diff.CompareDiff(ctx, rdiff) + err = rpcerr.Unwrap(err) + if err != nil { + return err + } + innerStorage := k.defaultStore.InnerStorage() + stream, err := client.StoreElements(ctx) + if err != nil { + return err + } + defer stream.CloseSend() + err = stream.Send(&spacesyncproto.StoreKeyValue{SpaceId: k.spaceId}) + if err != nil { + return err + } + for _, id := range append(removedIds, changedIds...) { + kv, err := innerStorage.GetKeyPeerId(ctx, id) + if err != nil { + return err + } + err = stream.Send(kv.Proto()) + if err != nil { + return err + } + } + for _, id := range append(theirChangedIds, newIds...) 
{ + kv := &spacesyncproto.StoreKeyValue{ + KeyPeerId: id, + } + err := stream.Send(kv) + if err != nil { + return err + } + } + err = stream.Send(&spacesyncproto.StoreKeyValue{}) + if err != nil { + return err + } + var messages []*spacesyncproto.StoreKeyValue + for { + msg, err := stream.Recv() + if err != nil { + return err + } + if msg.KeyPeerId == "" { + break + } + messages = append(messages, msg) + } + return k.defaultStore.SetRaw(ctx, messages...) +} + +func (k *keyValueService) HandleStoreDiffRequest(ctx context.Context, req *spacesyncproto.StoreDiffRequest) (resp *spacesyncproto.StoreDiffResponse, err error) { + return HandleRangeRequest(ctx, k.defaultStore.InnerStorage().Diff(), req) +} + +func (k *keyValueService) HandleStoreElementsRequest(ctx context.Context, stream spacesyncproto.DRPCSpaceSync_StoreElementsStream) (err error) { + var ( + messagesToSave []*spacesyncproto.StoreKeyValue + messagesToSend []string + ) + for { + msg, err := stream.Recv() + if err != nil { + return err + } + if msg.KeyPeerId == "" { + break + } + if msg.Value != nil { + messagesToSave = append(messagesToSave, msg) + } else { + messagesToSend = append(messagesToSend, msg.KeyPeerId) + } + } + innerStorage := k.defaultStore.InnerStorage() + isError := false + for _, msg := range messagesToSend { + kv, err := innerStorage.GetKeyPeerId(ctx, msg) + if err != nil { + log.Warn("failed to get key value", zap.String("key", msg), zap.Error(err)) + continue + } + err = stream.Send(kv.Proto()) + if err != nil { + log.Warn("failed to send key value", zap.String("key", msg), zap.Error(err)) + isError = true + break + } + } + if !isError { + err = stream.Send(&spacesyncproto.StoreKeyValue{}) + if err != nil { + return err + } + } + return k.defaultStore.SetRaw(ctx, messagesToSave...) +} + +func (k *keyValueService) HandleMessage(ctx context.Context, headUpdate drpc.Message) (err error) { + update, ok := headUpdate.(*objectmessages.HeadUpdate) + if !ok { + return ErrUnexpectedMessageType + } + keyValueMsg := &spacesyncproto.StoreKeyValues{} + err = proto.Unmarshal(update.Bytes, keyValueMsg) + if err != nil { + objectmessages.FreeHeadUpdate(update) + return err + } + objectmessages.FreeHeadUpdate(update) + return k.defaultStore.SetRaw(ctx, keyValueMsg.KeyValues...) 
+} + +func (k *keyValueService) Init(a *app.App) (err error) { + k.ctx, k.cancel = context.WithCancel(context.Background()) + spaceState := a.MustComponent(spacestate.CName).(*spacestate.SpaceState) + k.spaceId = spaceState.SpaceId + k.clientFactory = spacesyncproto.ClientFactoryFunc(spacesyncproto.NewDRPCSpaceSyncClient) + k.limiter = newConcurrentLimiter() + accountService := a.MustComponent(accountservice.CName).(accountservice.Service) + aclList := a.MustComponent(syncacl.CName).(list.AclList) + spaceStorage := a.MustComponent(spacestorage.CName).(spacestorage.SpaceStorage) + syncService := a.MustComponent(sync.CName).(sync.SyncService) + k.storageId, err = storageIdFromSpace(k.spaceId) + if err != nil { + return err + } + indexer := a.Component(keyvaluestorage.IndexerCName).(keyvaluestorage.Indexer) + if indexer == nil { + indexer = keyvaluestorage.NoOpIndexer{} + } + syncClient := syncstorage.New(spaceState.SpaceId, syncService) + k.defaultStore, err = keyvaluestorage.New( + k.ctx, + k.storageId, + spaceStorage.AnyStore(), + spaceStorage.HeadStorage(), + accountService.Account(), + syncClient, + aclList, + indexer) + return +} + +func (k *keyValueService) Name() (name string) { + return kvinterfaces.CName +} + +func (k *keyValueService) Run(ctx context.Context) (err error) { + return k.defaultStore.Prepare() +} + +func (k *keyValueService) Close(ctx context.Context) (err error) { + k.cancel() + k.limiter.Close() + return nil +} + +func storageIdFromSpace(spaceId string) (storageId string, err error) { + header := &spacesyncproto.StorageHeader{ + SpaceId: spaceId, + StorageName: "default", + } + data, err := proto.Marshal(header) + if err != nil { + return "", err + } + cid, err := cidutil.NewCidFromBytes(data) + if err != nil { + return "", err + } + return cid, nil +} diff --git a/commonspace/object/keyvalue/keyvalue_test.go b/commonspace/object/keyvalue/keyvalue_test.go new file mode 100644 index 00000000..301d9d0d --- /dev/null +++ b/commonspace/object/keyvalue/keyvalue_test.go @@ -0,0 +1,371 @@ +package keyvalue + +import ( + "bytes" + "context" + "fmt" + "math/rand" + "path/filepath" + "sort" + "strconv" + "strings" + "testing" + "time" + + anystore "github.com/anyproto/any-store" + "github.com/stretchr/testify/require" + + "github.com/anyproto/any-sync/commonspace/object/accountdata" + "github.com/anyproto/any-sync/commonspace/object/acl/list" + "github.com/anyproto/any-sync/commonspace/object/keyvalue/keyvaluestorage" + "github.com/anyproto/any-sync/commonspace/object/keyvalue/keyvaluestorage/innerstorage" + "github.com/anyproto/any-sync/commonspace/spacepayloads" + "github.com/anyproto/any-sync/commonspace/spacestorage" + "github.com/anyproto/any-sync/commonspace/spacesyncproto" + "github.com/anyproto/any-sync/net/peer" + "github.com/anyproto/any-sync/net/rpc/rpctest" + "github.com/anyproto/any-sync/util/crypto" +) + +func TestKeyValueService(t *testing.T) { + t.Run("different keys", func(t *testing.T) { + fxClient, fxServer, serverPeer := prepareFixtures(t) + fxClient.add(t, "key1", []byte("value1")) + fxClient.add(t, "key2", []byte("value2")) + fxServer.add(t, "key3", []byte("value3")) + fxServer.add(t, "key4", []byte("value4")) + err := fxClient.SyncWithPeer(serverPeer) + require.NoError(t, err) + fxClient.limiter.Close() + fxClient.check(t, "key3", []byte("value3")) + fxClient.check(t, "key4", []byte("value4")) + fxServer.check(t, "key1", []byte("value1")) + fxServer.check(t, "key2", []byte("value2")) + }) + + t.Run("change same keys, different values", func(t *testing.T) 
{ + fxClient, fxServer, serverPeer := prepareFixtures(t) + fxClient.add(t, "key1", []byte("value1")) + fxServer.add(t, "key1", []byte("value2")) + err := fxClient.SyncWithPeer(serverPeer) + require.NoError(t, err) + fxClient.limiter.Close() + fxClient.check(t, "key1", []byte("value1")) + fxClient.check(t, "key1", []byte("value2")) + fxServer.check(t, "key1", []byte("value1")) + fxServer.check(t, "key1", []byte("value2")) + fxClient.add(t, "key1", []byte("value1-2")) + fxServer.add(t, "key1", []byte("value2-2")) + err = fxClient.SyncWithPeer(serverPeer) + require.NoError(t, err) + fxClient.limiter.Close() + fxClient.check(t, "key1", []byte("value1-2")) + fxClient.check(t, "key1", []byte("value2-2")) + fxServer.check(t, "key1", []byte("value1-2")) + fxServer.check(t, "key1", []byte("value2-2")) + }) + + t.Run("random keys and values", func(t *testing.T) { + rand.Seed(time.Now().UnixNano()) + diffEntries := 100 + ovelappingEntries := 10 + fxClient, fxServer, serverPeer := prepareFixtures(t) + numClientEntries := 5 + rand.Intn(diffEntries) + numServerEntries := 5 + rand.Intn(diffEntries) + allKeys := make(map[string]bool) + for i := 0; i < numClientEntries; i++ { + key := fmt.Sprintf("client-key-%d", i) + value := []byte(fmt.Sprintf("client-value-%d", i)) + fxClient.add(t, key, value) + allKeys[key] = true + } + for i := 0; i < numServerEntries; i++ { + key := fmt.Sprintf("server-key-%d", i) + value := []byte(fmt.Sprintf("server-value-%d", i)) + fxServer.add(t, key, value) + allKeys[key] = true + } + numOverlappingKeys := 3 + rand.Intn(ovelappingEntries) + for i := 0; i < numOverlappingKeys; i++ { + key := fmt.Sprintf("overlap-key-%d", i) + clientValue := []byte(fmt.Sprintf("client-overlap-value-%d", i)) + serverValue := []byte(fmt.Sprintf("server-overlap-value-%d", i)) + fxClient.add(t, key, clientValue) + fxServer.add(t, key, serverValue) + allKeys[key] = true + } + err := fxClient.SyncWithPeer(serverPeer) + require.NoError(t, err) + fxClient.limiter.Close() + + for key := range allKeys { + if strings.HasPrefix(key, "client-key-") { + i, _ := strconv.Atoi(strings.TrimPrefix(key, "client-key-")) + value := []byte(fmt.Sprintf("client-value-%d", i)) + fxClient.check(t, key, value) + fxServer.check(t, key, value) + } + if strings.HasPrefix(key, "server-key-") { + i, _ := strconv.Atoi(strings.TrimPrefix(key, "server-key-")) + value := []byte(fmt.Sprintf("server-value-%d", i)) + fxClient.check(t, key, value) + fxServer.check(t, key, value) + } + } + for i := 0; i < numOverlappingKeys; i++ { + key := fmt.Sprintf("overlap-key-%d", i) + clientValue := []byte(fmt.Sprintf("client-overlap-value-%d", i)) + serverValue := []byte(fmt.Sprintf("server-overlap-value-%d", i)) + + fxClient.check(t, key, clientValue) + fxClient.check(t, key, serverValue) + fxServer.check(t, key, clientValue) + fxServer.check(t, key, serverValue) + } + foundClientKeys := make(map[string]bool) + foundServerKeys := make(map[string]bool) + err = fxClient.defaultStore.Iterate(context.Background(), func(decryptor keyvaluestorage.Decryptor, key string, values []innerstorage.KeyValue) (bool, error) { + foundClientKeys[key] = true + return true, nil + }) + require.NoError(t, err) + err = fxServer.defaultStore.Iterate(context.Background(), func(decryptor keyvaluestorage.Decryptor, key string, values []innerstorage.KeyValue) (bool, error) { + foundServerKeys[key] = true + return true, nil + }) + require.NoError(t, err) + require.True(t, mapEqual(allKeys, foundServerKeys), "expected all client keys to be found") + require.True(t, 
mapEqual(foundClientKeys, foundServerKeys), "expected all client keys to be found") + }) +} + +func TestKeyValueServiceIterate(t *testing.T) { + t.Run("empty storage", func(t *testing.T) { + fxClient, _, _ := prepareFixtures(t) + var keys []string + err := fxClient.defaultStore.Iterate(context.Background(), func(decryptor keyvaluestorage.Decryptor, key string, values []innerstorage.KeyValue) (bool, error) { + keys = append(keys, key) + return true, nil + }) + require.NoError(t, err) + require.Empty(t, keys, "expected no keys in empty storage") + }) + + t.Run("single key later value", func(t *testing.T) { + fxClient, _, _ := prepareFixtures(t) + err := fxClient.defaultStore.Set(context.Background(), "test-key", []byte("value1")) + require.NoError(t, err) + err = fxClient.defaultStore.Set(context.Background(), "test-key", []byte("value2")) + require.NoError(t, err) + var keys []string + valueCount := 0 + err = fxClient.defaultStore.Iterate(context.Background(), func(decryptor keyvaluestorage.Decryptor, key string, values []innerstorage.KeyValue) (bool, error) { + keys = append(keys, key) + valueCount = len(values) + + for _, kv := range values { + val, err := decryptor(kv) + require.NoError(t, err) + require.Equal(t, "value2", string(val)) + } + return true, nil + }) + require.NoError(t, err) + require.Equal(t, 1, len(keys), "expected one key") + require.Equal(t, "test-key", keys[0], "expected key to be 'test-key'") + require.Equal(t, 1, valueCount, "expected one value for key") + }) + + t.Run("multiple keys", func(t *testing.T) { + fxClient, _, _ := prepareFixtures(t) + testKeys := []string{"key1", "key2", "key3"} + for _, key := range testKeys { + err := fxClient.defaultStore.Set(context.Background(), key, []byte("value-"+key)) + require.NoError(t, err) + } + var foundKeys []string + err := fxClient.defaultStore.Iterate(context.Background(), func(decryptor keyvaluestorage.Decryptor, key string, values []innerstorage.KeyValue) (bool, error) { + foundKeys = append(foundKeys, key) + require.Equal(t, 1, len(values), "Expected one value for key: "+key) + val, err := decryptor(values[0]) + require.NoError(t, err) + require.Equal(t, "value-"+key, string(val), "Value doesn't match for key: "+key) + + return true, nil + }) + require.NoError(t, err) + sort.Strings(foundKeys) + sort.Strings(testKeys) + require.Equal(t, testKeys, foundKeys, "Expected all keys to be found") + }) + + t.Run("early termination", func(t *testing.T) { + fxClient, _, _ := prepareFixtures(t) + testKeys := []string{"key1", "key2", "key3", "key4", "key5"} + for _, key := range testKeys { + err := fxClient.defaultStore.Set(context.Background(), key, []byte("value-"+key)) + require.NoError(t, err) + } + + var foundKeys []string + err := fxClient.defaultStore.Iterate(context.Background(), func(decryptor keyvaluestorage.Decryptor, key string, values []innerstorage.KeyValue) (bool, error) { + foundKeys = append(foundKeys, key) + return len(foundKeys) < 2, nil + }) + require.NoError(t, err) + require.Equal(t, 2, len(foundKeys), "expected to find exactly 2 keys before stopping") + }) + + t.Run("error during iteration", func(t *testing.T) { + fxClient, _, _ := prepareFixtures(t) + + err := fxClient.defaultStore.Set(context.Background(), "test-key", []byte("test-value")) + require.NoError(t, err) + + expectedErr := context.Canceled + err = fxClient.defaultStore.Iterate(context.Background(), func(decryptor keyvaluestorage.Decryptor, key string, values []innerstorage.KeyValue) (bool, error) { + return false, expectedErr + }) + 
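		// the error returned by the iteration callback should be propagated by Iterate unchanged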
require.Equal(t, expectedErr, err, "expected error to be propagated") + }) +} + +func prepareFixtures(t *testing.T) (fxClient *fixture, fxServer *fixture, serverPeer peer.Peer) { + firstKeys, err := accountdata.NewRandom() + require.NoError(t, err) + secondKeys, err := accountdata.NewRandom() + require.NoError(t, err) + secondKeys.SignKey = firstKeys.SignKey + payload := newStorageCreatePayload(t, firstKeys) + fxClient = newFixture(t, firstKeys, payload) + fxServer = newFixture(t, secondKeys, payload) + serverConn, clientConn := rpctest.MultiConnPair(firstKeys.PeerId, secondKeys.PeerId) + serverPeer, err = peer.NewPeer(serverConn, fxClient.server) + require.NoError(t, err) + _, err = peer.NewPeer(clientConn, fxServer.server) + require.NoError(t, err) + return +} + +func mapEqual[K comparable, V comparable](map1, map2 map[K]V) bool { + if len(map1) != len(map2) { + return false + } + for key, val1 := range map1 { + if val2, ok := map2[key]; !ok || val1 != val2 { + return false + } + } + return true +} + +var ctx = context.Background() + +type noOpSyncClient struct{} + +func (n noOpSyncClient) Broadcast(ctx context.Context, objectId string, keyValues ...innerstorage.KeyValue) error { + return nil +} + +type fixture struct { + *keyValueService + server *rpctest.TestServer +} + +func newFixture(t *testing.T, keys *accountdata.AccountKeys, spacePayload spacestorage.SpaceStorageCreatePayload) *fixture { + storePath := filepath.Join(t.TempDir(), "store.db") + anyStore, err := anystore.Open(ctx, storePath, nil) + require.NoError(t, err) + storage, err := spacestorage.Create(ctx, anyStore, spacePayload) + require.NoError(t, err) + aclStorage, err := storage.AclStorage() + require.NoError(t, err) + aclList, err := list.BuildAclListWithIdentity(keys, aclStorage, list.NoOpAcceptorVerifier{}) + require.NoError(t, err) + storageId := "kv.storage" + rpcHandler := rpctest.NewTestServer() + defaultStorage, err := keyvaluestorage.New(ctx, + storageId, + anyStore, + storage.HeadStorage(), + keys, + noOpSyncClient{}, + aclList, + keyvaluestorage.NoOpIndexer{}) + require.NoError(t, err) + ctx, cancel := context.WithCancel(ctx) + service := &keyValueService{ + spaceId: storage.Id(), + storageId: storageId, + limiter: newConcurrentLimiter(), + ctx: ctx, + cancel: cancel, + clientFactory: spacesyncproto.ClientFactoryFunc(spacesyncproto.NewDRPCSpaceSyncClient), + defaultStore: defaultStorage, + } + require.NoError(t, spacesyncproto.DRPCRegisterSpaceSync(rpcHandler, &testServer{service: service, t: t})) + return &fixture{ + keyValueService: service, + server: rpcHandler, + } +} + +func (fx *fixture) add(t *testing.T, key string, value []byte) { + err := fx.defaultStore.Set(ctx, key, value) + require.NoError(t, err) +} + +func (fx *fixture) check(t *testing.T, key string, value []byte) (isFound bool) { + err := fx.defaultStore.GetAll(ctx, key, func(decryptor keyvaluestorage.Decryptor, values []innerstorage.KeyValue) error { + for _, v := range values { + decryptedValue, err := decryptor(v) + require.NoError(t, err) + if bytes.Equal(value, decryptedValue) { + isFound = true + break + } + } + return nil + }) + require.NoError(t, err) + return +} + +func newStorageCreatePayload(t *testing.T, keys *accountdata.AccountKeys) spacestorage.SpaceStorageCreatePayload { + masterKey, _, err := crypto.GenerateRandomEd25519KeyPair() + require.NoError(t, err) + metaKey, _, err := crypto.GenerateRandomEd25519KeyPair() + require.NoError(t, err) + readKey := crypto.NewAES() + meta := []byte("account") + payload := 
spacepayloads.SpaceCreatePayload{ + SigningKey: keys.SignKey, + SpaceType: "space", + ReplicationKey: 10, + SpacePayload: nil, + MasterKey: masterKey, + ReadKey: readKey, + MetadataKey: metaKey, + Metadata: meta, + } + createSpace, err := spacepayloads.StoragePayloadForSpaceCreate(payload) + require.NoError(t, err) + return createSpace +} + +type testServer struct { + spacesyncproto.DRPCSpaceSyncUnimplementedServer + service *keyValueService + t *testing.T +} + +func (t *testServer) StoreDiff(ctx context.Context, req *spacesyncproto.StoreDiffRequest) (*spacesyncproto.StoreDiffResponse, error) { + return t.service.HandleStoreDiffRequest(ctx, req) +} + +func (t *testServer) StoreElements(stream spacesyncproto.DRPCSpaceSync_StoreElementsStream) error { + msg, err := stream.Recv() + require.NoError(t.t, err) + require.NotEmpty(t.t, msg.SpaceId) + return t.service.HandleStoreElementsRequest(ctx, stream) +} diff --git a/commonspace/object/keyvalue/keyvaluestorage/innerstorage/element.go b/commonspace/object/keyvalue/keyvaluestorage/innerstorage/element.go new file mode 100644 index 00000000..e4e48000 --- /dev/null +++ b/commonspace/object/keyvalue/keyvaluestorage/innerstorage/element.go @@ -0,0 +1,92 @@ +package innerstorage + +import ( + "errors" + + "github.com/anyproto/any-store/anyenc" + + "github.com/anyproto/any-sync/commonspace/spacesyncproto" + "github.com/anyproto/any-sync/util/crypto" +) + +var ErrInvalidSignature = errors.New("invalid signature") + +type KeyValue struct { + KeyPeerId string + ReadKeyId string + Key string + Value Value + TimestampMilli int + Identity string + PeerId string + AclId string +} + +type Value struct { + Value []byte + PeerSignature []byte + IdentitySignature []byte +} + +func KeyValueFromProto(proto *spacesyncproto.StoreKeyValue, verify bool) (kv KeyValue, err error) { + kv.KeyPeerId = proto.KeyPeerId + kv.Value.Value = proto.Value + kv.Value.PeerSignature = proto.PeerSignature + kv.Value.IdentitySignature = proto.IdentitySignature + innerValue := &spacesyncproto.StoreKeyInner{} + if err = innerValue.Unmarshal(proto.Value); err != nil { + return kv, err + } + kv.TimestampMilli = int(innerValue.TimestampMicro) + identity, err := crypto.UnmarshalEd25519PublicKeyProto(innerValue.Identity) + if err != nil { + return kv, err + } + peerId, err := crypto.UnmarshalEd25519PublicKeyProto(innerValue.Peer) + if err != nil { + return kv, err + } + kv.Identity = identity.Account() + kv.PeerId = peerId.PeerId() + kv.Key = innerValue.Key + kv.AclId = innerValue.AclHeadId + // TODO: check that key-peerId is equal to key+peerId? 
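	// When verify is requested, both signatures must cover the raw proto.Value bytes:
	// the identity key and the peer key each sign exactly those bytes (see storage.Set,
	// which produces IdentitySignature and PeerSignature the same way).
	// Regarding the TODO above: storage.Set builds KeyPeerId as key + "-" + peerId, so a
	// stricter parser could additionally verify that relation, e.g. (illustrative sketch only):
	//   if kv.KeyPeerId != kv.Key+"-"+kv.PeerId { return kv, errors.New("keyPeerId mismatch") }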
+ if verify { + if verify, _ = identity.Verify(proto.Value, proto.IdentitySignature); !verify { + return kv, ErrInvalidSignature + } + if verify, _ = peerId.Verify(proto.Value, proto.PeerSignature); !verify { + return kv, ErrInvalidSignature + } + } + return kv, nil +} + +func (v Value) AnyEnc(a *anyenc.Arena) *anyenc.Value { + obj := a.NewObject() + obj.Set("v", a.NewBinary(v.Value)) + obj.Set("p", a.NewBinary(v.PeerSignature)) + obj.Set("i", a.NewBinary(v.IdentitySignature)) + return obj +} + +func (kv KeyValue) AnyEnc(a *anyenc.Arena) *anyenc.Value { + obj := a.NewObject() + obj.Set("id", a.NewString(kv.KeyPeerId)) + obj.Set("k", a.NewString(kv.Key)) + obj.Set("r", a.NewString(kv.ReadKeyId)) + obj.Set("v", kv.Value.AnyEnc(a)) + obj.Set("t", a.NewNumberInt(kv.TimestampMilli)) + obj.Set("i", a.NewString(kv.Identity)) + obj.Set("p", a.NewString(kv.PeerId)) + return obj +} + +func (kv KeyValue) Proto() *spacesyncproto.StoreKeyValue { + return &spacesyncproto.StoreKeyValue{ + KeyPeerId: kv.KeyPeerId, + Value: kv.Value.Value, + PeerSignature: kv.Value.PeerSignature, + IdentitySignature: kv.Value.IdentitySignature, + } +} diff --git a/commonspace/object/keyvalue/keyvaluestorage/innerstorage/keyvaluestorage.go b/commonspace/object/keyvalue/keyvaluestorage/innerstorage/keyvaluestorage.go new file mode 100644 index 00000000..5dcd6990 --- /dev/null +++ b/commonspace/object/keyvalue/keyvaluestorage/innerstorage/keyvaluestorage.go @@ -0,0 +1,249 @@ +package innerstorage + +import ( + "context" + "encoding/binary" + "errors" + "strings" + + anystore "github.com/anyproto/any-store" + "github.com/anyproto/any-store/anyenc" + "github.com/anyproto/any-store/query" + + "github.com/anyproto/any-sync/app/ldiff" + "github.com/anyproto/any-sync/commonspace/headsync/headstorage" +) + +var ( + parserPool = &anyenc.ParserPool{} + arenaPool = &anyenc.ArenaPool{} +) + +type KeyValueStorage interface { + Set(ctx context.Context, keyValues ...KeyValue) (err error) + Diff() ldiff.CompareDiff + GetKeyPeerId(ctx context.Context, keyPeerId string) (keyValue KeyValue, err error) + IterateValues(context.Context, func(kv KeyValue) (bool, error)) (err error) + IteratePrefix(context.Context, string, func(kv KeyValue) error) (err error) +} + +type storage struct { + diff ldiff.CompareDiff + headStorage headstorage.HeadStorage + collection anystore.Collection + store anystore.DB + storageName string +} + +func New(ctx context.Context, storageName string, headStorage headstorage.HeadStorage, store anystore.DB) (kv KeyValueStorage, err error) { + collection, err := store.Collection(ctx, storageName) + if err != nil { + return nil, err + } + tx, err := store.WriteTx(ctx) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + _ = tx.Rollback() + } else { + err = tx.Commit() + } + }() + storage := &storage{ + storageName: storageName, + headStorage: headStorage, + collection: collection, + store: store, + diff: ldiff.New(32, 256).(ldiff.CompareDiff), + } + iter, err := storage.collection.Find(nil).Iter(ctx) + if err != nil { + return + } + defer func() { + _ = iter.Close() + }() + var ( + doc anystore.Doc + elements []ldiff.Element + ) + for iter.Next() { + if doc, err = iter.Doc(); err != nil { + return + } + elements = append(elements, anyEncToElement(doc.Value())) + } + storage.diff.Set(elements...) 
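	// Every stored document has now been loaded into the ldiff container; its combined
	// hash is recorded in headStorage as this storage's single "head", which lets head
	// sync track changes to the whole key-value store much like an object's heads.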
+ hash := storage.diff.Hash() + err = headStorage.UpdateEntryTx(tx.Context(), headstorage.HeadsUpdate{ + Id: storageName, + Heads: []string{hash}, + }) + return storage, err +} + +func (s *storage) Diff() ldiff.CompareDiff { + return s.diff +} + +func (s *storage) GetKeyPeerId(ctx context.Context, keyPeerId string) (value KeyValue, err error) { + doc, err := s.collection.FindId(ctx, keyPeerId) + if err != nil { + return + } + return s.keyValueFromDoc(doc), nil +} + +func (s *storage) IterateValues(ctx context.Context, iterFunc func(kv KeyValue) (bool, error)) (err error) { + iter, err := s.collection.Find(nil).Iter(ctx) + if err != nil { + return + } + defer func() { + _ = iter.Close() + }() + var doc anystore.Doc + for iter.Next() { + if doc, err = iter.Doc(); err != nil { + return + } + continueIteration, err := iterFunc(s.keyValueFromDoc(doc)) + if err != nil { + return err + } + if !continueIteration { + break + } + } + return nil +} + +func (s *storage) IteratePrefix(ctx context.Context, prefix string, iterFunc func(kv KeyValue) error) (err error) { + filter := query.Key{Path: []string{"id"}, Filter: query.NewComp(query.CompOpGte, prefix)} + qry := s.collection.Find(filter).Sort("id") + iter, err := qry.Iter(ctx) + if err != nil { + return + } + defer func() { + _ = iter.Close() + }() + var doc anystore.Doc + for iter.Next() { + if doc, err = iter.Doc(); err != nil { + return + } + if !strings.Contains(doc.Value().GetString("id"), prefix) { + break + } + err := iterFunc(s.keyValueFromDoc(doc)) + if err != nil { + return err + } + } + return nil +} + +func (s *storage) keyValueFromDoc(doc anystore.Doc) KeyValue { + valueObj := doc.Value().GetObject("v") + value := Value{ + Value: valueObj.Get("v").GetBytes(), + PeerSignature: valueObj.Get("p").GetBytes(), + IdentitySignature: valueObj.Get("i").GetBytes(), + } + return KeyValue{ + KeyPeerId: doc.Value().GetString("id"), + ReadKeyId: doc.Value().GetString("r"), + Value: value, + TimestampMilli: doc.Value().GetInt("t"), + Identity: doc.Value().GetString("i"), + PeerId: doc.Value().GetString("p"), + Key: doc.Value().GetString("k"), + } +} + +func (s *storage) init(ctx context.Context) (err error) { + s.diff = ldiff.New(32, 256).(ldiff.CompareDiff) + iter, err := s.collection.Find(nil).Iter(ctx) + if err != nil { + return + } + defer func() { + _ = iter.Close() + }() + var doc anystore.Doc + var elements []ldiff.Element + for iter.Next() { + if doc, err = iter.Doc(); err != nil { + return + } + elements = append(elements, anyEncToElement(doc.Value())) + } + s.diff.Set(elements...) + return +} + +func (s *storage) Set(ctx context.Context, values ...KeyValue) (err error) { + tx, err := s.collection.WriteTx(ctx) + if err != nil { + return + } + defer func() { + if err != nil { + _ = tx.Rollback() + } else { + err = tx.Commit() + } + }() + ctx = tx.Context() + elements, err := s.updateValues(ctx, values...) + if err != nil { + return + } + s.diff.Set(elements...) 
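	// updateValues has already dropped entries that are not newer than what is stored,
	// so the refreshed diff hash can be recorded as the new head under the same write context.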
+ err = s.headStorage.UpdateEntryTx(ctx, headstorage.HeadsUpdate{ + Id: s.storageName, + Heads: []string{s.diff.Hash()}, + }) + return +} + +func (s *storage) updateValues(ctx context.Context, values ...KeyValue) (elements []ldiff.Element, err error) { + parser := parserPool.Get() + defer parserPool.Put(parser) + arena := arenaPool.Get() + defer arenaPool.Put(arena) + + elements = make([]ldiff.Element, 0, len(values)) + var doc anystore.Doc + for _, value := range values { + doc, err = s.collection.FindIdWithParser(ctx, parser, value.KeyPeerId) + isNotFound := errors.Is(err, anystore.ErrDocNotFound) + if err != nil && !isNotFound { + return + } + if !isNotFound { + if doc.Value().GetInt("t") >= value.TimestampMilli { + continue + } + } + arena.Reset() + val := value.AnyEnc(arena) + if err = s.collection.UpsertOne(ctx, val); err != nil { + return + } + elements = append(elements, anyEncToElement(val)) + } + return +} + +func anyEncToElement(val *anyenc.Value) ldiff.Element { + byteRepr := make([]byte, 8) + binary.BigEndian.PutUint64(byteRepr, uint64(val.GetInt("t"))) + return ldiff.Element{ + Id: val.GetString("id"), + Head: string(byteRepr), + } +} diff --git a/commonspace/object/keyvalue/keyvaluestorage/mock_keyvaluestorage/mock_keyvaluestorage.go b/commonspace/object/keyvalue/keyvaluestorage/mock_keyvaluestorage/mock_keyvaluestorage.go new file mode 100644 index 00000000..548d61f1 --- /dev/null +++ b/commonspace/object/keyvalue/keyvaluestorage/mock_keyvaluestorage/mock_keyvaluestorage.go @@ -0,0 +1,144 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/anyproto/any-sync/commonspace/object/keyvalue/keyvaluestorage (interfaces: Storage) +// +// Generated by this command: +// +// mockgen -destination mock_keyvaluestorage/mock_keyvaluestorage.go github.com/anyproto/any-sync/commonspace/object/keyvalue/keyvaluestorage Storage +// +// Package mock_keyvaluestorage is a generated GoMock package. +package mock_keyvaluestorage + +import ( + context "context" + reflect "reflect" + + innerstorage "github.com/anyproto/any-sync/commonspace/object/keyvalue/keyvaluestorage/innerstorage" + spacesyncproto "github.com/anyproto/any-sync/commonspace/spacesyncproto" + gomock "go.uber.org/mock/gomock" +) + +// MockStorage is a mock of Storage interface. +type MockStorage struct { + ctrl *gomock.Controller + recorder *MockStorageMockRecorder +} + +// MockStorageMockRecorder is the mock recorder for MockStorage. +type MockStorageMockRecorder struct { + mock *MockStorage +} + +// NewMockStorage creates a new mock instance. +func NewMockStorage(ctrl *gomock.Controller) *MockStorage { + mock := &MockStorage{ctrl: ctrl} + mock.recorder = &MockStorageMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockStorage) EXPECT() *MockStorageMockRecorder { + return m.recorder +} + +// GetAll mocks base method. +func (m *MockStorage) GetAll(arg0 context.Context, arg1 string, arg2 func(func(innerstorage.KeyValue) ([]byte, error), []innerstorage.KeyValue) error) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAll", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// GetAll indicates an expected call of GetAll. +func (mr *MockStorageMockRecorder) GetAll(arg0, arg1, arg2 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAll", reflect.TypeOf((*MockStorage)(nil).GetAll), arg0, arg1, arg2) +} + +// Id mocks base method. 
+func (m *MockStorage) Id() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Id") + ret0, _ := ret[0].(string) + return ret0 +} + +// Id indicates an expected call of Id. +func (mr *MockStorageMockRecorder) Id() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Id", reflect.TypeOf((*MockStorage)(nil).Id)) +} + +// InnerStorage mocks base method. +func (m *MockStorage) InnerStorage() innerstorage.KeyValueStorage { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "InnerStorage") + ret0, _ := ret[0].(innerstorage.KeyValueStorage) + return ret0 +} + +// InnerStorage indicates an expected call of InnerStorage. +func (mr *MockStorageMockRecorder) InnerStorage() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InnerStorage", reflect.TypeOf((*MockStorage)(nil).InnerStorage)) +} + +// Iterate mocks base method. +func (m *MockStorage) Iterate(arg0 context.Context, arg1 func(func(innerstorage.KeyValue) ([]byte, error), string, []innerstorage.KeyValue) (bool, error)) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Iterate", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// Iterate indicates an expected call of Iterate. +func (mr *MockStorageMockRecorder) Iterate(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Iterate", reflect.TypeOf((*MockStorage)(nil).Iterate), arg0, arg1) +} + +// Prepare mocks base method. +func (m *MockStorage) Prepare() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Prepare") + ret0, _ := ret[0].(error) + return ret0 +} + +// Prepare indicates an expected call of Prepare. +func (mr *MockStorageMockRecorder) Prepare() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Prepare", reflect.TypeOf((*MockStorage)(nil).Prepare)) +} + +// Set mocks base method. +func (m *MockStorage) Set(arg0 context.Context, arg1 string, arg2 []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Set", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// Set indicates an expected call of Set. +func (mr *MockStorageMockRecorder) Set(arg0, arg1, arg2 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Set", reflect.TypeOf((*MockStorage)(nil).Set), arg0, arg1, arg2) +} + +// SetRaw mocks base method. +func (m *MockStorage) SetRaw(arg0 context.Context, arg1 ...*spacesyncproto.StoreKeyValue) error { + m.ctrl.T.Helper() + varargs := []any{arg0} + for _, a := range arg1 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "SetRaw", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetRaw indicates an expected call of SetRaw. +func (mr *MockStorageMockRecorder) SetRaw(arg0 any, arg1 ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{arg0}, arg1...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetRaw", reflect.TypeOf((*MockStorage)(nil).SetRaw), varargs...) 
+} diff --git a/commonspace/object/keyvalue/keyvaluestorage/storage.go b/commonspace/object/keyvalue/keyvaluestorage/storage.go new file mode 100644 index 00000000..9f8caf13 --- /dev/null +++ b/commonspace/object/keyvalue/keyvaluestorage/storage.go @@ -0,0 +1,368 @@ +//go:generate mockgen -destination mock_keyvaluestorage/mock_keyvaluestorage.go github.com/anyproto/any-sync/commonspace/object/keyvalue/keyvaluestorage Storage +package keyvaluestorage + +import ( + "context" + "encoding/binary" + "fmt" + "sync" + "time" + + anystore "github.com/anyproto/any-store" + "github.com/anyproto/protobuf/proto" + "go.uber.org/zap" + + "github.com/anyproto/any-sync/app" + "github.com/anyproto/any-sync/app/logger" + "github.com/anyproto/any-sync/commonspace/headsync/headstorage" + "github.com/anyproto/any-sync/commonspace/object/accountdata" + "github.com/anyproto/any-sync/commonspace/object/acl/list" + "github.com/anyproto/any-sync/commonspace/object/keyvalue/keyvaluestorage/innerstorage" + "github.com/anyproto/any-sync/commonspace/object/keyvalue/keyvaluestorage/syncstorage" + "github.com/anyproto/any-sync/commonspace/spacesyncproto" + "github.com/anyproto/any-sync/util/crypto" + "github.com/anyproto/any-sync/util/slice" +) + +var log = logger.NewNamed("common.keyvalue.keyvaluestorage") + +const IndexerCName = "common.keyvalue.indexer" + +type Indexer interface { + app.Component + Index(decryptor Decryptor, keyValue ...innerstorage.KeyValue) error +} + +type Decryptor = func(kv innerstorage.KeyValue) (value []byte, err error) + +type NoOpIndexer struct{} + +func (n NoOpIndexer) Init(a *app.App) (err error) { + return nil +} + +func (n NoOpIndexer) Name() (name string) { + return IndexerCName +} + +func (n NoOpIndexer) Index(decryptor Decryptor, keyValue ...innerstorage.KeyValue) error { + return nil +} + +type Storage interface { + Id() string + Prepare() error + Set(ctx context.Context, key string, value []byte) error + SetRaw(ctx context.Context, keyValue ...*spacesyncproto.StoreKeyValue) error + GetAll(ctx context.Context, key string, get func(decryptor Decryptor, values []innerstorage.KeyValue) error) error + Iterate(ctx context.Context, f func(decryptor Decryptor, key string, values []innerstorage.KeyValue) (bool, error)) error + InnerStorage() innerstorage.KeyValueStorage +} + +type storage struct { + inner innerstorage.KeyValueStorage + keys *accountdata.AccountKeys + aclList list.AclList + syncClient syncstorage.SyncClient + indexer Indexer + storageId string + byteRepr []byte + readKeys map[string]crypto.SymKey + currentReadKey crypto.SymKey + mx sync.Mutex +} + +func New( + ctx context.Context, + storageId string, + store anystore.DB, + headStorage headstorage.HeadStorage, + keys *accountdata.AccountKeys, + syncClient syncstorage.SyncClient, + aclList list.AclList, + indexer Indexer, +) (Storage, error) { + inner, err := innerstorage.New(ctx, storageId, headStorage, store) + if err != nil { + return nil, err + } + s := &storage{ + inner: inner, + keys: keys, + storageId: storageId, + aclList: aclList, + indexer: indexer, + syncClient: syncClient, + byteRepr: make([]byte, 8), + readKeys: make(map[string]crypto.SymKey), + } + return s, nil +} + +func (s *storage) Prepare() error { + s.aclList.RLock() + defer s.aclList.RUnlock() + return s.readKeysFromAclState(s.aclList.AclState()) +} + +func (s *storage) Id() string { + return s.storageId +} + +func (s *storage) Set(ctx context.Context, key string, value []byte) error { + s.mx.Lock() + defer s.mx.Unlock() + s.aclList.RLock() + headId := 
s.aclList.Head().Id + state := s.aclList.AclState() + if !s.aclList.AclState().Permissions(state.Identity()).CanWrite() { + s.aclList.RUnlock() + return list.ErrInsufficientPermissions + } + readKeyId := state.CurrentReadKeyId() + err := s.readKeysFromAclState(state) + if err != nil { + s.aclList.RUnlock() + return err + } + s.aclList.RUnlock() + value, err = s.currentReadKey.Encrypt(value) + if err != nil { + return err + } + peerIdKey := s.keys.PeerKey + identityKey := s.keys.SignKey + protoPeerKey, err := peerIdKey.GetPublic().Marshall() + if err != nil { + return err + } + protoIdentityKey, err := identityKey.GetPublic().Marshall() + if err != nil { + return err + } + timestampMicro := time.Now().UnixMicro() + inner := spacesyncproto.StoreKeyInner{ + Peer: protoPeerKey, + Identity: protoIdentityKey, + Value: value, + TimestampMicro: timestampMicro, + AclHeadId: headId, + Key: key, + } + innerBytes, err := inner.Marshal() + if err != nil { + return err + } + peerSig, err := peerIdKey.Sign(innerBytes) + if err != nil { + return err + } + identitySig, err := identityKey.Sign(innerBytes) + if err != nil { + return err + } + keyPeerId := key + "-" + peerIdKey.GetPublic().PeerId() + keyValue := innerstorage.KeyValue{ + KeyPeerId: keyPeerId, + Key: key, + TimestampMilli: int(timestampMicro), + Identity: identityKey.GetPublic().Account(), + PeerId: peerIdKey.GetPublic().PeerId(), + AclId: headId, + ReadKeyId: readKeyId, + Value: innerstorage.Value{ + Value: innerBytes, + PeerSignature: peerSig, + IdentitySignature: identitySig, + }, + } + err = s.inner.Set(ctx, keyValue) + if err != nil { + return err + } + indexErr := s.indexer.Index(s.decrypt, keyValue) + if indexErr != nil { + log.Warn("failed to index for key", zap.String("key", key), zap.Error(indexErr)) + } + sendErr := s.syncClient.Broadcast(ctx, s.storageId, keyValue) + if sendErr != nil { + log.Warn("failed to send key value", zap.String("key", key), zap.Error(sendErr)) + } + return nil +} + +func (s *storage) SetRaw(ctx context.Context, keyValue ...*spacesyncproto.StoreKeyValue) (err error) { + if len(keyValue) == 0 { + return nil + } + s.mx.Lock() + defer s.mx.Unlock() + keyValues := make([]innerstorage.KeyValue, 0, len(keyValue)) + for _, kv := range keyValue { + innerKv, err := innerstorage.KeyValueFromProto(kv, true) + if err != nil { + return err + } + keyValues = append(keyValues, innerKv) + } + s.aclList.RLock() + state := s.aclList.AclState() + err = s.readKeysFromAclState(state) + if err != nil { + s.aclList.RUnlock() + return err + } + for i := range keyValues { + el, err := s.inner.Diff().Element(keyValues[i].KeyPeerId) + if err == nil { + binary.BigEndian.PutUint64(s.byteRepr, uint64(keyValues[i].TimestampMilli)) + if el.Head >= string(s.byteRepr) { + keyValues[i].KeyPeerId = "" + continue + } + } + keyValues[i].ReadKeyId, err = state.ReadKeyForAclId(keyValues[i].AclId) + if err != nil { + keyValues[i].KeyPeerId = "" + continue + } + } + s.aclList.RUnlock() + keyValues = slice.DiscardFromSlice(keyValues, func(value innerstorage.KeyValue) bool { + return value.KeyPeerId == "" + }) + if len(keyValues) == 0 { + return nil + } + err = s.inner.Set(ctx, keyValues...) + if err != nil { + return err + } + sendErr := s.syncClient.Broadcast(ctx, s.storageId, keyValues...) + if sendErr != nil { + log.Warn("failed to send key values", zap.Error(sendErr)) + } + indexErr := s.indexer.Index(s.decrypt, keyValues...) 
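	// As in Set above, broadcast and indexing failures are deliberately non-fatal here:
	// the values are already persisted, so both errors are only logged.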
+ if indexErr != nil { + log.Warn("failed to index for keys", zap.Error(indexErr)) + } + return nil +} + +func (s *storage) GetAll(ctx context.Context, key string, get func(decryptor Decryptor, values []innerstorage.KeyValue) error) (err error) { + var values []innerstorage.KeyValue + err = s.inner.IteratePrefix(ctx, key, func(kv innerstorage.KeyValue) error { + bytes := make([]byte, len(kv.Value.Value)) + copy(bytes, kv.Value.Value) + kv.Value.Value = bytes + values = append(values, kv) + return nil + }) + if err != nil { + return err + } + s.mx.Lock() + defer s.mx.Unlock() + return get(s.decrypt, values) +} + +func (s *storage) InnerStorage() innerstorage.KeyValueStorage { + return s.inner +} + +func (s *storage) readKeysFromAclState(state *list.AclState) (err error) { + if len(s.readKeys) == len(state.Keys()) { + return nil + } + if state.AccountKey() == nil || !state.HadReadPermissions(state.AccountKey().GetPublic()) { + return nil + } + for key, value := range state.Keys() { + if _, exists := s.readKeys[key]; exists { + continue + } + if value.ReadKey == nil { + continue + } + treeKey, err := deriveKey(value.ReadKey, s.storageId) + if err != nil { + return err + } + s.readKeys[key] = treeKey + } + curKey, err := state.CurrentReadKey() + if err != nil { + return err + } + if curKey == nil { + return nil + } + s.currentReadKey, err = deriveKey(curKey, s.storageId) + return err +} + +func (s *storage) Iterate(ctx context.Context, f func(decryptor Decryptor, key string, values []innerstorage.KeyValue) (bool, error)) (err error) { + s.mx.Lock() + defer s.mx.Unlock() + var ( + curKey = "" + // TODO: reuse buffer + values []innerstorage.KeyValue + ) + err = s.inner.IterateValues(ctx, func(kv innerstorage.KeyValue) (bool, error) { + if kv.Key != curKey { + if curKey != "" { + iter, err := f(s.decrypt, curKey, values) + if err != nil { + return false, err + } + if !iter { + values = nil + return false, nil + } + } + curKey = kv.Key + values = values[:0] + } + bytes := make([]byte, len(kv.Value.Value)) + copy(bytes, kv.Value.Value) + kv.Value.Value = bytes + values = append(values, kv) + return true, nil + }) + if err != nil { + return err + } + if len(values) > 0 { + _, err = f(s.decrypt, curKey, values) + } + return err +} + +func (s *storage) decrypt(kv innerstorage.KeyValue) (value []byte, err error) { + if kv.ReadKeyId == "" { + return nil, fmt.Errorf("no read key id") + } + key := s.readKeys[kv.ReadKeyId] + if key == nil { + return nil, fmt.Errorf("no read key for %s", kv.ReadKeyId) + } + msg := &spacesyncproto.StoreKeyInner{} + err = proto.Unmarshal(kv.Value.Value, msg) + if err != nil { + return nil, err + } + value, err = key.Decrypt(msg.Value) + if err != nil { + return nil, err + } + return value, nil +} + +func deriveKey(key crypto.SymKey, id string) (crypto.SymKey, error) { + raw, err := key.Raw() + if err != nil { + return nil, err + } + return crypto.DeriveSymmetricKey(raw, fmt.Sprintf(crypto.AnysyncKeyValuePath, id)) +} diff --git a/commonspace/object/keyvalue/keyvaluestorage/syncstorage/syncclient.go b/commonspace/object/keyvalue/keyvaluestorage/syncstorage/syncclient.go new file mode 100644 index 00000000..3e3839a2 --- /dev/null +++ b/commonspace/object/keyvalue/keyvaluestorage/syncstorage/syncclient.go @@ -0,0 +1,83 @@ +package syncstorage + +import ( + "context" + "fmt" + + "github.com/anyproto/any-sync/commonspace/object/keyvalue/keyvaluestorage/innerstorage" + "github.com/anyproto/any-sync/commonspace/spacesyncproto" + "github.com/anyproto/any-sync/commonspace/sync" + 
"github.com/anyproto/any-sync/commonspace/sync/objectsync/objectmessages" +) + +type innerUpdate struct { + prepared []byte + keyValues []innerstorage.KeyValue +} + +func (i *innerUpdate) Marshall(data objectmessages.ObjectMeta) ([]byte, error) { + if i.prepared != nil { + return i.prepared, nil + } + return nil, fmt.Errorf("no prepared data") +} + +func (i *innerUpdate) Prepare() error { + // TODO: Add peer to ignored peers list + var ( + protoKeyValues []*spacesyncproto.StoreKeyValue + err error + ) + for _, kv := range i.keyValues { + protoKeyValues = append(protoKeyValues, kv.Proto()) + } + keyValues := &spacesyncproto.StoreKeyValues{KeyValues: protoKeyValues} + i.prepared, err = keyValues.Marshal() + return err +} + +func (i *innerUpdate) Heads() []string { + return nil +} + +func (i *innerUpdate) MsgSize() uint64 { + return uint64(len(i.prepared)) +} + +func (i *innerUpdate) ObjectType() spacesyncproto.ObjectType { + return spacesyncproto.ObjectType_KeyValue +} + +type SyncClient interface { + Broadcast(ctx context.Context, objectId string, keyValues ...innerstorage.KeyValue) error +} + +type syncClient struct { + spaceId string + syncService sync.SyncService +} + +func New(spaceId string, syncService sync.SyncService) SyncClient { + return &syncClient{ + spaceId: spaceId, + syncService: syncService, + } +} + +func (s *syncClient) Broadcast(ctx context.Context, objectId string, keyValue ...innerstorage.KeyValue) error { + inner := &innerUpdate{ + keyValues: keyValue, + } + err := inner.Prepare() + if err != nil { + return err + } + headUpdate := &objectmessages.HeadUpdate{ + Meta: objectmessages.ObjectMeta{ + ObjectId: objectId, + SpaceId: s.spaceId, + }, + Update: inner, + } + return s.syncService.BroadcastMessage(ctx, headUpdate) +} diff --git a/commonspace/object/keyvalue/kvinterfaces/interfaces.go b/commonspace/object/keyvalue/kvinterfaces/interfaces.go new file mode 100644 index 00000000..ce0ea4b3 --- /dev/null +++ b/commonspace/object/keyvalue/kvinterfaces/interfaces.go @@ -0,0 +1,24 @@ +//go:generate mockgen -destination mock_kvinterfaces/mock_kvinterfaces.go github.com/anyproto/any-sync/commonspace/object/keyvalue/kvinterfaces KeyValueService +package kvinterfaces + +import ( + "context" + + "storj.io/drpc" + + "github.com/anyproto/any-sync/app" + "github.com/anyproto/any-sync/commonspace/object/keyvalue/keyvaluestorage" + "github.com/anyproto/any-sync/commonspace/spacesyncproto" + "github.com/anyproto/any-sync/net/peer" +) + +const CName = "common.object.keyvalue" + +type KeyValueService interface { + app.ComponentRunnable + DefaultStore() keyvaluestorage.Storage + HandleMessage(ctx context.Context, msg drpc.Message) (err error) + SyncWithPeer(p peer.Peer) (err error) + HandleStoreDiffRequest(ctx context.Context, req *spacesyncproto.StoreDiffRequest) (resp *spacesyncproto.StoreDiffResponse, err error) + HandleStoreElementsRequest(ctx context.Context, stream spacesyncproto.DRPCSpaceSync_StoreElementsStream) (err error) +} diff --git a/commonspace/object/keyvalue/kvinterfaces/mock_kvinterfaces/mock_kvinterfaces.go b/commonspace/object/keyvalue/kvinterfaces/mock_kvinterfaces/mock_kvinterfaces.go new file mode 100644 index 00000000..93026984 --- /dev/null +++ b/commonspace/object/keyvalue/kvinterfaces/mock_kvinterfaces/mock_kvinterfaces.go @@ -0,0 +1,171 @@ +// Code generated by MockGen. DO NOT EDIT. 
+// Source: github.com/anyproto/any-sync/commonspace/object/keyvalue/kvinterfaces (interfaces: KeyValueService) +// +// Generated by this command: +// +// mockgen -destination mock_kvinterfaces/mock_kvinterfaces.go github.com/anyproto/any-sync/commonspace/object/keyvalue/kvinterfaces KeyValueService +// +// Package mock_kvinterfaces is a generated GoMock package. +package mock_kvinterfaces + +import ( + context "context" + reflect "reflect" + + app "github.com/anyproto/any-sync/app" + keyvaluestorage "github.com/anyproto/any-sync/commonspace/object/keyvalue/keyvaluestorage" + spacesyncproto "github.com/anyproto/any-sync/commonspace/spacesyncproto" + peer "github.com/anyproto/any-sync/net/peer" + gomock "go.uber.org/mock/gomock" + drpc "storj.io/drpc" +) + +// MockKeyValueService is a mock of KeyValueService interface. +type MockKeyValueService struct { + ctrl *gomock.Controller + recorder *MockKeyValueServiceMockRecorder +} + +// MockKeyValueServiceMockRecorder is the mock recorder for MockKeyValueService. +type MockKeyValueServiceMockRecorder struct { + mock *MockKeyValueService +} + +// NewMockKeyValueService creates a new mock instance. +func NewMockKeyValueService(ctrl *gomock.Controller) *MockKeyValueService { + mock := &MockKeyValueService{ctrl: ctrl} + mock.recorder = &MockKeyValueServiceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockKeyValueService) EXPECT() *MockKeyValueServiceMockRecorder { + return m.recorder +} + +// Close mocks base method. +func (m *MockKeyValueService) Close(arg0 context.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Close", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Close indicates an expected call of Close. +func (mr *MockKeyValueServiceMockRecorder) Close(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockKeyValueService)(nil).Close), arg0) +} + +// DefaultStore mocks base method. +func (m *MockKeyValueService) DefaultStore() keyvaluestorage.Storage { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DefaultStore") + ret0, _ := ret[0].(keyvaluestorage.Storage) + return ret0 +} + +// DefaultStore indicates an expected call of DefaultStore. +func (mr *MockKeyValueServiceMockRecorder) DefaultStore() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DefaultStore", reflect.TypeOf((*MockKeyValueService)(nil).DefaultStore)) +} + +// HandleMessage mocks base method. +func (m *MockKeyValueService) HandleMessage(arg0 context.Context, arg1 drpc.Message) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HandleMessage", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// HandleMessage indicates an expected call of HandleMessage. +func (mr *MockKeyValueServiceMockRecorder) HandleMessage(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HandleMessage", reflect.TypeOf((*MockKeyValueService)(nil).HandleMessage), arg0, arg1) +} + +// HandleStoreDiffRequest mocks base method. 
+func (m *MockKeyValueService) HandleStoreDiffRequest(arg0 context.Context, arg1 *spacesyncproto.StoreDiffRequest) (*spacesyncproto.StoreDiffResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HandleStoreDiffRequest", arg0, arg1) + ret0, _ := ret[0].(*spacesyncproto.StoreDiffResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HandleStoreDiffRequest indicates an expected call of HandleStoreDiffRequest. +func (mr *MockKeyValueServiceMockRecorder) HandleStoreDiffRequest(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HandleStoreDiffRequest", reflect.TypeOf((*MockKeyValueService)(nil).HandleStoreDiffRequest), arg0, arg1) +} + +// HandleStoreElementsRequest mocks base method. +func (m *MockKeyValueService) HandleStoreElementsRequest(arg0 context.Context, arg1 spacesyncproto.DRPCSpaceSync_StoreElementsStream) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HandleStoreElementsRequest", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// HandleStoreElementsRequest indicates an expected call of HandleStoreElementsRequest. +func (mr *MockKeyValueServiceMockRecorder) HandleStoreElementsRequest(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HandleStoreElementsRequest", reflect.TypeOf((*MockKeyValueService)(nil).HandleStoreElementsRequest), arg0, arg1) +} + +// Init mocks base method. +func (m *MockKeyValueService) Init(arg0 *app.App) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Init", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Init indicates an expected call of Init. +func (mr *MockKeyValueServiceMockRecorder) Init(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Init", reflect.TypeOf((*MockKeyValueService)(nil).Init), arg0) +} + +// Name mocks base method. +func (m *MockKeyValueService) Name() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Name") + ret0, _ := ret[0].(string) + return ret0 +} + +// Name indicates an expected call of Name. +func (mr *MockKeyValueServiceMockRecorder) Name() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Name", reflect.TypeOf((*MockKeyValueService)(nil).Name)) +} + +// Run mocks base method. +func (m *MockKeyValueService) Run(arg0 context.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Run", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Run indicates an expected call of Run. +func (mr *MockKeyValueServiceMockRecorder) Run(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Run", reflect.TypeOf((*MockKeyValueService)(nil).Run), arg0) +} + +// SyncWithPeer mocks base method. +func (m *MockKeyValueService) SyncWithPeer(arg0 peer.Peer) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncWithPeer", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SyncWithPeer indicates an expected call of SyncWithPeer. 
+func (mr *MockKeyValueServiceMockRecorder) SyncWithPeer(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncWithPeer", reflect.TypeOf((*MockKeyValueService)(nil).SyncWithPeer), arg0) +} diff --git a/commonspace/object/keyvalue/limiter.go b/commonspace/object/keyvalue/limiter.go new file mode 100644 index 00000000..7a36bffd --- /dev/null +++ b/commonspace/object/keyvalue/limiter.go @@ -0,0 +1,52 @@ +package keyvalue + +import ( + "context" + "sync" +) + +type concurrentLimiter struct { + mu sync.Mutex + inProgress map[string]bool + wg sync.WaitGroup +} + +func newConcurrentLimiter() *concurrentLimiter { + return &concurrentLimiter{ + inProgress: make(map[string]bool), + } +} + +func (cl *concurrentLimiter) ScheduleRequest(ctx context.Context, id string, action func()) bool { + cl.mu.Lock() + if cl.inProgress[id] { + cl.mu.Unlock() + return false + } + + cl.inProgress[id] = true + cl.wg.Add(1) + cl.mu.Unlock() + + go func() { + defer func() { + cl.mu.Lock() + delete(cl.inProgress, id) + cl.mu.Unlock() + cl.wg.Done() + }() + + select { + case <-ctx.Done(): + return + default: + action() + } + }() + + return true +} + +func (cl *concurrentLimiter) Close() { + cl.wg.Wait() +} diff --git a/commonspace/object/keyvalue/remotediff.go b/commonspace/object/keyvalue/remotediff.go new file mode 100644 index 00000000..c0b82e10 --- /dev/null +++ b/commonspace/object/keyvalue/remotediff.go @@ -0,0 +1,106 @@ +package keyvalue + +import ( + "context" + + "github.com/anyproto/any-sync/app/ldiff" + "github.com/anyproto/any-sync/commonspace/spacesyncproto" +) + +type Client interface { + StoreDiff(context.Context, *spacesyncproto.StoreDiffRequest) (*spacesyncproto.StoreDiffResponse, error) +} + +type RemoteDiff interface { + ldiff.Remote +} + +func NewRemoteDiff(spaceId string, client Client) RemoteDiff { + return &remote{ + spaceId: spaceId, + client: client, + } +} + +type remote struct { + spaceId string + client Client +} + +func (r *remote) Ranges(ctx context.Context, ranges []ldiff.Range, resBuf []ldiff.RangeResult) (results []ldiff.RangeResult, err error) { + results = resBuf[:0] + pbRanges := make([]*spacesyncproto.HeadSyncRange, 0, len(ranges)) + for _, rg := range ranges { + pbRanges = append(pbRanges, &spacesyncproto.HeadSyncRange{ + From: rg.From, + To: rg.To, + Elements: rg.Elements, + Limit: uint32(rg.Limit), + }) + } + req := &spacesyncproto.StoreDiffRequest{ + SpaceId: r.spaceId, + Ranges: pbRanges, + } + resp, err := r.client.StoreDiff(ctx, req) + if err != nil { + return + } + for _, rr := range resp.Results { + var elms []ldiff.Element + if len(rr.Elements) > 0 { + elms = make([]ldiff.Element, 0, len(rr.Elements)) + } + for _, e := range rr.Elements { + elms = append(elms, ldiff.Element{ + Id: e.Id, + Head: e.Head, + }) + } + results = append(results, ldiff.RangeResult{ + Hash: rr.Hash, + Elements: elms, + Count: int(rr.Count), + }) + } + return +} + +func HandleRangeRequest(ctx context.Context, d ldiff.Diff, req *spacesyncproto.StoreDiffRequest) (resp *spacesyncproto.StoreDiffResponse, err error) { + ranges := make([]ldiff.Range, 0, len(req.Ranges)) + // basically we gather data applicable for both diffs + for _, reqRange := range req.Ranges { + ranges = append(ranges, ldiff.Range{ + From: reqRange.From, + To: reqRange.To, + Limit: int(reqRange.Limit), + Elements: reqRange.Elements, + }) + } + res, err := d.Ranges(ctx, ranges, nil) + if err != nil { + return + } + + resp = &spacesyncproto.StoreDiffResponse{ + Results: 
make([]*spacesyncproto.HeadSyncResult, 0, len(res)), + } + for _, rangeRes := range res { + var elements []*spacesyncproto.HeadSyncResultElement + if len(rangeRes.Elements) > 0 { + elements = make([]*spacesyncproto.HeadSyncResultElement, 0, len(rangeRes.Elements)) + for _, el := range rangeRes.Elements { + elements = append(elements, &spacesyncproto.HeadSyncResultElement{ + Id: el.Id, + Head: el.Head, + }) + } + } + resp.Results = append(resp.Results, &spacesyncproto.HeadSyncResult{ + Hash: rangeRes.Hash, + Elements: elements, + Count: uint32(rangeRes.Count), + }) + } + return +} diff --git a/commonspace/object/tree/objecttree/mock_objecttree/mock_objecttree.go b/commonspace/object/tree/objecttree/mock_objecttree/mock_objecttree.go index e3154e2a..084db88b 100644 --- a/commonspace/object/tree/objecttree/mock_objecttree/mock_objecttree.go +++ b/commonspace/object/tree/objecttree/mock_objecttree/mock_objecttree.go @@ -5,7 +5,6 @@ // // mockgen -destination mock_objecttree/mock_objecttree.go github.com/anyproto/any-sync/commonspace/object/tree/objecttree ObjectTree,Storage // - // Package mock_objecttree is a generated GoMock package. package mock_objecttree @@ -374,11 +373,12 @@ func (mr *MockObjectTreeMockRecorder) SetFlusher(arg0 any) *gomock.Call { } // SnapshotPath mocks base method. -func (m *MockObjectTree) SnapshotPath() []string { +func (m *MockObjectTree) SnapshotPath() ([]string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SnapshotPath") ret0, _ := ret[0].([]string) - return ret0 + ret1, _ := ret[1].(error) + return ret0, ret1 } // SnapshotPath indicates an expected call of SnapshotPath. diff --git a/commonspace/object/tree/objecttree/objecttree.go b/commonspace/object/tree/objecttree/objecttree.go index 4eccceef..909cbd0a 100644 --- a/commonspace/object/tree/objecttree/objecttree.go +++ b/commonspace/object/tree/objecttree/objecttree.go @@ -90,7 +90,7 @@ type ReadableObjectTree interface { type ObjectTree interface { ReadableObjectTree - SnapshotPath() []string + SnapshotPath() ([]string, error) ChangesAfterCommonSnapshotLoader(snapshotPath, heads []string) (LoadIterator, error) Storage() Storage @@ -144,7 +144,10 @@ func (ot *objectTree) rebuildFromStorage(theirHeads, theirSnapshotPath []string, ) if theirHeads != nil { // TODO: add error handling - ourPath = ot.SnapshotPath() + ourPath, err = ot.SnapshotPath() + if err != nil { + return fmt.Errorf("rebuild from storage: %w", err) + } } ot.tree, err = ot.treeBuilder.Build(treeBuilderOpts{ theirHeads: theirHeads, @@ -748,13 +751,12 @@ func (ot *objectTree) Delete() error { return ot.storage.Delete(context.Background()) } -func (ot *objectTree) SnapshotPath() []string { +func (ot *objectTree) SnapshotPath() ([]string, error) { if ot.isDeleted { - return nil + return nil, ErrDeleted } - // TODO: Add error as return parameter if ot.snapshotPathIsActual() { - return ot.snapshotPath + return ot.snapshotPath, nil } var path []string @@ -763,14 +765,13 @@ func (ot *objectTree) SnapshotPath() []string { for currentSnapshotId != "" { sn, err := ot.storage.Get(context.Background(), currentSnapshotId) if err != nil { - // TODO: add error handling - panic(fmt.Sprintf("failed to get snapshot %s: %v", currentSnapshotId, err)) + return nil, fmt.Errorf("failed to get snapshot %s: %w", currentSnapshotId, err) } path = append(path, currentSnapshotId) currentSnapshotId = sn.SnapshotId } ot.snapshotPath = path - return path + return path, nil } func (ot *objectTree) ChangesAfterCommonSnapshotLoader(theirPath, theirHeads []string) (LoadIterator, 
error) { @@ -779,12 +780,16 @@ func (ot *objectTree) ChangesAfterCommonSnapshotLoader(theirPath, theirHeads []s } var ( needFullDocument = len(theirPath) == 0 - ourPath = ot.SnapshotPath() + ourPath []string // by default returning everything we have from start - commonSnapshot = ourPath[len(ourPath)-1] + commonSnapshot string err error ) - + ourPath, err = ot.SnapshotPath() + if err != nil { + return nil, fmt.Errorf("failed to get snapshot path: %w", err) + } + commonSnapshot = ourPath[len(ourPath)-1] // if this is non-empty request if !needFullDocument { commonSnapshot, err = commonSnapshotForTwoPaths(ourPath, theirPath) diff --git a/commonspace/object/tree/objecttree/objecttree_test.go b/commonspace/object/tree/objecttree/objecttree_test.go index 6fb4f7d8..0357c0d4 100644 --- a/commonspace/object/tree/objecttree/objecttree_test.go +++ b/commonspace/object/tree/objecttree/objecttree_test.go @@ -1075,7 +1075,8 @@ func TestObjectTree(t *testing.T) { _, err := objTree.AddRawChanges(context.Background(), payload) require.NoError(t, err, "adding changes should be without error") - snapshotPath := objTree.SnapshotPath() + snapshotPath, err := objTree.SnapshotPath() + require.NoError(t, err) assert.Equal(t, []string{"3", "0"}, snapshotPath) assert.Equal(t, true, objTree.(*objectTree).snapshotPathIsActual()) @@ -1857,7 +1858,9 @@ func TestObjectTree(t *testing.T) { RawChanges: result.changes, }) require.NoError(t, err) - iter, err := objTree.ChangesAfterCommonSnapshotLoader(otherTree.SnapshotPath(), otherTree.Heads()) + snPath, err := otherTree.SnapshotPath() + require.NoError(t, err) + iter, err := objTree.ChangesAfterCommonSnapshotLoader(snPath, otherTree.Heads()) require.NoError(t, err) for { batch, err := iter.NextBatch(400) diff --git a/commonspace/object/tree/objecttree/objecttreedebug.go b/commonspace/object/tree/objecttree/objecttreedebug.go index ea59c2e8..117b00f5 100644 --- a/commonspace/object/tree/objecttree/objecttreedebug.go +++ b/commonspace/object/tree/objecttree/objecttreedebug.go @@ -20,6 +20,6 @@ func (o objectTreeDebug) debugInfo(ot *objectTree, parser DescriptionParser) (di di.TreeString = ot.tree.String() di.TreeLen = ot.tree.Len() di.Heads = ot.Heads() - di.SnapshotPath = ot.SnapshotPath() + di.SnapshotPath, _ = ot.SnapshotPath() return } diff --git a/commonspace/object/tree/objecttree/storage.go b/commonspace/object/tree/objecttree/storage.go index 587c88d0..2a88ad04 100644 --- a/commonspace/object/tree/objecttree/storage.go +++ b/commonspace/object/tree/objecttree/storage.go @@ -77,6 +77,19 @@ type storage struct { var StorageChangeBuilder = NewChangeBuilder func CreateStorage(ctx context.Context, root *treechangeproto.RawTreeChangeWithId, headStorage headstorage.HeadStorage, store anystore.DB) (Storage, error) { + tx, err := store.WriteTx(ctx) + if err != nil { + return nil, err + } + storage, err := CreateStorageTx(tx.Context(), root, headStorage, store) + if err != nil { + tx.Rollback() + return nil, err + } + return storage, tx.Commit() +} + +func CreateStorageTx(ctx context.Context, root *treechangeproto.RawTreeChangeWithId, headStorage headstorage.HeadStorage, store anystore.DB) (Storage, error) { st := &storage{ id: root.Id, store: store, @@ -107,29 +120,23 @@ func CreateStorage(ctx context.Context, root *treechangeproto.RawTreeChangeWithI st.parser = &anyenc.Parser{} defer st.arena.Reset() doc := newStorageChangeValue(stChange, st.arena) - tx, err := st.store.WriteTx(ctx) + err = st.changesColl.Insert(ctx, doc) if err != nil { - return nil, err - } - err = 
st.changesColl.Insert(tx.Context(), doc) - if err != nil { - tx.Rollback() if errors.Is(err, anystore.ErrDocExists) { return nil, treestorage.ErrTreeExists } return nil, err } - err = st.headStorage.UpdateEntryTx(tx.Context(), headstorage.HeadsUpdate{ + err = st.headStorage.UpdateEntryTx(ctx, headstorage.HeadsUpdate{ Id: root.Id, Heads: []string{root.Id}, CommonSnapshot: &root.Id, IsDerived: &unmarshalled.IsDerived, }) if err != nil { - tx.Rollback() return nil, err } - return st, tx.Commit() + return st, nil } func NewStorage(ctx context.Context, id string, headStorage headstorage.HeadStorage, store anystore.DB) (Storage, error) { @@ -151,7 +158,7 @@ func NewStorage(ctx context.Context, id string, headStorage headstorage.HeadStor st.changesColl = changesColl st.arena = &anyenc.Arena{} st.parser = &anyenc.Parser{} - st.root, err = st.Get(ctx, st.id) + st.root, err = st.getWithoutParser(ctx, st.id) if err != nil { if errors.Is(err, anystore.ErrDocNotFound) { return nil, treestorage.ErrUnknownTreeId @@ -182,6 +189,7 @@ func (s *storage) Has(ctx context.Context, id string) (bool, error) { } func (s *storage) GetAfterOrder(ctx context.Context, orderId string, storageIter StorageIterator) error { + // this method can be called without having a lock on a tree, so don't reuse any non-thread-safe parts filter := query.And{ query.Key{Path: []string{OrderKey}, Filter: query.NewComp(query.CompOpGte, orderId)}, query.Key{Path: []string{TreeKey}, Filter: query.NewComp(query.CompOpEq, s.id)}, @@ -213,13 +221,19 @@ func (s *storage) AddAll(ctx context.Context, changes []StorageChange, heads []s if err != nil { return fmt.Errorf("failed to create write tx: %w", err) } + defer func() { + if err != nil { + tx.Rollback() + } else { + err = tx.Commit() + } + }() for _, ch := range changes { ch.TreeId = s.id newVal := newStorageChangeValue(ch, arena) err = s.changesColl.Insert(tx.Context(), newVal) arena.Reset() if err != nil { - tx.Rollback() return err } } @@ -228,12 +242,7 @@ func (s *storage) AddAll(ctx context.Context, changes []StorageChange, heads []s Heads: heads, CommonSnapshot: &commonSnapshot, } - err = s.headStorage.UpdateEntryTx(tx.Context(), update) - if err != nil { - tx.Rollback() - return err - } - return tx.Commit() + return s.headStorage.UpdateEntryTx(tx.Context(), update) } func (s *storage) AddAllNoError(ctx context.Context, changes []StorageChange, heads []string, commonSnapshot string) error { @@ -243,13 +252,19 @@ func (s *storage) AddAllNoError(ctx context.Context, changes []StorageChange, he if err != nil { return fmt.Errorf("failed to create write tx: %w", err) } + defer func() { + if err != nil { + tx.Rollback() + } else { + err = tx.Commit() + } + }() for _, ch := range changes { ch.TreeId = s.id newVal := newStorageChangeValue(ch, arena) err = s.changesColl.Insert(tx.Context(), newVal) arena.Reset() if err != nil && !errors.Is(err, anystore.ErrDocExists) { - tx.Rollback() return err } } @@ -258,12 +273,7 @@ func (s *storage) AddAllNoError(ctx context.Context, changes []StorageChange, he Heads: heads, CommonSnapshot: &commonSnapshot, } - err = s.headStorage.UpdateEntryTx(tx.Context(), update) - if err != nil { - tx.Rollback() - return err - } - return tx.Commit() + return s.headStorage.UpdateEntryTx(tx.Context(), update) } func (s *storage) Delete(ctx context.Context) error { @@ -300,6 +310,15 @@ func (s *storage) CommonSnapshot(ctx context.Context) (string, error) { return entry.CommonSnapshot, nil } +func (s *storage) getWithoutParser(ctx context.Context, id string) 
(StorageChange, error) { + // root will be reused outside the lock, so we shouldn't use parser for it + doc, err := s.changesColl.FindId(ctx, id) + if err != nil { + return StorageChange{}, err + } + return s.changeFromDoc(doc), nil +} + func (s *storage) Get(ctx context.Context, id string) (StorageChange, error) { doc, err := s.changesColl.FindIdWithParser(ctx, s.parser, id) if err != nil { diff --git a/commonspace/object/tree/objecttree/treemigrator.go b/commonspace/object/tree/objecttree/treemigrator.go index ac66532f..d38aef43 100644 --- a/commonspace/object/tree/objecttree/treemigrator.go +++ b/commonspace/object/tree/objecttree/treemigrator.go @@ -6,7 +6,6 @@ import ( "fmt" anystore "github.com/anyproto/any-store" - "go.uber.org/zap" "github.com/anyproto/any-sync/commonspace/headsync/headstorage" "github.com/anyproto/any-sync/commonspace/object/acl/list" @@ -55,10 +54,6 @@ func NewTreeMigrator(keyStorage crypto.KeyStorage, aclList list.AclList) *TreeMi } func (tm *TreeMigrator) MigrateTreeStorage(ctx context.Context, storage treeStorage, headStorage headstorage.HeadStorage, store anystore.DB) error { - var ( - usedDfs bool - loadFailed bool - ) rootChange, err := storage.Root() if err != nil { return err @@ -78,8 +73,7 @@ func (tm *TreeMigrator) MigrateTreeStorage(ctx context.Context, storage treeStor return fmt.Errorf("migration: failed to get all changes: %w", err) } } else { - usedDfs = true - loadFailed = tm.dfs(ctx, heads, rootChange.Id) + tm.dfs(ctx, heads, rootChange.Id) } newStorage, err := CreateStorage(ctx, rootChange, headStorage, store) if err != nil && !errors.Is(err, treestorage.ErrTreeExists) { @@ -107,20 +101,7 @@ func (tm *TreeMigrator) MigrateTreeStorage(ctx context.Context, storage treeStor return fmt.Errorf("migration: failed to add raw changes: %w", err) } if !slice.UnsortedEquals(res.Heads, heads) { - returnErr := fmt.Errorf("migration: heads mismatch: %v, %v != %v", rootChange.Id, res.Heads, heads) - if loadFailed { - log.Error("tree is corrupted", zap.String("id", storage.Id()), zap.Error(returnErr)) - return nil - } - if usedDfs { - return returnErr - } - tm.allChanges = nil - if tm.dfs(ctx, heads, rootChange.Id) { - log.Error("tree is corrupted", zap.String("id", storage.Id()), zap.Error(returnErr)) - return nil - } - return returnErr + log.Errorf("migration: heads mismatch: %v, %v != %v", rootChange.Id, res.Heads, heads) } return nil } diff --git a/commonspace/object/tree/synctree/headupdate.go b/commonspace/object/tree/synctree/headupdate.go index e828ae46..4e847aa0 100644 --- a/commonspace/object/tree/synctree/headupdate.go +++ b/commonspace/object/tree/synctree/headupdate.go @@ -4,6 +4,7 @@ import ( "slices" "github.com/anyproto/any-sync/commonspace/object/tree/treechangeproto" + "github.com/anyproto/any-sync/commonspace/spacesyncproto" "github.com/anyproto/any-sync/commonspace/sync/objectsync/objectmessages" ) @@ -21,6 +22,10 @@ func (h *InnerHeadUpdate) MsgSize() (size uint64) { return uint64(len(h.prepared)) } +func (h *InnerHeadUpdate) ObjectType() spacesyncproto.ObjectType { + return spacesyncproto.ObjectType_Tree +} + func (h *InnerHeadUpdate) Prepare() error { treeMsg := treechangeproto.WrapHeadUpdate(&treechangeproto.TreeHeadUpdate{ Heads: h.heads, diff --git a/commonspace/object/tree/synctree/mock_synctree/mock_synctree.go b/commonspace/object/tree/synctree/mock_synctree/mock_synctree.go index ef132cda..324dc869 100644 --- a/commonspace/object/tree/synctree/mock_synctree/mock_synctree.go +++ 
b/commonspace/object/tree/synctree/mock_synctree/mock_synctree.go @@ -5,7 +5,6 @@ // // mockgen -destination mock_synctree/mock_synctree.go github.com/anyproto/any-sync/commonspace/object/tree/synctree SyncTree,HeadNotifiable,SyncClient,RequestFactory // - // Package mock_synctree is a generated GoMock package. package mock_synctree @@ -467,11 +466,12 @@ func (mr *MockSyncTreeMockRecorder) SetListener(arg0 any) *gomock.Call { } // SnapshotPath mocks base method. -func (m *MockSyncTree) SnapshotPath() []string { +func (m *MockSyncTree) SnapshotPath() ([]string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SnapshotPath") ret0, _ := ret[0].([]string) - return ret0 + ret1, _ := ret[1].(error) + return ret0, ret1 } // SnapshotPath indicates an expected call of SnapshotPath. @@ -651,11 +651,12 @@ func (mr *MockSyncClientMockRecorder) Broadcast(arg0, arg1 any) *gomock.Call { } // CreateFullSyncRequest mocks base method. -func (m *MockSyncClient) CreateFullSyncRequest(arg0 string, arg1 objecttree.ObjectTree) *objectmessages.Request { +func (m *MockSyncClient) CreateFullSyncRequest(arg0 string, arg1 objecttree.ObjectTree) (*objectmessages.Request, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CreateFullSyncRequest", arg0, arg1) ret0, _ := ret[0].(*objectmessages.Request) - return ret0 + ret1, _ := ret[1].(error) + return ret0, ret1 } // CreateFullSyncRequest indicates an expected call of CreateFullSyncRequest. @@ -760,11 +761,12 @@ func (m *MockRequestFactory) EXPECT() *MockRequestFactoryMockRecorder { } // CreateFullSyncRequest mocks base method. -func (m *MockRequestFactory) CreateFullSyncRequest(arg0 string, arg1 objecttree.ObjectTree) *objectmessages.Request { +func (m *MockRequestFactory) CreateFullSyncRequest(arg0 string, arg1 objecttree.ObjectTree) (*objectmessages.Request, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CreateFullSyncRequest", arg0, arg1) ret0, _ := ret[0].(*objectmessages.Request) - return ret0 + ret1, _ := ret[1].(error) + return ret0, ret1 } // CreateFullSyncRequest indicates an expected call of CreateFullSyncRequest. 
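A minimal caller-side sketch (assumed names, not taken from this patch): the requestfactory.go change below makes CreateFullSyncRequest return an error alongside the request, because it now calls SnapshotPath, which can fail. Call sites therefore take this shape, mirroring what the patch does in synctree.go and synchandler.go:

// hypothetical call site; requestFactory, peerId, tree, syncClient and ctx are assumed to be in scope
req, err := requestFactory.CreateFullSyncRequest(peerId, tree)
if err != nil {
	return err // no request is produced when the snapshot path cannot be computed
}
return syncClient.QueueRequest(ctx, req)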
diff --git a/commonspace/object/tree/synctree/requestfactory.go b/commonspace/object/tree/synctree/requestfactory.go index e570c2ab..fa4f3827 100644 --- a/commonspace/object/tree/synctree/requestfactory.go +++ b/commonspace/object/tree/synctree/requestfactory.go @@ -12,7 +12,7 @@ const batchSize = 1024 * 1024 type RequestFactory interface { CreateHeadUpdate(t objecttree.ObjectTree, ignoredPeer string, added []*treechangeproto.RawTreeChangeWithId) (headUpdate *objectmessages.HeadUpdate, err error) CreateNewTreeRequest(peerId, objectId string) *objectmessages.Request - CreateFullSyncRequest(peerId string, t objecttree.ObjectTree) *objectmessages.Request + CreateFullSyncRequest(peerId string, t objecttree.ObjectTree) (*objectmessages.Request, error) CreateResponseProducer(t objecttree.ObjectTree, theirHeads, theirSnapshotPath []string) (response.ResponseProducer, error) } @@ -29,6 +29,10 @@ func (r *requestFactory) CreateHeadUpdate(t objecttree.ObjectTree, ignoredPeer s if ignoredPeer != "" { broadcastOpts.EmptyPeers = []string{ignoredPeer} } + snapshotPath, err := t.SnapshotPath() + if err != nil { + return + } headUpdate = &objectmessages.HeadUpdate{ Meta: objectmessages.ObjectMeta{ ObjectId: t.Id(), @@ -38,7 +42,7 @@ func (r *requestFactory) CreateHeadUpdate(t objecttree.ObjectTree, ignoredPeer s opts: broadcastOpts, heads: t.Heads(), changes: added, - snapshotPath: t.SnapshotPath(), + snapshotPath: snapshotPath, root: t.Header(), }, } @@ -50,8 +54,12 @@ func (r *requestFactory) CreateNewTreeRequest(peerId, objectId string) *objectme return NewRequest(peerId, r.spaceId, objectId, nil, nil, nil) } -func (r *requestFactory) CreateFullSyncRequest(peerId string, t objecttree.ObjectTree) *objectmessages.Request { - return NewRequest(peerId, r.spaceId, t.Id(), t.Heads(), t.SnapshotPath(), t.Header()) +func (r *requestFactory) CreateFullSyncRequest(peerId string, t objecttree.ObjectTree) (*objectmessages.Request, error) { + path, err := t.SnapshotPath() + if err != nil { + return nil, err + } + return NewRequest(peerId, r.spaceId, t.Id(), t.Heads(), path, t.Header()), nil } func (r *requestFactory) CreateResponseProducer(t objecttree.ObjectTree, theirHeads, theirSnapshotPath []string) (response.ResponseProducer, error) { diff --git a/commonspace/object/tree/synctree/response/mock_response/mock_response.go b/commonspace/object/tree/synctree/response/mock_response/mock_response.go index 480e0c25..b5d239a4 100644 --- a/commonspace/object/tree/synctree/response/mock_response/mock_response.go +++ b/commonspace/object/tree/synctree/response/mock_response/mock_response.go @@ -5,7 +5,6 @@ // // mockgen -destination mock_response/mock_response.go github.com/anyproto/any-sync/commonspace/object/tree/synctree/response ResponseProducer // - // Package mock_response is a generated GoMock package. package mock_response @@ -40,11 +39,12 @@ func (m *MockResponseProducer) EXPECT() *MockResponseProducerMockRecorder { } // EmptyResponse mocks base method. -func (m *MockResponseProducer) EmptyResponse() *response.Response { +func (m *MockResponseProducer) EmptyResponse() (*response.Response, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "EmptyResponse") ret0, _ := ret[0].(*response.Response) - return ret0 + ret1, _ := ret[1].(error) + return ret0, ret1 } // EmptyResponse indicates an expected call of EmptyResponse. 
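The synchandler.go hunks further below route the full-sync request through a deferred assignment; the comment there notes that this mitigates a nil value being wrapped in an interface. A self-contained sketch of that Go pitfall, with generic names assumed purely for illustration:

package main

import "fmt"

// request stands in for an interface such as syncdeps.Request.
type request interface{ PeerId() string }

// objectRequest stands in for a concrete pointer type such as *objectmessages.Request.
type objectRequest struct{}

func (o *objectRequest) PeerId() string { return "" }

func main() {
	var concrete *objectRequest // nil pointer
	var iface request = concrete
	// The interface now holds a (type, nil-pointer) pair, so it compares as
	// non-nil even though the pointer inside it is nil; callers checking
	// req != nil would wrongly treat it as a real request.
	fmt.Println(concrete == nil) // true
	fmt.Println(iface == nil)    // false
}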
diff --git a/commonspace/object/tree/synctree/response/responseproducer.go b/commonspace/object/tree/synctree/response/responseproducer.go index 2e9f6eb1..b3dca714 100644 --- a/commonspace/object/tree/synctree/response/responseproducer.go +++ b/commonspace/object/tree/synctree/response/responseproducer.go @@ -7,7 +7,7 @@ import ( type ResponseProducer interface { NewResponse(batchSize int) (*Response, error) - EmptyResponse() *Response + EmptyResponse() (*Response, error) } type responseProducer struct { @@ -45,14 +45,18 @@ func (r *responseProducer) NewResponse(batchSize int) (*Response, error) { }, nil } -func (r *responseProducer) EmptyResponse() *Response { +func (r *responseProducer) EmptyResponse() (*Response, error) { headsCopy := make([]string, len(r.tree.Heads())) copy(headsCopy, r.tree.Heads()) + snapshotPath, err := r.tree.SnapshotPath() + if err != nil { + return nil, err + } return &Response{ Heads: headsCopy, SpaceId: r.spaceId, ObjectId: r.objectId, Root: r.tree.Header(), - SnapshotPath: r.tree.SnapshotPath(), - } + SnapshotPath: snapshotPath, + }, nil } diff --git a/commonspace/object/tree/synctree/synchandler.go b/commonspace/object/tree/synctree/synchandler.go index 5a6a22a2..4b0c0fc0 100644 --- a/commonspace/object/tree/synctree/synchandler.go +++ b/commonspace/object/tree/synctree/synchandler.go @@ -41,6 +41,13 @@ func NewSyncHandler(tree SyncTree, syncClient SyncClient, spaceId string) syncde } func (s *syncHandler) HandleHeadUpdate(ctx context.Context, statusUpdater syncstatus.StatusUpdater, headUpdate drpc.Message) (req syncdeps.Request, err error) { + var objectRequest *objectmessages.Request + defer func() { + // we mitigate the problem of a nil value being wrapped in an interface + if err == nil && objectRequest != nil { + req = objectRequest + } + }() update, ok := headUpdate.(*objectmessages.HeadUpdate) if !ok { return nil, ErrUnexpectedResponseType @@ -73,7 +80,8 @@ func (s *syncHandler) HandleHeadUpdate(ctx context.Context, statusUpdater syncst return nil, nil } statusUpdater.HeadsApply(peerId, update.ObjectId(), contentUpdate.Heads, false) - return s.syncClient.CreateFullSyncRequest(peerId, s.tree), nil + objectRequest, err = s.syncClient.CreateFullSyncRequest(peerId, s.tree) + return } rawChangesPayload := objecttree.RawChangesPayload{ NewHeads: contentUpdate.Heads, @@ -85,7 +93,8 @@ func (s *syncHandler) HandleHeadUpdate(ctx context.Context, statusUpdater syncst return nil, err } if !slice.UnsortedEquals(res.Heads, contentUpdate.Heads) { - return s.syncClient.CreateFullSyncRequest(peerId, s.tree), nil + objectRequest, err = s.syncClient.CreateFullSyncRequest(peerId, s.tree) + return } return nil, nil } @@ -119,10 +128,17 @@ func (s *syncHandler) HandleStreamRequest(ctx context.Context, rq syncdeps.Reque var returnReq syncdeps.Request if slice.UnsortedEquals(curHeads, request.Heads) || slice.ContainsSorted(request.Heads, curHeads) { if len(curHeads) != len(request.Heads) { - returnReq = s.syncClient.CreateFullSyncRequest(rq.PeerId(), s.tree) + returnReq, err = s.syncClient.CreateFullSyncRequest(rq.PeerId(), s.tree) + if err != nil { + s.tree.Unlock() + return nil, err + } } - resp := producer.EmptyResponse() + resp, err := producer.EmptyResponse() s.tree.Unlock() + if err != nil { + return nil, err + } protoResp, err := resp.ProtoMessage() if err != nil { return nil, err @@ -130,7 +146,11 @@ func (s *syncHandler) HandleStreamRequest(ctx context.Context, rq syncdeps.Reque return returnReq, send(protoResp) } else { if len(request.Heads) != 0 { - returnReq = 
s.syncClient.CreateFullSyncRequest(rq.PeerId(), s.tree) + returnReq, err = s.syncClient.CreateFullSyncRequest(rq.PeerId(), s.tree) + if err != nil { + s.tree.Unlock() + return nil, err + } } s.tree.Unlock() } diff --git a/commonspace/object/tree/synctree/synchandler_test.go b/commonspace/object/tree/synctree/synchandler_test.go index 9bdb9606..2c3b7331 100644 --- a/commonspace/object/tree/synctree/synchandler_test.go +++ b/commonspace/object/tree/synctree/synchandler_test.go @@ -107,7 +107,7 @@ func TestSyncHandler_HeadUpdate(t *testing.T) { returnReq := &objectmessages.Request{ Bytes: []byte("abcd"), } - fx.client.EXPECT().CreateFullSyncRequest("peerId", fx.tree).Return(returnReq) + fx.client.EXPECT().CreateFullSyncRequest("peerId", fx.tree).Return(returnReq, nil) req, err := fx.syncHandler.HandleHeadUpdate(ctx, fx.syncStatus, headUpdate) require.NoError(t, err) require.Equal(t, returnReq, req) @@ -174,7 +174,7 @@ func TestSyncHandler_HeadUpdate(t *testing.T) { returnReq := &objectmessages.Request{ Bytes: []byte("abcd"), } - fx.client.EXPECT().CreateFullSyncRequest("peerId", fx.tree).Return(returnReq) + fx.client.EXPECT().CreateFullSyncRequest("peerId", fx.tree).Return(returnReq, nil) req, err := fx.syncHandler.HandleHeadUpdate(ctx, fx.syncStatus, headUpdate) require.NoError(t, err) require.Equal(t, returnReq, req) @@ -205,7 +205,7 @@ func TestSyncHandler_HandleStreamRequest(t *testing.T) { returnReq := &objectmessages.Request{ Bytes: []byte("abcde"), } - fx.client.EXPECT().CreateFullSyncRequest("peerId", fx.tree).Return(returnReq) + fx.client.EXPECT().CreateFullSyncRequest("peerId", fx.tree).Return(returnReq, nil) fx.tree.EXPECT().Heads().Return([]string{"curHead"}) resp := &response.Response{ Heads: heads, @@ -287,8 +287,8 @@ func TestSyncHandler_HandleStreamRequest(t *testing.T) { returnReq := &objectmessages.Request{ Bytes: []byte("abcde"), } - fx.client.EXPECT().CreateFullSyncRequest("peerId", fx.tree).Return(returnReq) - producer.EXPECT().EmptyResponse().Return(resp) + fx.client.EXPECT().CreateFullSyncRequest("peerId", fx.tree).Return(returnReq, nil) + producer.EXPECT().EmptyResponse().Return(resp, nil) ctx = peer.CtxWithPeerId(ctx, "peerId") callCount := 0 req, err := fx.syncHandler.HandleStreamRequest(ctx, request, testUpdater{}, func(resp proto.Message) error { @@ -322,8 +322,8 @@ func TestSyncHandler_HandleStreamRequest(t *testing.T) { returnReq := &objectmessages.Request{ Bytes: []byte("abcde"), } - fx.client.EXPECT().CreateFullSyncRequest("peerId", fx.tree).Return(returnReq) - producer.EXPECT().EmptyResponse().Return(resp) + fx.client.EXPECT().CreateFullSyncRequest("peerId", fx.tree).Return(returnReq, nil) + producer.EXPECT().EmptyResponse().Return(resp, nil) ctx = peer.CtxWithPeerId(ctx, "peerId") callCount := 0 req, err := fx.syncHandler.HandleStreamRequest(ctx, request, testUpdater{}, func(resp proto.Message) error { diff --git a/commonspace/object/tree/synctree/synctree.go b/commonspace/object/tree/synctree/synctree.go index b8d6dddb..360f78d0 100644 --- a/commonspace/object/tree/synctree/synctree.go +++ b/commonspace/object/tree/synctree/synctree.go @@ -53,12 +53,14 @@ type SyncTree interface { type syncTree struct { syncdeps.ObjectSyncHandler objecttree.ObjectTree - syncClient SyncClient - syncStatus syncstatus.StatusUpdater - listener updatelistener.UpdateListener - onClose func(id string) - isClosed bool - isDeleted bool + syncClient SyncClient + syncStatus syncstatus.StatusUpdater + listener updatelistener.UpdateListener + statsCollector *TreeStatsCollector + onClose 
func(id string) + isClosed bool + isDeleted bool + buildTime time.Duration } var log = logger.NewNamed("common.commonspace.synctree") @@ -81,6 +83,7 @@ type BuildDeps struct { PeerGetter ResponsiblePeersGetter BuildObjectTree objecttree.BuildObjectTreeFunc ValidateObjectTree objecttree.ValidatorFunc + StatsCollector *TreeStatsCollector } var newTreeGetter = func(deps BuildDeps, treeId string) treeGetter { @@ -112,17 +115,20 @@ func PutSyncTree(ctx context.Context, payload treestorage.TreeStorageCreatePaylo } func buildSyncTree(ctx context.Context, peerId string, deps BuildDeps) (t SyncTree, err error) { + buildStart := time.Now() objTree, err := deps.BuildObjectTree(deps.TreeStorage, deps.AclList) if err != nil { return } syncClient := deps.SyncClient syncTree := &syncTree{ - ObjectTree: objTree, - syncClient: syncClient, - onClose: deps.OnClose, - listener: deps.Listener, - syncStatus: deps.SyncStatus, + ObjectTree: objTree, + syncClient: syncClient, + onClose: deps.OnClose, + listener: deps.Listener, + syncStatus: deps.SyncStatus, + statsCollector: deps.StatsCollector, + buildTime: time.Since(buildStart), } syncHandler := NewSyncHandler(syncTree, syncClient, deps.SpaceId) syncTree.ObjectSyncHandler = syncHandler @@ -146,6 +152,9 @@ func buildSyncTree(ctx context.Context, peerId string, deps BuildDeps) (t SyncTr deps.SyncStatus.ObjectReceive(peerId, syncTree.Id(), syncTree.Heads()) } } + if syncTree.statsCollector != nil { + syncTree.statsCollector.Register(syncTree) + } return } @@ -285,6 +294,11 @@ func (s *syncTree) Close() (err error) { } func (s *syncTree) close() (err error) { + defer func() { + if s.statsCollector != nil { + s.statsCollector.Unregister(s) + } + }() defer s.Unlock() defer func() { log.Debug("closed sync tree", zap.Error(err), zap.String("id", s.Id())) @@ -311,7 +325,10 @@ func (s *syncTree) checkAlive() (err error) { func (s *syncTree) SyncWithPeer(ctx context.Context, p peer.Peer) (err error) { s.Lock() defer s.Unlock() - req := s.syncClient.CreateFullSyncRequest(p.Id(), s) + req, err := s.syncClient.CreateFullSyncRequest(p.Id(), s) + if err != nil { + return + } return s.syncClient.QueueRequest(ctx, req) } diff --git a/commonspace/object/tree/synctree/treestats.go b/commonspace/object/tree/synctree/treestats.go new file mode 100644 index 00000000..7effb3d2 --- /dev/null +++ b/commonspace/object/tree/synctree/treestats.go @@ -0,0 +1,58 @@ +package synctree + +import ( + "sync" +) + +type TreeStatsCollector struct { + trees map[string]*syncTree + mutex sync.Mutex + spaceId string +} + +func NewTreeStatsCollector(spaceId string) *TreeStatsCollector { + return &TreeStatsCollector{ + trees: make(map[string]*syncTree), + spaceId: spaceId, + } +} + +func (t *TreeStatsCollector) Register(tree *syncTree) { + t.mutex.Lock() + defer t.mutex.Unlock() + t.trees[tree.Id()] = tree +} + +func (t *TreeStatsCollector) Collect() []TreeStats { + t.mutex.Lock() + defer t.mutex.Unlock() + stats := make([]TreeStats, 0, len(t.trees)) + for _, tree := range t.trees { + tree.Lock() + stats = append(stats, TreeStats{ + TreeLen: tree.Len(), + SnapshotCounter: tree.Root().SnapshotCounter, + Heads: tree.Heads(), + ObjectId: tree.Id(), + SpaceId: t.spaceId, + BuildTimeMillis: int(tree.buildTime.Milliseconds()), + }) + tree.Unlock() + } + return stats +} + +func (t *TreeStatsCollector) Unregister(tree SyncTree) { + t.mutex.Lock() + defer t.mutex.Unlock() + delete(t.trees, tree.Id()) +} + +type TreeStats struct { + TreeLen int `json:"tree_len"` + SnapshotCounter int `json:"snapshot_counter"` + 
Heads []string `json:"heads"` + ObjectId string `json:"object_id"` + SpaceId string `json:"space_id"` + BuildTimeMillis int `json:"build_time_millis"` +} diff --git a/commonspace/object/tree/synctree/updatelistener/mock_updatelistener/mock_updatelistener.go b/commonspace/object/tree/synctree/updatelistener/mock_updatelistener/mock_updatelistener.go index 5e135802..ed385b79 100644 --- a/commonspace/object/tree/synctree/updatelistener/mock_updatelistener/mock_updatelistener.go +++ b/commonspace/object/tree/synctree/updatelistener/mock_updatelistener/mock_updatelistener.go @@ -5,7 +5,6 @@ // // mockgen -destination mock_updatelistener/mock_updatelistener.go github.com/anyproto/any-sync/commonspace/object/tree/synctree/updatelistener UpdateListener // - // Package mock_updatelistener is a generated GoMock package. package mock_updatelistener diff --git a/commonspace/object/treemanager/mock_treemanager/mock_treemanager.go b/commonspace/object/treemanager/mock_treemanager/mock_treemanager.go index 7ccf9927..1cfa568c 100644 --- a/commonspace/object/treemanager/mock_treemanager/mock_treemanager.go +++ b/commonspace/object/treemanager/mock_treemanager/mock_treemanager.go @@ -5,7 +5,6 @@ // // mockgen -destination mock_treemanager/mock_treemanager.go github.com/anyproto/any-sync/commonspace/object/treemanager TreeManager // - // Package mock_treemanager is a generated GoMock package. package mock_treemanager diff --git a/commonspace/object/treesyncer/mock_treesyncer/mock_treesyncer.go b/commonspace/object/treesyncer/mock_treesyncer/mock_treesyncer.go index 0f688efd..bdc3bcd1 100644 --- a/commonspace/object/treesyncer/mock_treesyncer/mock_treesyncer.go +++ b/commonspace/object/treesyncer/mock_treesyncer/mock_treesyncer.go @@ -5,7 +5,6 @@ // // mockgen -destination mock_treesyncer/mock_treesyncer.go github.com/anyproto/any-sync/commonspace/object/treesyncer TreeSyncer // - // Package mock_treesyncer is a generated GoMock package. package mock_treesyncer diff --git a/commonspace/objectmanager/mock_objectmanager/mock_objectmanager.go b/commonspace/objectmanager/mock_objectmanager/mock_objectmanager.go index 61029d67..5e9dd567 100644 --- a/commonspace/objectmanager/mock_objectmanager/mock_objectmanager.go +++ b/commonspace/objectmanager/mock_objectmanager/mock_objectmanager.go @@ -5,7 +5,6 @@ // // mockgen -destination mock_objectmanager/mock_objectmanager.go github.com/anyproto/any-sync/commonspace/objectmanager ObjectManager // - // Package mock_objectmanager is a generated GoMock package. package mock_objectmanager diff --git a/commonspace/objecttreebuilder/mock_objecttreebuilder/mock_objecttreebuilder.go b/commonspace/objecttreebuilder/mock_objecttreebuilder/mock_objecttreebuilder.go index 56bfdd19..7aca0fc9 100644 --- a/commonspace/objecttreebuilder/mock_objecttreebuilder/mock_objecttreebuilder.go +++ b/commonspace/objecttreebuilder/mock_objecttreebuilder/mock_objecttreebuilder.go @@ -5,7 +5,6 @@ // // mockgen -destination mock_objecttreebuilder/mock_objecttreebuilder.go github.com/anyproto/any-sync/commonspace/objecttreebuilder TreeBuilder // - // Package mock_objecttreebuilder is a generated GoMock package. 
package mock_objecttreebuilder diff --git a/commonspace/objecttreebuilder/stat.go b/commonspace/objecttreebuilder/stat.go new file mode 100644 index 00000000..c4f32146 --- /dev/null +++ b/commonspace/objecttreebuilder/stat.go @@ -0,0 +1,8 @@ +package objecttreebuilder + +import "github.com/anyproto/any-sync/commonspace/object/tree/synctree" + +type debugStat struct { + TreeStats []synctree.TreeStats `json:"tree_stats"` + SpaceId string `json:"space_id"` +} diff --git a/commonspace/objecttreebuilder/treebuilder.go b/commonspace/objecttreebuilder/treebuilder.go index eccb59ab..bb8e0ea3 100644 --- a/commonspace/objecttreebuilder/treebuilder.go +++ b/commonspace/objecttreebuilder/treebuilder.go @@ -9,6 +9,7 @@ import ( "go.uber.org/zap" "github.com/anyproto/any-sync/app" + "github.com/anyproto/any-sync/app/debugstat" "github.com/anyproto/any-sync/app/logger" "github.com/anyproto/any-sync/commonspace/object/acl/list" "github.com/anyproto/any-sync/commonspace/object/acl/syncacl" @@ -69,14 +70,47 @@ type treeBuilder struct { log logger.CtxLogger builder objecttree.BuildObjectTreeFunc + treeStats *synctree.TreeStatsCollector + debugStat debugstat.StatService spaceId string aclList list.AclList treesUsed *atomic.Int32 isClosed *atomic.Bool } +func (t *treeBuilder) ProvideStat() any { + return debugStat{ + TreeStats: t.treeStats.Collect(), + SpaceId: t.spaceId, + } +} + +func (t *treeBuilder) StatId() string { + return t.spaceId +} + +func (t *treeBuilder) StatType() string { + return CName +} + +func (t *treeBuilder) Run(ctx context.Context) (err error) { + t.debugStat.AddProvider(t) + return +} + +func (t *treeBuilder) Close(ctx context.Context) (err error) { + t.debugStat.RemoveProvider(t) + return +} + func (t *treeBuilder) Init(a *app.App) (err error) { state := a.MustComponent(spacestate.CName).(*spacestate.SpaceState) + comp, ok := a.Component(debugstat.CName).(debugstat.StatService) + if !ok { + comp = debugstat.NewNoOp() + } + t.treeStats = synctree.NewTreeStatsCollector(state.SpaceId) + t.debugStat = comp t.spaceId = state.SpaceId t.isClosed = state.SpaceIsClosed t.treesUsed = state.TreesUsed @@ -119,6 +153,7 @@ func (t *treeBuilder) BuildTree(ctx context.Context, id string, opts BuildTreeOp PeerGetter: t.peerManager, BuildObjectTree: treeBuilder, ValidateObjectTree: opts.TreeValidator, + StatsCollector: t.treeStats, } t.treesUsed.Add(1) t.log.Debug("incrementing counter", zap.String("id", id), zap.Int32("trees", t.treesUsed.Load())) diff --git a/commonspace/peermanager/mock_peermanager/mock_peermanager.go b/commonspace/peermanager/mock_peermanager/mock_peermanager.go index 23a82698..d924bbab 100644 --- a/commonspace/peermanager/mock_peermanager/mock_peermanager.go +++ b/commonspace/peermanager/mock_peermanager/mock_peermanager.go @@ -5,7 +5,6 @@ // // mockgen -destination mock_peermanager/mock_peermanager.go github.com/anyproto/any-sync/commonspace/peermanager PeerManager // - // Package mock_peermanager is a generated GoMock package. 
package mock_peermanager diff --git a/commonspace/settings/settingsstate/mock_settingsstate/mock_settingsstate.go b/commonspace/settings/settingsstate/mock_settingsstate/mock_settingsstate.go index 925fd320..8f64e354 100644 --- a/commonspace/settings/settingsstate/mock_settingsstate/mock_settingsstate.go +++ b/commonspace/settings/settingsstate/mock_settingsstate/mock_settingsstate.go @@ -5,7 +5,6 @@ // // mockgen -destination mock_settingsstate/mock_settingsstate.go github.com/anyproto/any-sync/commonspace/settings/settingsstate StateBuilder,ChangeFactory // - // Package mock_settingsstate is a generated GoMock package. package mock_settingsstate diff --git a/commonspace/space.go b/commonspace/space.go index 7ad1a5d1..8ea543bf 100644 --- a/commonspace/space.go +++ b/commonspace/space.go @@ -13,8 +13,10 @@ import ( "github.com/anyproto/any-sync/app" "github.com/anyproto/any-sync/commonspace/acl/aclclient" "github.com/anyproto/any-sync/commonspace/headsync" + "github.com/anyproto/any-sync/commonspace/headsync/headstorage" "github.com/anyproto/any-sync/commonspace/object/acl/list" "github.com/anyproto/any-sync/commonspace/object/acl/syncacl" + "github.com/anyproto/any-sync/commonspace/object/keyvalue/kvinterfaces" "github.com/anyproto/any-sync/commonspace/object/treesyncer" "github.com/anyproto/any-sync/commonspace/objecttreebuilder" "github.com/anyproto/any-sync/commonspace/peermanager" @@ -27,35 +29,8 @@ import ( "github.com/anyproto/any-sync/commonspace/syncstatus" "github.com/anyproto/any-sync/net/peer" "github.com/anyproto/any-sync/net/streampool" - "github.com/anyproto/any-sync/util/crypto" ) -type SpaceCreatePayload struct { - // SigningKey is the signing key of the owner - SigningKey crypto.PrivKey - // SpaceType is an arbitrary string - SpaceType string - // ReplicationKey is a key which is to be used to determine the node where the space should be held - ReplicationKey uint64 - // SpacePayload is an arbitrary payload related to space type - SpacePayload []byte - // MasterKey is the master key of the owner - MasterKey crypto.PrivKey - // ReadKey is the first read key of space - ReadKey crypto.SymKey - // MetadataKey is the first metadata key of space - MetadataKey crypto.PrivKey - // Metadata is the metadata of the owner - Metadata []byte -} - -type SpaceDerivePayload struct { - SigningKey crypto.PrivKey - MasterKey crypto.PrivKey - SpaceType string - SpacePayload []byte -} - type SpaceDescription struct { SpaceHeader *spacesyncproto.RawSpaceHeaderWithId AclId string @@ -82,6 +57,7 @@ type Space interface { AclClient() aclclient.AclSpaceClient SyncStatus() syncstatus.StatusUpdater Storage() spacestorage.SpaceStorage + KeyValue() kvinterfaces.KeyValueService DeleteTree(ctx context.Context, id string) (err error) GetNodePeers(ctx context.Context) (peer []peer.Peer, err error) @@ -109,6 +85,7 @@ type space struct { settings settings.Settings storage spacestorage.SpaceStorage aclClient aclclient.AclSpaceClient + keyValue kvinterfaces.KeyValueService aclList list.AclList creationTime time.Time } @@ -145,8 +122,17 @@ func (s *space) StoredIds() []string { return s.headSync.ExternalIds() } -func (s *space) DebugAllHeads() []headsync.TreeHeads { - return s.headSync.DebugAllHeads() +func (s *space) DebugAllHeads() (heads []headsync.TreeHeads) { + s.storage.HeadStorage().IterateEntries(context.Background(), headstorage.IterOpts{}, func(entry headstorage.HeadsEntry) (bool, error) { + if entry.CommonSnapshot != "" { + heads = append(heads, headsync.TreeHeads{ + Id: entry.Id, + Heads: 
entry.Heads, + }) + } + return true, nil + }) + return heads } func (s *space) DeleteTree(ctx context.Context, id string) (err error) { @@ -211,6 +197,7 @@ func (s *space) Init(ctx context.Context) (err error) { s.streamPool = s.app.MustComponent(streampool.CName).(streampool.StreamPool) s.treeSyncer = s.app.MustComponent(treesyncer.CName).(treesyncer.TreeSyncer) s.aclClient = s.app.MustComponent(aclclient.CName).(aclclient.AclSpaceClient) + s.keyValue = s.app.MustComponent(kvinterfaces.CName).(kvinterfaces.KeyValueService) return } @@ -218,6 +205,10 @@ func (s *space) SyncStatus() syncstatus.StatusUpdater { return s.syncStatus } +func (s *space) KeyValue() kvinterfaces.KeyValueService { + return s.keyValue +} + func (s *space) Storage() spacestorage.SpaceStorage { return s.storage } diff --git a/commonspace/payloads.go b/commonspace/spacepayloads/payloads.go similarity index 89% rename from commonspace/payloads.go rename to commonspace/spacepayloads/payloads.go index 269a46cd..ecb10a0d 100644 --- a/commonspace/payloads.go +++ b/commonspace/spacepayloads/payloads.go @@ -1,9 +1,9 @@ -package commonspace +package spacepayloads import ( + "crypto/rand" "errors" "hash/fnv" - "math/rand" "strconv" "strings" "time" @@ -21,6 +21,32 @@ import ( "github.com/anyproto/any-sync/util/crypto" ) +type SpaceCreatePayload struct { + // SigningKey is the signing key of the owner + SigningKey crypto.PrivKey + // SpaceType is an arbitrary string + SpaceType string + // ReplicationKey is a key which is to be used to determine the node where the space should be held + ReplicationKey uint64 + // SpacePayload is an arbitrary payload related to space type + SpacePayload []byte + // MasterKey is the master key of the owner + MasterKey crypto.PrivKey + // ReadKey is the first read key of space + ReadKey crypto.SymKey + // MetadataKey is the first metadata key of space + MetadataKey crypto.PrivKey + // Metadata is the metadata of the owner + Metadata []byte +} + +type SpaceDerivePayload struct { + SigningKey crypto.PrivKey + MasterKey crypto.PrivKey + SpaceType string + SpacePayload []byte +} + const ( SpaceReserved = "any-sync.space" ) @@ -113,7 +139,7 @@ func StoragePayloadForSpaceCreate(payload SpaceCreatePayload) (storagePayload sp return } -func storagePayloadForSpaceDerive(payload SpaceDerivePayload) (storagePayload spacestorage.SpaceStorageCreatePayload, err error) { +func StoragePayloadForSpaceDerive(payload SpaceDerivePayload) (storagePayload spacestorage.SpaceStorageCreatePayload, err error) { // marshalling keys identity, err := payload.SigningKey.GetPublic().Marshall() if err != nil { @@ -192,7 +218,7 @@ func storagePayloadForSpaceDerive(payload SpaceDerivePayload) (storagePayload sp return } -func validateSpaceStorageCreatePayload(payload spacestorage.SpaceStorageCreatePayload) (err error) { +func ValidateSpaceStorageCreatePayload(payload spacestorage.SpaceStorageCreatePayload) (err error) { err = ValidateSpaceHeader(payload.SpaceHeaderWithId, nil) if err != nil { return @@ -328,3 +354,7 @@ func validateCreateSpaceSettingsPayload(rawWithId *treechangeproto.RawTreeChange return } + +func NewSpaceId(id string, repKey uint64) string { + return strings.Join([]string{id, strconv.FormatUint(repKey, 36)}, ".") +} diff --git a/commonspace/payloads_test.go b/commonspace/spacepayloads/payloads_test.go similarity index 98% rename from commonspace/payloads_test.go rename to commonspace/spacepayloads/payloads_test.go index fbfe0f10..bd7ec235 100644 --- a/commonspace/payloads_test.go +++ 
b/commonspace/spacepayloads/payloads_test.go @@ -1,4 +1,4 @@ -package commonspace +package spacepayloads import ( "fmt" @@ -7,6 +7,9 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/anyproto/any-sync/commonspace/object/accountdata" "github.com/anyproto/any-sync/commonspace/object/acl/aclrecordproto" "github.com/anyproto/any-sync/commonspace/object/tree/objecttree" @@ -16,8 +19,6 @@ import ( "github.com/anyproto/any-sync/consensus/consensusproto" "github.com/anyproto/any-sync/util/cidutil" "github.com/anyproto/any-sync/util/crypto" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestSuccessHeaderPayloadForSpaceCreate(t *testing.T) { @@ -438,7 +439,7 @@ func TestSuccessSameIds(t *testing.T) { SpaceHeaderWithId: rawHeaderWithId, SpaceSettingsWithId: rawSettingsPayload, } - err = validateSpaceStorageCreatePayload(spacePayload) + err = ValidateSpaceStorageCreatePayload(spacePayload) require.NoError(t, err) } @@ -455,7 +456,7 @@ func TestFailWithAclWrongSpaceId(t *testing.T) { SpaceHeaderWithId: rawHeaderWithId, SpaceSettingsWithId: rawSettingsPayload, } - err = validateSpaceStorageCreatePayload(spacePayload) + err = ValidateSpaceStorageCreatePayload(spacePayload) assert.EqualErrorf(t, err, spacestorage.ErrIncorrectSpaceHeader.Error(), "Error should be: %v, got: %v", spacestorage.ErrIncorrectSpaceHeader, err) } @@ -472,7 +473,7 @@ func TestFailWithSettingsWrongSpaceId(t *testing.T) { SpaceHeaderWithId: rawHeaderWithId, SpaceSettingsWithId: rawSettingsPayload, } - err = validateSpaceStorageCreatePayload(spacePayload) + err = ValidateSpaceStorageCreatePayload(spacePayload) assert.EqualErrorf(t, err, spacestorage.ErrIncorrectSpaceHeader.Error(), "Error should be: %v, got: %v", spacestorage.ErrIncorrectSpaceHeader, err) } @@ -489,7 +490,7 @@ func TestFailWithWrongAclHeadIdInSettingsPayload(t *testing.T) { SpaceHeaderWithId: rawHeaderWithId, SpaceSettingsWithId: rawSettingsPayload, } - err = validateSpaceStorageCreatePayload(spacePayload) + err = ValidateSpaceStorageCreatePayload(spacePayload) assert.EqualErrorf(t, err, spacestorage.ErrIncorrectSpaceHeader.Error(), "Error should be: %v, got: %v", spacestorage.ErrIncorrectSpaceHeader, err) } diff --git a/commonspace/spacerpc_test.go b/commonspace/spacerpc_test.go index 273ad3fd..dd44695b 100644 --- a/commonspace/spacerpc_test.go +++ b/commonspace/spacerpc_test.go @@ -93,6 +93,16 @@ type RpcServer struct { sync.Mutex } +func (r *RpcServer) StoreDiff(ctx2 context.Context, request *spacesyncproto.StoreDiffRequest) (*spacesyncproto.StoreDiffResponse, error) { + //TODO implement me + panic("implement me") +} + +func (r *RpcServer) StoreElements(stream spacesyncproto.DRPCSpaceSync_StoreElementsStream) error { + //TODO implement me + panic("implement me") +} + func NewRpcServer() *RpcServer { return &RpcServer{ spaces: make(map[string]Space), diff --git a/commonspace/spaceservice.go b/commonspace/spaceservice.go index b95c346d..630e15c2 100644 --- a/commonspace/spaceservice.go +++ b/commonspace/spaceservice.go @@ -13,7 +13,10 @@ import ( "github.com/anyproto/any-sync/commonspace/acl/aclclient" "github.com/anyproto/any-sync/commonspace/deletionmanager" + "github.com/anyproto/any-sync/commonspace/object/keyvalue" + "github.com/anyproto/any-sync/commonspace/object/keyvalue/keyvaluestorage" "github.com/anyproto/any-sync/commonspace/object/treesyncer" + "github.com/anyproto/any-sync/commonspace/spacepayloads" 
"github.com/anyproto/any-sync/commonspace/sync" "github.com/anyproto/any-sync/commonspace/sync/objectsync" "github.com/anyproto/any-sync/net" @@ -58,16 +61,18 @@ type ctxKey int const AddSpaceCtxKey ctxKey = 0 type SpaceService interface { - DeriveSpace(ctx context.Context, payload SpaceDerivePayload) (string, error) - DeriveId(ctx context.Context, payload SpaceDerivePayload) (string, error) - CreateSpace(ctx context.Context, payload SpaceCreatePayload) (string, error) + DeriveSpace(ctx context.Context, payload spacepayloads.SpaceDerivePayload) (string, error) + DeriveId(ctx context.Context, payload spacepayloads.SpaceDerivePayload) (string, error) + CreateSpace(ctx context.Context, payload spacepayloads.SpaceCreatePayload) (string, error) NewSpace(ctx context.Context, id string, deps Deps) (sp Space, err error) app.Component } type Deps struct { - SyncStatus syncstatus.StatusUpdater - TreeSyncer treesyncer.TreeSyncer + SyncStatus syncstatus.StatusUpdater + TreeSyncer treesyncer.TreeSyncer + AccountService accountservice.Service + Indexer keyvaluestorage.Indexer } type spaceService struct { @@ -100,8 +105,8 @@ func (s *spaceService) Name() (name string) { return CName } -func (s *spaceService) CreateSpace(ctx context.Context, payload SpaceCreatePayload) (id string, err error) { - storageCreate, err := StoragePayloadForSpaceCreate(payload) +func (s *spaceService) CreateSpace(ctx context.Context, payload spacepayloads.SpaceCreatePayload) (id string, err error) { + storageCreate, err := spacepayloads.StoragePayloadForSpaceCreate(payload) if err != nil { return } @@ -116,8 +121,8 @@ func (s *spaceService) CreateSpace(ctx context.Context, payload SpaceCreatePaylo return store.Id(), store.Close(ctx) } -func (s *spaceService) DeriveId(ctx context.Context, payload SpaceDerivePayload) (id string, err error) { - storageCreate, err := storagePayloadForSpaceDerive(payload) +func (s *spaceService) DeriveId(ctx context.Context, payload spacepayloads.SpaceDerivePayload) (id string, err error) { + storageCreate, err := spacepayloads.StoragePayloadForSpaceDerive(payload) if err != nil { return } @@ -125,8 +130,8 @@ func (s *spaceService) DeriveId(ctx context.Context, payload SpaceDerivePayload) return } -func (s *spaceService) DeriveSpace(ctx context.Context, payload SpaceDerivePayload) (id string, err error) { - storageCreate, err := storagePayloadForSpaceDerive(payload) +func (s *spaceService) DeriveSpace(ctx context.Context, payload spacepayloads.SpaceDerivePayload) (id string, err error) { + storageCreate, err := spacepayloads.StoragePayloadForSpaceDerive(payload) if err != nil { return } @@ -176,13 +181,22 @@ func (s *spaceService) NewSpace(ctx context.Context, id string, deps Deps) (Spac return nil, err } spaceApp := s.app.ChildApp() + if deps.AccountService != nil { + spaceApp.Register(deps.AccountService) + } + var keyValueIndexer keyvaluestorage.Indexer = keyvaluestorage.NoOpIndexer{} + if deps.Indexer != nil { + keyValueIndexer = deps.Indexer + } spaceApp.Register(state). Register(deps.SyncStatus). Register(peerManager). Register(st). + Register(keyValueIndexer). Register(objectsync.New()). Register(sync.NewSyncService()). Register(syncacl.New()). + Register(keyvalue.New()). Register(deletionstate.New()). Register(deletionmanager.New()). Register(settings.New()). 
@@ -300,7 +314,7 @@ func (s *spaceService) spacePullWithPeer(ctx context.Context, p peer.Peer, id st } func (s *spaceService) createSpaceStorage(ctx context.Context, payload spacestorage.SpaceStorageCreatePayload) (spacestorage.SpaceStorage, error) { - err := validateSpaceStorageCreatePayload(payload) + err := spacepayloads.ValidateSpaceStorageCreatePayload(payload) if err != nil { return nil, err } diff --git a/commonspace/spacestorage/migration/spacemigrator.go b/commonspace/spacestorage/migration/spacemigrator.go index 92cb2fb9..57885ff5 100644 --- a/commonspace/spacestorage/migration/spacemigrator.go +++ b/commonspace/spacestorage/migration/spacemigrator.go @@ -190,7 +190,7 @@ func (s *spaceMigrator) migrateHash(ctx context.Context, oldStorage oldstorage.S if err != nil { return err } - return newStorage.StateStorage().SetHash(ctx, spaceHash) + return newStorage.StateStorage().SetHash(ctx, spaceHash, spaceHash) } func (s *spaceMigrator) checkMigrated(ctx context.Context, id string) (bool, spacestorage.SpaceStorage) { diff --git a/commonspace/spacestorage/mock_spacestorage/mock_spacestorage.go b/commonspace/spacestorage/mock_spacestorage/mock_spacestorage.go index b0c9167d..045059c4 100644 --- a/commonspace/spacestorage/mock_spacestorage/mock_spacestorage.go +++ b/commonspace/spacestorage/mock_spacestorage/mock_spacestorage.go @@ -5,7 +5,6 @@ // // mockgen -destination mock_spacestorage/mock_spacestorage.go github.com/anyproto/any-sync/commonspace/spacestorage SpaceStorage // - // Package mock_spacestorage is a generated GoMock package. package mock_spacestorage diff --git a/commonspace/spacestorage/spacestorage.go b/commonspace/spacestorage/spacestorage.go index 30f6d258..2c3a5a7a 100644 --- a/commonspace/spacestorage/spacestorage.go +++ b/commonspace/spacestorage/spacestorage.go @@ -52,7 +52,7 @@ type SpaceStorageProvider interface { CreateSpaceStorage(ctx context.Context, payload SpaceStorageCreatePayload) (SpaceStorage, error) } -func Create(ctx context.Context, store anystore.DB, payload SpaceStorageCreatePayload) (SpaceStorage, error) { +func Create(ctx context.Context, store anystore.DB, payload SpaceStorageCreatePayload) (st SpaceStorage, err error) { spaceId := payload.SpaceHeaderWithId.Id state := statestorage.State{ AclId: payload.AclWithId.Id, @@ -60,7 +60,18 @@ func Create(ctx context.Context, store anystore.DB, payload SpaceStorageCreatePa SpaceId: payload.SpaceHeaderWithId.Id, SpaceHeader: payload.SpaceHeaderWithId.RawHeader, } - changesColl, err := store.Collection(ctx, objecttree.CollName) + tx, err := store.WriteTx(ctx) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + _ = tx.Rollback() + } else { + err = tx.Commit() + } + }() + changesColl, err := store.Collection(tx.Context(), objecttree.CollName) if err != nil { return nil, err } @@ -68,27 +79,27 @@ func Create(ctx context.Context, store anystore.DB, payload SpaceStorageCreatePa Fields: []string{objecttree.TreeKey, objecttree.OrderKey}, Unique: true, } - err = changesColl.EnsureIndex(ctx, orderIdx) + err = changesColl.EnsureIndex(tx.Context(), orderIdx) if err != nil { return nil, err } // TODO: put it in one transaction - stateStorage, err := statestorage.Create(ctx, state, store) + stateStorage, err := statestorage.CreateTx(tx.Context(), state, store) if err != nil { return nil, err } - headStorage, err := headstorage.New(ctx, store) + headStorage, err := headstorage.New(tx.Context(), store) if err != nil { return nil, err } - aclStorage, err := list.CreateStorage(ctx, 
&consensusproto.RawRecordWithId{ + aclStorage, err := list.CreateStorageTx(tx.Context(), &consensusproto.RawRecordWithId{ Payload: payload.AclWithId.Payload, Id: payload.AclWithId.Id, }, headStorage, store) if err != nil { return nil, err } - _, err = objecttree.CreateStorage(ctx, &treechangeproto.RawTreeChangeWithId{ + _, err = objecttree.CreateStorageTx(tx.Context(), &treechangeproto.RawTreeChangeWithId{ RawChange: payload.SpaceSettingsWithId.RawChange, Id: payload.SpaceSettingsWithId.Id, }, headStorage, store) diff --git a/commonspace/spacestorage_test.go b/commonspace/spacestorage_test.go new file mode 100644 index 00000000..a7cc038f --- /dev/null +++ b/commonspace/spacestorage_test.go @@ -0,0 +1,53 @@ +package commonspace + +import ( + "context" + "path/filepath" + "testing" + + anystore "github.com/anyproto/any-store" + "github.com/stretchr/testify/require" + + "github.com/anyproto/any-sync/commonspace/object/accountdata" + "github.com/anyproto/any-sync/commonspace/spacepayloads" + "github.com/anyproto/any-sync/commonspace/spacestorage" + "github.com/anyproto/any-sync/util/crypto" +) + +func newStorageCreatePayload(t *testing.T) spacestorage.SpaceStorageCreatePayload { + keys, err := accountdata.NewRandom() + require.NoError(t, err) + masterKey, _, err := crypto.GenerateRandomEd25519KeyPair() + require.NoError(t, err) + metaKey, _, err := crypto.GenerateRandomEd25519KeyPair() + require.NoError(t, err) + readKey := crypto.NewAES() + meta := []byte("account") + payload := spacepayloads.SpaceCreatePayload{ + SigningKey: keys.SignKey, + SpaceType: "space", + ReplicationKey: 10, + SpacePayload: nil, + MasterKey: masterKey, + ReadKey: readKey, + MetadataKey: metaKey, + Metadata: meta, + } + createSpace, err := spacepayloads.StoragePayloadForSpaceCreate(payload) + require.NoError(t, err) + return createSpace +} + +var ctx = context.Background() + +func TestCreateSpaceStorageFailed_EmptyStorage(t *testing.T) { + payload := newStorageCreatePayload(t) + store, err := anystore.Open(ctx, filepath.Join(t.TempDir(), "store.db"), nil) + require.NoError(t, err) + payload.SpaceSettingsWithId.RawChange = nil + _, err = spacestorage.Create(ctx, store, payload) + require.Error(t, err) + collNames, err := store.GetCollectionNames(ctx) + require.NoError(t, err) + require.Empty(t, collNames) +} \ No newline at end of file diff --git a/commonspace/spacesyncproto/mock_spacesyncproto/mock_spacesyncproto.go b/commonspace/spacesyncproto/mock_spacesyncproto/mock_spacesyncproto.go index a65c8c68..082e823e 100644 --- a/commonspace/spacesyncproto/mock_spacesyncproto/mock_spacesyncproto.go +++ b/commonspace/spacesyncproto/mock_spacesyncproto/mock_spacesyncproto.go @@ -5,7 +5,6 @@ // // mockgen -destination mock_spacesyncproto/mock_spacesyncproto.go github.com/anyproto/any-sync/commonspace/spacesyncproto DRPCSpaceSyncClient // - // Package mock_spacesyncproto is a generated GoMock package. package mock_spacesyncproto @@ -174,3 +173,33 @@ func (mr *MockDRPCSpaceSyncClientMockRecorder) SpacePush(arg0, arg1 any) *gomock mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SpacePush", reflect.TypeOf((*MockDRPCSpaceSyncClient)(nil).SpacePush), arg0, arg1) } + +// StoreDiff mocks base method. 
+func (m *MockDRPCSpaceSyncClient) StoreDiff(arg0 context.Context, arg1 *spacesyncproto.StoreDiffRequest) (*spacesyncproto.StoreDiffResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StoreDiff", arg0, arg1) + ret0, _ := ret[0].(*spacesyncproto.StoreDiffResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StoreDiff indicates an expected call of StoreDiff. +func (mr *MockDRPCSpaceSyncClientMockRecorder) StoreDiff(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StoreDiff", reflect.TypeOf((*MockDRPCSpaceSyncClient)(nil).StoreDiff), arg0, arg1) +} + +// StoreElements mocks base method. +func (m *MockDRPCSpaceSyncClient) StoreElements(arg0 context.Context) (spacesyncproto.DRPCSpaceSync_StoreElementsClient, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StoreElements", arg0) + ret0, _ := ret[0].(spacesyncproto.DRPCSpaceSync_StoreElementsClient) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StoreElements indicates an expected call of StoreElements. +func (mr *MockDRPCSpaceSyncClientMockRecorder) StoreElements(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StoreElements", reflect.TypeOf((*MockDRPCSpaceSyncClient)(nil).StoreElements), arg0) +} diff --git a/commonspace/spacesyncproto/protos/spacesync.proto b/commonspace/spacesyncproto/protos/spacesync.proto index e3b02c18..b14f76d6 100644 --- a/commonspace/spacesyncproto/protos/spacesync.proto +++ b/commonspace/spacesyncproto/protos/spacesync.proto @@ -20,6 +20,10 @@ enum ErrCodes { service SpaceSync { // HeadSync compares all objects and their hashes in a space rpc HeadSync(HeadSyncRequest) returns (HeadSyncResponse); + // StoreDiff compares all objects and their hashes in a space + rpc StoreDiff(StoreDiffRequest) returns (StoreDiffResponse); + // StoreElements exchanges elements between peers + rpc StoreElements(stream StoreKeyValue) returns (stream StoreKeyValue); // SpacePush sends new space to the node rpc SpacePush(SpacePushRequest) returns (SpacePushResponse); // SpacePull gets space from the remote peer @@ -63,7 +67,7 @@ message HeadSyncResultElement { message HeadSyncRequest { string spaceId = 1; repeated HeadSyncRange ranges = 2; - DiffType diffType = 3 [deprecated=true]; + DiffType diffType = 3; } // HeadSyncResponse is a response for HeadSync @@ -79,6 +83,7 @@ message ObjectSyncMessage { string replyId = 3; bytes payload = 4; string objectId = 5; + ObjectType objectType = 6; } // SpacePushRequest is a request to add space on a node containing only one acl record @@ -144,6 +149,12 @@ message ObjectDelete { string id = 1; } +// StoreHeader is a header for a store +message StoreHeader { + string spaceId = 1; + string storageName = 2; +} + // SpaceDelete is a message containing deleter peer id message SpaceDelete { string deleterPeerId = 1; @@ -196,8 +207,51 @@ message AclGetRecordsResponse { repeated bytes records = 1; } +message StoreDiffRequest { + string spaceId = 1; + repeated HeadSyncRange ranges = 2; +} + +message StoreDiffResponse { + repeated HeadSyncResult results = 1; +} + +message StoreKeyValue { + string keyPeerId = 1; + bytes value = 2; + bytes identitySignature = 3; + bytes peerSignature = 4; + string spaceId = 5; +} + +message StoreKeyValues { + repeated StoreKeyValue keyValues = 1; +} + +message StoreKeyInner { + bytes peer = 1; + bytes identity = 2; + bytes value = 3; + int64 timestampMicro = 4; + string aclHeadId = 5; + string key = 6; +} + +message 
StorageHeader { + string spaceId = 1; + string storageName = 2; +} + // DiffType is a type of diff enum DiffType { Initial = 0; - Precalculated = 1; -} \ No newline at end of file + V1 = 1; + V2 = 2; +} + +// ObjectType is a type of object +enum ObjectType { + Tree = 0; + Acl = 1; + KeyValue = 2; +} diff --git a/commonspace/spacesyncproto/spacesync.pb.go b/commonspace/spacesyncproto/spacesync.pb.go index 3f95b78e..3e4cfb5e 100644 --- a/commonspace/spacesyncproto/spacesync.pb.go +++ b/commonspace/spacesyncproto/spacesync.pb.go @@ -104,18 +104,21 @@ func (SpaceSubscriptionAction) EnumDescriptor() ([]byte, []int) { type DiffType int32 const ( - DiffType_Initial DiffType = 0 - DiffType_Precalculated DiffType = 1 + DiffType_Initial DiffType = 0 + DiffType_V1 DiffType = 1 + DiffType_V2 DiffType = 2 ) var DiffType_name = map[int32]string{ 0: "Initial", - 1: "Precalculated", + 1: "V1", + 2: "V2", } var DiffType_value = map[string]int32{ - "Initial": 0, - "Precalculated": 1, + "Initial": 0, + "V1": 1, + "V2": 2, } func (x DiffType) String() string { @@ -126,6 +129,35 @@ func (DiffType) EnumDescriptor() ([]byte, []int) { return fileDescriptor_80e49f1f4ac27799, []int{2} } +// ObjectType is a type of object +type ObjectType int32 + +const ( + ObjectType_Tree ObjectType = 0 + ObjectType_Acl ObjectType = 1 + ObjectType_KeyValue ObjectType = 2 +) + +var ObjectType_name = map[int32]string{ + 0: "Tree", + 1: "Acl", + 2: "KeyValue", +} + +var ObjectType_value = map[string]int32{ + "Tree": 0, + "Acl": 1, + "KeyValue": 2, +} + +func (x ObjectType) String() string { + return proto.EnumName(ObjectType_name, int32(x)) +} + +func (ObjectType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_80e49f1f4ac27799, []int{3} +} + // HeadSyncRange presenting a request for one range type HeadSyncRange struct { From uint64 `protobuf:"varint,1,opt,name=from,proto3" json:"from,omitempty"` @@ -337,7 +369,7 @@ func (m *HeadSyncResultElement) GetHead() string { type HeadSyncRequest struct { SpaceId string `protobuf:"bytes,1,opt,name=spaceId,proto3" json:"spaceId,omitempty"` Ranges []*HeadSyncRange `protobuf:"bytes,2,rep,name=ranges,proto3" json:"ranges,omitempty"` - DiffType DiffType `protobuf:"varint,3,opt,name=diffType,proto3,enum=spacesync.DiffType" json:"diffType,omitempty"` // Deprecated: Do not use. + DiffType DiffType `protobuf:"varint,3,opt,name=diffType,proto3,enum=spacesync.DiffType" json:"diffType,omitempty"` } func (m *HeadSyncRequest) Reset() { *m = HeadSyncRequest{} } @@ -395,7 +427,6 @@ func (m *HeadSyncRequest) GetRanges() []*HeadSyncRange { return nil } -// Deprecated: Do not use. 
func (m *HeadSyncRequest) GetDiffType() DiffType { if m != nil { return m.DiffType @@ -466,11 +497,12 @@ func (m *HeadSyncResponse) GetDiffType() DiffType { // ObjectSyncMessage is a message sent on object sync type ObjectSyncMessage struct { - SpaceId string `protobuf:"bytes,1,opt,name=spaceId,proto3" json:"spaceId,omitempty"` - RequestId string `protobuf:"bytes,2,opt,name=requestId,proto3" json:"requestId,omitempty"` - ReplyId string `protobuf:"bytes,3,opt,name=replyId,proto3" json:"replyId,omitempty"` - Payload []byte `protobuf:"bytes,4,opt,name=payload,proto3" json:"payload,omitempty"` - ObjectId string `protobuf:"bytes,5,opt,name=objectId,proto3" json:"objectId,omitempty"` + SpaceId string `protobuf:"bytes,1,opt,name=spaceId,proto3" json:"spaceId,omitempty"` + RequestId string `protobuf:"bytes,2,opt,name=requestId,proto3" json:"requestId,omitempty"` + ReplyId string `protobuf:"bytes,3,opt,name=replyId,proto3" json:"replyId,omitempty"` + Payload []byte `protobuf:"bytes,4,opt,name=payload,proto3" json:"payload,omitempty"` + ObjectId string `protobuf:"bytes,5,opt,name=objectId,proto3" json:"objectId,omitempty"` + ObjectType ObjectType `protobuf:"varint,6,opt,name=objectType,proto3,enum=spacesync.ObjectType" json:"objectType,omitempty"` } func (m *ObjectSyncMessage) Reset() { *m = ObjectSyncMessage{} } @@ -549,6 +581,13 @@ func (m *ObjectSyncMessage) GetObjectId() string { return "" } +func (m *ObjectSyncMessage) GetObjectType() ObjectType { + if m != nil { + return m.ObjectType + } + return ObjectType_Tree +} + // SpacePushRequest is a request to add space on a node containing only one acl record type SpacePushRequest struct { Payload *SpacePayload `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"` @@ -1209,6 +1248,67 @@ func (m *ObjectDelete) GetId() string { return "" } +// StoreHeader is a header for a store +type StoreHeader struct { + SpaceId string `protobuf:"bytes,1,opt,name=spaceId,proto3" json:"spaceId,omitempty"` + StorageName string `protobuf:"bytes,2,opt,name=storageName,proto3" json:"storageName,omitempty"` +} + +func (m *StoreHeader) Reset() { *m = StoreHeader{} } +func (m *StoreHeader) String() string { return proto.CompactTextString(m) } +func (*StoreHeader) ProtoMessage() {} +func (*StoreHeader) Descriptor() ([]byte, []int) { + return fileDescriptor_80e49f1f4ac27799, []int{16} +} +func (m *StoreHeader) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StoreHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StoreHeader.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StoreHeader) XXX_MarshalAppend(b []byte, newLen int) ([]byte, error) { + b = b[:newLen] + _, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b, nil +} +func (m *StoreHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_StoreHeader.Merge(m, src) +} +func (m *StoreHeader) XXX_Size() int { + return m.Size() +} +func (m *StoreHeader) XXX_DiscardUnknown() { + xxx_messageInfo_StoreHeader.DiscardUnknown(m) +} + +var xxx_messageInfo_StoreHeader proto.InternalMessageInfo + +func (m *StoreHeader) GetSpaceId() string { + if m != nil { + return m.SpaceId + } + return "" +} + +func (m *StoreHeader) GetStorageName() string { + if m != nil { + return m.StorageName + } + return "" +} + // SpaceDelete is a message containing deleter peer id type SpaceDelete struct { 
DeleterPeerId string `protobuf:"bytes,1,opt,name=deleterPeerId,proto3" json:"deleterPeerId,omitempty"` @@ -1218,7 +1318,7 @@ func (m *SpaceDelete) Reset() { *m = SpaceDelete{} } func (m *SpaceDelete) String() string { return proto.CompactTextString(m) } func (*SpaceDelete) ProtoMessage() {} func (*SpaceDelete) Descriptor() ([]byte, []int) { - return fileDescriptor_80e49f1f4ac27799, []int{16} + return fileDescriptor_80e49f1f4ac27799, []int{17} } func (m *SpaceDelete) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1272,7 +1372,7 @@ func (m *SpaceSettingsSnapshot) Reset() { *m = SpaceSettingsSnapshot{} } func (m *SpaceSettingsSnapshot) String() string { return proto.CompactTextString(m) } func (*SpaceSettingsSnapshot) ProtoMessage() {} func (*SpaceSettingsSnapshot) Descriptor() ([]byte, []int) { - return fileDescriptor_80e49f1f4ac27799, []int{17} + return fileDescriptor_80e49f1f4ac27799, []int{18} } func (m *SpaceSettingsSnapshot) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1333,7 +1433,7 @@ func (m *SettingsData) Reset() { *m = SettingsData{} } func (m *SettingsData) String() string { return proto.CompactTextString(m) } func (*SettingsData) ProtoMessage() {} func (*SettingsData) Descriptor() ([]byte, []int) { - return fileDescriptor_80e49f1f4ac27799, []int{18} + return fileDescriptor_80e49f1f4ac27799, []int{19} } func (m *SettingsData) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1393,7 +1493,7 @@ func (m *SpaceSubscription) Reset() { *m = SpaceSubscription{} } func (m *SpaceSubscription) String() string { return proto.CompactTextString(m) } func (*SpaceSubscription) ProtoMessage() {} func (*SpaceSubscription) Descriptor() ([]byte, []int) { - return fileDescriptor_80e49f1f4ac27799, []int{19} + return fileDescriptor_80e49f1f4ac27799, []int{20} } func (m *SpaceSubscription) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1454,7 +1554,7 @@ func (m *AclAddRecordRequest) Reset() { *m = AclAddRecordRequest{} } func (m *AclAddRecordRequest) String() string { return proto.CompactTextString(m) } func (*AclAddRecordRequest) ProtoMessage() {} func (*AclAddRecordRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_80e49f1f4ac27799, []int{20} + return fileDescriptor_80e49f1f4ac27799, []int{21} } func (m *AclAddRecordRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1515,7 +1615,7 @@ func (m *AclAddRecordResponse) Reset() { *m = AclAddRecordResponse{} } func (m *AclAddRecordResponse) String() string { return proto.CompactTextString(m) } func (*AclAddRecordResponse) ProtoMessage() {} func (*AclAddRecordResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_80e49f1f4ac27799, []int{21} + return fileDescriptor_80e49f1f4ac27799, []int{22} } func (m *AclAddRecordResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1577,7 +1677,7 @@ func (m *AclGetRecordsRequest) Reset() { *m = AclGetRecordsRequest{} } func (m *AclGetRecordsRequest) String() string { return proto.CompactTextString(m) } func (*AclGetRecordsRequest) ProtoMessage() {} func (*AclGetRecordsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_80e49f1f4ac27799, []int{22} + return fileDescriptor_80e49f1f4ac27799, []int{23} } func (m *AclGetRecordsRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1637,7 +1737,7 @@ func (m *AclGetRecordsResponse) Reset() { *m = AclGetRecordsResponse{} } func (m *AclGetRecordsResponse) String() string { return proto.CompactTextString(m) } func (*AclGetRecordsResponse) ProtoMessage() {} func 
(*AclGetRecordsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_80e49f1f4ac27799, []int{23} + return fileDescriptor_80e49f1f4ac27799, []int{24} } func (m *AclGetRecordsResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1681,10 +1781,411 @@ func (m *AclGetRecordsResponse) GetRecords() [][]byte { return nil } +type StoreDiffRequest struct { + SpaceId string `protobuf:"bytes,1,opt,name=spaceId,proto3" json:"spaceId,omitempty"` + Ranges []*HeadSyncRange `protobuf:"bytes,2,rep,name=ranges,proto3" json:"ranges,omitempty"` +} + +func (m *StoreDiffRequest) Reset() { *m = StoreDiffRequest{} } +func (m *StoreDiffRequest) String() string { return proto.CompactTextString(m) } +func (*StoreDiffRequest) ProtoMessage() {} +func (*StoreDiffRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_80e49f1f4ac27799, []int{25} +} +func (m *StoreDiffRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StoreDiffRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StoreDiffRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StoreDiffRequest) XXX_MarshalAppend(b []byte, newLen int) ([]byte, error) { + b = b[:newLen] + _, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b, nil +} +func (m *StoreDiffRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StoreDiffRequest.Merge(m, src) +} +func (m *StoreDiffRequest) XXX_Size() int { + return m.Size() +} +func (m *StoreDiffRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StoreDiffRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StoreDiffRequest proto.InternalMessageInfo + +func (m *StoreDiffRequest) GetSpaceId() string { + if m != nil { + return m.SpaceId + } + return "" +} + +func (m *StoreDiffRequest) GetRanges() []*HeadSyncRange { + if m != nil { + return m.Ranges + } + return nil +} + +type StoreDiffResponse struct { + Results []*HeadSyncResult `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` +} + +func (m *StoreDiffResponse) Reset() { *m = StoreDiffResponse{} } +func (m *StoreDiffResponse) String() string { return proto.CompactTextString(m) } +func (*StoreDiffResponse) ProtoMessage() {} +func (*StoreDiffResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_80e49f1f4ac27799, []int{26} +} +func (m *StoreDiffResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StoreDiffResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StoreDiffResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StoreDiffResponse) XXX_MarshalAppend(b []byte, newLen int) ([]byte, error) { + b = b[:newLen] + _, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b, nil +} +func (m *StoreDiffResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StoreDiffResponse.Merge(m, src) +} +func (m *StoreDiffResponse) XXX_Size() int { + return m.Size() +} +func (m *StoreDiffResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StoreDiffResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StoreDiffResponse proto.InternalMessageInfo + +func (m *StoreDiffResponse) GetResults() []*HeadSyncResult { + if m != nil { + return m.Results + } + return nil 
+} + +type StoreKeyValue struct { + KeyPeerId string `protobuf:"bytes,1,opt,name=keyPeerId,proto3" json:"keyPeerId,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + IdentitySignature []byte `protobuf:"bytes,3,opt,name=identitySignature,proto3" json:"identitySignature,omitempty"` + PeerSignature []byte `protobuf:"bytes,4,opt,name=peerSignature,proto3" json:"peerSignature,omitempty"` + SpaceId string `protobuf:"bytes,5,opt,name=spaceId,proto3" json:"spaceId,omitempty"` +} + +func (m *StoreKeyValue) Reset() { *m = StoreKeyValue{} } +func (m *StoreKeyValue) String() string { return proto.CompactTextString(m) } +func (*StoreKeyValue) ProtoMessage() {} +func (*StoreKeyValue) Descriptor() ([]byte, []int) { + return fileDescriptor_80e49f1f4ac27799, []int{27} +} +func (m *StoreKeyValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StoreKeyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StoreKeyValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StoreKeyValue) XXX_MarshalAppend(b []byte, newLen int) ([]byte, error) { + b = b[:newLen] + _, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b, nil +} +func (m *StoreKeyValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_StoreKeyValue.Merge(m, src) +} +func (m *StoreKeyValue) XXX_Size() int { + return m.Size() +} +func (m *StoreKeyValue) XXX_DiscardUnknown() { + xxx_messageInfo_StoreKeyValue.DiscardUnknown(m) +} + +var xxx_messageInfo_StoreKeyValue proto.InternalMessageInfo + +func (m *StoreKeyValue) GetKeyPeerId() string { + if m != nil { + return m.KeyPeerId + } + return "" +} + +func (m *StoreKeyValue) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *StoreKeyValue) GetIdentitySignature() []byte { + if m != nil { + return m.IdentitySignature + } + return nil +} + +func (m *StoreKeyValue) GetPeerSignature() []byte { + if m != nil { + return m.PeerSignature + } + return nil +} + +func (m *StoreKeyValue) GetSpaceId() string { + if m != nil { + return m.SpaceId + } + return "" +} + +type StoreKeyValues struct { + KeyValues []*StoreKeyValue `protobuf:"bytes,1,rep,name=keyValues,proto3" json:"keyValues,omitempty"` +} + +func (m *StoreKeyValues) Reset() { *m = StoreKeyValues{} } +func (m *StoreKeyValues) String() string { return proto.CompactTextString(m) } +func (*StoreKeyValues) ProtoMessage() {} +func (*StoreKeyValues) Descriptor() ([]byte, []int) { + return fileDescriptor_80e49f1f4ac27799, []int{28} +} +func (m *StoreKeyValues) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StoreKeyValues) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StoreKeyValues.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StoreKeyValues) XXX_MarshalAppend(b []byte, newLen int) ([]byte, error) { + b = b[:newLen] + _, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b, nil +} +func (m *StoreKeyValues) XXX_Merge(src proto.Message) { + xxx_messageInfo_StoreKeyValues.Merge(m, src) +} +func (m *StoreKeyValues) XXX_Size() int { + return m.Size() +} +func (m *StoreKeyValues) XXX_DiscardUnknown() { + 
xxx_messageInfo_StoreKeyValues.DiscardUnknown(m) +} + +var xxx_messageInfo_StoreKeyValues proto.InternalMessageInfo + +func (m *StoreKeyValues) GetKeyValues() []*StoreKeyValue { + if m != nil { + return m.KeyValues + } + return nil +} + +type StoreKeyInner struct { + Peer []byte `protobuf:"bytes,1,opt,name=peer,proto3" json:"peer,omitempty"` + Identity []byte `protobuf:"bytes,2,opt,name=identity,proto3" json:"identity,omitempty"` + Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` + TimestampMicro int64 `protobuf:"varint,4,opt,name=timestampMicro,proto3" json:"timestampMicro,omitempty"` + AclHeadId string `protobuf:"bytes,5,opt,name=aclHeadId,proto3" json:"aclHeadId,omitempty"` + Key string `protobuf:"bytes,6,opt,name=key,proto3" json:"key,omitempty"` +} + +func (m *StoreKeyInner) Reset() { *m = StoreKeyInner{} } +func (m *StoreKeyInner) String() string { return proto.CompactTextString(m) } +func (*StoreKeyInner) ProtoMessage() {} +func (*StoreKeyInner) Descriptor() ([]byte, []int) { + return fileDescriptor_80e49f1f4ac27799, []int{29} +} +func (m *StoreKeyInner) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StoreKeyInner) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StoreKeyInner.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StoreKeyInner) XXX_MarshalAppend(b []byte, newLen int) ([]byte, error) { + b = b[:newLen] + _, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b, nil +} +func (m *StoreKeyInner) XXX_Merge(src proto.Message) { + xxx_messageInfo_StoreKeyInner.Merge(m, src) +} +func (m *StoreKeyInner) XXX_Size() int { + return m.Size() +} +func (m *StoreKeyInner) XXX_DiscardUnknown() { + xxx_messageInfo_StoreKeyInner.DiscardUnknown(m) +} + +var xxx_messageInfo_StoreKeyInner proto.InternalMessageInfo + +func (m *StoreKeyInner) GetPeer() []byte { + if m != nil { + return m.Peer + } + return nil +} + +func (m *StoreKeyInner) GetIdentity() []byte { + if m != nil { + return m.Identity + } + return nil +} + +func (m *StoreKeyInner) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *StoreKeyInner) GetTimestampMicro() int64 { + if m != nil { + return m.TimestampMicro + } + return 0 +} + +func (m *StoreKeyInner) GetAclHeadId() string { + if m != nil { + return m.AclHeadId + } + return "" +} + +func (m *StoreKeyInner) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +type StorageHeader struct { + SpaceId string `protobuf:"bytes,1,opt,name=spaceId,proto3" json:"spaceId,omitempty"` + StorageName string `protobuf:"bytes,2,opt,name=storageName,proto3" json:"storageName,omitempty"` +} + +func (m *StorageHeader) Reset() { *m = StorageHeader{} } +func (m *StorageHeader) String() string { return proto.CompactTextString(m) } +func (*StorageHeader) ProtoMessage() {} +func (*StorageHeader) Descriptor() ([]byte, []int) { + return fileDescriptor_80e49f1f4ac27799, []int{30} +} +func (m *StorageHeader) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StorageHeader.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StorageHeader) 
XXX_MarshalAppend(b []byte, newLen int) ([]byte, error) { + b = b[:newLen] + _, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b, nil +} +func (m *StorageHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageHeader.Merge(m, src) +} +func (m *StorageHeader) XXX_Size() int { + return m.Size() +} +func (m *StorageHeader) XXX_DiscardUnknown() { + xxx_messageInfo_StorageHeader.DiscardUnknown(m) +} + +var xxx_messageInfo_StorageHeader proto.InternalMessageInfo + +func (m *StorageHeader) GetSpaceId() string { + if m != nil { + return m.SpaceId + } + return "" +} + +func (m *StorageHeader) GetStorageName() string { + if m != nil { + return m.StorageName + } + return "" +} + func init() { proto.RegisterEnum("spacesync.ErrCodes", ErrCodes_name, ErrCodes_value) proto.RegisterEnum("spacesync.SpaceSubscriptionAction", SpaceSubscriptionAction_name, SpaceSubscriptionAction_value) proto.RegisterEnum("spacesync.DiffType", DiffType_name, DiffType_value) + proto.RegisterEnum("spacesync.ObjectType", ObjectType_name, ObjectType_value) proto.RegisterType((*HeadSyncRange)(nil), "spacesync.HeadSyncRange") proto.RegisterType((*HeadSyncResult)(nil), "spacesync.HeadSyncResult") proto.RegisterType((*HeadSyncResultElement)(nil), "spacesync.HeadSyncResultElement") @@ -1701,6 +2202,7 @@ func init() { proto.RegisterType((*RawSpaceHeaderWithId)(nil), "spacesync.RawSpaceHeaderWithId") proto.RegisterType((*SpaceSettingsContent)(nil), "spacesync.SpaceSettingsContent") proto.RegisterType((*ObjectDelete)(nil), "spacesync.ObjectDelete") + proto.RegisterType((*StoreHeader)(nil), "spacesync.StoreHeader") proto.RegisterType((*SpaceDelete)(nil), "spacesync.SpaceDelete") proto.RegisterType((*SpaceSettingsSnapshot)(nil), "spacesync.SpaceSettingsSnapshot") proto.RegisterType((*SettingsData)(nil), "spacesync.SettingsData") @@ -1709,6 +2211,12 @@ func init() { proto.RegisterType((*AclAddRecordResponse)(nil), "spacesync.AclAddRecordResponse") proto.RegisterType((*AclGetRecordsRequest)(nil), "spacesync.AclGetRecordsRequest") proto.RegisterType((*AclGetRecordsResponse)(nil), "spacesync.AclGetRecordsResponse") + proto.RegisterType((*StoreDiffRequest)(nil), "spacesync.StoreDiffRequest") + proto.RegisterType((*StoreDiffResponse)(nil), "spacesync.StoreDiffResponse") + proto.RegisterType((*StoreKeyValue)(nil), "spacesync.StoreKeyValue") + proto.RegisterType((*StoreKeyValues)(nil), "spacesync.StoreKeyValues") + proto.RegisterType((*StoreKeyInner)(nil), "spacesync.StoreKeyInner") + proto.RegisterType((*StorageHeader)(nil), "spacesync.StorageHeader") } func init() { @@ -1716,89 +2224,106 @@ func init() { } var fileDescriptor_80e49f1f4ac27799 = []byte{ - // 1308 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x57, 0x3d, 0x6f, 0xdb, 0xc6, - 0x1b, 0x17, 0xe9, 0x17, 0x49, 0x8f, 0x29, 0x85, 0x39, 0x2b, 0xb1, 0xfe, 0x4a, 0xa0, 0x08, 0xc4, - 0x1f, 0x85, 0xe1, 0x21, 0x2f, 0x76, 0x51, 0x20, 0x69, 0x3b, 0x38, 0xb6, 0x53, 0xb3, 0x6d, 0x62, - 0xe3, 0x94, 0x20, 0x40, 0x81, 0x0e, 0x67, 0xf2, 0x6c, 0xb1, 0xa5, 0x48, 0x95, 0x77, 0x4a, 0xac, - 0xb1, 0x53, 0xd7, 0xa2, 0x63, 0xfb, 0x85, 0x3a, 0xa6, 0x5b, 0xc7, 0x22, 0xd9, 0xfb, 0x09, 0x3a, - 0x14, 0x77, 0x3c, 0x92, 0x47, 0x89, 0x72, 0x5b, 0x64, 0x91, 0xf9, 0xbc, 0xfd, 0x9e, 0xd7, 0x7b, - 0xee, 0x0c, 0x0f, 0xbc, 0x78, 0x3c, 0x8e, 0x23, 0x36, 0x21, 0x1e, 0xbd, 0x27, 0x7f, 0xd9, 0x2c, - 0xf2, 0x26, 0x49, 0xcc, 0xe3, 0x7b, 0xf2, 0x97, 0x15, 0xdc, 0xbb, 0x92, 0x81, 0x9a, 0x39, 0xc3, - 0xa1, 0xd0, 0x3a, 0xa6, 0xc4, 
0x1f, 0xce, 0x22, 0x0f, 0x93, 0xe8, 0x82, 0x22, 0x04, 0xab, 0xe7, - 0x49, 0x3c, 0xee, 0x1a, 0x03, 0x63, 0x7b, 0x15, 0xcb, 0x6f, 0xd4, 0x06, 0x93, 0xc7, 0x5d, 0x53, - 0x72, 0x4c, 0x1e, 0xa3, 0x0e, 0xac, 0x85, 0xc1, 0x38, 0xe0, 0xdd, 0x95, 0x81, 0xb1, 0xdd, 0xc2, - 0x29, 0x81, 0x7a, 0xd0, 0xa0, 0x21, 0x1d, 0xd3, 0x88, 0xb3, 0xee, 0xea, 0xc0, 0xd8, 0x6e, 0xe0, - 0x9c, 0x76, 0x2e, 0xa1, 0x9d, 0xbb, 0xa1, 0x6c, 0x1a, 0x72, 0xe1, 0x67, 0x44, 0xd8, 0x48, 0xfa, - 0xb1, 0xb0, 0xfc, 0x46, 0x9f, 0x68, 0x08, 0xe6, 0x60, 0x65, 0x7b, 0x63, 0x77, 0x70, 0xb7, 0x88, - 0xbd, 0x0c, 0x70, 0x94, 0x2a, 0x16, 0x3e, 0x44, 0x54, 0x5e, 0x3c, 0x8d, 0xf2, 0xa8, 0x24, 0xe1, - 0x7c, 0x0c, 0x37, 0x2a, 0x0d, 0x45, 0x52, 0x81, 0x2f, 0xdd, 0x37, 0xb1, 0x19, 0xf8, 0x32, 0x20, - 0x4a, 0x7c, 0x99, 0x66, 0x13, 0xcb, 0x6f, 0xe7, 0x27, 0x03, 0xae, 0x15, 0xd6, 0xdf, 0x4d, 0x29, - 0xe3, 0xa8, 0x0b, 0x75, 0x19, 0x93, 0x9b, 0x19, 0x67, 0x24, 0xba, 0x0f, 0xeb, 0x89, 0xa8, 0x61, - 0x16, 0x7c, 0xb7, 0x2a, 0x78, 0xa1, 0x80, 0x95, 0x1e, 0xda, 0x83, 0x86, 0x1f, 0x9c, 0x9f, 0x3f, - 0x9f, 0x4d, 0xa8, 0x8c, 0xba, 0xbd, 0xbb, 0xa9, 0xd9, 0x1c, 0x2a, 0xd1, 0x63, 0xb3, 0x6b, 0xe0, - 0x5c, 0xd1, 0xb9, 0x04, 0x5b, 0xcb, 0x68, 0x12, 0x47, 0x8c, 0xa2, 0x3d, 0xa8, 0x27, 0x32, 0x3b, - 0xd6, 0x35, 0xa4, 0xef, 0xff, 0x2d, 0x2d, 0x1c, 0xce, 0x34, 0xd1, 0x3d, 0xcd, 0xbb, 0xb9, 0xd4, - 0xbb, 0xe6, 0xf9, 0x17, 0x03, 0xae, 0x9f, 0x9c, 0x7d, 0x43, 0x3d, 0x2e, 0xe0, 0x9e, 0x52, 0xc6, - 0xc8, 0x05, 0xbd, 0xa2, 0x20, 0xb7, 0xa1, 0x99, 0xa4, 0x55, 0x73, 0xb3, 0xba, 0x16, 0x0c, 0x61, - 0x97, 0xd0, 0x49, 0x38, 0x73, 0x7d, 0x99, 0x7b, 0x13, 0x67, 0xa4, 0x90, 0x4c, 0xc8, 0x2c, 0x8c, - 0x89, 0x2f, 0x07, 0xc9, 0xc2, 0x19, 0x29, 0x66, 0x2c, 0x96, 0x01, 0xb8, 0x7e, 0x77, 0x4d, 0x1a, - 0xe5, 0xb4, 0x43, 0xc1, 0x1e, 0x0a, 0xc7, 0xa7, 0x53, 0x36, 0xca, 0x9a, 0xf5, 0xa0, 0x40, 0x12, - 0xb1, 0x6d, 0xec, 0x6e, 0x69, 0x19, 0xa6, 0xda, 0xa9, 0xb8, 0x70, 0xd1, 0x07, 0x38, 0x48, 0xa8, - 0x4f, 0x23, 0x1e, 0x90, 0x50, 0x46, 0x6d, 0x61, 0x8d, 0xe3, 0x6c, 0xc2, 0x75, 0xcd, 0x4d, 0x5a, - 0x7f, 0xc7, 0xc9, 0x7d, 0x87, 0x61, 0xe6, 0x7b, 0x6e, 0xc0, 0x9c, 0x27, 0xb9, 0xa1, 0xd0, 0x51, - 0x8d, 0xfb, 0xef, 0x01, 0x3a, 0xdf, 0x9b, 0x60, 0xe9, 0x12, 0xb4, 0x0f, 0x1b, 0xd2, 0x46, 0xf4, - 0x99, 0x26, 0x0a, 0xe7, 0x8e, 0x86, 0x83, 0xc9, 0xeb, 0x61, 0xa1, 0xf0, 0x32, 0xe0, 0x23, 0xd7, - 0xc7, 0xba, 0x8d, 0x48, 0x9a, 0x78, 0xa1, 0x02, 0xcc, 0x92, 0x2e, 0x38, 0xc8, 0x01, 0xab, 0xa0, - 0xf2, 0x86, 0x95, 0x78, 0x68, 0x17, 0x3a, 0x12, 0x72, 0x48, 0x39, 0x0f, 0xa2, 0x0b, 0x76, 0x5a, - 0x6a, 0x61, 0xa5, 0x0c, 0x7d, 0x04, 0x37, 0xab, 0xf8, 0x79, 0x77, 0x97, 0x48, 0x9d, 0xdf, 0x0c, - 0xd8, 0xd0, 0x52, 0x12, 0x73, 0x11, 0xc8, 0x06, 0xf1, 0x99, 0xda, 0x28, 0x39, 0x2d, 0xa6, 0x90, - 0x07, 0x63, 0xca, 0x38, 0x19, 0x4f, 0x64, 0x6a, 0x2b, 0xb8, 0x60, 0x08, 0xa9, 0xf4, 0x91, 0x9f, - 0xc1, 0x26, 0x2e, 0x18, 0xe8, 0x03, 0x68, 0x8b, 0xa1, 0x0c, 0x3c, 0xc2, 0x83, 0x38, 0xfa, 0x82, - 0xce, 0x64, 0x36, 0xab, 0x78, 0x8e, 0x2b, 0x96, 0x07, 0xa3, 0x34, 0x8d, 0xda, 0xc2, 0xf2, 0x1b, - 0xdd, 0x05, 0xa4, 0x95, 0x38, 0xab, 0xc6, 0xba, 0xd4, 0xa8, 0x90, 0x38, 0xa7, 0xd0, 0x2e, 0x37, - 0x0a, 0x0d, 0x16, 0x1b, 0x6b, 0x95, 0xfb, 0x26, 0xa2, 0x0f, 0x2e, 0x22, 0xc2, 0xa7, 0x09, 0x55, - 0x6d, 0x2b, 0x18, 0xce, 0x21, 0x74, 0xaa, 0x5a, 0x2f, 0xcf, 0x25, 0x79, 0x5d, 0x42, 0x2d, 0x18, - 0x6a, 0x6e, 0xcd, 0x7c, 0x6e, 0x7f, 0x36, 0xa0, 0x33, 0xd4, 0xdb, 0x70, 0x10, 0x47, 0x5c, 0x6c, - 0xd0, 0x4f, 0xc1, 0x4a, 0x0f, 0xdf, 0x21, 0x0d, 0x29, 0xa7, 0x15, 0x03, 0x7c, 0xa2, 0x89, 0x8f, - 0x6b, 0xb8, 0xa4, 0x8e, 0x1e, 0xa9, 0xec, 0x94, 0xb5, 
0x29, 0xad, 0x6f, 0xce, 0x8f, 0x7f, 0x6e, - 0xac, 0x2b, 0x3f, 0xae, 0xc3, 0xda, 0x2b, 0x12, 0x4e, 0xa9, 0xd3, 0x07, 0x4b, 0x77, 0xb2, 0x70, - 0xe8, 0xf6, 0xd4, 0x9c, 0x28, 0xf1, 0xff, 0xa1, 0xe5, 0xcb, 0xaf, 0xe4, 0x94, 0xd2, 0x24, 0xdf, - 0x58, 0x65, 0xa6, 0xf3, 0x35, 0xdc, 0x28, 0x25, 0x3c, 0x8c, 0xc8, 0x84, 0x8d, 0x62, 0x2e, 0x8e, - 0x49, 0xaa, 0xe9, 0xbb, 0x7e, 0xba, 0x69, 0x9b, 0x58, 0xe3, 0x2c, 0xc2, 0x9b, 0x55, 0xf0, 0x3f, - 0x18, 0x60, 0x65, 0xd0, 0x87, 0x84, 0x13, 0xf4, 0x10, 0xea, 0x5e, 0x5a, 0x53, 0xb5, 0xbd, 0xef, - 0xcc, 0x57, 0x61, 0xae, 0xf4, 0x38, 0xd3, 0x17, 0x57, 0x26, 0x53, 0xd1, 0xa9, 0x0a, 0x0e, 0x96, - 0xd9, 0x66, 0x59, 0xe0, 0xdc, 0xc2, 0xf9, 0x56, 0xad, 0xa4, 0xe1, 0xf4, 0x8c, 0x79, 0x49, 0x30, - 0x11, 0xe3, 0x2c, 0xce, 0x92, 0x5a, 0xe0, 0x59, 0x8a, 0x39, 0x8d, 0x1e, 0xc1, 0x3a, 0xf1, 0x84, - 0x96, 0xba, 0x30, 0x9c, 0x05, 0x67, 0x1a, 0xd2, 0xbe, 0xd4, 0xc4, 0xca, 0xc2, 0x71, 0x61, 0x73, - 0xdf, 0x0b, 0xf7, 0x7d, 0x1f, 0x53, 0x2f, 0x4e, 0xfc, 0x7f, 0xbe, 0x4f, 0xb5, 0x6b, 0xc0, 0x2c, - 0x5d, 0x03, 0xce, 0x97, 0xd0, 0x29, 0x43, 0xa9, 0x6d, 0xda, 0x83, 0x46, 0x22, 0x39, 0x39, 0x58, - 0x4e, 0x5f, 0x81, 0xf6, 0xb9, 0x44, 0xfb, 0x8c, 0xf2, 0x14, 0x8d, 0xfd, 0xab, 0xc8, 0x88, 0x17, - 0x1e, 0x17, 0xcf, 0x85, 0x8c, 0x74, 0x1e, 0xc0, 0x8d, 0x39, 0x2c, 0x15, 0x9a, 0xbc, 0xed, 0x24, - 0x4b, 0x16, 0xd5, 0xc2, 0x19, 0xb9, 0xf3, 0xa7, 0x01, 0x8d, 0xa3, 0x24, 0x39, 0x88, 0x7d, 0xca, - 0x50, 0x1b, 0xe0, 0x45, 0x44, 0x2f, 0x27, 0xd4, 0xe3, 0xd4, 0xb7, 0x6b, 0xc8, 0x56, 0xbb, 0xfe, - 0x69, 0xc0, 0x58, 0x10, 0x5d, 0xd8, 0x06, 0xba, 0xa6, 0x26, 0xfa, 0xe8, 0x32, 0x60, 0x9c, 0xd9, - 0x26, 0xda, 0x84, 0x6b, 0x92, 0xf1, 0x2c, 0xe6, 0x6e, 0x74, 0x40, 0xbc, 0x11, 0xb5, 0x57, 0x10, - 0x82, 0xb6, 0x64, 0xba, 0x2c, 0x9d, 0x7c, 0xdf, 0x5e, 0x45, 0x5d, 0xe8, 0xc8, 0x09, 0x64, 0xcf, - 0x62, 0xae, 0xe2, 0x0a, 0xce, 0x42, 0x6a, 0xaf, 0xa1, 0x0e, 0xd8, 0x98, 0x7a, 0x34, 0x98, 0x70, - 0x97, 0xb9, 0xd1, 0x2b, 0x12, 0x06, 0xbe, 0xbd, 0x2e, 0x30, 0x14, 0xa1, 0x56, 0x94, 0x5d, 0x17, - 0x9a, 0x87, 0xd3, 0x74, 0xf5, 0x51, 0x55, 0x27, 0xbb, 0x81, 0x6e, 0xc1, 0xd6, 0xf3, 0x38, 0x7e, - 0x4a, 0xa2, 0x99, 0xe2, 0xb1, 0x27, 0x49, 0x3c, 0x16, 0xce, 0xec, 0xa6, 0x08, 0xf8, 0x28, 0x49, - 0xe2, 0xe4, 0xe4, 0xfc, 0x9c, 0x51, 0x6e, 0xfb, 0x3b, 0x0f, 0x61, 0x6b, 0xc9, 0xac, 0xa0, 0x16, - 0x34, 0x15, 0xf7, 0x8c, 0xda, 0x35, 0x61, 0xfa, 0x22, 0x62, 0x39, 0xc3, 0xd8, 0xd9, 0x81, 0x46, - 0xf6, 0x2e, 0x41, 0x1b, 0x50, 0x77, 0xa3, 0x40, 0xdc, 0xc9, 0x76, 0x0d, 0x5d, 0x87, 0xd6, 0x69, - 0x42, 0x3d, 0x12, 0x7a, 0xd3, 0x90, 0x88, 0x74, 0x8d, 0xdd, 0xbf, 0x56, 0xa1, 0x99, 0xfa, 0x99, - 0x45, 0x1e, 0x3a, 0x80, 0x46, 0xf6, 0x0e, 0x42, 0xbd, 0xca, 0xc7, 0x91, 0x0c, 0xbc, 0x77, 0xab, - 0xfa, 0xe1, 0x94, 0x36, 0xf1, 0x89, 0x42, 0x14, 0x77, 0x3f, 0xba, 0xb5, 0x70, 0x53, 0x17, 0x0f, - 0x8f, 0xde, 0xed, 0x6a, 0xe1, 0x02, 0x4e, 0x18, 0x56, 0xe1, 0xe4, 0x8f, 0x88, 0x2a, 0x1c, 0xed, - 0xf5, 0x80, 0xc1, 0x2e, 0xde, 0x63, 0x43, 0x9e, 0x50, 0x32, 0x46, 0xb7, 0x17, 0xf6, 0xaf, 0xf6, - 0x58, 0xeb, 0x5d, 0x29, 0xdd, 0x36, 0xee, 0x1b, 0xe8, 0x18, 0xa0, 0x10, 0xbc, 0x0f, 0x1a, 0x7a, - 0x09, 0x5b, 0x05, 0x53, 0x25, 0xf4, 0xfe, 0x41, 0xde, 0x37, 0xd0, 0x09, 0x58, 0xfa, 0xf1, 0x47, - 0x7d, 0x4d, 0xbf, 0x62, 0xc5, 0xf4, 0xee, 0x2c, 0x95, 0xe7, 0x75, 0x6c, 0x95, 0x4e, 0x2d, 0x9a, - 0xb3, 0x58, 0xd8, 0x0d, 0xbd, 0xc1, 0x72, 0x85, 0x14, 0xf3, 0xf1, 0x87, 0xbf, 0xbe, 0xed, 0x1b, - 0x6f, 0xde, 0xf6, 0x8d, 0x3f, 0xde, 0xf6, 0x8d, 0x1f, 0xdf, 0xf5, 0x6b, 0x6f, 0xde, 0xf5, 0x6b, - 0xbf, 0xbf, 0xeb, 0xd7, 0xbe, 0xea, 0x2d, 0xff, 0x8f, 0xed, 0x6c, 0x5d, 0xfe, 
0xd9, 0xfb, 0x3b, - 0x00, 0x00, 0xff, 0xff, 0xd0, 0x97, 0xf7, 0xc0, 0xd6, 0x0d, 0x00, 0x00, + // 1572 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x18, 0x4d, 0x6f, 0x1b, 0x45, + 0x3b, 0xbb, 0x4e, 0x1c, 0xfb, 0x89, 0xe3, 0x6e, 0x26, 0x49, 0xe3, 0xd7, 0xad, 0x5c, 0x6b, 0xf4, + 0xaa, 0x6f, 0x14, 0xbd, 0xb4, 0x4d, 0x0a, 0x95, 0x5a, 0xe0, 0x90, 0x26, 0x69, 0x63, 0x4a, 0x9a, + 0x68, 0xdc, 0x0f, 0x09, 0x09, 0xa4, 0xcd, 0xee, 0x24, 0x59, 0xba, 0xde, 0x35, 0x3b, 0xe3, 0x36, + 0x3e, 0x72, 0xe2, 0x04, 0xe2, 0xcc, 0x7f, 0xe0, 0xc0, 0xbf, 0xe0, 0x58, 0x38, 0x71, 0x44, 0xed, + 0x9d, 0xdf, 0x80, 0x66, 0x76, 0x76, 0x76, 0xd6, 0x1f, 0xa5, 0xa8, 0x70, 0x89, 0xe7, 0x79, 0xe6, + 0xf9, 0xfe, 0x9c, 0x0d, 0x6c, 0x7a, 0x71, 0xaf, 0x17, 0x47, 0xac, 0xef, 0x7a, 0xf4, 0xba, 0xfc, + 0xcb, 0x86, 0x91, 0xd7, 0x4f, 0x62, 0x1e, 0x5f, 0x97, 0x7f, 0x59, 0x8e, 0xbd, 0x26, 0x11, 0xa8, + 0xaa, 0x11, 0x98, 0xc2, 0xe2, 0x3e, 0x75, 0xfd, 0xee, 0x30, 0xf2, 0x88, 0x1b, 0x9d, 0x52, 0x84, + 0x60, 0xf6, 0x24, 0x89, 0x7b, 0x0d, 0xab, 0x6d, 0xad, 0xcf, 0x12, 0x79, 0x46, 0x75, 0xb0, 0x79, + 0xdc, 0xb0, 0x25, 0xc6, 0xe6, 0x31, 0x5a, 0x81, 0xb9, 0x30, 0xe8, 0x05, 0xbc, 0x51, 0x6a, 0x5b, + 0xeb, 0x8b, 0x24, 0x05, 0x50, 0x13, 0x2a, 0x34, 0xa4, 0x3d, 0x1a, 0x71, 0xd6, 0x98, 0x6d, 0x5b, + 0xeb, 0x15, 0xa2, 0x61, 0x7c, 0x0e, 0x75, 0xad, 0x86, 0xb2, 0x41, 0xc8, 0x85, 0x9e, 0x33, 0x97, + 0x9d, 0x49, 0x3d, 0x35, 0x22, 0xcf, 0xe8, 0x23, 0x43, 0x82, 0xdd, 0x2e, 0xad, 0x2f, 0x6c, 0xb5, + 0xaf, 0xe5, 0xb6, 0x17, 0x05, 0xec, 0xa5, 0x84, 0xb9, 0x0e, 0x61, 0x95, 0x17, 0x0f, 0x22, 0x6d, + 0x95, 0x04, 0xf0, 0x87, 0xb0, 0x3a, 0x91, 0x51, 0x38, 0x15, 0xf8, 0x52, 0x7d, 0x95, 0xd8, 0x81, + 0x2f, 0x0d, 0xa2, 0xae, 0x2f, 0xdd, 0xac, 0x12, 0x79, 0xc6, 0xdf, 0x59, 0x70, 0x21, 0xe7, 0xfe, + 0x6a, 0x40, 0x19, 0x47, 0x0d, 0x98, 0x97, 0x36, 0x75, 0x32, 0xe6, 0x0c, 0x44, 0x37, 0xa0, 0x9c, + 0x88, 0x18, 0x66, 0xc6, 0x37, 0x26, 0x19, 0x2f, 0x08, 0x88, 0xa2, 0x43, 0xd7, 0xa1, 0xe2, 0x07, + 0x27, 0x27, 0x8f, 0x86, 0x7d, 0x2a, 0xad, 0xae, 0x6f, 0x2d, 0x1b, 0x3c, 0xbb, 0xea, 0x8a, 0x68, + 0x22, 0x7c, 0x0e, 0x8e, 0xe1, 0x4d, 0x3f, 0x8e, 0x18, 0x45, 0x37, 0x61, 0x3e, 0x91, 0x9e, 0xb1, + 0x86, 0x25, 0xf5, 0xfe, 0x67, 0x6a, 0xd0, 0x48, 0x46, 0x59, 0xd0, 0x6c, 0xbf, 0x8d, 0xe6, 0x5f, + 0x2d, 0x58, 0x3a, 0x3c, 0xfe, 0x92, 0x7a, 0x5c, 0x88, 0x3b, 0xa0, 0x8c, 0xb9, 0xa7, 0xf4, 0x0d, + 0xc1, 0xb8, 0x0c, 0xd5, 0x24, 0x8d, 0x58, 0x27, 0x8b, 0x69, 0x8e, 0x10, 0x7c, 0x09, 0xed, 0x87, + 0xc3, 0x8e, 0x2f, 0xfd, 0xae, 0x92, 0x0c, 0x14, 0x37, 0x7d, 0x77, 0x18, 0xc6, 0xae, 0x2f, 0x8b, + 0xa8, 0x46, 0x32, 0x50, 0xd4, 0x57, 0x2c, 0x0d, 0xe8, 0xf8, 0x8d, 0x39, 0xc9, 0xa4, 0x61, 0xf4, + 0x01, 0x40, 0x7a, 0x96, 0x0e, 0x95, 0xa5, 0x43, 0xab, 0x86, 0x43, 0x87, 0xfa, 0x92, 0x18, 0x84, + 0x98, 0x82, 0xd3, 0x15, 0x34, 0x47, 0x03, 0x76, 0x96, 0xe5, 0x77, 0x33, 0x37, 0x40, 0xb8, 0xb4, + 0xb0, 0xb5, 0x66, 0xc8, 0x49, 0xa9, 0xd3, 0xeb, 0xdc, 0xb2, 0x16, 0xc0, 0x4e, 0x42, 0x7d, 0x1a, + 0xf1, 0xc0, 0x0d, 0xa5, 0xb3, 0x35, 0x62, 0x60, 0xf0, 0x32, 0x2c, 0x19, 0x6a, 0xd2, 0xb4, 0x61, + 0xac, 0x75, 0x87, 0x61, 0xa6, 0x7b, 0xa4, 0x26, 0xf1, 0x3d, 0xcd, 0x28, 0x68, 0x54, 0xbe, 0xff, + 0xbe, 0x81, 0xf8, 0x6b, 0x1b, 0x6a, 0xe6, 0x0d, 0xda, 0x86, 0x05, 0xc9, 0x23, 0xca, 0x83, 0x26, + 0x4a, 0xce, 0x15, 0x43, 0x0e, 0x71, 0x5f, 0x74, 0x73, 0x82, 0xa7, 0x01, 0x3f, 0xeb, 0xf8, 0xc4, + 0xe4, 0x11, 0x4e, 0xbb, 0x5e, 0xa8, 0x04, 0x66, 0x4e, 0xe7, 0x18, 0x84, 0xa1, 0x96, 0x43, 0x3a, + 0xcf, 0x05, 0x1c, 0xda, 0x82, 0x15, 0x29, 0xb2, 0x4b, 0x39, 0x0f, 0xa2, 0x53, 
0x76, 0x54, 0xc8, + 0xfc, 0xc4, 0x3b, 0x74, 0x0b, 0x2e, 0x4e, 0xc2, 0xeb, 0xa2, 0x98, 0x72, 0x8b, 0x7f, 0xb1, 0x60, + 0xc1, 0x70, 0x49, 0x94, 0x53, 0x20, 0x13, 0xc4, 0x87, 0x6a, 0x08, 0x69, 0x58, 0x14, 0x2f, 0x0f, + 0x7a, 0x94, 0x71, 0xb7, 0xd7, 0x97, 0xae, 0x95, 0x48, 0x8e, 0x10, 0xb7, 0x52, 0x87, 0x6e, 0xdb, + 0x2a, 0xc9, 0x11, 0xe8, 0x2a, 0xd4, 0x45, 0x2d, 0x07, 0x9e, 0xcb, 0x83, 0x38, 0x7a, 0x40, 0x87, + 0xd2, 0x9b, 0x59, 0x32, 0x82, 0x15, 0xf3, 0x86, 0x51, 0x9a, 0x5a, 0x5d, 0x23, 0xf2, 0x8c, 0xae, + 0x01, 0x32, 0x42, 0x9c, 0x45, 0xa3, 0x2c, 0x29, 0x26, 0xdc, 0xe0, 0x23, 0xa8, 0x17, 0x13, 0x85, + 0xda, 0xe3, 0x89, 0xad, 0x15, 0xf3, 0x26, 0xac, 0x0f, 0x4e, 0x23, 0x97, 0x0f, 0x12, 0xaa, 0xd2, + 0x96, 0x23, 0xf0, 0x2e, 0xac, 0x4c, 0x4a, 0xbd, 0x6c, 0x67, 0xf7, 0x45, 0x41, 0x6a, 0x8e, 0x50, + 0x75, 0x6b, 0xeb, 0xba, 0xfd, 0xc1, 0x82, 0x95, 0xae, 0x99, 0x86, 0x9d, 0x38, 0xe2, 0x62, 0xe8, + 0x7e, 0x0c, 0xb5, 0xb4, 0xfd, 0x76, 0x69, 0x48, 0x39, 0x9d, 0x50, 0xc0, 0x87, 0xc6, 0xf5, 0xfe, + 0x0c, 0x29, 0x90, 0xa3, 0x3b, 0xca, 0x3b, 0xc5, 0x6d, 0x4b, 0xee, 0x8b, 0xa3, 0xe5, 0xaf, 0x99, + 0x4d, 0xe2, 0xbb, 0xf3, 0x30, 0xf7, 0xdc, 0x0d, 0x07, 0x14, 0xb7, 0xa0, 0x66, 0x2a, 0x19, 0x6b, + 0xba, 0x0e, 0x2c, 0x74, 0x79, 0x9c, 0x64, 0xf1, 0x9a, 0x3e, 0xe2, 0x44, 0xac, 0x79, 0x9c, 0xb8, + 0xa7, 0xf4, 0xa1, 0xdb, 0xa3, 0xca, 0x7d, 0x13, 0x85, 0x6f, 0xaa, 0x92, 0x53, 0x9a, 0xfe, 0x0b, + 0x8b, 0xbe, 0x3c, 0x25, 0x47, 0x94, 0x26, 0x5a, 0x60, 0x11, 0x89, 0x3f, 0x87, 0xd5, 0x42, 0xec, + 0xba, 0x91, 0xdb, 0x67, 0x67, 0x31, 0x17, 0x1d, 0x97, 0x52, 0xfa, 0x1d, 0x3f, 0x9d, 0xf5, 0x55, + 0x62, 0x60, 0xc6, 0xc5, 0xdb, 0x93, 0xc4, 0x7f, 0x63, 0x41, 0x2d, 0x13, 0xbd, 0xeb, 0x72, 0x17, + 0xdd, 0x86, 0x79, 0x2f, 0x4d, 0x8f, 0xda, 0x1f, 0x57, 0x46, 0x03, 0x3a, 0x92, 0x45, 0x92, 0xd1, + 0x8b, 0x85, 0xcd, 0x94, 0x75, 0x2a, 0x19, 0xed, 0x69, 0xbc, 0x99, 0x17, 0x44, 0x73, 0xe0, 0x67, + 0x6a, 0xba, 0x75, 0x07, 0xc7, 0xcc, 0x4b, 0x82, 0xbe, 0xe8, 0x0c, 0xd1, 0x96, 0x2a, 0xbe, 0x99, + 0x8b, 0x1a, 0x46, 0x77, 0xa0, 0xec, 0x7a, 0x82, 0x4a, 0xad, 0x2c, 0x3c, 0xa6, 0xcc, 0x90, 0xb4, + 0x2d, 0x29, 0x89, 0xe2, 0xc0, 0x1d, 0x58, 0xde, 0xf6, 0xc2, 0x6d, 0xdf, 0x27, 0xd4, 0x8b, 0x13, + 0xff, 0xaf, 0xb7, 0xb9, 0xb1, 0x88, 0xec, 0xc2, 0x22, 0xc2, 0x9f, 0xc2, 0x4a, 0x51, 0x94, 0x1a, + 0xcc, 0x4d, 0xa8, 0x24, 0x12, 0xa3, 0x85, 0x69, 0xf8, 0x0d, 0xd2, 0x3e, 0x91, 0xd2, 0xee, 0x53, + 0x9e, 0x4a, 0x63, 0x6f, 0x65, 0x99, 0xeb, 0x85, 0xfb, 0xf9, 0x63, 0x25, 0x03, 0xf1, 0x26, 0xac, + 0x8e, 0xc8, 0x52, 0xa6, 0xc9, 0x7d, 0x2b, 0x51, 0x32, 0xa8, 0x35, 0x92, 0x81, 0xf8, 0x0b, 0x70, + 0x64, 0xb5, 0x8b, 0x95, 0xff, 0x2f, 0x3c, 0x71, 0xf0, 0x3e, 0x2c, 0x19, 0xf2, 0xdf, 0xe1, 0xc9, + 0x82, 0x7f, 0xb2, 0x60, 0x51, 0x8a, 0x7a, 0x40, 0x87, 0x4f, 0x44, 0x27, 0x8b, 0xa1, 0xf4, 0x8c, + 0x0e, 0x0b, 0xbd, 0x94, 0x23, 0xc4, 0x7b, 0x50, 0x36, 0xbc, 0x0a, 0x78, 0x0a, 0xa0, 0xff, 0xc3, + 0x52, 0x36, 0xe6, 0xbb, 0x7a, 0x0c, 0x96, 0x24, 0xc5, 0xf8, 0x85, 0x68, 0xa9, 0x3e, 0xa5, 0x49, + 0x4e, 0x99, 0x6e, 0xa6, 0x22, 0xd2, 0x8c, 0xd7, 0x5c, 0x21, 0x5e, 0x78, 0x1f, 0xea, 0x05, 0x93, + 0x19, 0xba, 0x25, 0x6d, 0x4e, 0x01, 0xe5, 0xbc, 0x19, 0xc4, 0x02, 0x35, 0xc9, 0x49, 0xf1, 0x8f, + 0x86, 0xf7, 0x9d, 0x28, 0xa2, 0x89, 0x58, 0x20, 0xc2, 0x8c, 0xec, 0x05, 0x2d, 0xce, 0x85, 0xa5, + 0x66, 0x8f, 0x2c, 0x35, 0x1d, 0x8f, 0x92, 0x19, 0x8f, 0xab, 0x50, 0xd7, 0x9b, 0xed, 0x20, 0xf0, + 0x92, 0x58, 0xba, 0x58, 0x22, 0x23, 0x58, 0x11, 0x6b, 0x55, 0x65, 0xda, 0xcb, 0x1c, 0x81, 0x1c, + 0x28, 0x3d, 0xa3, 0x43, 0xb9, 0xa9, 0xaa, 0x44, 0x1c, 0xf1, 0x83, 0xd4, 0x5c, 0xf7, 0xf4, 0x1f, + 0x98, 
0xa3, 0x1b, 0x7f, 0x58, 0x50, 0xd9, 0x4b, 0x92, 0x9d, 0xd8, 0xa7, 0x0c, 0xd5, 0x01, 0x1e, + 0x47, 0xf4, 0xbc, 0x4f, 0x3d, 0x4e, 0x7d, 0x67, 0x06, 0x39, 0xea, 0x6d, 0x73, 0x10, 0x30, 0x16, + 0x44, 0xa7, 0x8e, 0x85, 0x2e, 0xa8, 0xb1, 0xbb, 0x77, 0x1e, 0x30, 0xce, 0x1c, 0x1b, 0x2d, 0xc3, + 0x05, 0x89, 0x78, 0x18, 0xf3, 0x4e, 0xb4, 0xe3, 0x7a, 0x67, 0xd4, 0x29, 0x21, 0x04, 0x75, 0x89, + 0xec, 0xb0, 0x74, 0x3c, 0xfb, 0xce, 0x2c, 0x6a, 0xc0, 0x8a, 0xac, 0x1e, 0xf6, 0x30, 0xe6, 0xaa, + 0x5a, 0x83, 0xe3, 0x90, 0x3a, 0x73, 0x68, 0x05, 0x1c, 0x42, 0x3d, 0x1a, 0xf4, 0x79, 0x87, 0x75, + 0xa2, 0xe7, 0x6e, 0x18, 0xf8, 0x4e, 0x59, 0xc8, 0x50, 0x80, 0x5a, 0xc9, 0xce, 0xbc, 0xa0, 0xdc, + 0x1d, 0xa4, 0xab, 0x9e, 0xaa, 0x8e, 0x72, 0x2a, 0xe8, 0x12, 0xac, 0x3d, 0x8a, 0xe3, 0x03, 0x37, + 0x1a, 0x2a, 0x1c, 0xbb, 0x97, 0xc4, 0x3d, 0xa1, 0xcc, 0xa9, 0x0a, 0x83, 0xf7, 0x92, 0x24, 0x4e, + 0x0e, 0x4f, 0x4e, 0x18, 0xe5, 0x8e, 0xbf, 0x71, 0x1b, 0xd6, 0xa6, 0x0c, 0x34, 0xb4, 0x08, 0x55, + 0x85, 0x3d, 0xa6, 0xce, 0x8c, 0x60, 0x7d, 0x1c, 0x31, 0x8d, 0xb0, 0x36, 0xfe, 0x07, 0x95, 0xec, + 0xf9, 0x8e, 0x16, 0x60, 0xbe, 0x13, 0x05, 0xe2, 0x0d, 0xea, 0xcc, 0xa0, 0x32, 0xd8, 0x4f, 0x36, + 0x1d, 0x4b, 0xfe, 0x6e, 0x39, 0xf6, 0xc6, 0x7b, 0x00, 0xf9, 0xb3, 0x18, 0x55, 0x60, 0xf6, 0x51, + 0x42, 0x85, 0xc4, 0x79, 0x28, 0x6d, 0x7b, 0xa1, 0x63, 0xa1, 0x1a, 0x54, 0xb2, 0x4a, 0x74, 0xec, + 0xad, 0x6f, 0xcb, 0x50, 0x4d, 0x6d, 0x1a, 0x46, 0x1e, 0xda, 0x81, 0x4a, 0xd6, 0xa7, 0xa8, 0x39, + 0xb1, 0x79, 0xa5, 0x93, 0xcd, 0x4b, 0x93, 0x1b, 0x3b, 0x1d, 0x03, 0xf7, 0xa0, 0xaa, 0x67, 0x03, + 0xba, 0x34, 0xda, 0x05, 0xc6, 0x44, 0x6a, 0x5e, 0x9e, 0x7c, 0xa9, 0xe4, 0xdc, 0x57, 0xad, 0xb1, + 0x97, 0x7d, 0x0a, 0x4e, 0xed, 0xa8, 0xe6, 0xd4, 0x9b, 0x75, 0xeb, 0x86, 0x25, 0x0d, 0xca, 0x1e, + 0xea, 0x45, 0x83, 0x46, 0xbe, 0x12, 0x8a, 0x06, 0x8d, 0xbe, 0xed, 0x0d, 0x39, 0x61, 0x38, 0x49, + 0x8e, 0x7e, 0xf1, 0x4f, 0x92, 0x63, 0x3c, 0xf5, 0x09, 0x38, 0xf9, 0x37, 0x57, 0x97, 0x27, 0xd4, + 0xed, 0xa1, 0xcb, 0x63, 0x8f, 0x25, 0xe3, 0x83, 0xac, 0xf9, 0xc6, 0x5b, 0xe9, 0xe3, 0x7e, 0x96, + 0x76, 0x99, 0xbb, 0x77, 0x90, 0x86, 0x9e, 0xc2, 0x5a, 0x8e, 0x54, 0x0e, 0xbd, 0xbb, 0x91, 0x37, + 0x2c, 0x74, 0x08, 0x35, 0x73, 0xc1, 0xa2, 0x96, 0x41, 0x3f, 0x61, 0x89, 0x37, 0xaf, 0x4c, 0xbd, + 0xd7, 0x71, 0x5c, 0x2c, 0xec, 0x45, 0x34, 0xc2, 0x31, 0xb6, 0x7d, 0x9b, 0xed, 0xe9, 0x04, 0xa9, + 0xcc, 0xbb, 0xef, 0xff, 0xfc, 0xaa, 0x65, 0xbd, 0x7c, 0xd5, 0xb2, 0x7e, 0x7f, 0xd5, 0xb2, 0xbe, + 0x7f, 0xdd, 0x9a, 0x79, 0xf9, 0xba, 0x35, 0xf3, 0xdb, 0xeb, 0xd6, 0xcc, 0x67, 0xcd, 0xe9, 0xff, + 0x91, 0x39, 0x2e, 0xcb, 0x9f, 0x9b, 0x7f, 0x06, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x8f, 0x50, 0x36, + 0xb6, 0x11, 0x00, 0x00, } func (m *HeadSyncRange) Marshal() (dAtA []byte, err error) { @@ -2046,6 +2571,11 @@ func (m *ObjectSyncMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.ObjectType != 0 { + i = encodeVarintSpacesync(dAtA, i, uint64(m.ObjectType)) + i-- + dAtA[i] = 0x30 + } if len(m.ObjectId) > 0 { i -= len(m.ObjectId) copy(dAtA[i:], m.ObjectId) @@ -2516,6 +3046,43 @@ func (m *ObjectDelete) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *StoreHeader) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StoreHeader) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StoreHeader) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.StorageName) > 0 { + i -= len(m.StorageName) + copy(dAtA[i:], m.StorageName) + i = encodeVarintSpacesync(dAtA, i, uint64(len(m.StorageName))) + i-- + dAtA[i] = 0x12 + } + if len(m.SpaceId) > 0 { + i -= len(m.SpaceId) + copy(dAtA[i:], m.SpaceId) + i = encodeVarintSpacesync(dAtA, i, uint64(len(m.SpaceId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *SpaceDelete) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2814,6 +3381,282 @@ func (m *AclGetRecordsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *StoreDiffRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StoreDiffRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StoreDiffRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Ranges) > 0 { + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSpacesync(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.SpaceId) > 0 { + i -= len(m.SpaceId) + copy(dAtA[i:], m.SpaceId) + i = encodeVarintSpacesync(dAtA, i, uint64(len(m.SpaceId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StoreDiffResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StoreDiffResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StoreDiffResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Results) > 0 { + for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Results[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSpacesync(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *StoreKeyValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StoreKeyValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StoreKeyValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.SpaceId) > 0 { + i -= len(m.SpaceId) + copy(dAtA[i:], m.SpaceId) + i = encodeVarintSpacesync(dAtA, i, uint64(len(m.SpaceId))) + i-- + dAtA[i] = 0x2a + } + if len(m.PeerSignature) > 0 { + i -= len(m.PeerSignature) + copy(dAtA[i:], m.PeerSignature) + i = encodeVarintSpacesync(dAtA, i, uint64(len(m.PeerSignature))) + i-- + dAtA[i] = 0x22 + } + if len(m.IdentitySignature) > 0 { + i -= len(m.IdentitySignature) + copy(dAtA[i:], m.IdentitySignature) + i = encodeVarintSpacesync(dAtA, i, uint64(len(m.IdentitySignature))) + i-- + dAtA[i] = 0x1a + } + if len(m.Value) > 0 { + i -= len(m.Value) + 
copy(dAtA[i:], m.Value) + i = encodeVarintSpacesync(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.KeyPeerId) > 0 { + i -= len(m.KeyPeerId) + copy(dAtA[i:], m.KeyPeerId) + i = encodeVarintSpacesync(dAtA, i, uint64(len(m.KeyPeerId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StoreKeyValues) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StoreKeyValues) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StoreKeyValues) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.KeyValues) > 0 { + for iNdEx := len(m.KeyValues) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.KeyValues[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSpacesync(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *StoreKeyInner) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StoreKeyInner) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StoreKeyInner) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintSpacesync(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0x32 + } + if len(m.AclHeadId) > 0 { + i -= len(m.AclHeadId) + copy(dAtA[i:], m.AclHeadId) + i = encodeVarintSpacesync(dAtA, i, uint64(len(m.AclHeadId))) + i-- + dAtA[i] = 0x2a + } + if m.TimestampMicro != 0 { + i = encodeVarintSpacesync(dAtA, i, uint64(m.TimestampMicro)) + i-- + dAtA[i] = 0x20 + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintSpacesync(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x1a + } + if len(m.Identity) > 0 { + i -= len(m.Identity) + copy(dAtA[i:], m.Identity) + i = encodeVarintSpacesync(dAtA, i, uint64(len(m.Identity))) + i-- + dAtA[i] = 0x12 + } + if len(m.Peer) > 0 { + i -= len(m.Peer) + copy(dAtA[i:], m.Peer) + i = encodeVarintSpacesync(dAtA, i, uint64(len(m.Peer))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StorageHeader) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageHeader) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.StorageName) > 0 { + i -= len(m.StorageName) + copy(dAtA[i:], m.StorageName) + i = encodeVarintSpacesync(dAtA, i, uint64(len(m.StorageName))) + i-- + dAtA[i] = 0x12 + } + if len(m.SpaceId) > 0 { + i -= len(m.SpaceId) + copy(dAtA[i:], m.SpaceId) + i = encodeVarintSpacesync(dAtA, i, uint64(len(m.SpaceId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func encodeVarintSpacesync(dAtA []byte, offset int, v uint64) int { offset -= sovSpacesync(v) base := offset @@ -2951,6 +3794,9 @@ func (m *ObjectSyncMessage) 
Size() (n int) { if l > 0 { n += 1 + l + sovSpacesync(uint64(l)) } + if m.ObjectType != 0 { + n += 1 + sovSpacesync(uint64(m.ObjectType)) + } return n } @@ -3149,6 +3995,23 @@ func (m *ObjectDelete) Size() (n int) { return n } +func (m *StoreHeader) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SpaceId) + if l > 0 { + n += 1 + l + sovSpacesync(uint64(l)) + } + l = len(m.StorageName) + if l > 0 { + n += 1 + l + sovSpacesync(uint64(l)) + } + return n +} + func (m *SpaceDelete) Size() (n int) { if m == nil { return 0 @@ -3284,6 +4147,133 @@ func (m *AclGetRecordsResponse) Size() (n int) { return n } +func (m *StoreDiffRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SpaceId) + if l > 0 { + n += 1 + l + sovSpacesync(uint64(l)) + } + if len(m.Ranges) > 0 { + for _, e := range m.Ranges { + l = e.Size() + n += 1 + l + sovSpacesync(uint64(l)) + } + } + return n +} + +func (m *StoreDiffResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Results) > 0 { + for _, e := range m.Results { + l = e.Size() + n += 1 + l + sovSpacesync(uint64(l)) + } + } + return n +} + +func (m *StoreKeyValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.KeyPeerId) + if l > 0 { + n += 1 + l + sovSpacesync(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovSpacesync(uint64(l)) + } + l = len(m.IdentitySignature) + if l > 0 { + n += 1 + l + sovSpacesync(uint64(l)) + } + l = len(m.PeerSignature) + if l > 0 { + n += 1 + l + sovSpacesync(uint64(l)) + } + l = len(m.SpaceId) + if l > 0 { + n += 1 + l + sovSpacesync(uint64(l)) + } + return n +} + +func (m *StoreKeyValues) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.KeyValues) > 0 { + for _, e := range m.KeyValues { + l = e.Size() + n += 1 + l + sovSpacesync(uint64(l)) + } + } + return n +} + +func (m *StoreKeyInner) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Peer) + if l > 0 { + n += 1 + l + sovSpacesync(uint64(l)) + } + l = len(m.Identity) + if l > 0 { + n += 1 + l + sovSpacesync(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovSpacesync(uint64(l)) + } + if m.TimestampMicro != 0 { + n += 1 + sovSpacesync(uint64(m.TimestampMicro)) + } + l = len(m.AclHeadId) + if l > 0 { + n += 1 + l + sovSpacesync(uint64(l)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovSpacesync(uint64(l)) + } + return n +} + +func (m *StorageHeader) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SpaceId) + if l > 0 { + n += 1 + l + sovSpacesync(uint64(l)) + } + l = len(m.StorageName) + if l > 0 { + n += 1 + l + sovSpacesync(uint64(l)) + } + return n +} + func sovSpacesync(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -4097,6 +5087,25 @@ func (m *ObjectSyncMessage) Unmarshal(dAtA []byte) error { } m.ObjectId = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectType", wireType) + } + m.ObjectType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpacesync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObjectType |= ObjectType(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipSpacesync(dAtA[iNdEx:]) @@ -5332,6 +6341,120 @@ func (m *ObjectDelete) Unmarshal(dAtA []byte) error { } return nil } +func (m 
*StoreHeader) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpacesync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StoreHeader: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StoreHeader: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SpaceId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpacesync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpacesync + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSpacesync + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SpaceId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpacesync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpacesync + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSpacesync + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StorageName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSpacesync(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSpacesync + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *SpaceDelete) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -6177,6 +7300,855 @@ func (m *AclGetRecordsResponse) Unmarshal(dAtA []byte) error { } return nil } +func (m *StoreDiffRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpacesync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StoreDiffRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StoreDiffRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SpaceId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpacesync + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpacesync + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSpacesync + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SpaceId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpacesync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpacesync + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSpacesync + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ranges = append(m.Ranges, &HeadSyncRange{}) + if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSpacesync(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSpacesync + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StoreDiffResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpacesync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StoreDiffResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StoreDiffResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpacesync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpacesync + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSpacesync + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Results = append(m.Results, &HeadSyncResult{}) + if err := m.Results[len(m.Results)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSpacesync(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSpacesync + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StoreKeyValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpacesync + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StoreKeyValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StoreKeyValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KeyPeerId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpacesync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpacesync + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSpacesync + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KeyPeerId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpacesync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSpacesync + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSpacesync + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) + if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IdentitySignature", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpacesync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSpacesync + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSpacesync + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IdentitySignature = append(m.IdentitySignature[:0], dAtA[iNdEx:postIndex]...) + if m.IdentitySignature == nil { + m.IdentitySignature = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PeerSignature", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpacesync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSpacesync + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSpacesync + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PeerSignature = append(m.PeerSignature[:0], dAtA[iNdEx:postIndex]...) 
+ if m.PeerSignature == nil { + m.PeerSignature = []byte{} + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SpaceId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpacesync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpacesync + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSpacesync + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SpaceId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSpacesync(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSpacesync + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StoreKeyValues) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpacesync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StoreKeyValues: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StoreKeyValues: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KeyValues", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpacesync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpacesync + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSpacesync + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KeyValues = append(m.KeyValues, &StoreKeyValue{}) + if err := m.KeyValues[len(m.KeyValues)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSpacesync(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSpacesync + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StoreKeyInner) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpacesync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StoreKeyInner: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StoreKeyInner: illegal tag %d (wire type %d)", 
fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Peer", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpacesync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSpacesync + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSpacesync + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Peer = append(m.Peer[:0], dAtA[iNdEx:postIndex]...) + if m.Peer == nil { + m.Peer = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Identity", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpacesync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSpacesync + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSpacesync + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Identity = append(m.Identity[:0], dAtA[iNdEx:postIndex]...) + if m.Identity == nil { + m.Identity = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpacesync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSpacesync + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSpacesync + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
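The StoreKeyInner decoder generated here reads each field tag-by-tag, mirroring the MarshalToSizedBuffer encoder earlier in the diff. A small round-trip sketch that exercises the generated pair — the package name and field values are placeholders; everything else uses methods added in this change:

// storekeyinner_roundtrip_example.go — illustrative sketch, not part of the generated file.
package spacesyncexample

import (
	"fmt"

	"github.com/anyproto/any-sync/commonspace/spacesyncproto"
)

// roundTripStoreKeyInner marshals a StoreKeyInner and decodes it back,
// checking that a couple of fields survive the trip.
func roundTripStoreKeyInner() error {
	in := &spacesyncproto.StoreKeyInner{
		Peer:           []byte("peer-bytes"),     // placeholder payloads; real values are
		Identity:       []byte("identity-bytes"), // produced by the signing/identity layer
		Value:          []byte("value"),
		TimestampMicro: 1,
		AclHeadId:      "acl-head-id",
		Key:            "some-key",
	}
	raw, err := in.Marshal()
	if err != nil {
		return err
	}
	out := &spacesyncproto.StoreKeyInner{}
	if err := out.Unmarshal(raw); err != nil {
		return err
	}
	if out.GetKey() != in.GetKey() || out.GetTimestampMicro() != in.GetTimestampMicro() {
		return fmt.Errorf("round trip mismatch")
	}
	return nil
}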
+ if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimestampMicro", wireType) + } + m.TimestampMicro = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpacesync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimestampMicro |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AclHeadId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpacesync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpacesync + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSpacesync + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AclHeadId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpacesync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpacesync + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSpacesync + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSpacesync(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSpacesync + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageHeader) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpacesync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageHeader: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageHeader: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SpaceId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpacesync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpacesync + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSpacesync + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SpaceId = string(dAtA[iNdEx:postIndex]) + iNdEx = 
postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpacesync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpacesync + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSpacesync + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StorageName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSpacesync(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSpacesync + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipSpacesync(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/commonspace/spacesyncproto/spacesync_drpc.pb.go b/commonspace/spacesyncproto/spacesync_drpc.pb.go index 7612fe36..fe93109b 100644 --- a/commonspace/spacesyncproto/spacesync_drpc.pb.go +++ b/commonspace/spacesyncproto/spacesync_drpc.pb.go @@ -41,6 +41,8 @@ type DRPCSpaceSyncClient interface { DRPCConn() drpc.Conn HeadSync(ctx context.Context, in *HeadSyncRequest) (*HeadSyncResponse, error) + StoreDiff(ctx context.Context, in *StoreDiffRequest) (*StoreDiffResponse, error) + StoreElements(ctx context.Context) (DRPCSpaceSync_StoreElementsClient, error) SpacePush(ctx context.Context, in *SpacePushRequest) (*SpacePushResponse, error) SpacePull(ctx context.Context, in *SpacePullRequest) (*SpacePullResponse, error) ObjectSyncStream(ctx context.Context) (DRPCSpaceSync_ObjectSyncStreamClient, error) @@ -69,6 +71,54 @@ func (c *drpcSpaceSyncClient) HeadSync(ctx context.Context, in *HeadSyncRequest) return out, nil } +func (c *drpcSpaceSyncClient) StoreDiff(ctx context.Context, in *StoreDiffRequest) (*StoreDiffResponse, error) { + out := new(StoreDiffResponse) + err := c.cc.Invoke(ctx, "/spacesync.SpaceSync/StoreDiff", drpcEncoding_File_commonspace_spacesyncproto_protos_spacesync_proto{}, in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *drpcSpaceSyncClient) StoreElements(ctx context.Context) (DRPCSpaceSync_StoreElementsClient, error) { + stream, err := c.cc.NewStream(ctx, "/spacesync.SpaceSync/StoreElements", drpcEncoding_File_commonspace_spacesyncproto_protos_spacesync_proto{}) + if err != nil { + return nil, err + } + x := &drpcSpaceSync_StoreElementsClient{stream} + return x, nil +} + +type DRPCSpaceSync_StoreElementsClient interface { + drpc.Stream + Send(*StoreKeyValue) error + Recv() (*StoreKeyValue, error) +} + +type drpcSpaceSync_StoreElementsClient struct { + drpc.Stream +} + +func (x *drpcSpaceSync_StoreElementsClient) GetStream() drpc.Stream { + return x.Stream +} + +func (x *drpcSpaceSync_StoreElementsClient) Send(m *StoreKeyValue) error { + return x.MsgSend(m, drpcEncoding_File_commonspace_spacesyncproto_protos_spacesync_proto{}) +} + +func (x *drpcSpaceSync_StoreElementsClient) Recv() (*StoreKeyValue, error) { + m := new(StoreKeyValue) + if err := x.MsgRecv(m, drpcEncoding_File_commonspace_spacesyncproto_protos_spacesync_proto{}); err != nil { + return nil, err + } + return m, nil +} + +func (x *drpcSpaceSync_StoreElementsClient) 
RecvMsg(m *StoreKeyValue) error { + return x.MsgRecv(m, drpcEncoding_File_commonspace_spacesyncproto_protos_spacesync_proto{}) +} + func (c *drpcSpaceSyncClient) SpacePush(ctx context.Context, in *SpacePushRequest) (*SpacePushResponse, error) { out := new(SpacePushResponse) err := c.cc.Invoke(ctx, "/spacesync.SpaceSync/SpacePush", drpcEncoding_File_commonspace_spacesyncproto_protos_spacesync_proto{}, in, out) @@ -195,6 +245,8 @@ func (c *drpcSpaceSyncClient) AclGetRecords(ctx context.Context, in *AclGetRecor type DRPCSpaceSyncServer interface { HeadSync(context.Context, *HeadSyncRequest) (*HeadSyncResponse, error) + StoreDiff(context.Context, *StoreDiffRequest) (*StoreDiffResponse, error) + StoreElements(DRPCSpaceSync_StoreElementsStream) error SpacePush(context.Context, *SpacePushRequest) (*SpacePushResponse, error) SpacePull(context.Context, *SpacePullRequest) (*SpacePullResponse, error) ObjectSyncStream(DRPCSpaceSync_ObjectSyncStreamStream) error @@ -210,6 +262,14 @@ func (s *DRPCSpaceSyncUnimplementedServer) HeadSync(context.Context, *HeadSyncRe return nil, drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented) } +func (s *DRPCSpaceSyncUnimplementedServer) StoreDiff(context.Context, *StoreDiffRequest) (*StoreDiffResponse, error) { + return nil, drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented) +} + +func (s *DRPCSpaceSyncUnimplementedServer) StoreElements(DRPCSpaceSync_StoreElementsStream) error { + return drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented) +} + func (s *DRPCSpaceSyncUnimplementedServer) SpacePush(context.Context, *SpacePushRequest) (*SpacePushResponse, error) { return nil, drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented) } @@ -240,7 +300,7 @@ func (s *DRPCSpaceSyncUnimplementedServer) AclGetRecords(context.Context, *AclGe type DRPCSpaceSyncDescription struct{} -func (DRPCSpaceSyncDescription) NumMethods() int { return 8 } +func (DRPCSpaceSyncDescription) NumMethods() int { return 10 } func (DRPCSpaceSyncDescription) Method(n int) (string, drpc.Encoding, drpc.Receiver, interface{}, bool) { switch n { @@ -254,6 +314,23 @@ func (DRPCSpaceSyncDescription) Method(n int) (string, drpc.Encoding, drpc.Recei ) }, DRPCSpaceSyncServer.HeadSync, true case 1: + return "/spacesync.SpaceSync/StoreDiff", drpcEncoding_File_commonspace_spacesyncproto_protos_spacesync_proto{}, + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCSpaceSyncServer). + StoreDiff( + ctx, + in1.(*StoreDiffRequest), + ) + }, DRPCSpaceSyncServer.StoreDiff, true + case 2: + return "/spacesync.SpaceSync/StoreElements", drpcEncoding_File_commonspace_spacesyncproto_protos_spacesync_proto{}, + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return nil, srv.(DRPCSpaceSyncServer). + StoreElements( + &drpcSpaceSync_StoreElementsStream{in1.(drpc.Stream)}, + ) + }, DRPCSpaceSyncServer.StoreElements, true + case 3: return "/spacesync.SpaceSync/SpacePush", drpcEncoding_File_commonspace_spacesyncproto_protos_spacesync_proto{}, func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { return srv.(DRPCSpaceSyncServer). 
@@ -262,7 +339,7 @@ func (DRPCSpaceSyncDescription) Method(n int) (string, drpc.Encoding, drpc.Recei in1.(*SpacePushRequest), ) }, DRPCSpaceSyncServer.SpacePush, true - case 2: + case 4: return "/spacesync.SpaceSync/SpacePull", drpcEncoding_File_commonspace_spacesyncproto_protos_spacesync_proto{}, func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { return srv.(DRPCSpaceSyncServer). @@ -271,7 +348,7 @@ func (DRPCSpaceSyncDescription) Method(n int) (string, drpc.Encoding, drpc.Recei in1.(*SpacePullRequest), ) }, DRPCSpaceSyncServer.SpacePull, true - case 3: + case 5: return "/spacesync.SpaceSync/ObjectSyncStream", drpcEncoding_File_commonspace_spacesyncproto_protos_spacesync_proto{}, func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { return nil, srv.(DRPCSpaceSyncServer). @@ -279,7 +356,7 @@ func (DRPCSpaceSyncDescription) Method(n int) (string, drpc.Encoding, drpc.Recei &drpcSpaceSync_ObjectSyncStreamStream{in1.(drpc.Stream)}, ) }, DRPCSpaceSyncServer.ObjectSyncStream, true - case 4: + case 6: return "/spacesync.SpaceSync/ObjectSync", drpcEncoding_File_commonspace_spacesyncproto_protos_spacesync_proto{}, func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { return srv.(DRPCSpaceSyncServer). @@ -288,7 +365,7 @@ func (DRPCSpaceSyncDescription) Method(n int) (string, drpc.Encoding, drpc.Recei in1.(*ObjectSyncMessage), ) }, DRPCSpaceSyncServer.ObjectSync, true - case 5: + case 7: return "/spacesync.SpaceSync/ObjectSyncRequestStream", drpcEncoding_File_commonspace_spacesyncproto_protos_spacesync_proto{}, func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { return nil, srv.(DRPCSpaceSyncServer). @@ -297,7 +374,7 @@ func (DRPCSpaceSyncDescription) Method(n int) (string, drpc.Encoding, drpc.Recei &drpcSpaceSync_ObjectSyncRequestStreamStream{in2.(drpc.Stream)}, ) }, DRPCSpaceSyncServer.ObjectSyncRequestStream, true - case 6: + case 8: return "/spacesync.SpaceSync/AclAddRecord", drpcEncoding_File_commonspace_spacesyncproto_protos_spacesync_proto{}, func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { return srv.(DRPCSpaceSyncServer). @@ -306,7 +383,7 @@ func (DRPCSpaceSyncDescription) Method(n int) (string, drpc.Encoding, drpc.Recei in1.(*AclAddRecordRequest), ) }, DRPCSpaceSyncServer.AclAddRecord, true - case 7: + case 9: return "/spacesync.SpaceSync/AclGetRecords", drpcEncoding_File_commonspace_spacesyncproto_protos_spacesync_proto{}, func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { return srv.(DRPCSpaceSyncServer). 
@@ -340,6 +417,48 @@ func (x *drpcSpaceSync_HeadSyncStream) SendAndClose(m *HeadSyncResponse) error { return x.CloseSend() } +type DRPCSpaceSync_StoreDiffStream interface { + drpc.Stream + SendAndClose(*StoreDiffResponse) error +} + +type drpcSpaceSync_StoreDiffStream struct { + drpc.Stream +} + +func (x *drpcSpaceSync_StoreDiffStream) SendAndClose(m *StoreDiffResponse) error { + if err := x.MsgSend(m, drpcEncoding_File_commonspace_spacesyncproto_protos_spacesync_proto{}); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCSpaceSync_StoreElementsStream interface { + drpc.Stream + Send(*StoreKeyValue) error + Recv() (*StoreKeyValue, error) +} + +type drpcSpaceSync_StoreElementsStream struct { + drpc.Stream +} + +func (x *drpcSpaceSync_StoreElementsStream) Send(m *StoreKeyValue) error { + return x.MsgSend(m, drpcEncoding_File_commonspace_spacesyncproto_protos_spacesync_proto{}) +} + +func (x *drpcSpaceSync_StoreElementsStream) Recv() (*StoreKeyValue, error) { + m := new(StoreKeyValue) + if err := x.MsgRecv(m, drpcEncoding_File_commonspace_spacesyncproto_protos_spacesync_proto{}); err != nil { + return nil, err + } + return m, nil +} + +func (x *drpcSpaceSync_StoreElementsStream) RecvMsg(m *StoreKeyValue) error { + return x.MsgRecv(m, drpcEncoding_File_commonspace_spacesyncproto_protos_spacesync_proto{}) +} + type DRPCSpaceSync_SpacePushStream interface { drpc.Stream SendAndClose(*SpacePushResponse) error diff --git a/commonspace/spaceutils_test.go b/commonspace/spaceutils_test.go index 7e3f0b7e..2d5e97d5 100644 --- a/commonspace/spaceutils_test.go +++ b/commonspace/spaceutils_test.go @@ -2,12 +2,14 @@ package commonspace import ( "context" + "errors" "fmt" "math/rand" "sync" "testing" "time" + anystore "github.com/anyproto/any-store" "github.com/anyproto/go-chash" "github.com/stretchr/testify/require" "go.uber.org/zap" @@ -26,6 +28,7 @@ import ( "github.com/anyproto/any-sync/commonspace/object/treesyncer" "github.com/anyproto/any-sync/commonspace/objecttreebuilder" "github.com/anyproto/any-sync/commonspace/peermanager" + "github.com/anyproto/any-sync/commonspace/spacepayloads" "github.com/anyproto/any-sync/commonspace/spacestorage" "github.com/anyproto/any-sync/commonspace/spacesyncproto" "github.com/anyproto/any-sync/commonspace/sync/objectsync/objectmessages" @@ -751,7 +754,7 @@ func newMultiPeerFixture(t *testing.T, peerNum int) *multiPeerFixture { require.NoError(t, err) readKey := crypto.NewAES() meta := []byte("account") - payload := SpaceCreatePayload{ + payload := spacepayloads.SpaceCreatePayload{ SigningKey: keys.SignKey, SpaceType: "space", ReplicationKey: 10, @@ -761,7 +764,7 @@ func newMultiPeerFixture(t *testing.T, peerNum int) *multiPeerFixture { MetadataKey: metaKey, Metadata: meta, } - createSpace, err := StoragePayloadForSpaceCreate(payload) + createSpace, err := spacepayloads.StoragePayloadForSpaceCreate(payload) require.NoError(t, err) executor := list.NewExternalKeysAclExecutor(createSpace.SpaceHeaderWithId.Id, keys, meta, createSpace.AclWithId) cmds := []string{ @@ -802,6 +805,9 @@ func newMultiPeerFixture(t *testing.T, peerNum int) *multiPeerFixture { err := listStorage.AddAll(ctx, []list.StorageRecord{ {RawRecord: rec.Payload, Id: rec.Id, PrevId: prevRec, Order: i + 1, ChangeSize: len(rec.Payload)}, }) + if errors.Is(err, anystore.ErrDocExists) { + continue + } require.NoError(t, err) } } @@ -829,7 +835,7 @@ func Test_Sync(t *testing.T) { require.NoError(t, err) state, err := sp.Storage().StateStorage().GetState(context.Background()) require.NoError(t, 
err) - hashes = append(hashes, state.Hash) + hashes = append(hashes, state.NewHash) } for i := 1; i < len(hashes); i++ { require.Equal(t, hashes[0], hashes[i]) diff --git a/commonspace/sync/objectsync/objectmessages/headupdate.go b/commonspace/sync/objectsync/objectmessages/headupdate.go index 2d9c3e02..e8510f0e 100644 --- a/commonspace/sync/objectsync/objectmessages/headupdate.go +++ b/commonspace/sync/objectsync/objectmessages/headupdate.go @@ -39,6 +39,7 @@ type InnerHeadUpdate interface { Prepare() error Heads() []string MsgSize() uint64 + ObjectType() spacesyncproto.ObjectType } type ObjectMeta struct { @@ -48,10 +49,11 @@ type ObjectMeta struct { } type HeadUpdate struct { - Meta ObjectMeta - Bytes []byte - Update InnerHeadUpdate - msg *spacesyncproto.ObjectSyncMessage + Meta ObjectMeta + Bytes []byte + Update InnerHeadUpdate + objectType spacesyncproto.ObjectType + msg *spacesyncproto.ObjectSyncMessage } func (h *HeadUpdate) MsgSize() uint64 { @@ -84,6 +86,7 @@ func (h *HeadUpdate) SetProtoMessage(message proto.Message) error { h.Bytes = msg.GetPayload() h.Meta.SpaceId = msg.SpaceId h.Meta.ObjectId = msg.ObjectId + h.objectType = msg.GetObjectType() return nil } @@ -94,14 +97,19 @@ func (h *HeadUpdate) ProtoMessage() (proto.Message, error) { return nil, err } return &spacesyncproto.ObjectSyncMessage{ - SpaceId: h.Meta.SpaceId, - Payload: payload, - ObjectId: h.Meta.ObjectId, + SpaceId: h.Meta.SpaceId, + Payload: payload, + ObjectId: h.Meta.ObjectId, + ObjectType: h.Update.ObjectType(), }, nil } return NewMessage(), nil } +func (h *HeadUpdate) ObjectType() spacesyncproto.ObjectType { + return h.objectType +} + func (h *HeadUpdate) SpaceId() string { return h.Meta.SpaceId } @@ -116,9 +124,10 @@ func (h *HeadUpdate) ObjectId() string { func (h *HeadUpdate) Copy() drpc.Message { return &HeadUpdate{ - Meta: h.Meta, - Bytes: h.Bytes, - Update: h.Update, - msg: h.msg, + Meta: h.Meta, + Bytes: h.Bytes, + Update: h.Update, + msg: h.msg, + objectType: h.objectType, } } diff --git a/commonspace/sync/objectsync/objectsync_test.go b/commonspace/sync/objectsync/objectsync_test.go index eb104d5d..f3a61e38 100644 --- a/commonspace/sync/objectsync/objectsync_test.go +++ b/commonspace/sync/objectsync/objectsync_test.go @@ -10,6 +10,8 @@ import ( "go.uber.org/mock/gomock" "github.com/anyproto/any-sync/app" + "github.com/anyproto/any-sync/commonspace/object/keyvalue/kvinterfaces" + "github.com/anyproto/any-sync/commonspace/object/keyvalue/kvinterfaces/mock_kvinterfaces" "github.com/anyproto/any-sync/commonspace/object/tree/synctree" "github.com/anyproto/any-sync/commonspace/object/tree/treechangeproto" "github.com/anyproto/any-sync/commonspace/object/treemanager" @@ -159,6 +161,7 @@ func TestObjectSync_ApplyRequest(t *testing.T) { type fixture struct { *objectSync objectManager *mock_objectmanager.MockObjectManager + keyValue *mock_kvinterfaces.MockKeyValueService pool *mock_pool.MockService a *app.App ctrl *gomock.Controller @@ -171,13 +174,16 @@ func newFixture(t *testing.T) *fixture { fx.ctrl = gomock.NewController(t) fx.objectManager = mock_objectmanager.NewMockObjectManager(fx.ctrl) fx.pool = mock_pool.NewMockService(fx.ctrl) + fx.keyValue = mock_kvinterfaces.NewMockKeyValueService(fx.ctrl) anymock.ExpectComp(fx.objectManager.EXPECT(), treemanager.CName) anymock.ExpectComp(fx.pool.EXPECT(), pool.CName) + anymock.ExpectComp(fx.keyValue.EXPECT(), kvinterfaces.CName) fx.objectSync = &objectSync{} spaceState := &spacestate.SpaceState{SpaceId: "spaceId"} fx.a.Register(fx.objectManager). 
Register(spaceState). Register(fx.pool). + Register(fx.keyValue). Register(syncstatus.NewNoOpSyncStatus()). Register(fx.objectSync) require.NoError(t, fx.a.Start(context.Background())) diff --git a/commonspace/sync/objectsync/synchandler.go b/commonspace/sync/objectsync/synchandler.go index 382f355c..2cc9d984 100644 --- a/commonspace/sync/objectsync/synchandler.go +++ b/commonspace/sync/objectsync/synchandler.go @@ -11,6 +11,7 @@ import ( "github.com/anyproto/any-sync/app" "github.com/anyproto/any-sync/app/logger" + "github.com/anyproto/any-sync/commonspace/object/keyvalue/kvinterfaces" "github.com/anyproto/any-sync/commonspace/object/tree/synctree" "github.com/anyproto/any-sync/commonspace/object/tree/treechangeproto" "github.com/anyproto/any-sync/commonspace/object/treemanager" @@ -30,10 +31,11 @@ var ErrUnexpectedHeadUpdateType = errors.New("unexpected head update type") var log = logger.NewNamed(syncdeps.CName) type objectSync struct { - spaceId string - pool pool.Service - manager objectmanager.ObjectManager - status syncstatus.StatusUpdater + spaceId string + pool pool.Service + manager objectmanager.ObjectManager + status syncstatus.StatusUpdater + keyValue kvinterfaces.KeyValueService } func New() syncdeps.SyncHandler { @@ -43,6 +45,7 @@ func New() syncdeps.SyncHandler { func (o *objectSync) Init(a *app.App) (err error) { o.manager = a.MustComponent(treemanager.CName).(objectmanager.ObjectManager) o.pool = a.MustComponent(pool.CName).(pool.Service) + o.keyValue = a.MustComponent(kvinterfaces.CName).(kvinterfaces.KeyValueService) o.status = a.MustComponent(syncstatus.CName).(syncstatus.StatusUpdater) o.spaceId = a.MustComponent(spacestate.CName).(*spacestate.SpaceState).SpaceId return @@ -57,6 +60,9 @@ func (o *objectSync) HandleHeadUpdate(ctx context.Context, headUpdate drpc.Messa if !ok { return nil, ErrUnexpectedHeadUpdateType } + if update.ObjectType() == spacesyncproto.ObjectType_KeyValue { + return nil, o.keyValue.HandleMessage(ctx, update) + } peerId, err := peer.CtxPeerId(ctx) if err != nil { return nil, err diff --git a/commonspace/sync/sync_test.go b/commonspace/sync/sync_test.go index c1070c18..0a4d93c0 100644 --- a/commonspace/sync/sync_test.go +++ b/commonspace/sync/sync_test.go @@ -386,7 +386,12 @@ func (r *testRequest) MsgSize() uint64 { } type testMessage struct { - objectId string + objectId string + objectType spacesyncproto.ObjectType +} + +func (t *testMessage) ObjectType() spacesyncproto.ObjectType { + return t.objectType } func (t *testMessage) ObjectId() string { diff --git a/commonspace/sync/syncdeps/message.go b/commonspace/sync/syncdeps/message.go index c1844098..26a0fac3 100644 --- a/commonspace/sync/syncdeps/message.go +++ b/commonspace/sync/syncdeps/message.go @@ -1,6 +1,9 @@ package syncdeps +import "github.com/anyproto/any-sync/commonspace/spacesyncproto" + type Message interface { ObjectId() string MsgSize() uint64 + ObjectType() spacesyncproto.ObjectType } diff --git a/commonspace/sync/syncdeps/mock_syncdeps/mock_syncdeps.go b/commonspace/sync/syncdeps/mock_syncdeps/mock_syncdeps.go index 8bc15c81..50d1cfd9 100644 --- a/commonspace/sync/syncdeps/mock_syncdeps/mock_syncdeps.go +++ b/commonspace/sync/syncdeps/mock_syncdeps/mock_syncdeps.go @@ -5,7 +5,6 @@ // // mockgen -destination mock_syncdeps/mock_syncdeps.go github.com/anyproto/any-sync/commonspace/sync/syncdeps ObjectSyncHandler,RequestSender,ResponseCollector // - // Package mock_syncdeps is a generated GoMock package. 
package mock_syncdeps diff --git a/commonspace/syncstatus/mock_syncstatus/mock_syncstatus.go b/commonspace/syncstatus/mock_syncstatus/mock_syncstatus.go index 622ac41e..ba3b328f 100644 --- a/commonspace/syncstatus/mock_syncstatus/mock_syncstatus.go +++ b/commonspace/syncstatus/mock_syncstatus/mock_syncstatus.go @@ -5,7 +5,6 @@ // // mockgen -destination mock_syncstatus/mock_syncstatus.go github.com/anyproto/any-sync/commonspace/syncstatus StatusUpdater // - // Package mock_syncstatus is a generated GoMock package. package mock_syncstatus diff --git a/consensus/consensusclient/mock_consensusclient/mock_consensusclient.go b/consensus/consensusclient/mock_consensusclient/mock_consensusclient.go index ff132095..c3bf7a3c 100644 --- a/consensus/consensusclient/mock_consensusclient/mock_consensusclient.go +++ b/consensus/consensusclient/mock_consensusclient/mock_consensusclient.go @@ -5,7 +5,6 @@ // // mockgen -destination mock_consensusclient/mock_consensusclient.go github.com/anyproto/any-sync/consensus/consensusclient Service // - // Package mock_consensusclient is a generated GoMock package. package mock_consensusclient diff --git a/go.mod b/go.mod index e7a57cd4..5df18009 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ toolchain go1.23.5 require ( filippo.io/edwards25519 v1.1.0 - github.com/anyproto/any-store v0.1.8 + github.com/anyproto/any-store v0.1.11 github.com/anyproto/go-chash v0.1.0 github.com/anyproto/go-slip10 v1.0.0 github.com/anyproto/go-slip21 v1.0.0 @@ -21,7 +21,7 @@ require ( github.com/google/uuid v1.6.0 github.com/hashicorp/yamux v0.1.2 github.com/huandu/skiplist v1.2.1 - github.com/ipfs/boxo v0.29.0 + github.com/ipfs/boxo v0.29.1 github.com/ipfs/go-block-format v0.2.0 github.com/ipfs/go-cid v0.5.0 github.com/ipfs/go-ipld-format v0.6.0 @@ -30,7 +30,7 @@ require ( github.com/multiformats/go-multibase v0.2.0 github.com/multiformats/go-multihash v0.2.3 github.com/prometheus/client_golang v1.21.1 - github.com/quic-go/quic-go v0.50.0 + github.com/quic-go/quic-go v0.50.1 github.com/stretchr/testify v1.10.0 github.com/tyler-smith/go-bip39 v1.1.0 github.com/zeebo/blake3 v0.2.4 @@ -38,7 +38,7 @@ require ( go.uber.org/mock v0.5.0 go.uber.org/zap v1.27.0 golang.org/x/crypto v0.36.0 - golang.org/x/exp v0.0.0-20250218142911-aa4b98e5adaa + golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 golang.org/x/net v0.37.0 golang.org/x/sys v0.31.0 golang.org/x/time v0.11.0 @@ -116,6 +116,6 @@ require ( modernc.org/libc v1.61.13 // indirect modernc.org/mathutil v1.7.1 // indirect modernc.org/memory v1.8.2 // indirect - modernc.org/sqlite v1.36.0 // indirect + modernc.org/sqlite v1.36.1 // indirect zombiezen.com/go/sqlite v1.4.0 // indirect ) diff --git a/go.sum b/go.sum index 223e404f..17d68734 100644 --- a/go.sum +++ b/go.sum @@ -6,8 +6,8 @@ github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAE github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b h1:mimo19zliBX/vSQ6PWWSL9lK8qwHozUj03+zLoEB8O0= github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= -github.com/anyproto/any-store v0.1.8 h1:/bxUVq6sBTwYkmPL2g1xUAWNb3axF+zPhP2dvdEBH68= -github.com/anyproto/any-store v0.1.8/go.mod h1:GpnVhcGm5aUQtOwCnKeTt4jsWgVXZ773WbQVLFdeCFo= +github.com/anyproto/any-store v0.1.11 h1:xoaDVF8FJEI6V37fMw/R3ptBCLHj0kYiImwWxC1Ryu8= +github.com/anyproto/any-store v0.1.11/go.mod h1:X3UkQ2zLATYNED3gFhY2VcdfDOeJvpEQ0PmDO90A9Yo= 
github.com/anyproto/go-chash v0.1.0 h1:I9meTPjXFRfXZHRJzjOHC/XF7Q5vzysKkiT/grsogXY= github.com/anyproto/go-chash v0.1.0/go.mod h1:0UjNQi3PDazP0fINpFYu6VKhuna+W/V+1vpXHAfNgLY= github.com/anyproto/go-slip10 v1.0.0 h1:uAEtSuudR3jJBOfkOXf3bErxVoxbuKwdoJN55M1i6IA= @@ -77,8 +77,8 @@ github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiD github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/gammazero/chanqueue v1.0.0 h1:FER/sMailGFA3DDvFooEkipAMU+3c9Bg3bheloPSz6o= -github.com/gammazero/chanqueue v1.0.0/go.mod h1:fMwpwEiuUgpab0sH4VHiVcEoji1pSi+EIzeG4TPeKPc= +github.com/gammazero/chanqueue v1.1.0 h1:yiwtloc1azhgGLFo2gMloJtQvkYD936Ai7tBfa+rYJw= +github.com/gammazero/chanqueue v1.1.0/go.mod h1:fMwpwEiuUgpab0sH4VHiVcEoji1pSi+EIzeG4TPeKPc= github.com/gammazero/deque v1.0.0 h1:LTmimT8H7bXkkCy6gZX7zNLtkbz4NdS2z8LZuor3j34= github.com/gammazero/deque v1.0.0/go.mod h1:iflpYvtGfM3U8S8j+sZEKIak3SAKYpA5/SQewgfXDKo= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -120,8 +120,8 @@ github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= -github.com/ipfs/boxo v0.29.0 h1:clzd7PglUcE+Ufq1KucS3aKID7pzGVaSgcdRsW395t4= -github.com/ipfs/boxo v0.29.0/go.mod h1:c3R52nMlgMsN1tADffYcogKoVRsX1RJE1TMYSpJ4uVs= +github.com/ipfs/boxo v0.29.1 h1:z61ZT4YDfTHLjXTsu/+3wvJ8aJlExthDSOCpx6Nh8xc= +github.com/ipfs/boxo v0.29.1/go.mod h1:MkDJStXiJS9U99cbAijHdcmwNfVn5DKYBmQCOgjY2NU= github.com/ipfs/go-bitfield v1.1.0 h1:fh7FIo8bSwaJEh6DdTWbCeZ1eqOaOkKFI74SCnsWbGA= github.com/ipfs/go-bitfield v1.1.0/go.mod h1:paqf1wjq/D2BBmzfTVFlJQ9IlFOZpg422HL0HqsGWHU= github.com/ipfs/go-block-format v0.2.0 h1:ZqrkxBA2ICbDRbK8KJs/u0O3dlp6gmAuuXUJNiW1Ycs= @@ -293,8 +293,8 @@ github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0leargg github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI= github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg= -github.com/quic-go/quic-go v0.50.0 h1:3H/ld1pa3CYhkcc20TPIyG1bNsdhn9qZBGN3b9/UyUo= -github.com/quic-go/quic-go v0.50.0/go.mod h1:Vim6OmUvlYdwBhXP9ZVrtGmCMWa3wEqhq3NgYrI8b4E= +github.com/quic-go/quic-go v0.50.1 h1:unsgjFIUqW8a2oopkY7YNONpV1gYND6Nt9hnt1PN94Q= +github.com/quic-go/quic-go v0.50.1/go.mod h1:Vim6OmUvlYdwBhXP9ZVrtGmCMWa3wEqhq3NgYrI8b4E= github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 h1:4WFk6u3sOT6pLa1kQ50ZVdm8BQFgJNA117cepZxtLIg= github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66/go.mod h1:Vp72IJajgeOL6ddqrAhmp7IM9zbTcgkQxD/YdxrVwMw= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= @@ -379,8 +379,8 @@ golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= 
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= -golang.org/x/exp v0.0.0-20250218142911-aa4b98e5adaa h1:t2QcU6V556bFjYgu4L6C+6VrCPyJZ+eyRsABUPs1mz4= -golang.org/x/exp v0.0.0-20250218142911-aa4b98e5adaa/go.mod h1:BHOTPb3L19zxehTsLoJXVaTktb06DFgmdW6Wb9s8jqk= +golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 h1:nDVHiLt8aIbd/VzvPWN6kSOPE7+F/fNFDSXLVYkE/Iw= +golang.org/x/exp v0.0.0-20250305212735-054e65f0b394/go.mod h1:sIifuuw/Yco/y6yb6+bDNfyeQ/MdPUy/hKEMYQV17cM= golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/image v0.21.0 h1:c5qV36ajHpdj4Qi0GnE0jUc/yuo33OLFaa0d+crTD5s= golang.org/x/image v0.21.0/go.mod h1:vUbsLavqK/W303ZroQQVKQ+Af3Yl6Uz1Ppu5J/cLz78= @@ -475,8 +475,8 @@ modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8= modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns= modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w= modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE= -modernc.org/sqlite v1.36.0 h1:EQXNRn4nIS+gfsKeUTymHIz1waxuv5BzU7558dHSfH8= -modernc.org/sqlite v1.36.0/go.mod h1:7MPwH7Z6bREicF9ZVUR78P1IKuxfZ8mRIDHD0iD+8TU= +modernc.org/sqlite v1.36.1 h1:bDa8BJUH4lg6EGkLbahKe/8QqoF8p9gArSc6fTqYhyQ= +modernc.org/sqlite v1.36.1/go.mod h1:7MPwH7Z6bREicF9ZVUR78P1IKuxfZ8mRIDHD0iD+8TU= modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0= modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A= modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= diff --git a/nameservice/nameserviceclient/mock/mock_nameserviceclient.go b/nameservice/nameserviceclient/mock/mock_nameserviceclient.go index 82990e01..00f41f3a 100644 --- a/nameservice/nameserviceclient/mock/mock_nameserviceclient.go +++ b/nameservice/nameserviceclient/mock/mock_nameserviceclient.go @@ -5,7 +5,6 @@ // // mockgen -destination=mock/mock_nameserviceclient.go -package=mock_nameserviceclient github.com/anyproto/any-sync/nameservice/nameserviceclient AnyNsClientService // - // Package mock_nameserviceclient is a generated GoMock package. package mock_nameserviceclient diff --git a/net/peer/mock_peer/mock_peer.go b/net/peer/mock_peer/mock_peer.go index 96b1aaba..4ace14a8 100644 --- a/net/peer/mock_peer/mock_peer.go +++ b/net/peer/mock_peer/mock_peer.go @@ -5,7 +5,6 @@ // // mockgen -destination mock_peer/mock_peer.go github.com/anyproto/any-sync/net/peer Peer // - // Package mock_peer is a generated GoMock package. 
package mock_peer diff --git a/net/peer/peer.go b/net/peer/peer.go index ea579a3c..916dc713 100644 --- a/net/peer/peer.go +++ b/net/peer/peer.go @@ -159,10 +159,10 @@ func (p *peer) AcquireDrpcConn(ctx context.Context) (drpc.Conn, error) { } func (p *peer) ReleaseDrpcConn(conn drpc.Conn) { - // do nothing if it's closed connection + var closed bool select { case <-conn.Closed(): - return + closed = true default: } @@ -183,7 +183,9 @@ func (p *peer) ReleaseDrpcConn(conn drpc.Conn) { if _, ok = p.active[sc]; ok { delete(p.active, sc) } - p.inactive = append(p.inactive, sc) + if !closed { + p.inactive = append(p.inactive, sc) + } return } diff --git a/net/pool/mock_pool/mock_pool.go b/net/pool/mock_pool/mock_pool.go index 6490a959..2a1b8f99 100644 --- a/net/pool/mock_pool/mock_pool.go +++ b/net/pool/mock_pool/mock_pool.go @@ -5,7 +5,6 @@ // // mockgen -destination mock_pool/mock_pool.go github.com/anyproto/any-sync/net/pool Pool,Service // - // Package mock_pool is a generated GoMock package. package mock_pool diff --git a/net/rpc/limiter/mock_limiter/mock_limiter.go b/net/rpc/limiter/mock_limiter/mock_limiter.go index 99e2ff8c..78b9cc52 100644 --- a/net/rpc/limiter/mock_limiter/mock_limiter.go +++ b/net/rpc/limiter/mock_limiter/mock_limiter.go @@ -5,7 +5,6 @@ // // mockgen -destination mock_limiter/mock_limiter.go github.com/anyproto/any-sync/net/rpc/limiter RpcLimiter // - // Package mock_limiter is a generated GoMock package. package mock_limiter diff --git a/net/secureservice/secureservice.go b/net/secureservice/secureservice.go index 046434ab..3f38e5da 100644 --- a/net/secureservice/secureservice.go +++ b/net/secureservice/secureservice.go @@ -30,12 +30,14 @@ var ( // ProtoVersion 3 - acl with breaking changes / multiplayer // ProtoVersion 4 - new sync compatible version // ProtoVersion 5 - sync with no entry space + // ProtoVersion 6 - sync with key value messages CompatibleVersion = uint32(4) ProtoVersion = uint32(5) + KeyValueVersion = uint32(6) ) var ( - compatibleVersions = []uint32{CompatibleVersion, ProtoVersion} + compatibleVersions = []uint32{CompatibleVersion, ProtoVersion, KeyValueVersion} ) func New() SecureService { diff --git a/net/streampool/streampool.go b/net/streampool/streampool.go index 48345bcb..62a0b8a7 100644 --- a/net/streampool/streampool.go +++ b/net/streampool/streampool.go @@ -235,8 +235,6 @@ func (s *streamPool) Send(ctx context.Context, msg drpc.Message, peerGetter Peer for _, p := range peers { if e := s.sendOne(ctx, p, msg); e != nil { log.InfoCtx(ctx, "send peer error", zap.Error(e), zap.String("peerId", p.Id())) - } else { - log.DebugCtx(ctx, "send success", zap.String("peerId", p.Id())) } } }) @@ -261,7 +259,6 @@ func (s *streamPool) SendById(ctx context.Context, msg drpc.Message, peerIds ... 
if e := st.write(msg); e != nil { st.l.Debug("sendById write error", zap.Error(e)) } else { - st.l.DebugCtx(ctx, "sendById success") return } } @@ -284,8 +281,6 @@ func (s *streamPool) sendOne(ctx context.Context, p peer.Peer, msg drpc.Message) // continue with next stream continue } else { - st.l.DebugCtx(ctx, "sendOne success") - // stop sending on success break } } diff --git a/net/transport/mock_transport/mock_transport.go b/net/transport/mock_transport/mock_transport.go index d263cd3e..f416394f 100644 --- a/net/transport/mock_transport/mock_transport.go +++ b/net/transport/mock_transport/mock_transport.go @@ -5,7 +5,6 @@ // // mockgen -destination mock_transport/mock_transport.go github.com/anyproto/any-sync/net/transport Transport,MultiConn // - // Package mock_transport is a generated GoMock package. package mock_transport diff --git a/net/transport/quic/conn.go b/net/transport/quic/conn.go index a160dbf3..8563c74c 100644 --- a/net/transport/quic/conn.go +++ b/net/transport/quic/conn.go @@ -4,6 +4,7 @@ import ( "context" "errors" "net" + "time" "github.com/quic-go/quic-go" @@ -11,16 +12,18 @@ import ( "github.com/anyproto/any-sync/net/transport" ) -func newConn(cctx context.Context, qconn quic.Connection) transport.MultiConn { +func newConn(cctx context.Context, qconn quic.Connection, writeTimeout time.Duration) transport.MultiConn { cctx = peer.CtxWithPeerAddr(cctx, transport.Quic+"://"+qconn.RemoteAddr().String()) return &quicMultiConn{ - cctx: cctx, - Connection: qconn, + cctx: cctx, + Connection: qconn, + writeTimeout: writeTimeout, } } type quicMultiConn struct { - cctx context.Context + cctx context.Context + writeTimeout time.Duration quic.Connection } @@ -39,9 +42,10 @@ func (q *quicMultiConn) Accept() (conn net.Conn, err error) { return nil, err } return quicNetConn{ - Stream: stream, - localAddr: q.LocalAddr(), - remoteAddr: q.RemoteAddr(), + Stream: stream, + localAddr: q.LocalAddr(), + remoteAddr: q.RemoteAddr(), + writeTimeout: q.writeTimeout, }, nil } @@ -84,6 +88,7 @@ const ( type quicNetConn struct { quic.Stream + writeTimeout time.Duration localAddr, remoteAddr net.Addr } @@ -98,6 +103,15 @@ func (q quicNetConn) Close() error { return q.Stream.Close() } +func (q quicNetConn) Write(b []byte) (n int, err error) { + if q.writeTimeout > 0 { + if err = q.Stream.SetWriteDeadline(time.Now().Add(q.writeTimeout)); err != nil { + return + } + } + return q.Stream.Write(b) +} + func (q quicNetConn) LocalAddr() net.Addr { return q.localAddr } diff --git a/net/transport/quic/quic.go b/net/transport/quic/quic.go index dbd712ca..27c7b786 100644 --- a/net/transport/quic/quic.go +++ b/net/transport/quic/quic.go @@ -147,7 +147,7 @@ func (q *quicTransport) Dial(ctx context.Context, addr string) (mc transport.Mul return nil, err } - return newConn(cctx, qConn), nil + return newConn(cctx, qConn, time.Second*time.Duration(q.conf.WriteTimeoutSec)), nil } func (q *quicTransport) acceptLoop(ctx context.Context, list *quic.Listener) { @@ -199,7 +199,7 @@ func (q *quicTransport) accept(conn quic.Connection) (err error) { }() return } - mc := newConn(cctx, conn) + mc := newConn(cctx, conn, time.Second*time.Duration(q.conf.WriteTimeoutSec)) return q.accepter.Accept(mc) } diff --git a/node/nodeclient/mock_nodeclient/mock_nodeclient.go b/node/nodeclient/mock_nodeclient/mock_nodeclient.go index dc8dea45..b908059f 100644 --- a/node/nodeclient/mock_nodeclient/mock_nodeclient.go +++ b/node/nodeclient/mock_nodeclient/mock_nodeclient.go @@ -5,7 +5,6 @@ // // mockgen -destination 
mock_nodeclient/mock_nodeclient.go github.com/anyproto/any-sync/node/nodeclient NodeClient // - // Package mock_nodeclient is a generated GoMock package. package mock_nodeclient diff --git a/nodeconf/mock_nodeconf/mock_nodeconf.go b/nodeconf/mock_nodeconf/mock_nodeconf.go index 6bfdfaa8..2f6b26a4 100644 --- a/nodeconf/mock_nodeconf/mock_nodeconf.go +++ b/nodeconf/mock_nodeconf/mock_nodeconf.go @@ -5,7 +5,6 @@ // // mockgen -destination mock_nodeconf/mock_nodeconf.go github.com/anyproto/any-sync/nodeconf Service // - // Package mock_nodeconf is a generated GoMock package. package mock_nodeconf diff --git a/paymentservice/paymentserviceclient/mock/mock_paymentserviceclient.go b/paymentservice/paymentserviceclient/mock/mock_paymentserviceclient.go index 9e97e1f4..2716f81b 100644 --- a/paymentservice/paymentserviceclient/mock/mock_paymentserviceclient.go +++ b/paymentservice/paymentserviceclient/mock/mock_paymentserviceclient.go @@ -5,7 +5,6 @@ // // mockgen -destination=mock/mock_paymentserviceclient.go -package=mock_paymentserviceclient github.com/anyproto/any-sync/paymentservice/paymentserviceclient AnyPpClientService // - // Package mock_paymentserviceclient is a generated GoMock package. package mock_paymentserviceclient diff --git a/paymentservice/paymentserviceclient/paymentserviceclient.go b/paymentservice/paymentserviceclient/paymentserviceclient.go index d70842c8..0fd39ede 100644 --- a/paymentservice/paymentserviceclient/paymentserviceclient.go +++ b/paymentservice/paymentserviceclient/paymentserviceclient.go @@ -4,6 +4,7 @@ package paymentserviceclient import ( "context" "errors" + "github.com/anyproto/any-sync/net" "github.com/anyproto/any-sync/app" "github.com/anyproto/any-sync/app/logger" @@ -78,7 +79,9 @@ func (s *service) doClient(ctx context.Context, fn func(cl pp.DRPCAnyPaymentProc // please use "paymentProcessingNode" type of node in the config (in the network.nodes array) peer, err := s.pool.GetOneOf(ctx, s.nodeconf.PaymentProcessingNodePeers()) if err != nil { - log.Error("failed to get a paymentnode peer", zap.Error(err)) + if !errors.Is(err, net.ErrUnableToConnect) { + log.Error("failed to get a paymentnode peer", zap.Error(err)) + } return err } diff --git a/util/crypto/derived.go b/util/crypto/derived.go index 0318b17a..86f64d0d 100644 --- a/util/crypto/derived.go +++ b/util/crypto/derived.go @@ -5,8 +5,9 @@ import ( ) const ( - AnysyncSpacePath = "m/SLIP-0021/anysync/space" - AnysyncTreePath = "m/SLIP-0021/anysync/tree/%s" + AnysyncSpacePath = "m/SLIP-0021/anysync/space" + AnysyncTreePath = "m/SLIP-0021/anysync/tree/%s" + AnysyncKeyValuePath = "m/SLIP-0021/anysync/keyvalue/%s" ) // DeriveSymmetricKey derives a symmetric key from seed and path using slip-21 diff --git a/util/periodicsync/mock_periodicsync/mock_periodicsync.go b/util/periodicsync/mock_periodicsync/mock_periodicsync.go index 78d858b5..4039cda8 100644 --- a/util/periodicsync/mock_periodicsync/mock_periodicsync.go +++ b/util/periodicsync/mock_periodicsync/mock_periodicsync.go @@ -5,7 +5,6 @@ // // mockgen -destination mock_periodicsync/mock_periodicsync.go github.com/anyproto/any-sync/util/periodicsync PeriodicSync // - // Package mock_periodicsync is a generated GoMock package. package mock_periodicsync
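
A minimal, illustrative sketch of how the key-value RPCs added in this diff (the unary StoreDiff call and the bidirectional StoreElements stream) might be driven from the client side. It assumes the standard drpc-generated constructor NewDRPCSpaceSyncClient and leaves the request and element fields unset, since their full schemas are not reproduced in this diff; it is a usage sketch only, not part of the change set.

package example

import (
	"context"
	"errors"
	"io"

	"storj.io/drpc"

	"github.com/anyproto/any-sync/commonspace/spacesyncproto"
)

// storeKeyValues performs a unary StoreDiff exchange, then pushes one (empty)
// element over the StoreElements stream and drains the peer's replies.
func storeKeyValues(ctx context.Context, conn drpc.Conn) error {
	// Assumed generated constructor for the DRPCSpaceSyncClient interface.
	cl := spacesyncproto.NewDRPCSpaceSyncClient(conn)

	// Unary diff exchange; the request is left empty for illustration.
	if _, err := cl.StoreDiff(ctx, &spacesyncproto.StoreDiffRequest{}); err != nil {
		return err
	}

	// Bidirectional element stream: Send and Recv both carry StoreKeyValue,
	// matching the generated DRPCSpaceSync_StoreElementsClient interface.
	stream, err := cl.StoreElements(ctx)
	if err != nil {
		return err
	}
	// Populate StoreKeyValue fields per the proto definition; omitted here.
	if err = stream.Send(&spacesyncproto.StoreKeyValue{}); err != nil {
		return err
	}
	if err = stream.CloseSend(); err != nil {
		return err
	}
	for {
		if _, err = stream.Recv(); err != nil {
			if errors.Is(err, io.EOF) {
				return nil
			}
			return err
		}
	}
}

The symmetric Send/Recv of StoreKeyValue mirrors the generated stream interfaces above; server-side handlers would implement the corresponding DRPCSpaceSync_StoreElementsStream in the same way.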