mirror of https://github.com/anyproto/anytype-heart.git synced 2025-06-08 05:47:07 +09:00

Fix file migration: move files migrated in non-personal space

Sergey 2024-08-14 14:23:37 +02:00
parent aa9bc32392
commit 0ff4ae7863
4 changed files with 81 additions and 3 deletions
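In short: on startup, the file-object service now queries for file objects that have both a fileId and a uniqueKey but live outside the personal space (these appear to be the objects produced by the old-files migration running in spaces it should not have touched) and archives them via a new objectArchiver dependency. The commit also adds an IsPersonal() method to the Space interface and an InlineCallStack() debug helper.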

@@ -86,12 +86,13 @@ func (f *File) Init(ctx *smartblock.InitContext) error {
 	f.SmartBlock.AddHook(f.reconciler.FileObjectHook(domain.FullID{SpaceID: f.SpaceID(), ObjectID: f.Id()}), smartblock.HookBeforeApply)
 	if !ctx.IsNewObject {
-		err = f.fileObjectService.EnsureFileAddedToSyncQueue(domain.FullID{ObjectID: f.Id(), SpaceID: f.SpaceID()}, ctx.State.Details())
+		fullId := domain.FullID{ObjectID: f.Id(), SpaceID: f.SpaceID()}
+		err = f.fileObjectService.EnsureFileAddedToSyncQueue(fullId, ctx.State.Details())
 		if err != nil {
 			log.Errorf("failed to ensure file added to sync queue: %v", err)
 		}
 		f.AddHook(func(applyInfo smartblock.ApplyInfo) error {
-			return f.fileObjectService.EnsureFileAddedToSyncQueue(domain.FullID{ObjectID: f.Id(), SpaceID: f.SpaceID()}, applyInfo.State.Details())
+			return f.fileObjectService.EnsureFileAddedToSyncQueue(fullId, applyInfo.State.Details())
 		}, smartblock.HookOnStateRebuild)
 	}
 	return nil

@@ -45,6 +45,7 @@ type Space interface {
 	GetTypeIdByKey(ctx context.Context, key domain.TypeKey) (id string, err error)
 	DeriveObjectID(ctx context.Context, uniqueKey domain.UniqueKey) (id string, err error)
 	StoredIds() []string
+	IsPersonal() bool
 }
 
 type Service interface {

@@ -23,6 +23,7 @@ import (
 	"github.com/anyproto/anytype-heart/core/files"
 	"github.com/anyproto/anytype-heart/core/files/fileoffloader"
 	"github.com/anyproto/anytype-heart/core/filestorage/filesync"
+	"github.com/anyproto/anytype-heart/core/session"
 	"github.com/anyproto/anytype-heart/core/syncstatus/filesyncstatus"
 	"github.com/anyproto/anytype-heart/pb"
 	"github.com/anyproto/anytype-heart/pkg/lib/bundle"
@@ -80,6 +81,7 @@ type service struct {
 	objectStore objectstore.ObjectStore
 	spaceIdResolver idresolver.Resolver
 	migrationQueue *persistentqueue.Queue[*migrationItem]
+	objectArchiver objectArchiver
 	indexer *indexer
@@ -116,6 +118,7 @@ func (s *service) Init(a *app.App) error {
 	s.fileStore = app.MustComponent[filestore.FileStore](a)
 	s.spaceIdResolver = app.MustComponent[idresolver.Resolver](a)
 	s.fileOffloader = app.MustComponent[fileoffloader.Service](a)
+	s.objectArchiver = app.MustComponent[objectArchiver](a)
 	cfg := app.MustComponent[configProvider](a)
 	s.indexer = s.newIndexer()
@@ -141,7 +144,11 @@ func (s *service) Init(a *app.App) error {
 func (s *service) Run(_ context.Context) error {
 	go func() {
-		err := s.ensureNotSyncedFilesAddedToQueue()
+		err := s.deleteMigratedFilesInNonPersonalSpaces(context.Background())
+		if err != nil {
+			log.Errorf("delete migrated files in non personal spaces: %v", err)
+		}
+		err = s.ensureNotSyncedFilesAddedToQueue()
 		if err != nil {
 			log.Errorf("ensure not synced files added to queue: %v", err)
 		}
@@ -151,6 +158,46 @@ func (s *service) Run(_ context.Context) error {
 	return nil
 }
 
+type objectArchiver interface {
+	SetPagesIsArchived(ctx session.Context, req pb.RpcObjectListSetIsArchivedRequest) error
+}
+
+func (s *service) deleteMigratedFilesInNonPersonalSpaces(ctx context.Context) error {
+	personalSpace, err := s.spaceService.GetPersonalSpace(ctx)
+	if err != nil {
+		return err
+	}
+	objectIds, _, err := s.objectStore.QueryObjectIDs(database.Query{
+		Filters: []*model.BlockContentDataviewFilter{
+			{
+				RelationKey: bundle.RelationKeyFileId.String(),
+				Condition: model.BlockContentDataviewFilter_NotEmpty,
+			},
+			{
+				RelationKey: bundle.RelationKeyUniqueKey.String(),
+				Condition: model.BlockContentDataviewFilter_NotEmpty,
+			},
+			{
+				RelationKey: bundle.RelationKeySpaceId.String(),
+				Condition: model.BlockContentDataviewFilter_NotEqual,
+				Value: pbtypes.String(personalSpace.Id()),
+			},
+		},
+	})
+	if err != nil {
+		return err
+	}
+	if len(objectIds) > 0 {
+		err = s.objectArchiver.SetPagesIsArchived(nil, pb.RpcObjectListSetIsArchivedRequest{
+			ObjectIds: objectIds,
+			IsArchived: true,
+		})
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
 // After migrating to new sync queue we need to ensure that all not synced files are added to the queue
 func (s *service) ensureNotSyncedFilesAddedToQueue() error {
 	records, err := s.objectStore.Query(database.Query{
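The objectArchiver dependency introduced above is a narrow, locally defined interface, so any component able to archive a list of objects can satisfy it. A rough sketch of the contract with a hypothetical in-memory test double (not part of the commit; the package name is assumed):

package fileobject_test // package name assumed for illustration

import (
	"github.com/anyproto/anytype-heart/core/session"
	"github.com/anyproto/anytype-heart/pb"
)

// fakeArchiver is a hypothetical stand-in for the objectArchiver dependency:
// it only records which object ids were asked to be archived.
type fakeArchiver struct {
	archived []string
}

func (f *fakeArchiver) SetPagesIsArchived(_ session.Context, req pb.RpcObjectListSetIsArchivedRequest) error {
	if req.IsArchived {
		f.archived = append(f.archived, req.ObjectIds...)
	}
	return nil
}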

@@ -21,6 +21,35 @@ var (
 	addressPattern = regexp.MustCompile(`\+?0x[0-9a-z]*`)
 )
 
+func InlineCallStack() string {
+	// Allocate space for the program counters of the call stack
+	var pcs [32]uintptr
+	// Skip 2 frames: runtime.Callers itself and InlineCallStack, so the first
+	// reported frame is the caller of InlineCallStack
+	n := runtime.Callers(2, pcs[:])
+	// Resolve the program counters into stack frames
+	frames := runtime.CallersFrames(pcs[:n])
+	var sep string
+	buf := &strings.Builder{}
+	// Iterate through the frames and append each to the buffer as "function file:line"
+	for {
+		frame, more := frames.Next()
+		buf.WriteString(sep)
+		sep = " -> "
+		buf.WriteString(frame.Function)
+		buf.WriteString(" ")
+		buf.WriteString(frame.File)
+		buf.WriteString(":")
+		buf.WriteString(fmt.Sprintf("%d", frame.Line))
+		if !more {
+			break
+		}
+	}
+	return buf.String()
+}
+
 func ParseGoroutinesDump(trace string, pattern string) string {
 	var sb strings.Builder
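A minimal usage sketch of the new InlineCallStack helper, assuming it is exported from the debug utility package this file appears to belong to (the import path below is an assumption, not shown in the diff):

package main

import (
	"fmt"

	// Import path assumed; the diff does not show which package this file belongs to.
	"github.com/anyproto/anytype-heart/util/debug"
)

func load() {
	// Prints the caller chain, innermost frame first, each frame as "function file:line",
	// joined by " -> ", e.g.:
	//   main.load /app/main.go:14 -> main.main /app/main.go:18 -> runtime.main ...
	fmt.Println(debug.InlineCallStack())
}

func main() {
	load()
}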