File: uss_migrator.cc

// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "components/sync/engine_impl/uss_migrator.h"

#include <algorithm>
#include <memory>
#include <string>
#include <utility>
#include <vector>

#include "base/memory/ptr_util.h"
#include "components/sync/base/time.h"
#include "components/sync/engine_impl/model_type_worker.h"
#include "components/sync/protocol/sync.pb.h"
#include "components/sync/syncable/directory.h"
#include "components/sync/syncable/entry.h"
#include "components/sync/syncable/read_node.h"
#include "components/sync/syncable/read_transaction.h"
#include "components/sync/syncable/user_share.h"

namespace syncer {

namespace {

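// Looks up the directory entry identified by |id| within |trans| and copies
// its server-side fields into |entity|. Returns false if the lookup fails.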
bool ExtractSyncEntity(ReadTransaction* trans,
                       int64_t id,
                       sync_pb::SyncEntity* entity) {
  ReadNode read_node(trans);
  if (read_node.InitByIdLookup(id) != BaseNode::INIT_OK)
    return false;

  const syncable::Entry& entry = *read_node.GetEntry();

  // Copy the fields USS cares about from the server side of the directory so
  // that we don't miss things that haven't been applied yet. See
  // ModelTypeWorker::ProcessGetUpdatesResponse for which fields are used.
  entity->set_id_string(entry.GetId().GetServerId());
  entity->set_version(entry.GetServerVersion());
  entity->set_mtime(TimeToProtoTime(entry.GetServerMtime()));
  entity->set_ctime(TimeToProtoTime(entry.GetServerCtime()));
  entity->set_name(entry.GetServerNonUniqueName());
  entity->set_deleted(entry.GetServerIsDel());
  entity->set_client_defined_unique_tag(entry.GetUniqueClientTag());

  // There appear to be other ways to extract specifics (e.g. for passwords)
  // from Entry. Those types may need special-casing when they are migrated.
  entity->mutable_specifics()->CopyFrom(entry.GetServerSpecifics());
  return true;
}

}  // namespace

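// Migrates all directory data of |type| to |worker|, using the default batch
// size of 64 entities.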
bool MigrateDirectoryData(ModelType type,
                          UserShare* user_share,
                          ModelTypeWorker* worker) {
  return MigrateDirectoryDataWithBatchSize(type, user_share, worker, 64);
}

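// As above, but lets the caller control how many entities are processed per
// batch.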
bool MigrateDirectoryDataWithBatchSize(ModelType type,
                                       UserShare* user_share,
                                       ModelTypeWorker* worker,
                                       int batch_size) {
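  // Bookmarks and passwords are not supported by this migration path.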
  DCHECK_NE(BOOKMARKS, type);
  DCHECK_NE(PASSWORDS, type);
  ReadTransaction trans(FROM_HERE, user_share);

  ReadNode root(&trans);
  if (root.InitTypeRoot(type) != BaseNode::INIT_OK) {
    LOG(ERROR) << "Missing root node for " << ModelTypeToString(type);
    // Inform the worker so it can trigger a fallback initial GetUpdates.
    worker->AbortMigration();
    return false;
  }

  // Get the progress marker and context from the directory.
  sync_pb::DataTypeProgressMarker progress;
  sync_pb::DataTypeContext context;
  user_share->directory->GetDownloadProgress(type, &progress);
  user_share->directory->GetDataTypeContext(trans.GetWrappedTrans(), type,
                                            &context);

  std::vector<int64_t> child_ids;
  root.GetChildIds(&child_ids);

  // Process |batch_size| entities at a time to reduce memory usage.
  size_t i = 0;
  while (i < child_ids.size()) {
    // Vector to own the temporary entities.
    std::vector<std::unique_ptr<sync_pb::SyncEntity>> entities;
    // Vector of raw pointers for passing to ProcessGetUpdatesResponse().
    SyncEntityList entity_ptrs;

    const size_t batch_limit = std::min(i + batch_size, child_ids.size());
    for (; i < batch_limit; i++) {
      auto entity = base::MakeUnique<sync_pb::SyncEntity>();
      if (!ExtractSyncEntity(&trans, child_ids[i], entity.get())) {
        LOG(ERROR) << "Failed to fetch child node for "
                   << ModelTypeToString(type);
        // Inform the worker so it can clear any partial data and trigger a
        // fallback initial GetUpdates.
        worker->AbortMigration();
        return false;
      }
      // Ignore tombstones; they are not included in an initial GetUpdates.
      if (!entity->deleted()) {
        entity_ptrs.push_back(entity.get());
        entities.push_back(std::move(entity));
      }
    }

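    // Hand this batch to the worker as though it had arrived in a GetUpdates
    // response from the server.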
    worker->ProcessGetUpdatesResponse(progress, context, entity_ptrs, nullptr);
  }

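  // All batches have been delivered; have the worker apply the updates.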
  worker->PassiveApplyUpdates(nullptr);
  return true;
}

}  // namespace syncer