sync/syncable/directory.cc

// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "sync/syncable/directory.h"

#include <algorithm>
#include <iterator>

#include "base/base64.h"
#include "base/metrics/histogram.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/trace_event/trace_event.h"
#include "sync/internal_api/public/base/attachment_id_proto.h"
#include "sync/internal_api/public/base/unique_position.h"
#include "sync/internal_api/public/util/unrecoverable_error_handler.h"
#include "sync/syncable/entry.h"
#include "sync/syncable/entry_kernel.h"
#include "sync/syncable/in_memory_directory_backing_store.h"
#include "sync/syncable/on_disk_directory_backing_store.h"
#include "sync/syncable/scoped_kernel_lock.h"
#include "sync/syncable/scoped_parent_child_index_updater.h"
#include "sync/syncable/syncable-inl.h"
#include "sync/syncable/syncable_base_transaction.h"
#include "sync/syncable/syncable_changes_version.h"
#include "sync/syncable/syncable_read_transaction.h"
#include "sync/syncable/syncable_util.h"
#include "sync/syncable/syncable_write_transaction.h"

using std::string;

namespace syncer {
namespace syncable {

// static
const base::FilePath::CharType Directory::kSyncDatabaseFilename[] =
    FILE_PATH_LITERAL("SyncData.sqlite3");

Directory::PersistedKernelInfo::PersistedKernelInfo()
    : next_id(0) {
  ModelTypeSet protocol_types = ProtocolTypes();
  for (ModelTypeSet::Iterator iter = protocol_types.First(); iter.Good();
       iter.Inc()) {
    ResetDownloadProgress(iter.Get());
    transaction_version[iter.Get()] = 0;
  }
}

Directory::PersistedKernelInfo::~PersistedKernelInfo() {}

void Directory::PersistedKernelInfo::ResetDownloadProgress(
    ModelType model_type) {
  // Clear everything except the data type id field.
  download_progress[model_type].Clear();
  download_progress[model_type].set_data_type_id(
      GetSpecificsFieldNumberFromModelType(model_type));

  // Explicitly set an empty token field to denote no progress.
  download_progress[model_type].set_token("");
}

bool Directory::PersistedKernelInfo::HasEmptyDownloadProgress(
    ModelType model_type) {
  const sync_pb::DataTypeProgressMarker& progress_marker =
      download_progress[model_type];
  return progress_marker.token().empty();
}

Directory::SaveChangesSnapshot::SaveChangesSnapshot()
    : kernel_info_status(KERNEL_SHARE_INFO_INVALID) {
}

Directory::SaveChangesSnapshot::~SaveChangesSnapshot() {
  STLDeleteElements(&dirty_metas);
  STLDeleteElements(&delete_journals);
}

bool Directory::SaveChangesSnapshot::HasUnsavedMetahandleChanges() const {
  return !dirty_metas.empty() || !metahandles_to_purge.empty() ||
         !delete_journals.empty() || !delete_journals_to_purge.empty();
}

Directory::Kernel::Kernel(
    const std::string& name,
    const KernelLoadInfo& info,
    DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>& transaction_observer)
    : next_write_transaction_id(0),
      name(name),
      info_status(Directory::KERNEL_SHARE_INFO_VALID),
      persisted_info(info.kernel_info),
      cache_guid(info.cache_guid),
      next_metahandle(info.max_metahandle + 1),
      delegate(delegate),
      transaction_observer(transaction_observer) {
  DCHECK(delegate);
  DCHECK(transaction_observer.IsInitialized());
}

Directory::Kernel::~Kernel() {
  STLDeleteContainerPairSecondPointers(metahandles_map.begin(),
                                       metahandles_map.end());
}

Directory::Directory(
    DirectoryBackingStore* store,
    UnrecoverableErrorHandler* unrecoverable_error_handler,
    ReportUnrecoverableErrorFunction report_unrecoverable_error_function,
    NigoriHandler* nigori_handler,
    Cryptographer* cryptographer)
    : kernel_(NULL),
      store_(store),
      unrecoverable_error_handler_(unrecoverable_error_handler),
      report_unrecoverable_error_function_(
          report_unrecoverable_error_function),
      unrecoverable_error_set_(false),
      nigori_handler_(nigori_handler),
      cryptographer_(cryptographer),
      invariant_check_level_(VERIFY_CHANGES),
      weak_ptr_factory_(this) {
}

Directory::~Directory() {
  Close();
}

DirOpenResult Directory::Open(
    const string& name,
    DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>& transaction_observer) {
  TRACE_EVENT0("sync", "SyncDatabaseOpen");

  const DirOpenResult result =
      OpenImpl(name, delegate, transaction_observer);
  if (OPENED != result)
    Close();
  return result;
}
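
// Rebuilds the in-memory indices from a freshly loaded metahandles map: the
// parent-child index, the unsynced and unapplied-update sets, the server and
// client tag maps, the ID map, and the attachment index.  Called from
// OpenImpl() after the backing store's Load() succeeds.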
void Directory::InitializeIndices(MetahandlesMap* handles_map) {
  ScopedKernelLock lock(this);
  kernel_->metahandles_map.swap(*handles_map);
  for (MetahandlesMap::const_iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    EntryKernel* entry = it->second;
    if (ParentChildIndex::ShouldInclude(entry))
      kernel_->parent_child_index.Insert(entry);
    const int64 metahandle = entry->ref(META_HANDLE);
    if (entry->ref(IS_UNSYNCED))
      kernel_->unsynced_metahandles.insert(metahandle);
    if (entry->ref(IS_UNAPPLIED_UPDATE)) {
      const ModelType type = entry->GetServerModelType();
      kernel_->unapplied_update_metahandles[type].insert(metahandle);
    }
    if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
      DCHECK(kernel_->server_tags_map.find(entry->ref(UNIQUE_SERVER_TAG)) ==
             kernel_->server_tags_map.end())
          << "Unexpected duplicate use of server tag";
      kernel_->server_tags_map[entry->ref(UNIQUE_SERVER_TAG)] = entry;
    }
    if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
      DCHECK(kernel_->client_tags_map.find(entry->ref(UNIQUE_CLIENT_TAG)) ==
             kernel_->client_tags_map.end())
          << "Unexpected duplicate use of client tag";
      kernel_->client_tags_map[entry->ref(UNIQUE_CLIENT_TAG)] = entry;
    }
    DCHECK(kernel_->ids_map.find(entry->ref(ID).value()) ==
           kernel_->ids_map.end()) << "Unexpected duplicate use of ID";
    kernel_->ids_map[entry->ref(ID).value()] = entry;
    DCHECK(!entry->is_dirty());
    AddToAttachmentIndex(lock, metahandle, entry->ref(ATTACHMENT_METADATA));
  }
}

DirOpenResult Directory::OpenImpl(
    const string& name,
    DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>&
        transaction_observer) {
  KernelLoadInfo info;
  // Temporary indices, used before kernel_ is initialized in case Load fails.
  // We swap these in later; the swap itself is O(1).
  Directory::MetahandlesMap tmp_handles_map;

  // Avoids mem leaks on failure. Harmlessly deletes the empty hash map after
  // the swap in the success case.
  STLValueDeleter<MetahandlesMap> deleter(&tmp_handles_map);

  JournalIndex delete_journals;
  MetahandleSet metahandles_to_purge;

  DirOpenResult result = store_->Load(&tmp_handles_map, &delete_journals,
                                      &metahandles_to_purge, &info);
  if (OPENED != result)
    return result;

  DCHECK(!kernel_);
  kernel_ = new Kernel(name, info, delegate, transaction_observer);
  delete_journal_.reset(new DeleteJournal(&delete_journals));
  InitializeIndices(&tmp_handles_map);

  // Write back the share info to reserve some space in 'next_id'. This will
  // prevent local ID reuse in the case of an early crash. See the comments in
  // TakeSnapshotForSaveChanges() or crbug.com/142987 for more information.
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;

  kernel_->metahandles_to_purge.swap(metahandles_to_purge);
  if (!SaveChanges())
    return FAILED_INITIAL_WRITE;

  // Now that we've successfully opened the store, install an error handler to
  // deal with catastrophic errors that may occur later on. Use a weak pointer
  // because we cannot guarantee that this Directory will outlive the Closure.
  store_->SetCatastrophicErrorHandler(base::Bind(
      &Directory::OnCatastrophicError, weak_ptr_factory_.GetWeakPtr()));

  return OPENED;
}

DeleteJournal* Directory::delete_journal() {
  DCHECK(delete_journal_.get());
  return delete_journal_.get();
}

void Directory::Close() {
  store_.reset();
  if (kernel_) {
    delete kernel_;
    kernel_ = NULL;
  }
}

void Directory::OnUnrecoverableError(const BaseTransaction* trans,
                                     const tracked_objects::Location& location,
                                     const std::string& message) {
  DCHECK(trans != NULL);
  unrecoverable_error_set_ = true;
  unrecoverable_error_handler_->OnUnrecoverableError(location, message);
}

EntryKernel* Directory::GetEntryById(const Id& id) {
  ScopedKernelLock lock(this);
  return GetEntryById(lock, id);
}

EntryKernel* Directory::GetEntryById(const ScopedKernelLock& lock,
                                     const Id& id) {
  DCHECK(kernel_);
  // Find it in the in-memory ID index.
  IdsMap::iterator id_found = kernel_->ids_map.find(id.value());
  if (id_found != kernel_->ids_map.end()) {
    return id_found->second;
  }
  return NULL;
}

EntryKernel* Directory::GetEntryByClientTag(const string& tag) {
  ScopedKernelLock lock(this);
  DCHECK(kernel_);

  TagsMap::iterator it = kernel_->client_tags_map.find(tag);
  if (it != kernel_->client_tags_map.end()) {
    return it->second;
  }
  return NULL;
}

EntryKernel* Directory::GetEntryByServerTag(const string& tag) {
  ScopedKernelLock lock(this);
  DCHECK(kernel_);
  TagsMap::iterator it = kernel_->server_tags_map.find(tag);
  if (it != kernel_->server_tags_map.end()) {
    return it->second;
  }
  return NULL;
}

EntryKernel* Directory::GetEntryByHandle(int64 metahandle) {
  ScopedKernelLock lock(this);
  return GetEntryByHandle(lock, metahandle);
}

EntryKernel* Directory::GetEntryByHandle(const ScopedKernelLock& lock,
                                         int64 metahandle) {
  // Look up in memory.
  MetahandlesMap::iterator found =
      kernel_->metahandles_map.find(metahandle);
  if (found != kernel_->metahandles_map.end()) {
    // Found it in memory. Easy.
    return found->second;
  }
  return NULL;
}

bool Directory::GetChildHandlesById(
    BaseTransaction* trans, const Id& parent_id,
    Directory::Metahandles* result) {
  if (!SyncAssert(this == trans->directory(), FROM_HERE,
                  "Directories don't match", trans))
    return false;
  result->clear();

  ScopedKernelLock lock(this);
  AppendChildHandles(lock, parent_id, result);
  return true;
}
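
// Counts |kernel| plus all of its descendants via a breadth-first traversal
// of the parent-child index; a leaf node therefore yields a count of 1.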
int Directory::GetTotalNodeCount(
    BaseTransaction* trans,
    EntryKernel* kernel) const {
  if (!SyncAssert(this == trans->directory(), FROM_HERE,
                  "Directories don't match", trans))
    return 0;

  int count = 1;
  std::deque<const OrderedChildSet*> child_sets;

  GetChildSetForKernel(trans, kernel, &child_sets);
  while (!child_sets.empty()) {
    const OrderedChildSet* set = child_sets.front();
    child_sets.pop_front();
    for (OrderedChildSet::const_iterator it = set->begin();
         it != set->end(); ++it) {
      count++;
      GetChildSetForKernel(trans, *it, &child_sets);
    }
  }

  return count;
}

void Directory::GetChildSetForKernel(
    BaseTransaction* trans,
    EntryKernel* kernel,
    std::deque<const OrderedChildSet*>* child_sets) const {
  if (!kernel->ref(IS_DIR))
    return;  // Not a directory => no children.

  const OrderedChildSet* descendants =
      kernel_->parent_child_index.GetChildren(kernel->ref(ID));
  if (!descendants)
    return;  // This directory has no children.

  // Add our children to the list of items to be traversed.
  child_sets->push_back(descendants);
}

int Directory::GetPositionIndex(
    BaseTransaction* trans,
    EntryKernel* kernel) const {
  const OrderedChildSet* siblings =
      kernel_->parent_child_index.GetSiblings(kernel);

  OrderedChildSet::const_iterator it = siblings->find(kernel);
  return std::distance(siblings->begin(), it);
}

bool Directory::InsertEntry(BaseWriteTransaction* trans, EntryKernel* entry) {
  ScopedKernelLock lock(this);
  return InsertEntry(lock, trans, entry);
}

bool Directory::InsertEntry(const ScopedKernelLock& lock,
                            BaseWriteTransaction* trans,
                            EntryKernel* entry) {
  if (!SyncAssert(NULL != entry, FROM_HERE, "Entry is null", trans))
    return false;

  static const char error[] = "Entry already in memory index.";

  if (!SyncAssert(
          kernel_->metahandles_map.insert(
              std::make_pair(entry->ref(META_HANDLE), entry)).second,
          FROM_HERE,
          error,
          trans)) {
    return false;
  }
  if (!SyncAssert(
          kernel_->ids_map.insert(
              std::make_pair(entry->ref(ID).value(), entry)).second,
          FROM_HERE,
          error,
          trans)) {
    return false;
  }
  if (ParentChildIndex::ShouldInclude(entry)) {
    if (!SyncAssert(kernel_->parent_child_index.Insert(entry),
                    FROM_HERE,
                    error,
                    trans)) {
      return false;
    }
  }
  AddToAttachmentIndex(
      lock, entry->ref(META_HANDLE), entry->ref(ATTACHMENT_METADATA));

  // Should NEVER be created with a client tag or server tag.
  if (!SyncAssert(entry->ref(UNIQUE_SERVER_TAG).empty(), FROM_HERE,
                  "Server tag should be empty", trans)) {
    return false;
  }
  if (!SyncAssert(entry->ref(UNIQUE_CLIENT_TAG).empty(), FROM_HERE,
                  "Client tag should be empty", trans))
    return false;

  return true;
}

bool Directory::ReindexId(BaseWriteTransaction* trans,
                          EntryKernel* const entry,
                          const Id& new_id) {
  ScopedKernelLock lock(this);
  if (NULL != GetEntryById(lock, new_id))
    return false;

  {
    // Update the indices that depend on the ID field.
    ScopedParentChildIndexUpdater updater_b(lock, entry,
        &kernel_->parent_child_index);
    size_t num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
    DCHECK_EQ(1U, num_erased);
    entry->put(ID, new_id);
    kernel_->ids_map[entry->ref(ID).value()] = entry;
  }

  return true;
}

bool Directory::ReindexParentId(BaseWriteTransaction* trans,
                                EntryKernel* const entry,
                                const Id& new_parent_id) {
  ScopedKernelLock lock(this);

  {
    // Update the indices that depend on the PARENT_ID field.
    ScopedParentChildIndexUpdater index_updater(lock, entry,
        &kernel_->parent_child_index);
    entry->put(PARENT_ID, new_parent_id);
  }

  return true;
}
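
// The attachment index (kernel_->index_by_attachment_id) maps an attachment's
// unique id to the set of metahandles whose entries reference it.  An id is
// dropped from the index when its last referencing metahandle is removed,
// which is what IsAttachmentLinked() relies on.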
void Directory::RemoveFromAttachmentIndex(
    const ScopedKernelLock& lock,
    const int64 metahandle,
    const sync_pb::AttachmentMetadata& attachment_metadata) {
  for (int i = 0; i < attachment_metadata.record_size(); ++i) {
    AttachmentIdUniqueId unique_id =
        attachment_metadata.record(i).id().unique_id();
    IndexByAttachmentId::iterator iter =
        kernel_->index_by_attachment_id.find(unique_id);
    if (iter != kernel_->index_by_attachment_id.end()) {
      iter->second.erase(metahandle);
      if (iter->second.empty()) {
        kernel_->index_by_attachment_id.erase(iter);
      }
    }
  }
}

void Directory::AddToAttachmentIndex(
    const ScopedKernelLock& lock,
    const int64 metahandle,
    const sync_pb::AttachmentMetadata& attachment_metadata) {
  for (int i = 0; i < attachment_metadata.record_size(); ++i) {
    AttachmentIdUniqueId unique_id =
        attachment_metadata.record(i).id().unique_id();
    IndexByAttachmentId::iterator iter =
        kernel_->index_by_attachment_id.find(unique_id);
    if (iter == kernel_->index_by_attachment_id.end()) {
      iter = kernel_->index_by_attachment_id.insert(std::make_pair(
          unique_id,
          MetahandleSet())).first;
    }
    iter->second.insert(metahandle);
  }
}

void Directory::UpdateAttachmentIndex(
    const int64 metahandle,
    const sync_pb::AttachmentMetadata& old_metadata,
    const sync_pb::AttachmentMetadata& new_metadata) {
  ScopedKernelLock lock(this);
  RemoveFromAttachmentIndex(lock, metahandle, old_metadata);
  AddToAttachmentIndex(lock, metahandle, new_metadata);
}

void Directory::GetMetahandlesByAttachmentId(
    BaseTransaction* trans,
    const sync_pb::AttachmentIdProto& attachment_id_proto,
    Metahandles* result) {
  DCHECK(result);
  result->clear();
  ScopedKernelLock lock(this);
  IndexByAttachmentId::const_iterator index_iter =
      kernel_->index_by_attachment_id.find(attachment_id_proto.unique_id());
  if (index_iter == kernel_->index_by_attachment_id.end())
    return;
  const MetahandleSet& metahandle_set = index_iter->second;
  std::copy(
      metahandle_set.begin(), metahandle_set.end(), back_inserter(*result));
}

bool Directory::unrecoverable_error_set(const BaseTransaction* trans) const {
  DCHECK(trans != NULL);
  return unrecoverable_error_set_;
}

void Directory::ClearDirtyMetahandles(const ScopedKernelLock& lock) {
  kernel_->transaction_mutex.AssertAcquired();
  kernel_->dirty_metahandles.clear();
}

bool Directory::SafeToPurgeFromMemory(WriteTransaction* trans,
                                      const EntryKernel* const entry) const {
  bool safe = entry->ref(IS_DEL) && !entry->is_dirty() &&
      !entry->ref(SYNCING) && !entry->ref(IS_UNAPPLIED_UPDATE) &&
      !entry->ref(IS_UNSYNCED);

  if (safe) {
    int64 handle = entry->ref(META_HANDLE);
    const ModelType type = entry->GetServerModelType();
    if (!SyncAssert(kernel_->dirty_metahandles.count(handle) == 0U,
                    FROM_HERE,
                    "Dirty metahandles should be empty", trans))
      return false;
    // TODO(tim): Bug 49278.
    if (!SyncAssert(!kernel_->unsynced_metahandles.count(handle),
                    FROM_HERE,
                    "Unsynced handles should be empty",
                    trans))
      return false;
    if (!SyncAssert(!kernel_->unapplied_update_metahandles[type].count(handle),
                    FROM_HERE,
                    "Unapplied metahandles should be empty",
                    trans))
      return false;
  }

  return safe;
}
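
// TakeSnapshotForSaveChanges() and HandleSaveChangesFailure() form a pair:
// the snapshot optimistically clears the dirty bit on every copied entry, and
// the failure handler re-marks those entries dirty so that the next
// SaveChanges() call retries them.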
void Directory::TakeSnapshotForSaveChanges(SaveChangesSnapshot* snapshot) {
  ReadTransaction trans(FROM_HERE, this);
  ScopedKernelLock lock(this);

  // If there is an unrecoverable error then just bail out.
  if (unrecoverable_error_set(&trans))
    return;

  // Deep copy dirty entries from kernel_->metahandles_map into snapshot and
  // clear dirty flags.
  for (MetahandleSet::const_iterator i = kernel_->dirty_metahandles.begin();
       i != kernel_->dirty_metahandles.end(); ++i) {
    EntryKernel* entry = GetEntryByHandle(lock, *i);
    if (!entry)
      continue;
    // Skip over false positives; it happens relatively infrequently.
    if (!entry->is_dirty())
      continue;
    snapshot->dirty_metas.insert(snapshot->dirty_metas.end(),
                                 new EntryKernel(*entry));
    DCHECK_EQ(1U, kernel_->dirty_metahandles.count(*i));
    // We don't bother removing from the index here as we blow the entire thing
    // in a moment, and it unnecessarily complicates iteration.
    entry->clear_dirty(NULL);
  }
  ClearDirtyMetahandles(lock);

  // Set purged handles.
  DCHECK(snapshot->metahandles_to_purge.empty());
  snapshot->metahandles_to_purge.swap(kernel_->metahandles_to_purge);

  // Fill kernel_info_status and kernel_info.
  snapshot->kernel_info = kernel_->persisted_info;
  // To avoid duplicates when the process crashes, we record a next_id of
  // greater magnitude than could possibly be reached before the next save
  // changes.  In other words, it's effectively impossible for the user to
  // generate 65536 new bookmarks in 3 seconds.
  snapshot->kernel_info.next_id -= 65536;
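
  // Example with illustrative values: if next_id is currently -1000, the
  // snapshot persists -66536.  NextId() hands out -1000, -1001, ... by
  // decrementing next_id, so even if we crash after allocating (at most)
  // 65536 ids, a reload resumes at -66536 and never reuses one of them.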
  snapshot->kernel_info_status = kernel_->info_status;
  // This one we reset on failure.
  kernel_->info_status = KERNEL_SHARE_INFO_VALID;

  delete_journal_->TakeSnapshotAndClear(
      &trans, &snapshot->delete_journals, &snapshot->delete_journals_to_purge);
}
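
// SaveChanges() is the directory's write-back path: snapshot all dirty state,
// hand the snapshot to the backing store, then either vacuum safely-purgeable
// entries out of memory on success or restore the dirty bits and journals on
// failure via HandleSaveChangesFailure().
//
// A typical lifecycle, as a sketch (names illustrative, error handling
// elided; SaveChanges is normally driven periodically by the sync engine):
//
//   Directory dir(store, error_handler, report_fn, nigori_handler, crypto);
//   if (dir.Open(name, delegate, observer) == OPENED) {
//     // ... mutate entries through write transactions ...
//     dir.SaveChanges();  // persist dirty state
//   }
//   // ~Directory() calls Close().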
bool Directory::SaveChanges() {
  bool success = false;

  base::AutoLock scoped_lock(kernel_->save_changes_mutex);

  // Snapshot and save.
  SaveChangesSnapshot snapshot;
  TakeSnapshotForSaveChanges(&snapshot);
  success = store_->SaveChanges(snapshot);

  // Handle success or failure.
  if (success)
    success = VacuumAfterSaveChanges(snapshot);
  else
    HandleSaveChangesFailure(snapshot);
  return success;
}

bool Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) {
  if (snapshot.dirty_metas.empty())
    return true;

  // Need a write transaction as we are about to permanently purge entries.
  WriteTransaction trans(FROM_HERE, VACUUM_AFTER_SAVE, this);
  ScopedKernelLock lock(this);
  // Now drop everything we can out of memory.
  for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
       i != snapshot.dirty_metas.end(); ++i) {
    MetahandlesMap::iterator found =
        kernel_->metahandles_map.find((*i)->ref(META_HANDLE));
    EntryKernel* entry = (found == kernel_->metahandles_map.end() ?
                          NULL : found->second);
    if (entry && SafeToPurgeFromMemory(&trans, entry)) {
      // We now drop deleted metahandles that are up to date on both the client
      // and the server.
      size_t num_erased = 0;
      num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE));
      DCHECK_EQ(1u, num_erased);
      num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
      DCHECK_EQ(1u, num_erased);
      if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
        num_erased =
            kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
        DCHECK_EQ(1u, num_erased);
      }
      if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
        num_erased =
            kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
        DCHECK_EQ(1u, num_erased);
      }
      if (!SyncAssert(!kernel_->parent_child_index.Contains(entry),
                      FROM_HERE,
                      "Deleted entry still present",
                      (&trans)))
        return false;
      RemoveFromAttachmentIndex(
          lock, entry->ref(META_HANDLE), entry->ref(ATTACHMENT_METADATA));

      delete entry;
    }
    if (trans.unrecoverable_error_set())
      return false;
  }
  return true;
}

void Directory::UnapplyEntry(EntryKernel* entry) {
  int64 handle = entry->ref(META_HANDLE);
  ModelType server_type = GetModelTypeFromSpecifics(
      entry->ref(SERVER_SPECIFICS));

  // Clear enough so that on the next sync cycle all local data will
  // be overwritten.
  // Note: do not modify the root node in order to preserve the
  // initial sync ended bit for this type (else on the next restart
  // this type will be treated as disabled and therefore fully purged).
  if (IsRealDataType(server_type) &&
      ModelTypeToRootTag(server_type) == entry->ref(UNIQUE_SERVER_TAG)) {
    return;
  }

  // Set the unapplied bit if this item has server data.
  if (IsRealDataType(server_type) && !entry->ref(IS_UNAPPLIED_UPDATE)) {
    entry->put(IS_UNAPPLIED_UPDATE, true);
    kernel_->unapplied_update_metahandles[server_type].insert(handle);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Unset the unsynced bit.
  if (entry->ref(IS_UNSYNCED)) {
    kernel_->unsynced_metahandles.erase(handle);
    entry->put(IS_UNSYNCED, false);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Mark the item as locally deleted. No deleted items are allowed in the
  // parent child index.
  if (!entry->ref(IS_DEL)) {
    kernel_->parent_child_index.Remove(entry);
    entry->put(IS_DEL, true);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Set the version to the "newly created" version.
  if (entry->ref(BASE_VERSION) != CHANGES_VERSION) {
    entry->put(BASE_VERSION, CHANGES_VERSION);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // At this point locally created items that aren't synced will become locally
  // deleted items, and purged on the next snapshot. All other items will match
  // the state they would have had if they were just created via a server
  // update. See MutableEntry::MutableEntry(.., CreateNewUpdateItem, ..).
}

void Directory::DeleteEntry(const ScopedKernelLock& lock,
                            bool save_to_journal,
                            EntryKernel* entry,
                            EntryKernelSet* entries_to_journal) {
  int64 handle = entry->ref(META_HANDLE);
  ModelType server_type = GetModelTypeFromSpecifics(
      entry->ref(SERVER_SPECIFICS));

  kernel_->metahandles_to_purge.insert(handle);

  size_t num_erased = 0;
  num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE));
  DCHECK_EQ(1u, num_erased);
  num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
  DCHECK_EQ(1u, num_erased);
  num_erased = kernel_->unsynced_metahandles.erase(handle);
  DCHECK_EQ(entry->ref(IS_UNSYNCED), num_erased > 0);
  num_erased =
      kernel_->unapplied_update_metahandles[server_type].erase(handle);
  DCHECK_EQ(entry->ref(IS_UNAPPLIED_UPDATE), num_erased > 0);
  if (kernel_->parent_child_index.Contains(entry))
    kernel_->parent_child_index.Remove(entry);

  if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
    num_erased =
        kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
    DCHECK_EQ(1u, num_erased);
  }
  if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
    num_erased =
        kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
    DCHECK_EQ(1u, num_erased);
  }
  RemoveFromAttachmentIndex(lock, handle, entry->ref(ATTACHMENT_METADATA));

  if (save_to_journal) {
    entries_to_journal->insert(entry);
  } else {
    delete entry;
  }
}
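
// Purges all entries whose local or server type is in |disabled_types|.
// Types in |types_to_unapply| are unapplied (local data cleared, server data
// kept as an unapplied update) rather than deleted; deletions for types in
// |types_to_journal| are recorded in the delete journal, when the journal is
// enabled for that type.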
bool Directory::PurgeEntriesWithTypeIn(ModelTypeSet disabled_types,
                                       ModelTypeSet types_to_journal,
                                       ModelTypeSet types_to_unapply) {
  disabled_types.RemoveAll(ProxyTypes());

  if (disabled_types.Empty())
    return true;

  {
    WriteTransaction trans(FROM_HERE, PURGE_ENTRIES, this);

    EntryKernelSet entries_to_journal;
    STLElementDeleter<EntryKernelSet> journal_deleter(&entries_to_journal);

    {
      ScopedKernelLock lock(this);

      bool found_progress = false;
      for (ModelTypeSet::Iterator iter = disabled_types.First(); iter.Good();
           iter.Inc()) {
        if (!kernel_->persisted_info.HasEmptyDownloadProgress(iter.Get()))
          found_progress = true;
      }

      // If none of the disabled types have progress markers, there's nothing
      // to purge.
      if (!found_progress)
        return true;

      // We iterate in two passes to avoid a bug in STLport (which is used in
      // the Android build).  There are some versions of that library where a
      // hash_map's iterators can be invalidated when an item is erased from
      // the hash_map.
      // See http://sourceforge.net/p/stlport/bugs/239/.
      std::set<EntryKernel*> to_purge;
      for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
           it != kernel_->metahandles_map.end(); ++it) {
        const sync_pb::EntitySpecifics& local_specifics =
            it->second->ref(SPECIFICS);
        const sync_pb::EntitySpecifics& server_specifics =
            it->second->ref(SERVER_SPECIFICS);
        ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
        ModelType server_type = GetModelTypeFromSpecifics(server_specifics);

        if ((IsRealDataType(local_type) && disabled_types.Has(local_type)) ||
            (IsRealDataType(server_type) && disabled_types.Has(server_type))) {
          to_purge.insert(it->second);
        }
      }

      for (std::set<EntryKernel*>::iterator it = to_purge.begin();
           it != to_purge.end(); ++it) {
        EntryKernel* entry = *it;

        const sync_pb::EntitySpecifics& local_specifics =
            (*it)->ref(SPECIFICS);
        const sync_pb::EntitySpecifics& server_specifics =
            (*it)->ref(SERVER_SPECIFICS);
        ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
        ModelType server_type = GetModelTypeFromSpecifics(server_specifics);

        if (types_to_unapply.Has(local_type) ||
            types_to_unapply.Has(server_type)) {
          UnapplyEntry(entry);
        } else {
          bool save_to_journal =
              (types_to_journal.Has(local_type) ||
               types_to_journal.Has(server_type)) &&
              (delete_journal_->IsDeleteJournalEnabled(local_type) ||
               delete_journal_->IsDeleteJournalEnabled(server_type));
          DeleteEntry(lock, save_to_journal, entry, &entries_to_journal);
        }
      }

      delete_journal_->AddJournalBatch(&trans, entries_to_journal);

      // Ensure meta tracking for these data types reflects the purged state.
      for (ModelTypeSet::Iterator it = disabled_types.First();
           it.Good(); it.Inc()) {
        kernel_->persisted_info.transaction_version[it.Get()] = 0;

        // Don't discard progress markers or context for unapplied types.
        if (!types_to_unapply.Has(it.Get())) {
          kernel_->persisted_info.ResetDownloadProgress(it.Get());
          kernel_->persisted_info.datatype_context[it.Get()].Clear();
        }
      }

      kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
    }
  }
  return true;
}

bool Directory::ResetVersionsForType(BaseWriteTransaction* trans,
                                     ModelType type) {
  if (!ProtocolTypes().Has(type))
    return false;
  DCHECK_NE(type, BOOKMARKS) << "Only non-hierarchical types are supported";

  EntryKernel* type_root = GetEntryByServerTag(ModelTypeToRootTag(type));
  if (!type_root)
    return false;

  ScopedKernelLock lock(this);
  const Id& type_root_id = type_root->ref(ID);
  Directory::Metahandles children;
  AppendChildHandles(lock, type_root_id, &children);

  for (Metahandles::iterator it = children.begin(); it != children.end();
       ++it) {
    EntryKernel* entry = GetEntryByHandle(lock, *it);
    if (!entry)
      continue;
    if (entry->ref(BASE_VERSION) > 1)
      entry->put(BASE_VERSION, 1);
    if (entry->ref(SERVER_VERSION) > 1)
      entry->put(SERVER_VERSION, 1);

    // Note that we do not unset IS_UNSYNCED or IS_UNAPPLIED_UPDATE in order
    // to ensure no in-transit data is lost.

    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  return true;
}

bool Directory::IsAttachmentLinked(
    const sync_pb::AttachmentIdProto& attachment_id_proto) const {
  ScopedKernelLock lock(this);
  IndexByAttachmentId::const_iterator iter =
      kernel_->index_by_attachment_id.find(attachment_id_proto.unique_id());
  if (iter != kernel_->index_by_attachment_id.end() && !iter->second.empty()) {
    return true;
  }
  return false;
}

void Directory::HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot) {
  WriteTransaction trans(FROM_HERE, HANDLE_SAVE_FAILURE, this);
  ScopedKernelLock lock(this);
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;

  // Because we optimistically cleared the dirty bit on the real entries when
  // taking the snapshot, we must restore it on failure.  Not doing this could
  // cause lost data, if no other changes are made to the in-memory entries
  // that would cause the dirty bit to get set again.  Setting the bit ensures
  // that SaveChanges will at least try again later.
  for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
       i != snapshot.dirty_metas.end(); ++i) {
    MetahandlesMap::iterator found =
        kernel_->metahandles_map.find((*i)->ref(META_HANDLE));
    if (found != kernel_->metahandles_map.end()) {
      found->second->mark_dirty(&kernel_->dirty_metahandles);
    }
  }

  kernel_->metahandles_to_purge.insert(snapshot.metahandles_to_purge.begin(),
                                       snapshot.metahandles_to_purge.end());

  // Restore delete journals.
  delete_journal_->AddJournalBatch(&trans, snapshot.delete_journals);
  delete_journal_->PurgeDeleteJournals(&trans,
                                       snapshot.delete_journals_to_purge);
}

void Directory::GetDownloadProgress(
    ModelType model_type,
    sync_pb::DataTypeProgressMarker* value_out) const {
  ScopedKernelLock lock(this);
  return value_out->CopyFrom(
      kernel_->persisted_info.download_progress[model_type]);
}

void Directory::GetDownloadProgressAsString(
    ModelType model_type,
    std::string* value_out) const {
  ScopedKernelLock lock(this);
  kernel_->persisted_info.download_progress[model_type].SerializeToString(
      value_out);
}

size_t Directory::GetEntriesCount() const {
  ScopedKernelLock lock(this);
  return kernel_->metahandles_map.size();
}

void Directory::SetDownloadProgress(
    ModelType model_type,
    const sync_pb::DataTypeProgressMarker& new_progress) {
  ScopedKernelLock lock(this);
  kernel_->persisted_info.download_progress[model_type].CopyFrom(new_progress);
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

bool Directory::HasEmptyDownloadProgress(ModelType type) const {
  ScopedKernelLock lock(this);
  return kernel_->persisted_info.HasEmptyDownloadProgress(type);
}

int64 Directory::GetTransactionVersion(ModelType type) const {
  kernel_->transaction_mutex.AssertAcquired();
  return kernel_->persisted_info.transaction_version[type];
}

void Directory::IncrementTransactionVersion(ModelType type) {
  kernel_->transaction_mutex.AssertAcquired();
  kernel_->persisted_info.transaction_version[type]++;
}

void Directory::GetDataTypeContext(BaseTransaction* trans,
                                   ModelType type,
                                   sync_pb::DataTypeContext* context) const {
  ScopedKernelLock lock(this);
  context->CopyFrom(kernel_->persisted_info.datatype_context[type]);
}

void Directory::SetDataTypeContext(
    BaseWriteTransaction* trans,
    ModelType type,
    const sync_pb::DataTypeContext& context) {
  ScopedKernelLock lock(this);
  kernel_->persisted_info.datatype_context[type].CopyFrom(context);
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

// TODO(stanisc): crbug.com/438313: change these to not rely on the folders.
ModelTypeSet Directory::InitialSyncEndedTypes() {
  syncable::ReadTransaction trans(FROM_HERE, this);
  ModelTypeSet protocol_types = ProtocolTypes();
  ModelTypeSet initial_sync_ended_types;
  for (ModelTypeSet::Iterator i = protocol_types.First(); i.Good(); i.Inc()) {
    if (InitialSyncEndedForType(&trans, i.Get())) {
      initial_sync_ended_types.Put(i.Get());
    }
  }
  return initial_sync_ended_types;
}

bool Directory::InitialSyncEndedForType(ModelType type) {
  syncable::ReadTransaction trans(FROM_HERE, this);
  return InitialSyncEndedForType(&trans, type);
}

bool Directory::InitialSyncEndedForType(
    BaseTransaction* trans, ModelType type) {
  // True iff the type's root node has been received and applied.
  syncable::Entry entry(trans, syncable::GET_TYPE_ROOT, type);
  return entry.good() && entry.GetBaseVersion() != CHANGES_VERSION;
}

string Directory::store_birthday() const {
  ScopedKernelLock lock(this);
  return kernel_->persisted_info.store_birthday;
}

void Directory::set_store_birthday(const string& store_birthday) {
  ScopedKernelLock lock(this);
  if (kernel_->persisted_info.store_birthday == store_birthday)
    return;
  kernel_->persisted_info.store_birthday = store_birthday;
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

string Directory::bag_of_chips() const {
  ScopedKernelLock lock(this);
  return kernel_->persisted_info.bag_of_chips;
}

void Directory::set_bag_of_chips(const string& bag_of_chips) {
  ScopedKernelLock lock(this);
  if (kernel_->persisted_info.bag_of_chips == bag_of_chips)
    return;
  kernel_->persisted_info.bag_of_chips = bag_of_chips;
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

string Directory::cache_guid() const {
  // No need to lock since nothing ever writes to it after load.
  return kernel_->cache_guid;
}

NigoriHandler* Directory::GetNigoriHandler() {
  return nigori_handler_;
}

Cryptographer* Directory::GetCryptographer(const BaseTransaction* trans) {
  DCHECK_EQ(this, trans->directory());
  return cryptographer_;
}

void Directory::GetAllMetaHandles(BaseTransaction* trans,
                                  MetahandleSet* result) {
  result->clear();
  ScopedKernelLock lock(this);
  for (MetahandlesMap::iterator i = kernel_->metahandles_map.begin();
       i != kernel_->metahandles_map.end(); ++i) {
    result->insert(i->first);
  }
}

void Directory::GetUnsyncedMetaHandles(BaseTransaction* trans,
                                       Metahandles* result) {
  result->clear();
  ScopedKernelLock lock(this);
  copy(kernel_->unsynced_metahandles.begin(),
       kernel_->unsynced_metahandles.end(), back_inserter(*result));
}

int64 Directory::unsynced_entity_count() const {
  ScopedKernelLock lock(this);
  return kernel_->unsynced_metahandles.size();
}

bool Directory::TypeHasUnappliedUpdates(ModelType type) {
  ScopedKernelLock lock(this);
  return !kernel_->unapplied_update_metahandles[type].empty();
}

void Directory::GetUnappliedUpdateMetaHandles(
    BaseTransaction* trans,
    FullModelTypeSet server_types,
    std::vector<int64>* result) {
  result->clear();
  ScopedKernelLock lock(this);
  for (int i = UNSPECIFIED; i < MODEL_TYPE_COUNT; ++i) {
    const ModelType type = ModelTypeFromInt(i);
    if (server_types.Has(type)) {
      std::copy(kernel_->unapplied_update_metahandles[type].begin(),
                kernel_->unapplied_update_metahandles[type].end(),
                back_inserter(*result));
    }
  }
}

void Directory::GetMetaHandlesOfType(BaseTransaction* trans,
                                     ModelType type,
                                     std::vector<int64>* result) {
  ScopedKernelLock lock(this);
  GetMetaHandlesOfType(lock, trans, type, result);
}

void Directory::GetMetaHandlesOfType(const ScopedKernelLock& lock,
                                     BaseTransaction* trans,
                                     ModelType type,
                                     std::vector<int64>* result) {
  result->clear();
  for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    EntryKernel* entry = it->second;
    const ModelType entry_type =
        GetModelTypeFromSpecifics(entry->ref(SPECIFICS));
    if (entry_type == type)
      result->push_back(it->first);
  }
}

void Directory::CollectMetaHandleCounts(
    std::vector<int>* num_entries_by_type,
    std::vector<int>* num_to_delete_entries_by_type) {
  syncable::ReadTransaction trans(FROM_HERE, this);
  ScopedKernelLock lock(this);

  for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    EntryKernel* entry = it->second;
    const ModelType type = GetModelTypeFromSpecifics(entry->ref(SPECIFICS));
    (*num_entries_by_type)[type]++;
    if (entry->ref(IS_DEL))
      (*num_to_delete_entries_by_type)[type]++;
  }
}

scoped_ptr<base::ListValue> Directory::GetNodeDetailsForType(
    BaseTransaction* trans,
    ModelType type) {
  scoped_ptr<base::ListValue> nodes(new base::ListValue());

  ScopedKernelLock lock(this);
  for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    if (GetModelTypeFromSpecifics(it->second->ref(SPECIFICS)) != type) {
      continue;
    }

    EntryKernel* kernel = it->second;
    scoped_ptr<base::DictionaryValue> node(
        kernel->ToValue(GetCryptographer(trans)));

    // Add the position index if appropriate.  This must be done here (and not
    // in EntryKernel) because the EntryKernel does not have access to its
    // siblings.
    if (kernel->ShouldMaintainPosition() && !kernel->ref(IS_DEL)) {
      node->SetInteger("positionIndex", GetPositionIndex(trans, kernel));
    }

    nodes->Append(node.release());
  }

  return nodes.Pass();
}

bool Directory::CheckInvariantsOnTransactionClose(
    syncable::BaseTransaction* trans,
    const MetahandleSet& modified_handles) {
  // NOTE: The trans may be in the process of being destructed.  Be careful if
  // you wish to call any of its virtual methods.
  switch (invariant_check_level_) {
    case FULL_DB_VERIFICATION: {
      MetahandleSet all_handles;
      GetAllMetaHandles(trans, &all_handles);
      return CheckTreeInvariants(trans, all_handles);
    }
    case VERIFY_CHANGES: {
      return CheckTreeInvariants(trans, modified_handles);
    }
    case OFF: {
      return true;
    }
  }
  NOTREACHED();
  return false;
}

bool Directory::FullyCheckTreeInvariants(syncable::BaseTransaction* trans) {
  MetahandleSet handles;
  GetAllMetaHandles(trans, &handles);
  return CheckTreeInvariants(trans, handles);
}
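
// Verifies structural invariants for each handle in |handles|: the entry
// exists; the root is a synced directory parented to root; non-deleted
// entries have a name and an acyclic chain of live directory parents; and
// base/server version fields are consistent with the entry's unsynced,
// unapplied, and client/server tag state.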
bool Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
                                    const MetahandleSet& handles) {
  MetahandleSet::const_iterator i;
  for (i = handles.begin(); i != handles.end(); ++i) {
    int64 metahandle = *i;
    Entry e(trans, GET_BY_HANDLE, metahandle);
    if (!SyncAssert(e.good(), FROM_HERE, "Entry is bad", trans))
      return false;
    syncable::Id id = e.GetId();
    syncable::Id parentid = e.GetParentId();

    if (id.IsRoot()) {
      if (!SyncAssert(e.GetIsDir(), FROM_HERE,
                      "Entry should be a directory",
                      trans))
        return false;
      if (!SyncAssert(parentid.IsRoot(), FROM_HERE,
                      "Entry should be root",
                      trans))
        return false;
      if (!SyncAssert(!e.GetIsUnsynced(), FROM_HERE, "Entry should be synced",
                      trans))
        return false;
      continue;
    }

    if (!e.GetIsDel()) {
      if (!SyncAssert(id != parentid, FROM_HERE,
                      "Id should be different from parent id.",
                      trans))
        return false;
      if (!SyncAssert(!e.GetNonUniqueName().empty(), FROM_HERE,
                      "Non unique name should not be empty.",
                      trans))
        return false;

      if (!parentid.IsNull()) {
        int safety_count = handles.size() + 1;
        while (!parentid.IsRoot()) {
          Entry parent(trans, GET_BY_ID, parentid);
          if (!SyncAssert(parent.good(), FROM_HERE,
                          "Parent entry is not valid.", trans))
            return false;
          if (handles.end() == handles.find(parent.GetMetahandle()))
            break;  // Skip further checking if parent was unmodified.
          if (!SyncAssert(parent.GetIsDir(), FROM_HERE,
                          "Parent should be a directory", trans))
            return false;
          if (!SyncAssert(!parent.GetIsDel(), FROM_HERE,
                          "Parent should not have been marked for deletion.",
                          trans))
            return false;
          if (!SyncAssert(handles.end() != handles.find(parent.GetMetahandle()),
                          FROM_HERE, "Parent should be in the index.", trans))
            return false;
          parentid = parent.GetParentId();
          if (!SyncAssert(--safety_count > 0, FROM_HERE,
                          "Count should be greater than zero.", trans))
            return false;
        }
      }
    }
    int64 base_version = e.GetBaseVersion();
    int64 server_version = e.GetServerVersion();
    bool using_unique_client_tag = !e.GetUniqueClientTag().empty();
    if (CHANGES_VERSION == base_version || 0 == base_version) {
      ModelType model_type = e.GetModelType();
      bool is_client_creatable_type_root_folder =
          parentid.IsRoot() &&
          IsTypeWithClientGeneratedRoot(model_type) &&
          e.GetUniqueServerTag() == ModelTypeToRootTag(model_type);
      if (e.GetIsUnappliedUpdate()) {
        // Must be a new item, or a de-duplicated unique client tag
        // that was created both locally and remotely, or a type root folder
        // that was created both locally and remotely.
        if (!(using_unique_client_tag ||
              is_client_creatable_type_root_folder)) {
          if (!SyncAssert(e.GetIsDel(), FROM_HERE,
                          "The entry should have been deleted.", trans))
            return false;
        }
        // It came from the server, so it must have a server ID.
        if (!SyncAssert(id.ServerKnows(), FROM_HERE,
                        "The id should be from a server.",
                        trans))
          return false;
      } else {
        if (e.GetIsDir()) {
          // TODO(chron): Implement this mode if clients ever need it.
          // For now, you can't combine a client tag and a directory.
          if (!SyncAssert(!using_unique_client_tag, FROM_HERE,
                          "Directory cannot have a client tag.",
                          trans))
            return false;
        }
        if (is_client_creatable_type_root_folder) {
          // This must be a locally created type root folder.
          if (!SyncAssert(
                  !e.GetIsUnsynced(), FROM_HERE,
                  "Locally created type root folders should not be unsynced.",
                  trans))
            return false;

          if (!SyncAssert(
                  !e.GetIsDel(), FROM_HERE,
                  "Locally created type root folders should not be deleted.",
                  trans))
            return false;
        } else {
          // Should be an uncommitted item, or a successfully deleted one.
          if (!e.GetIsDel()) {
            if (!SyncAssert(e.GetIsUnsynced(), FROM_HERE,
                            "The item should be unsynced.", trans))
              return false;
          }
        }
        // If the next check failed, it would imply that an item exists
        // on the server, isn't waiting for application locally, but either
        // is an unsynced create or a successful delete in the local copy.
        // Either way, that's a mismatch.
        if (!SyncAssert(0 == server_version, FROM_HERE,
                        "Server version should be zero.",
                        trans))
          return false;
        // Items that aren't using the unique client tag should have a zero
        // base version only if they have a local ID.  Items with unique client
        // tags are allowed to use the zero base version for undeletion and
        // de-duplication; the unique client tag trumps the server ID.
        if (!using_unique_client_tag) {
          if (!SyncAssert(!id.ServerKnows(), FROM_HERE,
                          "Should be a client only id.",
                          trans))
            return false;
        }
      }
    } else {
      if (!SyncAssert(id.ServerKnows(),
                      FROM_HERE,
                      "Should be a server id.",
                      trans))
        return false;
    }

    // Previously we would assert that locally deleted items that have never
    // been synced must not be sent to the server (IS_UNSYNCED must be false).
    // This is not always true in the case that an item is deleted while the
    // initial commit is in flight.  See crbug.com/426865.
  }
  return true;
}

void Directory::SetInvariantCheckLevel(InvariantCheckLevel check_level) {
  invariant_check_level_ = check_level;
}

int64 Directory::NextMetahandle() {
  ScopedKernelLock lock(this);
  int64 metahandle = (kernel_->next_metahandle)++;
  return metahandle;
}

// Always returns a client ID that is the string representation of a negative
// number.
Id Directory::NextId() {
  int64 result;
  {
    ScopedKernelLock lock(this);
    result = (kernel_->persisted_info.next_id)--;
    kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
  }
  DCHECK_LT(result, 0);
  return Id::CreateFromClientString(base::Int64ToString(result));
}

bool Directory::HasChildren(BaseTransaction* trans, const Id& id) {
  ScopedKernelLock lock(this);
  return kernel_->parent_child_index.GetChildren(id) != NULL;
}

Id Directory::GetFirstChildId(BaseTransaction* trans,
                              const EntryKernel* parent) {
  DCHECK(parent);
  DCHECK(parent->ref(IS_DIR));

  ScopedKernelLock lock(this);
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(parent->ref(ID));

  // We're expected to return root if there are no children.
  if (!children)
    return Id();

  return (*children->begin())->ref(ID);
}

syncable::Id Directory::GetPredecessorId(EntryKernel* e) {
  ScopedKernelLock lock(this);

  DCHECK(ParentChildIndex::ShouldInclude(e));
  const OrderedChildSet* siblings = kernel_->parent_child_index.GetSiblings(e);
  OrderedChildSet::const_iterator i = siblings->find(e);
  DCHECK(i != siblings->end());

  if (i == siblings->begin()) {
    return Id();
  } else {
    i--;
    return (*i)->ref(ID);
  }
}

syncable::Id Directory::GetSuccessorId(EntryKernel* e) {
  ScopedKernelLock lock(this);

  DCHECK(ParentChildIndex::ShouldInclude(e));
  const OrderedChildSet* siblings = kernel_->parent_child_index.GetSiblings(e);
  OrderedChildSet::const_iterator i = siblings->find(e);
  DCHECK(i != siblings->end());

  i++;
  if (i == siblings->end()) {
    return Id();
  } else {
    return (*i)->ref(ID);
  }
}

// TODO(rlarocque): Remove all support for placing ShouldMaintainPosition()
// items as siblings of items that do not maintain positions.  It is required
// only for tests.  See crbug.com/178282.
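
// Positions an entry relative to |predecessor| (NULL means "first among its
// siblings") by computing a UniquePosition from the neighbors' positions and
// the entry's unique bookmark tag: InitialPosition() when there are no
// positioned siblings, Before()/After() at the ends, and Between() in the
// middle.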
void Directory::PutPredecessor(EntryKernel* e, EntryKernel* predecessor) {
  DCHECK(!e->ref(IS_DEL));
  if (!e->ShouldMaintainPosition()) {
    DCHECK(!e->ref(UNIQUE_POSITION).IsValid());
    return;
  }
  std::string suffix = e->ref(UNIQUE_BOOKMARK_TAG);
  DCHECK(!suffix.empty());

  // Remove our item from the ParentChildIndex and remember to re-add it later.
  ScopedKernelLock lock(this);
  ScopedParentChildIndexUpdater updater(lock, e, &kernel_->parent_child_index);

  // Note: The ScopedParentChildIndexUpdater will update this set for us as we
  // leave this function.
  const OrderedChildSet* siblings =
      kernel_->parent_child_index.GetChildren(e->ref(PARENT_ID));

  if (!siblings) {
    // This parent currently has no other children.
    DCHECK(predecessor == NULL);
    UniquePosition pos = UniquePosition::InitialPosition(suffix);
    e->put(UNIQUE_POSITION, pos);
    return;
  }

  if (predecessor == NULL) {
    // We have at least one sibling, and we're inserting to the left of them.
    UniquePosition successor_pos = (*siblings->begin())->ref(UNIQUE_POSITION);

    UniquePosition pos;
    if (!successor_pos.IsValid()) {
      // If all our successors are of non-positionable types, just create an
      // initial position.  We arbitrarily choose to sort invalid positions to
      // the right of the valid positions.
      //
      // We really shouldn't need to support this.  See TODO above.
      pos = UniquePosition::InitialPosition(suffix);
    } else {
      DCHECK(!siblings->empty());
      pos = UniquePosition::Before(successor_pos, suffix);
    }

    e->put(UNIQUE_POSITION, pos);
    return;
  }

  // We can't support placing an item after an invalid position.  Fortunately,
  // the tests don't exercise this particular case.  We should not support
  // siblings with invalid positions at all.  See TODO above.
  DCHECK(predecessor->ref(UNIQUE_POSITION).IsValid());

  OrderedChildSet::const_iterator neighbour = siblings->find(predecessor);
  DCHECK(neighbour != siblings->end());

  ++neighbour;
  if (neighbour == siblings->end()) {
    // Inserting at the end of the list.
    UniquePosition pos = UniquePosition::After(
        predecessor->ref(UNIQUE_POSITION),
        suffix);
    e->put(UNIQUE_POSITION, pos);
    return;
  }

  EntryKernel* successor = *neighbour;

  // Another mixed valid and invalid position case.  This one could be
  // supported in theory, but we're trying to deprecate support for siblings
  // with and without valid positions.  See TODO above.
  DCHECK(successor->ref(UNIQUE_POSITION).IsValid());

  // Finally, the normal case: inserting between two elements.
  UniquePosition pos = UniquePosition::Between(
      predecessor->ref(UNIQUE_POSITION),
      successor->ref(UNIQUE_POSITION),
      suffix);
  e->put(UNIQUE_POSITION, pos);
  return;
}

// TODO(rlarocque): Avoid this indirection.  Just return the set.
void Directory::AppendChildHandles(const ScopedKernelLock& lock,
                                   const Id& parent_id,
                                   Directory::Metahandles* result) {
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(parent_id);
  if (!children)
    return;

  for (OrderedChildSet::const_iterator i = children->begin();
       i != children->end(); ++i) {
    result->push_back((*i)->ref(META_HANDLE));
  }
}

void Directory::UnmarkDirtyEntry(WriteTransaction* trans, Entry* entry) {
  CHECK(trans);
  entry->kernel_->clear_dirty(&kernel_->dirty_metahandles);
}

void Directory::GetAttachmentIdsToUpload(BaseTransaction* trans,
                                         ModelType type,
                                         AttachmentIdList* ids) {
  // TODO(maniscalco): Maintain an index by ModelType and rewrite this method
  // to use it.  The approach below is likely very expensive because it
  // iterates all entries (bug 415199).
  DCHECK(trans);
  DCHECK(ids);
  ids->clear();
  AttachmentIdSet on_server_id_set;
  AttachmentIdSet not_on_server_id_set;
  std::vector<int64> metahandles;
  {
    ScopedKernelLock lock(this);
    GetMetaHandlesOfType(lock, trans, type, &metahandles);
    std::vector<int64>::const_iterator iter = metahandles.begin();
    const std::vector<int64>::const_iterator end = metahandles.end();
    // For all of this type's entries...
    for (; iter != end; ++iter) {
      EntryKernel* entry = GetEntryByHandle(lock, *iter);
      DCHECK(entry);
      const sync_pb::AttachmentMetadata metadata =
          entry->ref(ATTACHMENT_METADATA);
      // ...and for each of this entry's attachments...
      for (int i = 0; i < metadata.record_size(); ++i) {
        AttachmentId id =
            AttachmentId::CreateFromProto(metadata.record(i).id());
        // if this attachment is known to be on the server, remember it for
        // later,
        if (metadata.record(i).is_on_server()) {
          on_server_id_set.insert(id);
        } else {
          // otherwise, add it to id_set.
          not_on_server_id_set.insert(id);
        }
      }
    }
  }
  // Why did we bother keeping a set of ids known to be on the server?  The
  // is_on_server flag is stored denormalized, so we can end up with two
  // entries with the same attachment id where one says it's on the server and
  // the other says it's not.  When this happens, we trust the one that says
  // it's on the server.  To avoid re-uploading the same attachment multiple
  // times, we remove any ids known to be on the server from the id set we are
  // about to return.
  //
  // TODO(maniscalco): Eliminate redundant metadata storage (bug 415203).
  std::set_difference(not_on_server_id_set.begin(), not_on_server_id_set.end(),
                      on_server_id_set.begin(), on_server_id_set.end(),
                      std::back_inserter(*ids));
}

void Directory::OnCatastrophicError() {
  UMA_HISTOGRAM_BOOLEAN("Sync.DirectoryCatastrophicError", true);
  ReadTransaction trans(FROM_HERE, this);
  OnUnrecoverableError(&trans, FROM_HERE,
                       "Catastrophic error detected, Sync DB is unrecoverable");
}

Directory::Kernel* Directory::kernel() {
  return kernel_;
}

const Directory::Kernel* Directory::kernel() const {
  return kernel_;
}

}  // namespace syncable
}  // namespace syncer