diff --git a/CHANGES_UNRELEASED.md b/CHANGES_UNRELEASED.md index 7297788613..0fc7b3fb37 100644 --- a/CHANGES_UNRELEASED.md +++ b/CHANGES_UNRELEASED.md @@ -27,3 +27,6 @@ longer cause syncs to fail ([#2750](https://github.com/mozilla/application-services/pull/2750)), and bookmarks with duplicate or mismatched tags are reuploaded ([#2774](https://github.com/mozilla/application-services/pull/2774)). +- Improve handling of bookmark search keywords. Keywords are now imported + correctly from Fennec, and signing out of Sync in Firefox for iOS no longer + loses keywords ([#2501](https://github.com/mozilla/application-services/pull/2501)). diff --git a/components/places/sql/create_shared_schema.sql b/components/places/sql/create_shared_schema.sql index 99945aa7d9..8d00d8ac91 100644 --- a/components/places/sql/create_shared_schema.sql +++ b/components/places/sql/create_shared_schema.sql @@ -220,3 +220,16 @@ CREATE TABLE IF NOT EXISTS moz_bookmarks_synced_tag_relation( ON DELETE CASCADE, PRIMARY KEY(itemId, tagId) ) WITHOUT ROWID; + +-- This table holds search keywords for URLs. Desktop would like to replace +-- these with custom search engines eventually (bug 648398); however, we +-- must still round-trip keywords imported via Sync or migrated from Fennec. +-- Since none of the `moz_bookmarks_synced_*` tables are durable, we store +-- keywords for URLs in a separate table. Unlike Desktop, we don't support +-- custom POST data, since we don't sync it (bug 1345417), and Fennec +-- doesn't write it. 
+CREATE TABLE IF NOT EXISTS moz_keywords( + place_id INTEGER PRIMARY KEY REFERENCES moz_places(id) + ON DELETE CASCADE, + keyword TEXT NOT NULL UNIQUE +); diff --git a/components/places/sql/create_shared_triggers.sql b/components/places/sql/create_shared_triggers.sql index 11f8349e11..45f87c2084 100644 --- a/components/places/sql/create_shared_triggers.sql +++ b/components/places/sql/create_shared_triggers.sql @@ -306,3 +306,35 @@ BEGIN foreign_count = foreign_count - 1 WHERE id = OLD.place_id; END; + +-- These triggers adjust the foreign count for URLs with keywords, so that +-- they won't be expired or automatically removed. + +CREATE TEMP TRIGGER moz_keywords_afterinsert_trigger +AFTER INSERT ON moz_keywords +BEGIN + UPDATE moz_places SET + foreign_count = foreign_count + 1 + WHERE id = NEW.place_id; +END; + +CREATE TEMP TRIGGER moz_keywords_afterupdate_trigger +AFTER UPDATE OF place_id ON moz_keywords +WHEN OLD.place_id <> NEW.place_id +BEGIN + UPDATE moz_places SET + foreign_count = foreign_count + 1 + WHERE id = NEW.place_id; + + UPDATE moz_places SET + foreign_count = foreign_count - 1 + WHERE id = OLD.place_id; +END; + +CREATE TEMP TRIGGER moz_keywords_afterdelete_trigger +AFTER DELETE ON moz_keywords +BEGIN + UPDATE moz_places SET + foreign_count = foreign_count - 1 + WHERE id = OLD.place_id; +END; diff --git a/components/places/sql/create_sync_temp_tables.sql b/components/places/sql/create_sync_temp_tables.sql index 416bb5ddde..c7a60fd61a 100644 --- a/components/places/sql/create_sync_temp_tables.sql +++ b/components/places/sql/create_sync_temp_tables.sql @@ -25,7 +25,8 @@ CREATE TEMP TABLE itemsToApply( oldTitle TEXT, newTitle TEXT, oldPlaceId INTEGER, - newPlaceId INTEGER + newPlaceId INTEGER, + newKeyword TEXT ); CREATE INDEX existingItems ON itemsToApply(localId) WHERE localId NOT NULL; @@ -34,6 +35,8 @@ CREATE INDEX oldPlaceIds ON itemsToApply(newKind, oldPlaceId); CREATE INDEX newPlaceIds ON itemsToApply(newKind, newPlaceId); +CREATE INDEX 
newKeywords ON itemsToApply(newKeyword) WHERE newKeyword NOT NULL; + CREATE TEMP TABLE applyNewLocalStructureOps( mergedGuid TEXT PRIMARY KEY, mergedParentGuid TEXT NOT NULL, diff --git a/components/places/src/bookmark_sync/incoming.rs b/components/places/src/bookmark_sync/incoming.rs index 7ca9b89a26..436ec86229 100644 --- a/components/places/src/bookmark_sync/incoming.rs +++ b/components/places/src/bookmark_sync/incoming.rs @@ -64,7 +64,7 @@ impl<'a> IncomingApplicator<'a> { let parent_record_id = unpack_optional_id("parentid", b); let date_added = unpack_optional_i64("dateAdded", b, &mut validity); let title = unpack_optional_str("title", b, &mut validity); - let keyword = unpack_optional_str("keyword", b, &mut validity); + let keyword = unpack_optional_keyword("keyword", b, &mut validity); let raw_tags = &b["tags"]; let tags = if let Some(array) = raw_tags.as_array() { @@ -528,6 +528,32 @@ fn unpack_optional_i64( } } +fn unpack_optional_keyword( + key: &str, + data: &JsonValue, + validity: &mut SyncedBookmarkValidity, +) -> Option<String> { + match &data[key] { + JsonValue::String(ref s) => { + // Like Desktop, we don't reupload if a keyword has leading or + // trailing whitespace, or isn't lowercase. + let k = s.trim(); + if k.is_empty() { + None + } else { + Some(k.to_lowercase()) + } + } + JsonValue::Null => None, + _ => { + // ...But we do reupload if it's not a string, since older + // clients expect that. 
+ set_reupload(validity); + None + } + } +} + fn set_replace(validity: &mut SyncedBookmarkValidity) { if *validity < SyncedBookmarkValidity::Replace { *validity = SyncedBookmarkValidity::Replace; diff --git a/components/places/src/bookmark_sync/store.rs b/components/places/src/bookmark_sync/store.rs index d5c354de9e..90670194e4 100644 --- a/components/places/src/bookmark_sync/store.rs +++ b/components/places/src/bookmark_sync/store.rs @@ -168,12 +168,14 @@ impl<'a> BookmarksStore<'a> { remoteGuid, newLevel, newKind, localDateAdded, remoteDateAdded, lastModified, oldTitle, newTitle, - oldPlaceId, newPlaceId) + oldPlaceId, newPlaceId, + newKeyword) SELECT n.mergedGuid, b.id, v.id, v.guid, n.level, n.remoteType, b.dateAdded, v.dateAdded, MAX(v.dateAdded, {now}), b.title, v.title, - b.fk, v.placeId + b.fk, v.placeId, + v.keyword FROM ops n JOIN moz_bookmarks_synced v ON v.guid = n.remoteGuid LEFT JOIN moz_bookmarks b ON b.guid = n.localGuid", @@ -450,6 +452,21 @@ impl<'a> BookmarksStore<'a> { } fn apply_remote_items(&self, now: Timestamp) -> Result<()> { + // Remove all keywords from old and new URLs, and remove new keywords + // from all existing URLs. The `NOT NULL` conditions are important; they + // ensure that SQLite uses our partial indexes, instead of a table scan. 
+ log::debug!("Removing old keywords"); + self.interruptee.err_if_interrupted()?; + self.db.execute_batch( + "DELETE FROM moz_keywords + WHERE place_id IN (SELECT oldPlaceId FROM itemsToApply + WHERE oldPlaceId NOT NULL) OR + place_id IN (SELECT newPlaceId FROM itemsToApply + WHERE newPlaceId NOT NULL) OR + keyword IN (SELECT newKeyword FROM itemsToApply + WHERE newKeyword NOT NULL)", + )?; + log::debug!("Removing old tags"); self.interruptee.err_if_interrupted()?; self.db.execute_batch( @@ -514,6 +531,15 @@ impl<'a> BookmarksStore<'a> { bookmark_kind = SyncedBookmarkKind::Bookmark as u8, ))?; + log::debug!("Inserting new keywords for new URLs"); + self.interruptee.err_if_interrupted()?; + self.db.execute_batch( + "INSERT OR IGNORE INTO moz_keywords(keyword, place_id) + SELECT newKeyword, newPlaceId + FROM itemsToApply + WHERE newKeyword NOT NULL", + )?; + log::debug!("Inserting new tags for new URLs"); self.interruptee.err_if_interrupted()?; self.db.execute_batch( @@ -557,50 +583,27 @@ impl<'a> BookmarksStore<'a> { {} JOIN itemsToApply n ON n.mergedGuid = b.guid WHERE n.localDateAdded < n.remoteDateAdded", - UploadItemsFragment { - alias: "b", - remote_guid_column_name: "n.remoteGuid", - }, + UploadItemsFragment("b") ))?; log::debug!("Staging remaining locally changed items for upload"); - sql_support::each_sized_chunk( + sql_support::each_chunk_mapped( upload_items, - sql_support::default_max_variable_number() / 2, + |op| op.merged_node.guid.as_str(), |chunk, _| -> Result<()> { let sql = format!( - "WITH ops(mergedGuid, remoteGuid) AS (VALUES {vars}) - INSERT OR IGNORE INTO itemsToUpload(id, guid, syncChangeCounter, + "INSERT OR IGNORE INTO itemsToUpload(id, guid, syncChangeCounter, parentGuid, parentTitle, dateAdded, kind, title, placeId, url, keyword, position) {upload_items_fragment} - JOIN ops n ON n.mergedGuid = b.guid", - vars = - sql_support::repeat_display(chunk.len(), ",", |_, f| write!(f, "(?, ?)")), - upload_items_fragment = UploadItemsFragment { - 
alias: "b", - remote_guid_column_name: "n.remoteGuid", - }, + WHERE b.guid IN ({vars})", + vars = sql_support::repeat_sql_vars(chunk.len()), + upload_items_fragment = UploadItemsFragment("b") ); - let mut params = Vec::with_capacity(chunk.len() * 2); - for op in chunk.iter() { - self.interruptee.err_if_interrupted()?; - - let merged_guid = op.merged_node.guid.as_str(); - params.push(Some(merged_guid)); - - let remote_guid = op - .merged_node - .merge_state - .remote_node() - .map(|node| node.guid.as_str()); - params.push(remote_guid); - } - - self.db.execute(&sql, ¶ms)?; + self.db.execute(&sql, chunk)?; Ok(()) }, )?; @@ -1141,6 +1144,46 @@ impl<'a> Merger<'a> { /// Prepares synced bookmarks for merging. fn prepare(&self) -> Result<()> { + // Sync and Fennec associate keywords with bookmarks, and don't sync + // POST data; Rust Places associates them with URLs, and also doesn't + // support POST data; Desktop associates keywords with (URL, POST data) + // pairs, and multiple bookmarks may have the same URL. + // + // When a keyword changes, clients should reupload all bookmarks with + // the affected URL (bug 1328737). Just in case, we flag any synced + // bookmarks that have different keywords for the same URL, or the same + // keyword for different URLs, for reupload. + self.store.interruptee.err_if_interrupted()?; + log::debug!("Flagging bookmarks with mismatched keywords for reupload"); + let sql = format!( + "UPDATE moz_bookmarks_synced SET + validity = {reupload} + WHERE validity = {valid} AND ( + placeId IN ( + /* Same URL, different keywords. `COUNT` ignores NULLs, so + we need to count them separately. This handles cases where + a keyword was removed from one, but not all bookmarks with + the same URL. */ + SELECT placeId FROM moz_bookmarks_synced + GROUP BY placeId + HAVING COUNT(DISTINCT keyword) + + COUNT(DISTINCT CASE WHEN keyword IS NULL + THEN 1 END) > 1 + ) OR keyword IN ( + /* Different URLs, same keyword. 
Bookmarks with keywords but + without URLs are already invalid, so we don't need to handle + NULLs here. */ + SELECT keyword FROM moz_bookmarks_synced + WHERE keyword NOT NULL + GROUP BY keyword + HAVING COUNT(DISTINCT placeId) > 1 + ) + )", + reupload = SyncedBookmarkValidity::Reupload as u8, + valid = SyncedBookmarkValidity::Valid as u8, + ); + self.store.db.execute_batch(&sql)?; + // Like keywords, Sync associates tags with bookmarks, but Places // associates them with URLs. This means multiple bookmarks with the // same URL should have the same tags. In practice, different tags for @@ -1161,8 +1204,8 @@ impl<'a> Merger<'a> { // than two string lists! If a bookmark has mismatched tags, the sum of // its tag IDs in `tagsByItemId` won't match the sum in `tagsByPlaceId`, // and we'll flag the item for reupload. - log::debug!("Flagging tags with mismatched URLs for reupload"); self.store.interruptee.err_if_interrupted()?; + log::debug!("Flagging bookmarks with mismatched tags for reupload"); let sql = format!( "WITH tagsByPlaceId(placeId, tagIds) AS ( @@ -1536,12 +1579,7 @@ impl fmt::Display for ItemTypeFragment { /// Formats a `SELECT` statement for staging local items in the `itemsToUpload` /// table. -struct UploadItemsFragment { - /// The alias to use for the Places `moz_bookmarks` table. - alias: &'static str, - /// The name of the column containing the synced item's GUID. 
- remote_guid_column_name: &'static str, -} +struct UploadItemsFragment(&'static str); impl fmt::Display for UploadItemsFragment { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { @@ -1551,16 +1589,14 @@ impl fmt::Display for UploadItemsFragment { p.guid AS parentGuid, p.title AS parentTitle, {alias}.dateAdded, {kind_fragment} AS kind, {alias}.title, h.id AS placeId, h.url, - (SELECT v.keyword FROM moz_bookmarks_synced v - WHERE v.guid = {remote_guid_column_name}), + (SELECT k.keyword FROM moz_keywords k + WHERE k.place_id = h.id) AS keyword, {alias}.position FROM moz_bookmarks {alias} JOIN moz_bookmarks p ON p.id = {alias}.parent LEFT JOIN moz_places h ON h.id = {alias}.fk", - alias = self.alias, - kind_fragment = - item_kind_fragment(self.alias, "type", UrlOrPlaceIdFragment::Url("h.url")), - remote_guid_column_name = self.remote_guid_column_name, + alias = self.0, + kind_fragment = item_kind_fragment(self.0, "type", UrlOrPlaceIdFragment::Url("h.url")), ) } } @@ -1709,6 +1745,25 @@ mod tests { use sync15::{CollSyncIds, Payload}; + type ExpectedSyncedItem<'a> = ( + &'static str, + &'a SyncedBookmarkItem, + Option<Box<dyn Fn(&mut SyncedBookmarkItem) -> &mut SyncedBookmarkItem>>, + ); + + fn check_expected_synced_items(conn: &PlacesDb, table: &[ExpectedSyncedItem<'_>]) { + for (guid, base, func) in table { + let actual = SyncedBookmarkItem::get(conn, &SyncGuid::from(guid)) + .expect("Should fetch synced bookmark item") + .expect("Expected synced item should exist"); + let mut expected = SyncedBookmarkItem::clone(base); + match func { + Some(f) => assert_eq!(&actual, f(&mut expected)), + None => assert_eq!(actual, expected), + } + } + } + fn apply_incoming(conn: &PlacesDb, remote_time: ServerTimestamp, records_json: Value) { // suck records into the store. let interrupt_scope = conn.begin_interrupt_scope(); @@ -2112,8 +2167,8 @@ mod tests { } } - // Now for some fun server data. Only B and C have problems; D and E - // are fine, and shouldn't be reuploaded. + // Now for some fun server data. 
Only B, C, and F2 have problems; + // D and E are fine, and shouldn't be reuploaded. let remote_records = json!([{ // Change B's tags on the server, and duplicate `two` for good // measure. We should reupload B with only one `two` tag. @@ -2408,15 +2463,10 @@ .kind(SyncedBookmarkKind::Bookmark) .url(Some("http://example.com/f")) .tags(vec!["twelve".into()]); - type Test<'a> = &'a [( - &'static str, - &'a SyncedBookmarkItem, - Option<Box<dyn Fn(&mut SyncedBookmarkItem) -> &mut SyncedBookmarkItem>>, - )]; // A table-driven test to clean up some of the boilerplate. We clone // the base item for each test, and pass it to the boxed closure to set // additional properties. - let expected_synced_items: Test<'_> = &[ + let expected_synced_items: &[ExpectedSyncedItem<'_>] = &[ ( "bookmarkAAA1", &synced_item_for_a, @@ -2476,15 +2526,7 @@ })), ), ]; - for (guid, base, func) in expected_synced_items { - let actual = SyncedBookmarkItem::get(&writer, &SyncGuid::from(guid))? - .expect("Expected remote item should exist"); - let mut expected = SyncedBookmarkItem::clone(base); - match func { - Some(f) => assert_eq!(&actual, f(&mut expected)), - None => assert_eq!(actual, expected), - } - } + check_expected_synced_items(&writer, expected_synced_items); Ok(()) } @@ -2977,6 +3019,280 @@ Ok(()) } + #[test] + fn test_apply_complex_bookmark_keywords() -> Result<()> { + use crate::storage::bookmarks::bookmarks_get_url_for_keyword; + + // We don't provide an API for setting keywords locally, but we'll + // still round-trip and fix up keywords on the server. + + let api = new_mem_api(); + let writer = api.open_connection(ConnectionType::ReadWrite)?; + + // Let's add some remote bookmarks with keywords. + let remote_records = json!([{ + // A1 and A2 have the same URL and keyword, so we shouldn't + // reupload them. 
+ "id": "bookmarkAAA1", + "type": "bookmark", + "parentid": "unfiled", + "parentName": "Unfiled", + "title": "A1", + "bmkUri": "http://example.com/a", + "keyword": "one", + }, { + "id": "bookmarkAAA2", + "type": "bookmark", + "parentid": "menu", + "parentName": "Menu", + "title": "A2", + "bmkUri": "http://example.com/a", + "keyword": "one", + }, { + // B1 and B2 have mismatched keywords, and we should reupload + // both of them. It's not specified which keyword wins, but + // reuploading both means we make them consistent. + "id": "bookmarkBBB1", + "type": "bookmark", + "parentid": "unfiled", + "parentName": "Unfiled", + "title": "B1", + "bmkUri": "http://example.com/b", + "keyword": "two", + }, { + "id": "bookmarkBBB2", + "type": "bookmark", + "parentid": "menu", + "parentName": "Menu", + "title": "B2", + "bmkUri": "http://example.com/b", + "keyword": "three", + }, { + // C1 has a keyword; C2 doesn't. As with B, which one wins + // depends on which record we apply last, and how SQLite + // processes the rows, but we should reupload both. + "id": "bookmarkCCC1", + "type": "bookmark", + "parentid": "unfiled", + "parentName": "Unfiled", + "title": "C1", + "bmkUri": "http://example.com/c", + "keyword": "four", + }, { + "id": "bookmarkCCC2", + "type": "bookmark", + "parentid": "menu", + "parentName": "Menu", + "title": "C2", + "bmkUri": "http://example.com/c", + }, { + // D has a keyword that needs to be cleaned up before + // inserting. In this case, we intentionally don't reupload. 
+ "id": "bookmarkDDDD", + "type": "bookmark", + "parentid": "unfiled", + "parentName": "Unfiled", + "title": "D", + "bmkUri": "http://example.com/d", + "keyword": " FIVE ", + }, { + "id": "unfiled", + "type": "folder", + "parentid": "root", + "title": "Unfiled", + "children": ["bookmarkAAA1", "bookmarkBBB1", "bookmarkCCC1", "bookmarkDDDD"], + }, { + "id": "menu", + "type": "folder", + "parentid": "root", + "title": "Menu", + "children": ["bookmarkAAA2", "bookmarkBBB2", "bookmarkCCC2"], + }]); + + let syncer = api.open_sync_connection()?; + let interrupt_scope = syncer.begin_interrupt_scope(); + let store = BookmarksStore::new(&syncer, &interrupt_scope); + let mut incoming = IncomingChangeset::new(store.collection_name(), ServerTimestamp(0)); + if let Value::Array(records) = remote_records { + for record in records { + let payload = Payload::from_json(record).unwrap(); + incoming.changes.push((payload, ServerTimestamp(0))); + } + } else { + unreachable!("JSON records must be an array"); + } + let mut outgoing = store + .apply_incoming(vec![incoming], &mut telemetry::Engine::new("bookmarks")) + .expect("Should apply incoming and stage outgoing records with keywords"); + outgoing.changes.sort_by(|a, b| a.id.cmp(&b.id)); + + assert_local_json_tree( + &writer, + &BookmarkRootGuid::Root.as_guid(), + json!({ + "guid": &BookmarkRootGuid::Root.as_guid(), + "children": [{ + "guid": &BookmarkRootGuid::Menu.as_guid(), + "children": [{ + "guid": "bookmarkAAA2", + "title": "A2", + "url": "http://example.com/a", + }, { + "guid": "bookmarkBBB2", + "title": "B2", + "url": "http://example.com/b", + }, { + "guid": "bookmarkCCC2", + "title": "C2", + "url": "http://example.com/c", + }], + }, { + "guid": &BookmarkRootGuid::Toolbar.as_guid(), + "children": [], + }, { + "guid": &BookmarkRootGuid::Unfiled.as_guid(), + "children": [{ + "guid": "bookmarkAAA1", + "title": "A1", + "url": "http://example.com/a", + }, { + "guid": "bookmarkBBB1", + "title": "B1", + "url": "http://example.com/b", 
+ }, { + "guid": "bookmarkCCC1", + "title": "C1", + "url": "http://example.com/c", + }, { + "guid": "bookmarkDDDD", + "title": "D", + "url": "http://example.com/d", + }], + }, { + "guid": &BookmarkRootGuid::Mobile.as_guid(), + "children": [], + }], + }), + ); + // And verify our local keywords are correct, too. + let url_for_one = bookmarks_get_url_for_keyword(&writer, "one")? + .expect("Should have URL for keyword `one`"); + assert_eq!(url_for_one.as_str(), "http://example.com/a"); + + let keyword_for_b = match ( + bookmarks_get_url_for_keyword(&writer, "two")?, + bookmarks_get_url_for_keyword(&writer, "three")?, + ) { + (Some(url), None) => { + assert_eq!(url.as_str(), "http://example.com/b"); + "two" + } + (None, Some(url)) => { + assert_eq!(url.as_str(), "http://example.com/b"); + "three" + } + (Some(_), Some(_)) => panic!("Should pick `two` or `three`, not both"), + (None, None) => panic!("Should have URL for either `two` or `three`"), + }; + + let keyword_for_c = match bookmarks_get_url_for_keyword(&writer, "four")? { + Some(url) => { + assert_eq!(url.as_str(), "http://example.com/c"); + Some("four") + } + None => None, + }; + + let url_for_five = bookmarks_get_url_for_keyword(&writer, "five")? + .expect("Should have URL for keyword `five`"); + assert_eq!(url_for_five.as_str(), "http://example.com/d"); + + let expected_outgoing_keywords = &[ + ("bookmarkBBB1", Some(keyword_for_b)), + ("bookmarkBBB2", Some(keyword_for_b)), + ("bookmarkCCC1", keyword_for_c), + ("bookmarkCCC2", keyword_for_c), + ("menu", None), // Roots always get uploaded on the first sync. + ("mobile", None), + ("toolbar", None), + ("unfiled", None), + ]; + assert_eq!( + outgoing + .changes + .iter() + .map(|p| ( + p.id.as_str(), + p.data.get("keyword").and_then(|v| v.as_str()) + )) + .collect::<Vec<_>>(), + expected_outgoing_keywords, + "Should upload new bookmarks and fix up keywords", + ); + + // Now push the records back to the store, so we can check what we're + // uploading. 
+ store + .sync_finished( + ServerTimestamp(0), + expected_outgoing_keywords + .iter() + .map(|(id, _)| SyncGuid::from(id)) + .collect(), + ) + .expect("Should push synced changes back to the store"); + + let mut synced_item_for_b = SyncedBookmarkItem::new(); + synced_item_for_b + .validity(SyncedBookmarkValidity::Valid) + .kind(SyncedBookmarkKind::Bookmark) + .url(Some("http://example.com/b")) + .keyword(Some(&keyword_for_b)); + let mut synced_item_for_c = SyncedBookmarkItem::new(); + synced_item_for_c + .validity(SyncedBookmarkValidity::Valid) + .kind(SyncedBookmarkKind::Bookmark) + .url(Some("http://example.com/c")) + .keyword(keyword_for_c.to_owned()); + let expected_synced_items: &[ExpectedSyncedItem<'_>] = &[ + ( + "bookmarkBBB1", + &synced_item_for_b, + Some(Box::new(|a| { + a.parent_guid(Some(&BookmarkRootGuid::Unfiled.as_guid())) + .title(Some("B1")) + })), + ), + ( + "bookmarkBBB2", + &synced_item_for_b, + Some(Box::new(|a| { + a.parent_guid(Some(&BookmarkRootGuid::Menu.as_guid())) + .title(Some("B2")) + })), + ), + ( + "bookmarkCCC1", + &synced_item_for_c, + Some(Box::new(|a| { + a.parent_guid(Some(&BookmarkRootGuid::Unfiled.as_guid())) + .title(Some("C1")) + })), + ), + ( + "bookmarkCCC2", + &synced_item_for_c, + Some(Box::new(|a| { + a.parent_guid(Some(&BookmarkRootGuid::Menu.as_guid())) + .title(Some("C2")) + })), + ), + ]; + check_expected_synced_items(&writer, expected_synced_items); + + Ok(()) + } + #[test] fn test_wipe() -> Result<()> { let api = new_mem_api(); diff --git a/components/places/src/db/schema.rs b/components/places/src/db/schema.rs index 9a872ec52f..8402b40bba 100644 --- a/components/places/src/db/schema.rs +++ b/components/places/src/db/schema.rs @@ -18,7 +18,7 @@ use crate::types::SyncStatus; use rusqlite::NO_PARAMS; use sql_support::ConnExt; -const VERSION: i64 = 10; +const VERSION: i64 = 11; // Shared schema and temp tables for the read-write and Sync connections. 
const CREATE_SHARED_SCHEMA_SQL: &str = include_str!("../../sql/create_shared_schema.sql"); @@ -229,6 +229,22 @@ fn upgrade(db: &PlacesDb, from: i64) -> Result<()> { ], || Ok(()), )?; + migration( + db, + 10, + 11, + &[ + // Migrate synced keywords into their own table, so that they're + // available via `bookmarks_get_url_for_keyword` before the next + // sync. + "INSERT OR IGNORE INTO moz_keywords(keyword, place_id) + SELECT keyword, placeId + FROM moz_bookmarks_synced + WHERE placeId NOT NULL AND + keyword NOT NULL", + ], + || Ok(()), + )?; // Add more migrations here... if get_current_schema_version(db)? == VERSION { diff --git a/components/places/src/storage/bookmarks.rs b/components/places/src/storage/bookmarks.rs index a115c3f267..57a36f072b 100644 --- a/components/places/src/storage/bookmarks.rs +++ b/components/places/src/storage/bookmarks.rs @@ -912,9 +912,9 @@ impl<'de> Deserialize<'de> for BookmarkTreeNode { /// Get the URL of the bookmark matching a keyword pub fn bookmarks_get_url_for_keyword(db: &PlacesDb, keyword: &str) -> Result<Option<Url>> { let bookmark_url = db.try_query_row( - "SELECT url FROM moz_places p - JOIN moz_bookmarks_synced b ON b.placeId = p.id - WHERE b.keyword = :keyword", + "SELECT h.url FROM moz_keywords k + JOIN moz_places h ON h.id = k.place_id + WHERE k.keyword = :keyword", &[(":keyword", &keyword)], |row| row.get::<_, String>("url"), true, @@ -1565,10 +1565,10 @@ mod tests { // create a bookmark with keyword 'donut' pointing at it. 
conn.execute_named_cached( - "INSERT INTO moz_bookmarks_synced - (keyword, placeId, guid) + "INSERT INTO moz_keywords + (keyword, place_id) VALUES - ('donut', :place_id, 'fake_guid___')", + ('donut', :place_id)", &[(":place_id", &place_id)], ) .expect("should work"); @@ -1581,10 +1581,10 @@ mod tests { // now change the keyword to 'ice cream' conn.execute_named_cached( - "REPLACE INTO moz_bookmarks_synced - (keyword, placeId, guid) + "REPLACE INTO moz_keywords + (keyword, place_id) VALUES - ('ice cream', :place_id, 'fake_guid___')", + ('ice cream', :place_id)", &[(":place_id", &place_id)], ) .expect("should work");