Skip to content
This repository has been archived by the owner on Apr 9, 2020. It is now read-only.

Commit

Permalink
rust: Head start on some upcoming warnings
Browse files Browse the repository at this point in the history
Summary:
This diff sets two Rust lints to warn in fbcode:

```
[rust]
  warn_lints = bare_trait_objects, ellipsis_inclusive_range_patterns
```

and fixes occurrences of those warnings within common/rust, hg, and mononoke.

Both of these lints will be set to warn by default starting with rustc 1.37. Enabling them early avoids writing even more new code that would need fixing when we pull in 1.37 in six weeks.

Upstream tracking issue: rust-lang/rust#54910

Reviewed By: Imxset21

Differential Revision: D16200291

fbshipit-source-id: aca11a7a944e9fa95f94e226b52f6f053b97ec74
  • Loading branch information
David Tolnay authored and facebook-github-bot committed Jul 12, 2019
1 parent b9f18bd commit fed2ac8
Show file tree
Hide file tree
Showing 47 changed files with 220 additions and 215 deletions.
2 changes: 1 addition & 1 deletion blobrepo/blob_changeset/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ use mononoke_types::DateTime;
/// 1. It ensures that the prefix applies first, which is important for shared caches like
/// memcache.
/// 2. It ensures that all possible blobrepos use a prefix.
pub type RepoBlobstore = CensoredBlob<PrefixBlobstore<Arc<Blobstore>>>;
pub type RepoBlobstore = CensoredBlob<PrefixBlobstore<Arc<dyn Blobstore>>>;

pub struct ChangesetMetadata {
pub user: String,
Expand Down
6 changes: 3 additions & 3 deletions blobrepo/changeset_fetcher/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -30,19 +30,19 @@ pub trait ChangesetFetcher: Send + Sync {
cs_id: ChangesetId,
) -> BoxFuture<Vec<ChangesetId>, Error>;

fn get_stats(&self) -> HashMap<String, Box<Any>> {
fn get_stats(&self) -> HashMap<String, Box<dyn Any>> {
HashMap::new()
}
}

/// Simplest ChangesetFetcher implementation which is just a wrapper around `Changesets` object
pub struct SimpleChangesetFetcher {
changesets: Arc<Changesets>,
changesets: Arc<dyn Changesets>,
repo_id: RepositoryId,
}

impl SimpleChangesetFetcher {
pub fn new(changesets: Arc<Changesets>, repo_id: RepositoryId) -> Self {
pub fn new(changesets: Arc<dyn Changesets>, repo_id: RepositoryId) -> Self {
Self {
changesets,
repo_id,
Expand Down
11 changes: 5 additions & 6 deletions blobrepo/factory/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -186,7 +186,7 @@ pub fn new_memblob_empty(
fn new_development<T: SqlFactory>(
logger: Logger,
sql_factory: &T,
blobstore: Arc<Blobstore>,
blobstore: Arc<dyn Blobstore>,
censored_blobs: Option<HashMap<String, String>>,
scuba_censored_table: Option<String>,
repoid: RepositoryId,
Expand Down Expand Up @@ -226,7 +226,7 @@ fn new_development<T: SqlFactory>(
fn new_production<T: SqlFactory>(
logger: Logger,
sql_factory: &T,
blobstore: Arc<Blobstore>,
blobstore: Arc<dyn Blobstore>,
censored_blobs: Option<HashMap<String, String>>,
scuba_censored_table: Option<String>,
repoid: RepositoryId,
Expand Down Expand Up @@ -280,10 +280,9 @@ fn new_production<T: SqlFactory>(
let changeset_fetcher_factory = {
cloned!(changesets, repoid);
move || {
let res: Arc<ChangesetFetcher + Send + Sync> = Arc::new(SimpleChangesetFetcher::new(
changesets.clone(),
repoid.clone(),
));
let res: Arc<dyn ChangesetFetcher + Send + Sync> = Arc::new(
SimpleChangesetFetcher::new(changesets.clone(), repoid.clone()),
);
res
}
};
Expand Down
4 changes: 2 additions & 2 deletions blobrepo/src/manifest.rs
Original file line number Diff line number Diff line change
Expand Up @@ -226,7 +226,7 @@ impl BlobManifest {
}

impl Manifest for BlobManifest {
fn lookup(&self, path: &MPathElement) -> Option<Box<Entry + Sync>> {
fn lookup(&self, path: &MPathElement) -> Option<Box<dyn Entry + Sync>> {
self.content.files.get(path).map({
move |entry_id| {
HgBlobEntry::new(
Expand All @@ -240,7 +240,7 @@ impl Manifest for BlobManifest {
})
}

fn list(&self) -> Box<Iterator<Item = Box<Entry + Sync>> + Send> {
fn list(&self) -> Box<dyn Iterator<Item = Box<dyn Entry + Sync>> + Send> {
let list_iter = self.content.files.clone().into_iter().map({
let blobstore = self.blobstore.clone();
move |(path, entry_id)| {
Expand Down
49 changes: 26 additions & 23 deletions blobrepo/src/repo.rs
Original file line number Diff line number Diff line change
Expand Up @@ -115,32 +115,33 @@ define_stats! {
pub struct BlobRepo {
logger: Logger,
blobstore: RepoBlobstore,
bookmarks: Arc<Bookmarks>,
filenodes: Arc<Filenodes>,
changesets: Arc<Changesets>,
bonsai_hg_mapping: Arc<BonsaiHgMapping>,
bookmarks: Arc<dyn Bookmarks>,
filenodes: Arc<dyn Filenodes>,
changesets: Arc<dyn Changesets>,
bonsai_hg_mapping: Arc<dyn BonsaiHgMapping>,
repoid: RepositoryId,
// Returns new ChangesetFetcher that can be used by operation that work with commit graph
// (for example, revsets).
changeset_fetcher_factory: Arc<Fn() -> Arc<ChangesetFetcher + Send + Sync> + Send + Sync>,
hg_generation_lease: Arc<LeaseOps>,
changeset_fetcher_factory:
Arc<dyn Fn() -> Arc<dyn ChangesetFetcher + Send + Sync> + Send + Sync>,
hg_generation_lease: Arc<dyn LeaseOps>,
}

impl BlobRepo {
pub fn new(
logger: Logger,
bookmarks: Arc<Bookmarks>,
bookmarks: Arc<dyn Bookmarks>,
blobstore: RepoBlobstore,
filenodes: Arc<Filenodes>,
changesets: Arc<Changesets>,
bonsai_hg_mapping: Arc<BonsaiHgMapping>,
filenodes: Arc<dyn Filenodes>,
changesets: Arc<dyn Changesets>,
bonsai_hg_mapping: Arc<dyn BonsaiHgMapping>,
repoid: RepositoryId,
hg_generation_lease: Arc<LeaseOps>,
hg_generation_lease: Arc<dyn LeaseOps>,
) -> Self {
let changeset_fetcher_factory = {
cloned!(changesets, repoid);
move || {
let res: Arc<ChangesetFetcher + Send + Sync> = Arc::new(
let res: Arc<dyn ChangesetFetcher + Send + Sync> = Arc::new(
SimpleChangesetFetcher::new(changesets.clone(), repoid.clone()),
);
res
Expand All @@ -162,14 +163,16 @@ impl BlobRepo {

pub fn new_with_changeset_fetcher_factory(
logger: Logger,
bookmarks: Arc<Bookmarks>,
bookmarks: Arc<dyn Bookmarks>,
blobstore: RepoBlobstore,
filenodes: Arc<Filenodes>,
changesets: Arc<Changesets>,
bonsai_hg_mapping: Arc<BonsaiHgMapping>,
filenodes: Arc<dyn Filenodes>,
changesets: Arc<dyn Changesets>,
bonsai_hg_mapping: Arc<dyn BonsaiHgMapping>,
repoid: RepositoryId,
changeset_fetcher_factory: Arc<Fn() -> Arc<ChangesetFetcher + Send + Sync> + Send + Sync>,
hg_generation_lease: Arc<LeaseOps>,
changeset_fetcher_factory: Arc<
dyn Fn() -> Arc<dyn ChangesetFetcher + Send + Sync> + Send + Sync,
>,
hg_generation_lease: Arc<dyn LeaseOps>,
) -> Self {
BlobRepo {
logger,
Expand Down Expand Up @@ -643,7 +646,7 @@ impl BlobRepo {
&self,
ctx: CoreContext,
manifestid: HgManifestId,
) -> BoxFuture<Box<Manifest + Sync>, Error> {
) -> BoxFuture<Box<dyn Manifest + Sync>, Error> {
STATS::get_manifest_by_nodeid.add_value(1);
BlobManifest::load(ctx, &self.blobstore, manifestid)
.and_then(move |mf| mf.ok_or(ErrorKind::ManifestMissing(manifestid).into()))
Expand Down Expand Up @@ -764,7 +767,7 @@ impl BlobRepo {
to_hg_bookmark_stream(&self, &ctx, stream)
}

pub fn update_bookmark_transaction(&self, ctx: CoreContext) -> Box<bookmarks::Transaction> {
pub fn update_bookmark_transaction(&self, ctx: CoreContext) -> Box<dyn bookmarks::Transaction> {
STATS::update_bookmark_transaction.add_value(1);
self.bookmarks.create_transaction(ctx, self.repoid)
}
Expand Down Expand Up @@ -933,7 +936,7 @@ impl BlobRepo {
.map(|res| res.map(|res| Generation::new(res.gen)))
}

pub fn get_changeset_fetcher(&self) -> Arc<ChangesetFetcher> {
pub fn get_changeset_fetcher(&self) -> Arc<dyn ChangesetFetcher> {
(self.changeset_fetcher_factory)()
}

Expand Down Expand Up @@ -1042,7 +1045,7 @@ impl BlobRepo {
self.repoid
}

pub fn get_filenodes(&self) -> Arc<Filenodes> {
pub fn get_filenodes(&self) -> Arc<dyn Filenodes> {
self.filenodes.clone()
}

Expand Down Expand Up @@ -1719,7 +1722,7 @@ impl BlobRepo {
ctx: CoreContext,
repo: BlobRepo,
bcs_id: ChangesetId,
bonsai_hg_mapping: Arc<BonsaiHgMapping>,
bonsai_hg_mapping: Arc<dyn BonsaiHgMapping>,
) -> impl Future<Item = Vec<BonsaiChangeset>, Error = Error> {
let mut queue = VecDeque::new();
let mut visited: HashSet<ChangesetId> = HashSet::new();
Expand Down
2 changes: 1 addition & 1 deletion blobrepo/test/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -824,7 +824,7 @@ fn test_get_manifest_from_bonsai() {
};
let get_entries = {
cloned!(ctx, repo);
move |ms_hash: HgManifestId| -> BoxFuture<HashMap<String, Box<Entry + Sync>>, Error> {
move |ms_hash: HgManifestId| -> BoxFuture<HashMap<String, Box<dyn Entry + Sync>>, Error> {
repo.get_manifest_by_nodeid(ctx.clone(), ms_hash)
.map(|ms| {
ms.list()
Expand Down
8 changes: 4 additions & 4 deletions blobstore/multiplexedblob/src/base.rs
Original file line number Diff line number Diff line change
Expand Up @@ -89,15 +89,15 @@ pub trait MultiplexedBlobstorePutHandler: Send + Sync {
}

pub struct MultiplexedBlobstoreBase {
blobstores: Arc<[(BlobstoreId, Arc<Blobstore>)]>,
handler: Arc<MultiplexedBlobstorePutHandler>,
blobstores: Arc<[(BlobstoreId, Arc<dyn Blobstore>)]>,
handler: Arc<dyn MultiplexedBlobstorePutHandler>,
scuba_logger: Option<Arc<ScubaClient>>,
}

impl MultiplexedBlobstoreBase {
pub fn new(
blobstores: Vec<(BlobstoreId, Arc<Blobstore>)>,
handler: Arc<MultiplexedBlobstorePutHandler>,
blobstores: Vec<(BlobstoreId, Arc<dyn Blobstore>)>,
handler: Arc<dyn MultiplexedBlobstorePutHandler>,
scuba_logger: Option<Arc<ScubaClient>>,
) -> Self {
Self {
Expand Down
12 changes: 6 additions & 6 deletions blobstore/multiplexedblob/src/queue.rs
Original file line number Diff line number Diff line change
Expand Up @@ -22,14 +22,14 @@ use std::sync::Arc;
pub struct MultiplexedBlobstore {
repo_id: RepositoryId,
blobstore: Arc<MultiplexedBlobstoreBase>,
queue: Arc<BlobstoreSyncQueue>,
queue: Arc<dyn BlobstoreSyncQueue>,
}

impl MultiplexedBlobstore {
pub fn new(
repo_id: RepositoryId,
blobstores: Vec<(BlobstoreId, Arc<Blobstore>)>,
queue: Arc<BlobstoreSyncQueue>,
blobstores: Vec<(BlobstoreId, Arc<dyn Blobstore>)>,
queue: Arc<dyn BlobstoreSyncQueue>,
scuba_logger: Option<Arc<ScubaClient>>,
) -> Self {
let put_handler = Arc::new(QueueBlobstorePutHandler {
Expand Down Expand Up @@ -58,7 +58,7 @@ impl fmt::Debug for MultiplexedBlobstore {

struct QueueBlobstorePutHandler {
repo_id: RepositoryId,
queue: Arc<BlobstoreSyncQueue>,
queue: Arc<dyn BlobstoreSyncQueue>,
}

impl MultiplexedBlobstorePutHandler for QueueBlobstorePutHandler {
Expand Down Expand Up @@ -151,8 +151,8 @@ pub struct ScrubBlobstore {
impl ScrubBlobstore {
pub fn new(
repo_id: RepositoryId,
blobstores: Vec<(BlobstoreId, Arc<Blobstore>)>,
queue: Arc<BlobstoreSyncQueue>,
blobstores: Vec<(BlobstoreId, Arc<dyn Blobstore>)>,
queue: Arc<dyn BlobstoreSyncQueue>,
scuba_logger: Option<Arc<ScubaClient>>,
) -> Self {
let inner = MultiplexedBlobstore::new(repo_id, blobstores, queue, scuba_logger);
Expand Down
2 changes: 1 addition & 1 deletion blobstore_sync_queue/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,7 @@ pub trait BlobstoreSyncQueue: Send + Sync {
) -> BoxFuture<Vec<BlobstoreSyncQueueEntry>, Error>;
}

impl BlobstoreSyncQueue for Arc<BlobstoreSyncQueue> {
impl BlobstoreSyncQueue for Arc<dyn BlobstoreSyncQueue> {
fn add(&self, ctx: CoreContext, entry: BlobstoreSyncQueueEntry) -> BoxFuture<(), Error> {
(**self).add(ctx, entry)
}
Expand Down
2 changes: 1 addition & 1 deletion bookmarks/dbbookmarks/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -385,7 +385,7 @@ impl Bookmarks for SqlBookmarks {
.boxify()
}

fn create_transaction(&self, _ctx: CoreContext, repoid: RepositoryId) -> Box<Transaction> {
fn create_transaction(&self, _ctx: CoreContext, repoid: RepositoryId) -> Box<dyn Transaction> {
Box::new(SqlBookmarksTransaction::new(
self.write_connection.clone(),
repoid.clone(),
Expand Down
2 changes: 1 addition & 1 deletion bookmarks/src/cache.rs
Original file line number Diff line number Diff line change
Expand Up @@ -502,7 +502,7 @@ mod tests {
bookmarks: &T,
ctx: CoreContext,
repoid: RepositoryId,
) -> Box<Transaction> {
) -> Box<dyn Transaction> {
let mut transaction = bookmarks.create_transaction(ctx.clone(), repoid);

// Dirty the transaction.
Expand Down
2 changes: 1 addition & 1 deletion bundle2_resolver/src/changegroup/filelog.rs
Original file line number Diff line number Diff line change
Expand Up @@ -305,7 +305,7 @@ impl Arbitrary for Filelog {
}
}

fn shrink(&self) -> Box<Iterator<Item = Self>> {
fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
fn append(result: &mut Vec<Filelog>, f: Filelog) {
result.append(&mut f.shrink().collect());
result.push(f);
Expand Down
8 changes: 4 additions & 4 deletions bundle2_resolver/src/getbundle_response.rs
Original file line number Diff line number Diff line change
Expand Up @@ -30,8 +30,8 @@ pub fn create_getbundle_response(
blobrepo: BlobRepo,
common: Vec<HgChangesetId>,
heads: Vec<HgChangesetId>,
lca_hint: Arc<LeastCommonAncestorsHint>,
phases_hint: Option<Arc<Phases>>,
lca_hint: Arc<dyn LeastCommonAncestorsHint>,
phases_hint: Option<Arc<dyn Phases>>,
) -> Result<Vec<PartEncodeBuilder>> {
if common.is_empty() {
return Err(err_msg("no 'common' heads specified. Pull will be very inefficient. Please use hg clone instead"));
Expand Down Expand Up @@ -180,7 +180,7 @@ fn prepare_phases_stream(
ctx: CoreContext,
repo: BlobRepo,
heads: Vec<HgChangesetId>,
phases: Arc<Phases>,
phases: Arc<dyn Phases>,
) -> impl Stream<Item = (HgChangesetId, HgPhase), Error = Error> {
// create 'bonsai changesetid' => 'hg changesetid' hash map that will be later used
// heads that are not known by the server will be skipped
Expand Down Expand Up @@ -240,7 +240,7 @@ fn calculate_public_roots(
ctx: CoreContext,
repo: BlobRepo,
drafts: HashSet<ChangesetId>,
phases: Arc<Phases>,
phases: Arc<dyn Phases>,
) -> impl Future<Item = HashSet<ChangesetId>, Error = Error> {
future::loop_fn(
(drafts, HashSet::new(), HashSet::new()),
Expand Down
4 changes: 2 additions & 2 deletions bundle2_resolver/src/resolver.rs
Original file line number Diff line number Diff line change
Expand Up @@ -685,7 +685,7 @@ struct Bundle2Resolver {
bookmark_attrs: BookmarkAttrs,
infinitepush_params: InfinitepushParams,
hook_manager: Arc<HookManager>,
scribe_commit_queue: Arc<ScribeCommitQueue>,
scribe_commit_queue: Arc<dyn ScribeCommitQueue>,
}

impl Bundle2Resolver {
Expand Down Expand Up @@ -1734,7 +1734,7 @@ fn filter_or_check_infinitepush_allowed(
}

fn add_bookmark_to_transaction(
txn: &mut Box<Transaction>,
txn: &mut Box<dyn Transaction>,
bookmark_push: BookmarkPush<ChangesetId>,
reason: BookmarkUpdateReason,
) -> Result<()> {
Expand Down
9 changes: 6 additions & 3 deletions changesets/src/caching.rs
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ pub fn get_cache_key(repo_id: RepositoryId, cs_id: &ChangesetId) -> String {
}

pub struct CachingChangesets {
changesets: Arc<Changesets>,
changesets: Arc<dyn Changesets>,
cachelib: CachelibHandler<ChangesetEntry>,
memcache: MemcacheHandler,
keygen: KeyGen,
Expand All @@ -58,7 +58,10 @@ fn get_keygen() -> KeyGen {
}

impl CachingChangesets {
pub fn new(changesets: Arc<Changesets>, cache_pool: cachelib::VolatileLruCachePool) -> Self {
pub fn new(
changesets: Arc<dyn Changesets>,
cache_pool: cachelib::VolatileLruCachePool,
) -> Self {
Self {
changesets,
cachelib: cache_pool.into(),
Expand All @@ -68,7 +71,7 @@ impl CachingChangesets {
}

#[cfg(test)]
pub fn mocked(changesets: Arc<Changesets>) -> Self {
pub fn mocked(changesets: Arc<dyn Changesets>) -> Self {
let cachelib = CachelibHandler::create_mock();
let memcache = MemcacheHandler::create_mock();

Expand Down
2 changes: 1 addition & 1 deletion changesets/src/wrappers.rs
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ use mononoke_types::{ChangesetId, RepositoryId};
use crate::errors::*;
use crate::{ChangesetEntry, ChangesetInsert, Changesets};

impl Changesets for Arc<Changesets> {
impl Changesets for Arc<dyn Changesets> {
fn add(&self, ctx: CoreContext, cs: ChangesetInsert) -> BoxFuture<bool, Error> {
(**self).add(ctx, cs)
}
Expand Down
Loading

0 comments on commit fed2ac8

Please sign in to comment.