Commit 20c089b

bug/4 fixed for clippy

fulmicoton committed Oct 16, 2016
1 parent 2b7444b commit 20c089b
Showing 18 changed files with 44 additions and 42 deletions.
2 changes: 1 addition & 1 deletion examples/simple_search.rs
@@ -14,7 +14,7 @@ fn main() {
    // Let's create a temporary directory for the
    // sake of this example
    if let Ok(dir) = TempDir::new("tantivy_example_dir") {
-        run_example(&dir.path()).unwrap();
+        run_example(dir.path()).unwrap();
        dir.close().unwrap();
    }
}
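For context, this change addresses clippy's `needless_borrow` lint: `dir.path()` already returns a reference (`&Path`), so the extra `&` produces a double reference that the compiler immediately dereferences again. A minimal sketch of the same pattern (the names here are illustrative, not taken from the example):

    use std::path::Path;

    fn run_example(path: &Path) -> std::io::Result<()> {
        println!("running in {:?}", path);
        Ok(())
    }

    fn main() {
        let dir = std::env::temp_dir(); // a PathBuf
        // `dir.as_path()` already yields `&Path`; writing
        // `run_example(&dir.as_path())` would pass a `&&Path`,
        // which clippy flags as a needless borrow.
        run_example(dir.as_path()).unwrap();
    }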
2 changes: 1 addition & 1 deletion src/collector/top_collector.rs
@@ -114,7 +114,7 @@ impl Collector for TopCollector {
        // It's ok to unwrap as long as a limit of 0 is forbidden.
        let limit_doc: GlobalScoredDoc = *self.heap.peek().expect("Top collector with size 0 is forbidden");
        if limit_doc.score < scored_doc.score() {
-            let mut mut_head = self.heap.peek_mut().unwrap();
+            let mut mut_head = self.heap.peek_mut().expect("Top collector with size 0 is forbidden");
            mut_head.score = scored_doc.score();
            mut_head.doc_address = DocAddress(self.segment_id, scored_doc.doc());
        }
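The surrounding collector keeps the current top-k hits in a `std::collections::BinaryHeap` whose head is the worst hit so far, and `peek_mut` replaces that head in place. A hedged sketch of the pattern with integer scores (the real collector orders `GlobalScoredDoc` structs by score):

    use std::cmp::Reverse;
    use std::collections::BinaryHeap;

    // `Reverse` turns the max-heap into a min-heap, so the *worst* of the
    // current top-k sits at the head and can be evicted cheaply.
    fn collect(heap: &mut BinaryHeap<Reverse<u64>>, k: usize, score: u64) {
        if heap.len() < k {
            heap.push(Reverse(score));
        } else if heap.peek().expect("k = 0 is forbidden").0 < score {
            // The heap re-sifts when the `PeekMut` guard is dropped.
            *heap.peek_mut().expect("k = 0 is forbidden") = Reverse(score);
        }
    }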
2 changes: 1 addition & 1 deletion src/core/index_meta.rs
@@ -3,7 +3,7 @@ use schema::Schema;
use core::SegmentId;


-/// MetaInformation about the `Index`.
+/// Meta information about the `Index`.
///
/// This object is serialized on disk in the `meta.json` file.
/// It keeps information about
4 changes: 2 additions & 2 deletions src/datastruct/stacker/hashmap.rs
@@ -25,7 +25,7 @@ impl Default for BytesRef {

/// `KeyValue` is the item stored in the hash table.
/// The key is actually a `BytesRef` object stored in an external heap.
-/// The value_addr also points to an address in the heap.
+/// The `value_addr` also points to an address in the heap.
///
/// The key and the value are actually stored contiguously.
/// For this reason, the (start, stop) information is actually redundant
@@ -48,7 +48,7 @@ pub enum Entry {
}


-/// Customized HashMap with string keys
+/// Customized `HashMap` with string keys
///
/// This `HashMap` takes `String` keys. Keys are
/// stored in a user-defined heap.
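A minimal sketch of the scheme these doc comments describe: keys live as raw bytes in an external byte heap, and a small `(start, stop)` reference addresses them. The layout shown is an assumption for illustration, not tantivy's exact one:

    // A reference into the external heap.
    struct BytesRef {
        start: u32, // offset of the key bytes in the heap
        stop: u32,  // end offset of the key bytes
    }

    struct Heap {
        data: Vec<u8>,
    }

    impl Heap {
        // Append bytes and return their address range.
        fn allocate(&mut self, bytes: &[u8]) -> BytesRef {
            let start = self.data.len() as u32;
            self.data.extend_from_slice(bytes);
            BytesRef { start, stop: self.data.len() as u32 }
        }

        fn get(&self, r: &BytesRef) -> &[u8] {
            &self.data[r.start as usize..r.stop as usize]
        }
    }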
8 changes: 4 additions & 4 deletions src/fastfield/mod.rs
@@ -1,11 +1,11 @@
-/// FastField module
+/// Fast field module
///
-/// FastField are the equivalent of `DocValues` in `Lucene`.
-/// FastFields are stored in column-oriented fashion and allow fast
+/// Fast fields are the equivalent of `DocValues` in `Lucene`.
+/// Fast fields are stored in column-oriented fashion and allow fast
/// random access given a `DocId`.
///
/// Their performance is comparable to that of an array lookup.
-/// FastField are useful when a field is required for all or most of
+/// They are useful when a field is required for all or most of
/// the `DocSet`: for instance for scoring, grouping, filtering, or faceting.
///
/// Currently only u32 fast fields are supported.
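The "comparable to an array lookup" claim comes from fixed-width bit-packing: every value occupies the same number of bits, so the position of document `i`'s value is pure arithmetic. A hedged sketch of such a read, not tantivy's exact layout:

    // Read the `idx`-th value from a buffer where every value is stored
    // with the same fixed width of `num_bits` (1..=32) bits.
    fn get_packed(packed: &[u64], num_bits: u32, idx: usize) -> u32 {
        let bit_pos = idx * num_bits as usize;
        let word = bit_pos / 64;
        let offset = (bit_pos % 64) as u32;
        let mask = if num_bits == 32 { u64::from(u32::MAX) } else { (1u64 << num_bits) - 1 };
        let mut val = packed[word] >> offset;
        if offset + num_bits > 64 {
            // The value straddles two words: take the low bits of the next one.
            val |= packed[word + 1] << (64 - offset);
        }
        (val & mask) as u32
    }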
2 changes: 1 addition & 1 deletion src/fastfield/serializer.rs
@@ -9,7 +9,7 @@ use super::compute_num_bits;
/// `FastFieldSerializer` is in charge of serializing
/// fast fields on disk.
///
-/// FastField are encoded using bit-packing.
+/// Fast fields are encoded using bit-packing.
///
/// `FastFieldWriter`s are in charge of pushing the data to
/// the serializer.
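A sketch of the serializing side under the same assumption: the column's maximum value fixes the width. The `compute_num_bits` helper imported in the hunk header presumably plays this role; the version below is a stand-in:

    // Width needed to represent `max_value`, with a minimum of one bit.
    fn compute_num_bits(max_value: u32) -> u32 {
        (32 - max_value.leading_zeros()).max(1)
    }

    // Pack all values with that fixed width into 64-bit words.
    fn pack(values: &[u32]) -> (Vec<u64>, u32) {
        let max = values.iter().cloned().max().unwrap_or(0);
        let num_bits = compute_num_bits(max);
        let total_bits = values.len() * num_bits as usize;
        let mut packed = vec![0u64; (total_bits + 63) / 64];
        for (i, &v) in values.iter().enumerate() {
            let bit_pos = i * num_bits as usize;
            let (word, offset) = (bit_pos / 64, (bit_pos % 64) as u32);
            packed[word] |= u64::from(v) << offset;
            if offset + num_bits > 64 {
                // Spill the high bits into the next word.
                packed[word + 1] |= u64::from(v) >> (64 - offset);
            }
        }
        (packed, num_bits)
    }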
2 changes: 1 addition & 1 deletion src/indexer/segment_manager.rs
@@ -53,7 +53,7 @@ impl Debug for SegmentManager {
}


-/// Returns the segment_metas for (committed segment, uncommitted segments).
+/// Returns the `SegmentMeta`s for (committed segments, uncommitted segments).
/// The result is consistent with other transactions.
///
/// For instance, a segment will not appear in both committed and uncommitted
11 changes: 5 additions & 6 deletions src/indexer/segment_register.rs
@@ -52,7 +52,7 @@ pub struct SegmentRegister {
impl Debug for SegmentRegister {
    fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
        try!(write!(f, "SegmentRegister("));
-        for (ref k, ref v) in &self.segment_states {
+        for (k, v) in &self.segment_states {
            try!(write!(f, "{}:{}, ", k.short_uuid_string(), v.state.letter_code()));
        }
        try!(write!(f, ")"));
@@ -91,11 +91,10 @@ impl SegmentRegister {
}

    pub fn segment_ids(&self,) -> Vec<SegmentId> {
-        let segment_ids: Vec<SegmentId> = self.segment_metas()
+        self.segment_metas()
            .into_iter()
            .map(|segment_meta| segment_meta.segment_id)
-            .collect();
-        segment_ids
+            .collect()
    }
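This hunk fixes clippy's `let_and_return` lint: binding the result to a local only to return it on the next line adds nothing, so the expression is returned directly, as in the rewritten `segment_ids` above. A trivial illustration:

    // Before: `let ids = ...collect(); ids` -- after: return the expression.
    fn doubled(xs: &[u32]) -> Vec<u32> {
        xs.iter().map(|x| x * 2).collect()
    }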

#[cfg(test)]
@@ -134,7 +133,7 @@ impl SegmentRegister {

    pub fn start_merge(&mut self, segment_id: &SegmentId) {
        self.segment_states
-            .get_mut(&segment_id)
+            .get_mut(segment_id)
            .expect("Received a merge notification for a segment that is not registered")
            .start_merge();
    }
@@ -147,7 +146,7 @@ impl From<Vec<SegmentMeta>> for SegmentRegister {
    fn from(segment_metas: Vec<SegmentMeta>) -> SegmentRegister {
        let mut segment_states = HashMap::new();
        for segment_meta in segment_metas {
-            let segment_id = segment_meta.segment_id.clone();
+            let segment_id = segment_meta.segment_id;
            let segment_entry = SegmentEntry {
                meta: segment_meta,
                state: SegmentState::Ready,
11 changes: 9 additions & 2 deletions src/indexer/segment_updater.rs
@@ -1,3 +1,5 @@
+#![allow(for_kv_map)]
+
use chan;
use core::Index;
use core::Segment;
@@ -67,8 +69,13 @@ fn end_merge(
}


-/// The segment updater is in charge of
-/// receiving different SegmentUpdate
+/// The segment updater is in charge of processing all of the
+/// `SegmentUpdate`s.
+///
+/// All this processing happens on a single thread
+/// consuming a common queue.
+///
+/// The segment update producers are:
/// - indexing threads are sending new segments
/// - merging threads are sending merge operations
/// - the index writer sends "terminate"
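A hedged sketch of the single-consumer design this doc comment describes, using `std::sync::mpsc` as a stand-in for the `chan` crate imported above; the payload types are hypothetical:

    use std::sync::mpsc;
    use std::thread;

    enum SegmentUpdate {
        AddSegment(String),  // sent by indexing threads
        Merge(Vec<String>),  // sent by merging threads
        Terminate,           // sent by the index writer
    }

    fn spawn_updater() -> mpsc::Sender<SegmentUpdate> {
        let (sender, receiver) = mpsc::channel();
        // A single thread drains the common queue, so updates are applied
        // strictly in the order they were enqueued.
        thread::spawn(move || {
            for update in receiver {
                match update {
                    SegmentUpdate::AddSegment(_id) => { /* register the segment */ }
                    SegmentUpdate::Merge(_ids) => { /* apply the merge result */ }
                    SegmentUpdate::Terminate => break,
                }
            }
        });
        sender
    }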
2 changes: 1 addition & 1 deletion src/indexer/segment_writer.rs
@@ -20,7 +20,7 @@ use indexer::segment_serializer::SegmentSerializer;
use datastruct::stacker::Heap;
use indexer::index_writer::MARGIN_IN_BYTES;

-/// A SegmentWriter is the object in charge of creating segment index from a
+/// A `SegmentWriter` is in charge of creating a segment index from
/// documents.
///
/// It creates the postings lists in anonymous memory.
11 changes: 4 additions & 7 deletions src/postings/freq_handler.rs
@@ -1,13 +1,12 @@
use compression::SIMDBlockDecoder;
-use std::io::Cursor;
use common::VInt;
use common::BinarySerializable;
use compression::CompositeDecoder;
use postings::SegmentPostingsOption;
use compression::NUM_DOCS_PER_BLOCK;


-/// The FreqHandler object is in charge of decompressing
+/// `FreqHandler` is in charge of decompressing
/// frequencies and/or positions.
pub struct FreqHandler {
    freq_decoder: SIMDBlockDecoder,
@@ -19,11 +18,9 @@ pub struct FreqHandler {

fn read_positions(data: &[u8]) -> Vec<u32> {
    let mut composite_reader = CompositeDecoder::new();
-    let mut cursor = Cursor::new(data);
-    // TODO error
-    let uncompressed_len = VInt::deserialize(&mut cursor).unwrap().0 as usize;
-    let offset_data = &data[cursor.position() as usize..];
-    composite_reader.uncompress_unsorted(offset_data, uncompressed_len);
+    let mut readable: &[u8] = data;
+    let uncompressed_len = VInt::deserialize(&mut readable).unwrap().0 as usize;
+    composite_reader.uncompress_unsorted(readable, uncompressed_len);
    composite_reader.into()
}
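The rewrite above works because `&[u8]` itself implements `std::io::Read`: reading through `&mut &[u8]` consumes bytes by advancing the slice, which makes the `Cursor` and its manual offset bookkeeping unnecessary. A self-contained sketch:

    use std::io::Read;

    fn main() {
        let data: &[u8] = &[3, 10, 20, 30];
        let mut readable: &[u8] = data;

        // Consume a one-byte "header", just as `VInt::deserialize` above
        // consumes the length prefix.
        let mut header = [0u8; 1];
        readable.read_exact(&mut header).unwrap();
        assert_eq!(header[0], 3);

        // The slice now starts right after the header: no `cursor.position()`
        // arithmetic is needed to find the payload.
        assert_eq!(readable, &[10, 20, 30][..]);
    }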

4 changes: 2 additions & 2 deletions src/postings/intersection.rs
@@ -2,9 +2,9 @@ use postings::DocSet;
use std::cmp::Ordering;
use DocId;

-// TODO Find a way to specialize IntersectionDocSet
+// TODO Find a way to specialize `IntersectionDocSet`

-/// Creates a DocSet that iterator through the intersection of two `DocSet`s.
+/// Creates a `DocSet` that iterates through the intersection of two `DocSet`s.
pub struct IntersectionDocSet<'a> {
    left: Box<DocSet + 'a>,
    right: Box<DocSet + 'a>,
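For intuition, intersecting two sorted doc-id streams is the classic two-pointer walk; `IntersectionDocSet` performs the equivalent over `DocSet`s. A sketch over plain slices:

    use std::cmp::Ordering;

    fn intersect(left: &[u32], right: &[u32]) -> Vec<u32> {
        let (mut i, mut j) = (0, 0);
        let mut out = Vec::new();
        while i < left.len() && j < right.len() {
            match left[i].cmp(&right[j]) {
                Ordering::Less => i += 1,    // advance the lagging side
                Ordering::Greater => j += 1,
                Ordering::Equal => {         // doc present in both sets
                    out.push(left[i]);
                    i += 1;
                    j += 1;
                }
            }
        }
        out
    }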
4 changes: 2 additions & 2 deletions src/postings/postings.rs
@@ -12,8 +12,8 @@ use common::HasLen;
/// as well as the list of term positions.
///
/// Its main implementation is `SegmentPostings`,
-/// but other implementations mocking SegmentPostings exist,
-/// in order to help when merging segments or for testing.
+/// but other implementations mocking `SegmentPostings` exist,
+/// for merging segments or for testing.
pub trait Postings: DocSet {
    /// Returns the term frequency
    fn term_freq(&self,) -> u32;
2 changes: 1 addition & 1 deletion src/postings/postings_writer.rs
@@ -54,7 +54,7 @@ pub trait PostingsWriter {
}
}

-/// The SpecializedPostingsWriter is just here to remove dynamic
+/// The `SpecializedPostingsWriter` is just here to remove dynamic
/// dispatch to the recorder information.
pub struct SpecializedPostingsWriter<'a, Rec: Recorder + 'static> {
    term_index: HashMap<'a, Rec>,
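"Remove dynamic dispatch" here means monomorphization: making the writer generic over the concrete `Recorder` type lets every `record` call resolve statically instead of through a vtable. A hedged sketch; the `Recorder` API shown is illustrative:

    trait Recorder {
        fn record(&mut self, doc: u32);
    }

    struct FreqRecorder { docs: Vec<u32> } // hypothetical recorder

    impl Recorder for FreqRecorder {
        fn record(&mut self, doc: u32) {
            self.docs.push(doc); // stand-in for real postings recording
        }
    }

    // One copy of this struct is compiled per recorder type, so
    // `self.recorder.record(doc)` is a direct, inlinable call.
    struct SpecializedWriter<Rec: Recorder> {
        recorder: Rec,
    }

    impl<Rec: Recorder> SpecializedWriter<Rec> {
        fn write_doc(&mut self, doc: u32) {
            self.recorder.record(doc);
        }
    }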
4 changes: 2 additions & 2 deletions src/postings/term_info.rs
@@ -6,10 +6,10 @@ use std::io;
/// associated to terms in the `.term` file.
///
/// It consists of
-/// * doc_freq : the number of document in the segment
+/// * `doc_freq` : the number of documents in the segment
///   containing this term. It is also the length of the
///   posting list associated to this term
-/// * postings_offset: an offset in the `.idx` file
+/// * `postings_offset` : an offset in the `.idx` file
///   addressing the start of the posting list associated
///   to this term.
#[derive(Debug,Ord,PartialOrd,Eq,PartialEq,Clone)]
9 changes: 4 additions & 5 deletions src/query/daat_multiterm_scorer.rs
@@ -13,17 +13,16 @@ use Score;
/// Each `HeapItem` represents the head of
/// a segment postings being merged.
///
-/// Heap(doc_id, segment_ordinal)
-/// * doc_id - is the current doc id for the given segment postings
-/// * segment_ordinal - is the ordinal used to identify to which segment postings
+/// * `doc` - is the current doc id for the given segment postings
+/// * `ord` - is the ordinal used to identify which segment postings
///   this heap item belongs to.
#[derive(Eq, PartialEq)]
struct HeapItem {
    doc: DocId,
    ord: u32,
}

-/// HeapItem are ordered by the document
+/// `HeapItem`s are ordered by the document
impl PartialOrd for HeapItem {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
@@ -185,7 +184,7 @@ impl<TPostings: Postings, TAccumulator: MultiTermAccumulator> DocSet for DAATMul
        self.similarity.clear();
        let mut ord_bitset = 0u64;
        match self.queue.peek() {
-            Some(ref heap_item) => {
+            Some(heap_item) => {
                self.doc = heap_item.doc;
                let ord: usize = heap_item.ord as usize;
                let fieldnorm = self.get_field_norm(ord, heap_item.doc);
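Since `std::collections::BinaryHeap` is a max-heap, a k-way merge like this one needs the comparison reversed so the smallest doc id surfaces first. A sketch of that inversion (the real `Ord` implementation is not shown in this diff):

    use std::cmp::Ordering;
    use std::collections::BinaryHeap;

    #[derive(Eq, PartialEq)]
    struct HeapItem {
        doc: u32,
        ord: u32,
    }

    impl Ord for HeapItem {
        fn cmp(&self, other: &Self) -> Ordering {
            other.doc.cmp(&self.doc) // reversed: smallest doc id on top
        }
    }

    impl PartialOrd for HeapItem {
        fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
            Some(self.cmp(other))
        }
    }

    fn main() {
        let mut queue = BinaryHeap::new();
        queue.push(HeapItem { doc: 7, ord: 0 });
        queue.push(HeapItem { doc: 2, ord: 1 });
        assert_eq!(queue.peek().unwrap().doc, 2); // head is the lowest doc id
    }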
4 changes: 2 additions & 2 deletions src/query/tfidf.rs
@@ -4,9 +4,9 @@ use super::Explanation;
use super::Similarity;


-/// TfIdf is the default pertinence score in tantivy.
+/// `TfIdf` is the default relevance score in tantivy.
///
-/// See [TfIdf in the global documentation](https://fulmicoton.gitbooks.io/tantivy-doc/content/tfidf.html)
+/// See [Tf-Idf in the global documentation](https://fulmicoton.gitbooks.io/tantivy-doc/content/tfidf.html)
#[derive(Clone)]
pub struct TfIdf {
    coords: Vec<f32>,
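For reference, one common form of the weighting scheme the name points to, as a sketch; this is an assumption, and tantivy's exact formula is in the linked documentation:

    // score(t, d) = tf(t, d) * idf(t), with idf smoothed by 1 to avoid
    // a zero denominator for unseen terms.
    fn tf_idf(term_freq: f32, doc_freq: f32, num_docs: f32) -> f32 {
        let idf = 1.0 + (num_docs / (doc_freq + 1.0)).ln();
        term_freq * idf
    }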
2 changes: 1 addition & 1 deletion src/schema/named_field_document.rs
@@ -8,7 +8,7 @@ use rustc_serialize::Encoder;
/// Internal representation of a document used for JSON
/// serialization.
///
-/// A NamedFieldDocument is a simple representation of a document
+/// A `NamedFieldDocument` is a simple representation of a document
/// as a `BTreeMap<String, Vec<Value>>`.
///
pub struct NamedFieldDocument(pub BTreeMap<String, Vec<Value>>);
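A small usage sketch of that representation, with `String` standing in for `Value` for brevity:

    use std::collections::BTreeMap;

    fn main() {
        let mut doc: BTreeMap<String, Vec<String>> = BTreeMap::new();
        doc.insert("title".into(), vec!["Of Mice and Men".into()]);
        doc.insert("body".into(), vec!["A few miles south of Soledad".into()]);
        // Serialized to JSON, a `BTreeMap` yields deterministically ordered keys:
        // {"body": ["A few miles south of Soledad"], "title": ["Of Mice and Men"]}
    }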
