Commit

[src] Fix && and || uses where & and | intended, and other weird errors (#3087)
kkm000 authored and danpovey committed Mar 11, 2019
1 parent bcfe3f8 commit 1209c07
Showing 15 changed files with 63 additions and 63 deletions.
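The headline change is replacing logical operators with bitwise ones where a flag bit in a property mask was meant (see the nnet-analyze.cc and nnet-simple-component.cc hunks below). A minimal sketch of this bug class, using made-up flag values; only the kUpdatableComponent name mirrors Kaldi, the rest is illustrative:

#include <cstdint>
#include <iostream>

enum ComponentProperties : int32_t {
  kUpdatableComponent  = 0x1,
  kBackpropNeedsInput  = 0x2,
  kBackpropNeedsOutput = 0x4
};

int main() {
  int32_t props = kBackpropNeedsInput;  // kUpdatableComponent is NOT set

  // Buggy: '&&' treats both operands as booleans, so any nonzero props
  // makes the test true regardless of which bits are actually set.
  bool wrong = props && kUpdatableComponent;         // true

  // Fixed: '&' masks out the one bit we care about.
  bool right = (props & kUpdatableComponent) != 0;   // false

  std::cout << wrong << " " << right << "\n";        // prints "1 0"
  return 0;
}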
6 changes: 3 additions & 3 deletions src/bin/compute-wer-bootci.cc
@@ -162,7 +162,7 @@ int main(int argc, char *argv[]) {

try {
const char *usage =
"Compute a bootstrapping of WER to extract the 95\% confidence interval.\n"
"Compute a bootstrapping of WER to extract the 95% confidence interval.\n"
"Take a reference and a transcription file, in integer or text format,\n"
"and outputs overall WER statistics to standard output along with its\n"
"confidence interval using the bootstrap method of Bisani and Ney.\n"
@@ -234,12 +234,12 @@ int main(int argc, char *argv[]) {
std::cout.precision(2);
std::cerr.precision(2);
std::cout << "Set1: %WER " << std::fixed << 100*mean_wer <<
" 95\% Conf Interval [ " << 100*mean_wer-100*interval <<
" 95% Conf Interval [ " << 100*mean_wer-100*interval <<
", " << 100*mean_wer+100*interval << " ]" << '\n';

if(!hyp2_rspecifier.empty()) {
std::cout << "Set2: %WER " << std::fixed << 100*mean_wer2 <<
" 95\% Conf Interval [ " << 100*mean_wer2-100*interval2 <<
" 95% Conf Interval [ " << 100*mean_wer2-100*interval2 <<
", " << 100*mean_wer2+100*interval2 << " ]" << '\n';

std::cout << "Probability of Set2 improving Set1: " << std::fixed <<
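The \% edits in this file (and the similar ones in the nnet3 and rnnlm trainers below) remove an invalid escape sequence: % needs no backslash in a C++ string literal, and "\%" draws an unknown-escape-sequence warning from GCC/Clang while the backslash is simply dropped. A small standalone check, not taken from the Kaldi sources:

#include <cstdio>

int main() {
  // "95\%" compiles, but the compiler warns and the output is unchanged;
  // the commit just drops the backslash.
  std::puts("Set1: %WER 12.34 95% Conf Interval [ 11.80, 12.88 ]");
  std::printf("%d%% done\n", 95);  // printf format strings do need %% for a literal %
  return 0;
}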
4 changes: 2 additions & 2 deletions src/fstext/determinize-lattice-inl.h
@@ -510,7 +510,7 @@ template<class Weight, class IntType> class LatticeDeterminizer {
if (!CheckMemoryUsage()) return false;
}
return (determinized_ = true);
} catch (std::bad_alloc) {
} catch (const std::bad_alloc &) {
int32 repo_size = repository_.MemSize(),
arcs_size = num_arcs_ * sizeof(TempArc),
elems_size = num_elems_ * sizeof(Element),
@@ -520,7 +520,7 @@ template<class Weight, class IntType> class LatticeDeterminizer {
<< " (repo,arcs,elems) = ("
<< repo_size << "," << arcs_size << "," << elems_size << ")";
return (determinized_ = false);
} catch (std::runtime_error) {
} catch (const std::runtime_error &) {
KALDI_WARN << "Caught exception doing lattice determinization";
return (determinized_ = false);
}
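Catching std::bad_alloc and std::runtime_error by value copies the exception object (which, for bad_alloc, may itself require memory) and is flagged by GCC's -Wcatch-value; the hunks above switch to catching by const reference. A minimal illustration of the pattern, independent of the lattice code:

#include <iostream>
#include <new>
#include <stdexcept>

void MayThrow() { throw std::runtime_error("determinization failed"); }

int main() {
  try {
    MayThrow();
  } catch (const std::bad_alloc &) {        // by const reference: no copy
    std::cerr << "out of memory\n";
  } catch (const std::runtime_error &e) {   // same pattern, message preserved
    std::cerr << "caught: " << e.what() << "\n";
  }
  return 0;
}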
3 changes: 1 addition & 2 deletions src/fstext/lattice-weight.h
@@ -179,8 +179,7 @@ class LatticeWeightTpl {
} else if (s == "-Infinity") {
f = -numeric_limits<T>::infinity();
} else if (s == "BadNumber") {
f = numeric_limits<T>::infinity();
f -= f; // get NaN
f = numeric_limits<T>::quiet_NaN();
} else {
char *p;
f = strtod(s.c_str(), &p);
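The lattice-weight.h hunk replaces the old trick of subtracting infinity from itself to manufacture a NaN with an explicit std::numeric_limits<T>::quiet_NaN(). Both yield NaN under IEEE 754, but the new form states the intent directly instead of relying on floating-point arithmetic the compiler may warn about. Sketch with T = float, for illustration only:

#include <cmath>
#include <iostream>
#include <limits>

int main() {
  float f_old = std::numeric_limits<float>::infinity();
  f_old -= f_old;                                         // old: inf - inf == NaN

  float f_new = std::numeric_limits<float>::quiet_NaN();  // new: ask for NaN directly

  std::cout << std::isnan(f_old) << " " << std::isnan(f_new) << "\n";  // prints "1 1"
  return 0;
}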
12 changes: 6 additions & 6 deletions src/gmm/mle-diag-gmm.h
@@ -85,7 +85,7 @@ struct MapDiagGmmOptions {
/// Tau value for the weights-- this tau value is applied
/// per state, not per Gaussian.
BaseFloat weight_tau;

MapDiagGmmOptions(): mean_tau(10.0),
variance_tau(50.0),
weight_tau(10.0) { }
@@ -150,8 +150,8 @@ class AccumDiagGmm {
const MatrixBase<BaseFloat> &data,
const VectorBase<BaseFloat> &frame_weights,
int32 num_threads);


/// Increment the stats for this component by the specified amount
/// (not all parts may be taken, depending on flags).
/// Note: x_stats and x2_stats are assumed to already be multiplied by "occ"
@@ -162,7 +162,7 @@ class AccumDiagGmm {

/// Increment with stats from this other accumulator (times scale)
void Add(double scale, const AccumDiagGmm &acc);

/// Smooths the accumulated counts by adding 'tau' extra frames. An example
/// use for this is I-smoothing for MMIE. Calls SmoothWithAccum.
void SmoothStats(BaseFloat tau);
@@ -179,13 +179,13 @@
void SmoothWithModel(BaseFloat tau, const DiagGmm &src_gmm);

// Const accessors
const GmmFlagsType Flags() const { return flags_; }
GmmFlagsType Flags() const { return flags_; }
const VectorBase<double> &occupancy() const { return occupancy_; }
const MatrixBase<double> &mean_accumulator() const { return mean_accumulator_; }
const MatrixBase<double> &variance_accumulator() const { return variance_accumulator_; }

// used in testing.
void AssertEqual(const AccumDiagGmm &other);
void AssertEqual(const AccumDiagGmm &other);
private:
int32 dim_;
int32 num_comp_;
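The Flags() change in this file (and in mle-full-gmm.h below) drops a const that qualified a by-value return type: const GmmFlagsType Flags() const. A top-level const on a returned scalar has no effect and only triggers -Wignored-qualifiers; the trailing const, which marks the member function as non-mutating, is kept. Illustrative snippet, with GmmFlagsType assumed to be an integer typedef:

#include <cstdint>

using GmmFlagsType = uint16_t;  // assumption: some integer flag type

class Accum {
 public:
  // Before: const GmmFlagsType Flags() const { ... }  -- the leading const is meaningless
  GmmFlagsType Flags() const { return flags_; }          // after: same behaviour, no warning
 private:
  GmmFlagsType flags_ = 0;
};

int main() { return Accum().Flags(); }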
8 changes: 4 additions & 4 deletions src/gmm/mle-full-gmm.h
@@ -1,7 +1,7 @@
// gmm/mle-full-gmm.h

// Copyright 2009-2011 Jan Silovsky; Saarland University;
// Microsoft Corporation;
// Microsoft Corporation;
// Univ. Erlangen Nuremberg, Korbinian Riedhammer

// See ../../COPYING for clarification regarding multiple authors
@@ -91,7 +91,7 @@ class AccumFullGmm {
void Resize(int32 num_components, int32 dim, GmmFlagsType flags);
/// Calls Resize with arguments based on gmm_ptr_
void Resize(const FullGmm &gmm, GmmFlagsType flags);

void ResizeVarAccumulator(int32 num_comp, int32 dim);
/// Returns the number of mixture components
int32 NumGauss() const { return num_comp_; }
@@ -122,8 +122,8 @@ class AccumFullGmm {
const VectorBase<BaseFloat> &data,
BaseFloat frame_posterior);

/// Accessors
const GmmFlagsType Flags() const { return flags_; }
/// Accessors
GmmFlagsType Flags() const { return flags_; }
const Vector<double> &occupancy() const { return occupancy_; }
const Matrix<double> &mean_accumulator() const { return mean_accumulator_; }
const std::vector<SpMatrix<double> > &covariance_accumulator() const { return covariance_accumulator_; }
2 changes: 1 addition & 1 deletion src/nnet2/combine-nnet-fast.cc
@@ -204,7 +204,7 @@ void FastNnetCombiner::CombineNnets(const Vector<double> &scale_params,
int32 num_nnets = nnets.size();
KALDI_ASSERT(num_nnets >= 1);
int32 num_uc = nnets[0].NumUpdatableComponents();
KALDI_ASSERT(num_nnets * nnets[0].NumUpdatableComponents());
KALDI_ASSERT(nnets[0].NumUpdatableComponents() >= 1);


*dest = nnets[0];
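The assertion changed above (and its twin in combine-nnet.cc just below) read KALDI_ASSERT(num_nnets * nnets[0].NumUpdatableComponents()), which only checks that the product is nonzero; since num_nnets >= 1 is already asserted on the previous line, what was meant is that the first network has at least one updatable component, which the new form states directly. A plain-assert sketch of the difference, with hypothetical values rather than Kaldi's macro:

#include <cassert>

int main() {
  int num_nnets = 3;
  int num_updatable = 1;

  assert(num_nnets * num_updatable);  // old: passes for any nonzero product
  assert(num_updatable >= 1);         // new: states the actual requirement
  return 0;
}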
48 changes: 24 additions & 24 deletions src/nnet2/combine-nnet.cc
@@ -31,9 +31,9 @@ static void CombineNnets(const Vector<BaseFloat> &scale_params,
int32 num_nnets = nnets.size();
KALDI_ASSERT(num_nnets >= 1);
int32 num_uc = nnets[0].NumUpdatableComponents();
KALDI_ASSERT(num_nnets * nnets[0].NumUpdatableComponents());
KALDI_ASSERT(nnets[0].NumUpdatableComponents() >= 1);


*dest = nnets[0];
SubVector<BaseFloat> scale_params0(scale_params, 0, num_uc);
dest->ScaleComponents(scale_params0);
@@ -59,7 +59,7 @@ static int32 GetInitialModel(
for (int32 n = 0; n < num_nnets; n++) {
BaseFloat objf = ComputeNnetObjf(nnets[n], validation_set,
minibatch_size) / tot_frames;

if (n == 0 || objf > best_objf) {
best_objf = objf;
best_n = n;
@@ -98,7 +98,7 @@ static void GetInitialScaleParams(
num_nnets = static_cast<int32>(nnets.size());
if (initial_model < 0 || initial_model > num_nnets)
initial_model = GetInitialModel(validation_set, nnets);

KALDI_ASSERT(initial_model >= 0 && initial_model <= num_nnets);
int32 num_uc = nnets[0].NumUpdatableComponents();

@@ -107,7 +107,7 @@
KALDI_LOG << "Initializing with neural net with index " << initial_model;
// At this point we're using the best of the individual neural nets.
scale_params->Set(0.0);

// Set the block of parameters corresponding to the "best" of the
// source neural nets to
SubVector<double> best_block(*scale_params, num_uc * initial_model, num_uc);
@@ -129,14 +129,14 @@ static double ComputeObjfAndGradient(
Vector<double> *gradient) {

Vector<BaseFloat> scale_params_float(scale_params);

Nnet nnet_combined;
CombineNnets(scale_params_float, nnets, &nnet_combined);

Nnet nnet_gradient(nnet_combined);
bool is_gradient = true;
nnet_gradient.SetZero(is_gradient);

// note: "ans" is normalized by the total weight of validation frames.
int32 batch_size = 1024;
double ans = ComputeNnetGradient(nnet_combined,
@@ -146,7 +146,7 @@

double tot_frames = validation_set.size();
if (gradient != NULL) {
int32 i = 0; // index into scale_params.
int32 i = 0; // index into scale_params.
for (int32 n = 0; n < static_cast<int32>(nnets.size()); n++) {
for (int32 j = 0; j < nnet_combined.NumComponents(); j++) {
const UpdatableComponent *uc =
@@ -155,7 +155,7 @@
dynamic_cast<const UpdatableComponent*>(&(nnet_gradient.GetComponent(j)));
if (uc != NULL) {
double dotprod = uc->DotProduct(*uc_gradient) / tot_frames;
(*gradient)(i) = dotprod;
(*gradient)(i) = dotprod;
i++;
}
}
@@ -165,14 +165,14 @@

if (debug) {
KALDI_LOG << "Double-checking gradient computation";

Vector<BaseFloat> manual_gradient(scale_params.Dim());
for (int32 i = 0; i < scale_params.Dim(); i++) {
double delta = 1.0e-04, fg = fabs((*gradient)(i));
if (fg < 1.0e-07) fg = 1.0e-07;
if (fg * delta < 1.0e-05)
delta = 1.0e-05 / fg;

Vector<double> scale_params_temp(scale_params);
scale_params_temp(i) += delta;
double new_ans = ComputeObjfAndGradient(validation_set,
@@ -185,10 +185,10 @@
KALDI_LOG << "Manually computed gradient is " << manual_gradient;
KALDI_LOG << "Gradient we computed is " << *gradient;
}

return ans;
}


void CombineNnets(const NnetCombineConfig &combine_config,
const std::vector<NnetExample> &validation_set,
@@ -205,19 +205,19 @@ void CombineNnets(const NnetCombineConfig &combine_config,
int32 dim = scale_params.Dim();
KALDI_ASSERT(dim > 0);
Vector<double> gradient(dim);

double objf, initial_objf;

LbfgsOptions lbfgs_options;
lbfgs_options.minimize = false; // We're maximizing.
lbfgs_options.m = dim; // Store the same number of vectors as the dimension
// itself, so this is BFGS.
lbfgs_options.first_step_impr = combine_config.initial_impr;

OptimizeLbfgs<double> lbfgs(scale_params,
lbfgs_options);
for (int32 i = 0; i < combine_config.num_bfgs_iters; i++) {

for (int32 i = 0; i < combine_config.num_bfgs_iters; i++) {
scale_params.CopyFromVec(lbfgs.GetProposedValue());
objf = ComputeObjfAndGradient(validation_set,
scale_params,
@@ -227,9 +227,9 @@ void CombineNnets(const NnetCombineConfig &combine_config,

KALDI_VLOG(2) << "Iteration " << i << " scale-params = " << scale_params
<< ", objf = " << objf << ", gradient = " << gradient;

if (i == 0) initial_objf = objf;

lbfgs.DoStep(objf, gradient);
}

@@ -244,10 +244,10 @@
nnets[0].NumUpdatableComponents());
scale_params_mat.CopyRowsFromVec(scale_params_float);
KALDI_LOG << "Final scale factors are " << scale_params_mat;

CombineNnets(scale_params_float, nnets, nnet_out);
}


} // namespace nnet2
} // namespace kaldi
2 changes: 1 addition & 1 deletion src/nnet3/nnet-analyze.cc
@@ -880,7 +880,7 @@ void ComputationChecker::CheckComputationIndexes() const {
KALDI_ERR << "Backprop input needed but not supplied.";
if ((properties & kBackpropNeedsOutput) && c.arg4 == 0)
KALDI_ERR << "Backprop output needed but not supplied.";
if (c.arg6 == 0 && !(properties && kUpdatableComponent)) {
if (c.arg6 == 0 && !(properties & kUpdatableComponent)) {
// note: we could perhaps make this just a warning,
// or optimize it away somehow.
KALDI_ERR << "Backprop is done but has no effect.";
4 changes: 2 additions & 2 deletions src/nnet3/nnet-chain-training.cc
@@ -298,7 +298,7 @@ void NnetChainTrainer::PrintMaxChangeStats() const {
(num_minibatches_processed_ *
(nnet_config.backstitch_training_scale == 0.0 ? 1.0 :
1.0 + 1.0 / nnet_config.backstitch_training_interval))
<< " \% of the time.";
<< " % of the time.";
i++;
}
}
@@ -308,7 +308,7 @@ void NnetChainTrainer::PrintMaxChangeStats() const {
(num_minibatches_processed_ *
(nnet_config.backstitch_training_scale == 0.0 ? 1.0 :
1.0 + 1.0 / nnet_config.backstitch_training_interval))
<< " \% of the time.";
<< " % of the time.";
}

NnetChainTrainer::~NnetChainTrainer() {
8 changes: 4 additions & 4 deletions src/nnet3/nnet-simple-component.cc
@@ -4068,13 +4068,13 @@ bool CompositeComponent::IsUpdatable() const {
int32 CompositeComponent::InputDim() const {
KALDI_ASSERT(!components_.empty());
return components_.front()->InputDim();
};
}

// virtual
int32 CompositeComponent::OutputDim() const {
KALDI_ASSERT(!components_.empty());
return components_.back()->OutputDim();
};
}

// virtual
int32 CompositeComponent::Properties() const {
@@ -4096,7 +4096,7 @@ int32 CompositeComponent::Properties() const {
if (last_component_properties & kStoresStats)
ans |= kBackpropNeedsOutput;
return ans;
};
}


MatrixStrideType CompositeComponent::GetStrideType(int32 i) const {
@@ -4319,7 +4319,7 @@ void CompositeComponent::Backprop(const std::string &debug_info,
// optimization; other propagates might also be skippable.
int32 properties = components_[num_components - 2]->Properties(),
next_properties = components_[num_components - 1]->Properties();
if (!(properties & (kBackpropNeedsOutput || kUsesMemo)) &&
if (!(properties & (kBackpropNeedsOutput | kUsesMemo)) &&
!(next_properties & kBackpropNeedsInput)) {
num_components_to_propagate--;
}
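In CompositeComponent::Backprop the old expression properties & (kBackpropNeedsOutput || kUsesMemo) first collapsed the two flags to the boolean true (i.e. 1), so it tested bit 0 rather than either flag; with | the two bits form a real mask. The same file also drops stray semicolons after function bodies at namespace scope (}; -> }), which -Wpedantic flags. A sketch of the mask fix with made-up flag values:

#include <cstdint>
#include <iostream>

enum : int32_t {
  kBackpropNeedsInput  = 0x1,
  kBackpropNeedsOutput = 0x2,
  kUsesMemo            = 0x4
};

int main() {
  int32_t properties = kUsesMemo;

  // Buggy: (kBackpropNeedsOutput || kUsesMemo) evaluates to 1, so this
  // checks the unrelated 0x1 bit and misses kUsesMemo entirely.
  bool wrong = (properties & (kBackpropNeedsOutput || kUsesMemo)) != 0;  // false

  // Fixed: build the mask with '|', then test it with '&'.
  bool right = (properties & (kBackpropNeedsOutput | kUsesMemo)) != 0;   // true

  std::cout << wrong << " " << right << "\n";  // prints "0 1"
  return 0;
}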
4 changes: 2 additions & 2 deletions src/nnet3/nnet-training.cc
@@ -257,7 +257,7 @@ void NnetTrainer::PrintMaxChangeStats() const {
(num_minibatches_processed_ *
(config_.backstitch_training_scale == 0.0 ? 1.0 :
1.0 + 1.0 / config_.backstitch_training_interval))
<< " \% of the time.";
<< " % of the time.";
i++;
}
}
@@ -267,7 +267,7 @@ void NnetTrainer::PrintMaxChangeStats() const {
(num_minibatches_processed_ *
(config_.backstitch_training_scale == 0.0 ? 1.0 :
1.0 + 1.0 / config_.backstitch_training_interval))
<< " \% of the time.";
<< " % of the time.";
}

void ObjectiveFunctionInfo::UpdateStats(
4 changes: 2 additions & 2 deletions src/rnnlm/rnnlm-core-training.cc
@@ -302,7 +302,7 @@ void RnnlmCoreTrainer::PrintMaxChangeStats() const {
<< ", per-component max-change was enforced "
<< ((100.0 * num_max_change_per_component_applied_[i]) /
num_minibatches_processed_)
<< "\% of the time.";
<< "% of the time.";
i++;
}
}
@@ -312,7 +312,7 @@ void RnnlmCoreTrainer::PrintMaxChangeStats() const {
(num_minibatches_processed_ *
(config_.backstitch_training_scale == 0.0 ? 1.0 :
1.0 + 1.0 / config_.backstitch_training_interval))
<< "\% of the time.";
<< "% of the time.";
}

void RnnlmCoreTrainer::ProcessOutput(
