Skip to content

Commit

Permalink
add explicit type conversions to address MSVC C4267,C4244 warnings
Browse files Browse the repository at this point in the history
  • Loading branch information
micooke committed Nov 21, 2019
1 parent 8f00312 commit 7770a0e
Show file tree
Hide file tree
Showing 8 changed files with 20 additions and 20 deletions.
8 changes: 4 additions & 4 deletions bindings/eml_audio.cpp
Expand Up @@ -22,7 +22,7 @@ rfft_py(py::array_t<float, py::array::c_style | py::array::forcecast> in) {
throw std::runtime_error("SFT input must have dimensions 1"); throw std::runtime_error("SFT input must have dimensions 1");
} }


const int n_fft = in.shape(0); const int n_fft = (int)in.shape(0);


// Precompute FFT table // Precompute FFT table
const int n_fft_table = n_fft/2; const int n_fft_table = n_fft/2;
Expand Down Expand Up @@ -72,7 +72,7 @@ melfilter_py(py::array_t<float, py::array::c_style | py::array::forcecast> in,
const EmlAudioMel params = { n_mels, fmin, fmax, n_fft, samplerate }; const EmlAudioMel params = { n_mels, fmin, fmax, n_fft, samplerate };


// Copy input to avoid modifying // Copy input to avoid modifying
const int length = in.shape(0); const int length = (int)in.shape(0);
EmlVector inv = {(float *)in.data(), length}; EmlVector inv = {(float *)in.data(), length};
// Prepare output // Prepare output
auto ret = py::array_t<float>(params.n_mels); auto ret = py::array_t<float>(params.n_mels);
Expand Down Expand Up @@ -110,7 +110,7 @@ melspectrogram_py(py::array_t<float, py::array::c_style | py::array::forcecast>
const EmlAudioMel params = { n_mels, fmin, fmax, n_fft, samplerate }; const EmlAudioMel params = { n_mels, fmin, fmax, n_fft, samplerate };


// Copy input to avoid modifying // Copy input to avoid modifying
const int length = in.shape(0); const int length = (int)in.shape(0);
std::vector<float> inout(length); std::vector<float> inout(length);
std::vector<float> temp(length); std::vector<float> temp(length);
EmlVector inoutv = { (float *)inout.data(), length }; EmlVector inoutv = { (float *)inout.data(), length };
Expand Down Expand Up @@ -146,7 +146,7 @@ sparse_filterbank_py(py::array_t<float, py::array::c_style | py::array::forcecas
EMLPY_PRECONDITION(coeffs.ndim() == 1, "Coefficients must have dim 1"); EMLPY_PRECONDITION(coeffs.ndim() == 1, "Coefficients must have dim 1");
EMLPY_PRECONDITION(starts.shape(0) == stops.shape(0), "Number of starts must equals stops"); EMLPY_PRECONDITION(starts.shape(0) == stops.shape(0), "Number of starts must equals stops");


const int output_length = starts.shape(0); const int output_length = (int)starts.shape(0);
auto ret = py::array_t<float>(output_length); auto ret = py::array_t<float>(output_length);


EMLPY_CHECK_ERROR(eml_sparse_filterbank(in.data(), EMLPY_CHECK_ERROR(eml_sparse_filterbank(in.data(),
Expand Down
2 changes: 1 addition & 1 deletion bindings/eml_bayes.cpp
Expand Up @@ -50,7 +50,7 @@ class Classifier {
} }


const int64_t n_samples = in.shape()[0]; const int64_t n_samples = in.shape()[0];
const int32_t n_features = in.shape()[1]; const int32_t n_features = (int32_t)in.shape()[1];


auto classes = py::array_t<int32_t>(n_samples); auto classes = py::array_t<int32_t>(n_samples);
auto r = classes.mutable_unchecked<1>(); auto r = classes.mutable_unchecked<1>();
Expand Down
10 changes: 5 additions & 5 deletions bindings/eml_net.cpp
Expand Up @@ -52,7 +52,7 @@ class EmlNetClassifier {
throw std::runtime_error("Must have at least 3 layers total (1 hidden)"); throw std::runtime_error("Must have at least 3 layers total (1 hidden)");
} }


model.n_layers = weights.size(); model.n_layers = (int32_t)weights.size();
layers = std::vector<EmlNetLayer>(model.n_layers); layers = std::vector<EmlNetLayer>(model.n_layers);
model.layers = layers.data(); model.layers = layers.data();


Expand All @@ -62,8 +62,8 @@ class EmlNetClassifier {
throw std::runtime_error("Unsupported activation function: " + activations[i]); throw std::runtime_error("Unsupported activation function: " + activations[i]);
} }


layers[i].n_inputs = weights[i].shape(0); layers[i].n_inputs = (int32_t)weights[i].shape(0);
layers[i].n_outputs = weights[i].shape(1); layers[i].n_outputs = (int32_t)weights[i].shape(1);
layers[i].activation = (EmlNetActivationFunction)a; layers[i].activation = (EmlNetActivationFunction)a;
layers[i].weights = (float *)weights[i].data(); layers[i].weights = (float *)weights[i].data();
layers[i].biases = (float *)biases[i].data(); layers[i].biases = (float *)biases[i].data();
Expand All @@ -89,7 +89,7 @@ class EmlNetClassifier {
} }


const int64_t n_samples = in.shape()[0]; const int64_t n_samples = in.shape()[0];
const int32_t n_features = in.shape()[1]; const int32_t n_features = (int32_t)in.shape()[1];


auto classes = py::array_t<int32_t>(n_samples); auto classes = py::array_t<int32_t>(n_samples);
//auto s = in.unchecked(); //auto s = in.unchecked();
Expand All @@ -114,7 +114,7 @@ class EmlNetClassifier {
} }


const int64_t n_samples = in.shape()[0]; const int64_t n_samples = in.shape()[0];
const int32_t n_features = in.shape()[1]; const int32_t n_features = (int32_t)in.shape()[1];
const int32_t n_outputs = eml_net_outputs_proba(&model); const int32_t n_outputs = eml_net_outputs_proba(&model);


const auto out_shape = std::vector<int64_t>{n_samples, n_outputs}; const auto out_shape = std::vector<int64_t>{n_samples, n_outputs};
Expand Down
2 changes: 1 addition & 1 deletion bindings/eml_signal.cpp
Expand Up @@ -23,7 +23,7 @@ iirfilter_py(py::array_t<float, py::array::c_style | py::array::forcecast> sos,
EMLPY_PRECONDITION(sos.ndim() == 2, "SOS coefficients must have dimensions 2"); EMLPY_PRECONDITION(sos.ndim() == 2, "SOS coefficients must have dimensions 2");
EMLPY_PRECONDITION(data.ndim() == 1, "data must have dimensions 1"); EMLPY_PRECONDITION(data.ndim() == 1, "data must have dimensions 1");


const int n_stages = sos.shape(0); const int n_stages = (int)sos.shape(0);


// Setup cascade // Setup cascade
std::vector<float> coefficients(sos.data(), sos.data() + 6*n_stages); std::vector<float> coefficients(sos.data(), sos.data() + 6*n_stages);
Expand Down
8 changes: 4 additions & 4 deletions emlearn/eml_audio.h
Expand Up @@ -63,7 +63,7 @@ eml_audio_power_spectrogram(EmlVector rfft, EmlVector out, int n_fft) {


const float scale = 1.0f/n_fft; const float scale = 1.0f/n_fft;
for (int i=0; i<spec_length; i++) { for (int i=0; i<spec_length; i++) {
const float a = fabs(rfft.data[i]); const float a = (float)fabs(rfft.data[i]);
out.data[i] = scale * powf(a, 2); out.data[i] = scale * powf(a, 2);
} }
return EmlOk; return EmlOk;
Expand All @@ -73,11 +73,11 @@ eml_audio_power_spectrogram(EmlVector rfft, EmlVector out, int n_fft) {
// Convert a frequency in Hz to the mel scale (HTK formula).
// NOTE: librosa must be called with htk=True to match this.
// Uses single-precision math (log10f, float literals) throughout, which
// avoids the float<->double promotion that triggered MSVC C4244 and
// removes the need for an explicit narrowing cast.
float
eml_audio_mels_from_hz(float hz) {
    return 2595.0f * log10f(1.0f + (hz / 700.0f));
}
// Convert a value on the mel scale (HTK formula) back to frequency in Hz.
// Inverse of eml_audio_mels_from_hz.
// Uses consistent single-precision literals: the committed version passed a
// double literal (10.0) to powf and cast sub-expressions piecemeal to silence
// MSVC C4244; all-float arithmetic needs no casts and no promotions.
float
eml_audio_mels_to_hz(float mels) {
    return 700.0f * (powf(10.0f, mels / 2595.0f) - 1.0f);
}




Expand All @@ -103,7 +103,7 @@ eml_audio_mel_center(EmlAudioMel params, int n) {
} }
// Map a frequency in Hz to its FFT bin index for the given mel/FFT settings.
// Returns the zero-based bin computed as floor((n_fft+1) * hz / samplerate).
int
eml_audio_mel_bin(EmlAudioMel params, float hz) {
    const float position = (params.n_fft + 1) * (hz / params.samplerate);
    return (int)floorf(position);
}
static int static int
Expand Down
2 changes: 1 addition & 1 deletion emlearn/eml_benchmark.h
Expand Up @@ -33,7 +33,7 @@ int64_t eml_benchmark_micros(void)
QueryPerformanceCounter(&t); QueryPerformanceCounter(&t);
QueryPerformanceFrequency(&f); QueryPerformanceFrequency(&f);
double sec = (double)t.QuadPart/(double)f.QuadPart; double sec = (double)t.QuadPart/(double)f.QuadPart;
return sec * 1000000LL; return (int64_t)(sec * 1000000LL);
} }
#endif #endif


Expand Down
6 changes: 3 additions & 3 deletions emlearn/eml_fft.h
Expand Up @@ -55,9 +55,9 @@ eml_fft_fill(EmlFFT table, size_t n) {
EML_PRECONDITION((size_t)table.length == n/2, EmlSizeMismatch); EML_PRECONDITION((size_t)table.length == n/2, EmlSizeMismatch);


// Trignometric tables // Trignometric tables
for (size_t i = 0; i < n / 2; i++) { for (size_t i = 0; i < (size_t)(n / 2); i++) {
table.cos[i] = cos(2 * M_PI * i / n); table.cos[i] = (float)cos(2 * M_PI * i / n);
table.sin[i] = sin(2 * M_PI * i / n); table.sin[i] = (float)sin(2 * M_PI * i / n);
} }
return EmlOk; return EmlOk;
} }
Expand Down
2 changes: 1 addition & 1 deletion emlearn/eml_vector.h
Expand Up @@ -116,7 +116,7 @@ eml_vector_hann_apply(EmlVector out) {


const long len = out.length; const long len = out.length;
for (int i=0; i<len; i++) { for (int i=0; i<len; i++) {
float m = 0.5 * (1 - cos(2*M_PI*i/(len-1))); float m = (float)(0.5 * (1 - cos(2*M_PI*i/(len-1))));
out.data[i] = m * out.data[i]; out.data[i] = m * out.data[i];
} }
return EmlOk; return EmlOk;
Expand Down

0 comments on commit 7770a0e

Please sign in to comment.