From 0a088f550f3db1ea1bfda170bbea3b3160fe99fb Mon Sep 17 00:00:00 2001
From: Felipe Hofmann
Date: Thu, 4 Nov 2021 15:01:15 -0700
Subject: [PATCH] Make double quotes into single

---
 sdmetrics/goal.py                                          | 6 +++---
 sdmetrics/single_table/bayesian_network.py                 | 2 +-
 sdmetrics/single_table/detection/sklearn.py                | 6 +++---
 sdmetrics/single_table/privacy/base.py                     | 8 ++++----
 setup.cfg                                                  | 1 +
 setup.py                                                   | 1 +
 .../integration/single_column/statistical/test_cstest.py  | 8 ++++----
 .../integration/single_column/statistical/test_kstest.py  | 8 ++++----
 8 files changed, 21 insertions(+), 19 deletions(-)

diff --git a/sdmetrics/goal.py b/sdmetrics/goal.py
index f91b743e..96a903c7 100644
--- a/sdmetrics/goal.py
+++ b/sdmetrics/goal.py
@@ -10,6 +10,6 @@ class Goal(Enum):
     minimized, or maximized.
     """
 
-    IGNORE = "ignore"
-    MAXIMIZE = "maximize"
-    MINIMIZE = "minimize"
+    IGNORE = 'ignore'
+    MAXIMIZE = 'maximize'
+    MINIMIZE = 'minimize'
diff --git a/sdmetrics/single_table/bayesian_network.py b/sdmetrics/single_table/bayesian_network.py
index edf4be13..04b0d69b 100644
--- a/sdmetrics/single_table/bayesian_network.py
+++ b/sdmetrics/single_table/bayesian_network.py
@@ -40,7 +40,7 @@ def _likelihoods(cls, real_data, synthetic_data, metadata=None, structure=None):
         try:
             from pomegranate import BayesianNetwork
         except ImportError:
-            raise ImportError("Please install pomegranate with `pip install pomegranate`")
+            raise ImportError('Please install pomegranate with `pip install pomegranate`')
 
         metadata = cls._validate_inputs(real_data, synthetic_data, metadata)
         structure = metadata.get('structure', structure)
diff --git a/sdmetrics/single_table/detection/sklearn.py b/sdmetrics/single_table/detection/sklearn.py
index 8f32ece6..a9d4e709 100644
--- a/sdmetrics/single_table/detection/sklearn.py
+++ b/sdmetrics/single_table/detection/sklearn.py
@@ -46,11 +46,11 @@ class LogisticDetection(ScikitLearnClassifierDetectionMetric):
     The output of the metric is one minus the average ROC AUC score obtained.
     """
 
-    name = "LogisticRegression Detection"
+    name = 'LogisticRegression Detection'
 
     @staticmethod
     def _get_classifier():
-        return LogisticRegression(solver="lbfgs")
+        return LogisticRegression(solver='lbfgs')
 
 
 class SVCDetection(ScikitLearnClassifierDetectionMetric):
@@ -62,7 +62,7 @@ class SVCDetection(ScikitLearnClassifierDetectionMetric):
     The output of the metric is one minus the average ROC AUC score obtained.
     """
 
-    name = "SVC Detection"
+    name = 'SVC Detection'
 
     @staticmethod
     def _get_classifier():
diff --git a/sdmetrics/single_table/privacy/base.py b/sdmetrics/single_table/privacy/base.py
index 54ce376e..f0d9af7c 100644
--- a/sdmetrics/single_table/privacy/base.py
+++ b/sdmetrics/single_table/privacy/base.py
@@ -14,8 +14,8 @@ class CategoricalType(Enum):
     The value can be one-hot-encoded, or coded as class number.
     """
 
-    CLASS_NUM = "Class_num"
-    ONE_HOT = "One_hot"
+    CLASS_NUM = 'Class_num'
+    ONE_HOT = 'One_hot'
 
 
 class CategoricalPrivacyMetric(SingleTableMetric):
@@ -320,7 +320,7 @@ def fit(self, synthetic_data, key_fields, sensitive_fields):
             sensitive_fields(list[str]):
                 The names of the sensitive columns.
         """
-        raise NotImplementedError("Please implement fit method of attackers")
+        raise NotImplementedError('Please implement fit method of attackers')
 
     def predict(self, key_data):
         """Make a prediction of the sensitive data given keys.
@@ -333,7 +333,7 @@ def predict(self, key_data):
             tuple:
                 The predicted sensitive data.
         """
-        raise NotImplementedError("Please implement predict method of attackers")
+        raise NotImplementedError('Please implement predict method of attackers')
 
     def score(self, key_data, sensitive_data):
         """Score based on the belief of the attacker, in the form P(sensitive_data|key|data).
diff --git a/setup.cfg b/setup.cfg
index 931236df..fe0262b3 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -34,6 +34,7 @@ universal = 1
 [flake8]
 max-line-length = 99
 exclude = docs, .tox, .git, __pycache__, .ipynb_checkpoints
+inline-quotes = single
 
 [isort]
 include_trailing_comment = True
diff --git a/setup.py b/setup.py
index e760def5..9341ed6e 100644
--- a/setup.py
+++ b/setup.py
@@ -51,6 +51,7 @@
     'flake8>=3.7.7,<4',
     'flake8-absolute-import>=1.0,<2',
     'isort>=4.3.4,<5',
+    'flake8-quotes>=3.3.0,<4',
 
     # fix style issues
     'autoflake>=1.1,<2',
diff --git a/tests/integration/single_column/statistical/test_cstest.py b/tests/integration/single_column/statistical/test_cstest.py
index a1b6f64f..098591eb 100644
--- a/tests/integration/single_column/statistical/test_cstest.py
+++ b/tests/integration/single_column/statistical/test_cstest.py
@@ -5,7 +5,7 @@
 from sdmetrics.single_column.statistical.cstest import CSTest
 
 
-@pytest.mark.parametrize("array_like", [np.array, pd.Series])
+@pytest.mark.parametrize('array_like', [np.array, pd.Series])
 def test_max(array_like):
     data = array_like(['a', 'b', 'b', 'c', 'c', 'c'] * 100)
     output = CSTest.compute(data, data)
@@ -15,7 +15,7 @@ def test_max(array_like):
     assert normalized == 1
 
 
-@pytest.mark.parametrize("array_like", [np.array, pd.Series])
+@pytest.mark.parametrize('array_like', [np.array, pd.Series])
 def test_min(array_like):
     real = array_like(['a', 'b', 'b', 'c', 'c', 'c'] * 100)
     synth = array_like(['d', 'e', 'e', 'f', 'f', 'f'] * 100)
@@ -26,7 +26,7 @@ def test_min(array_like):
     assert normalized == 0
 
 
-@pytest.mark.parametrize("array_like", [np.array, pd.Series])
+@pytest.mark.parametrize('array_like', [np.array, pd.Series])
 def test_good(array_like):
     real = array_like(['a', 'b', 'b', 'c', 'c', 'c'] * 100)
     synth = array_like(['a', 'b', 'b', 'b', 'c', 'c'] * 100)
@@ -37,7 +37,7 @@ def test_good(array_like):
     assert 0.5 < normalized <= 1.0
 
 
-@pytest.mark.parametrize("array_like", [np.array, pd.Series])
+@pytest.mark.parametrize('array_like', [np.array, pd.Series])
 def test_bad(array_like):
     real = array_like(['a', 'b', 'b', 'c', 'c', 'c'] * 100)
     synth = array_like(['a', 'a', 'a', 'a', 'b', 'c'] * 100)
diff --git a/tests/integration/single_column/statistical/test_kstest.py b/tests/integration/single_column/statistical/test_kstest.py
index b5ed2bdf..6a7260c7 100644
--- a/tests/integration/single_column/statistical/test_kstest.py
+++ b/tests/integration/single_column/statistical/test_kstest.py
@@ -5,7 +5,7 @@
 from sdmetrics.single_column.statistical.kstest import KSTest
 
 
-@pytest.mark.parametrize("array_like", [np.array, pd.Series])
+@pytest.mark.parametrize('array_like', [np.array, pd.Series])
 def test_max(array_like):
     data = array_like(np.random.normal(size=1000))
     output = KSTest.compute(data, data)
@@ -15,7 +15,7 @@ def test_max(array_like):
     assert normalized == 1
 
 
-@pytest.mark.parametrize("array_like", [np.array, pd.Series])
+@pytest.mark.parametrize('array_like', [np.array, pd.Series])
 def test_min(array_like):
     real = array_like(np.random.normal(size=1000))
     synth = array_like(np.random.normal(loc=1000, scale=10, size=1000))
@@ -26,7 +26,7 @@ def test_min(array_like):
     assert normalized == 0
 
 
-@pytest.mark.parametrize("array_like", [np.array, pd.Series])
+@pytest.mark.parametrize('array_like', [np.array, pd.Series])
 def test_good(array_like):
     real = array_like(np.random.normal(size=1000))
     synth = array_like(np.random.normal(loc=0.1, size=1000))
@@ -37,7 +37,7 @@ def test_good(array_like):
     assert 0.5 < normalized <= 1.0
 
 
-@pytest.mark.parametrize("array_like", [np.array, pd.Series])
+@pytest.mark.parametrize('array_like', [np.array, pd.Series])
 def test_bad(array_like):
     real = array_like(np.random.normal(size=1000))
     synth = array_like(np.random.normal(loc=3, scale=3, size=1000))
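
Note: the setup.cfg hunk above turns on flake8-quotes (added to the dev requirements as 'flake8-quotes>=3.3.0,<4'); with inline-quotes = single, double-quoted inline strings are reported under code Q000. A minimal illustrative sketch of the style being enforced, using a hypothetical variable name (not part of the patch):

    value = "ignore"  # flagged by flake8-quotes (Q000) when inline-quotes = single
    value = 'ignore'  # passes the check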