Skip to content

Commit

Permalink
Make double quotes into single quotes
Browse files — browse the repository at this point in the history
  • Loading branch information
fealho committed Nov 4, 2021
1 parent 270a73e commit 0a088f5
Show file tree
Hide file tree
Showing 8 changed files with 21 additions and 19 deletions.
6 changes: 3 additions & 3 deletions sdmetrics/goal.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,6 @@ class Goal(Enum):
minimized, or maximized.
"""

IGNORE = "ignore"
MAXIMIZE = "maximize"
MINIMIZE = "minimize"
IGNORE = 'ignore'
MAXIMIZE = 'maximize'
MINIMIZE = 'minimize'
2 changes: 1 addition & 1 deletion sdmetrics/single_table/bayesian_network.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ def _likelihoods(cls, real_data, synthetic_data, metadata=None, structure=None):
try:
from pomegranate import BayesianNetwork
except ImportError:
raise ImportError("Please install pomegranate with `pip install pomegranate`")
raise ImportError('Please install pomegranate with `pip install pomegranate`')

metadata = cls._validate_inputs(real_data, synthetic_data, metadata)
structure = metadata.get('structure', structure)
Expand Down
6 changes: 3 additions & 3 deletions sdmetrics/single_table/detection/sklearn.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,11 +46,11 @@ class LogisticDetection(ScikitLearnClassifierDetectionMetric):
The output of the metric is one minus the average ROC AUC score obtained.
"""

name = "LogisticRegression Detection"
name = 'LogisticRegression Detection'

@staticmethod
def _get_classifier():
return LogisticRegression(solver="lbfgs")
return LogisticRegression(solver='lbfgs')


class SVCDetection(ScikitLearnClassifierDetectionMetric):
Expand All @@ -62,7 +62,7 @@ class SVCDetection(ScikitLearnClassifierDetectionMetric):
The output of the metric is one minus the average ROC AUC score obtained.
"""

name = "SVC Detection"
name = 'SVC Detection'

@staticmethod
def _get_classifier():
Expand Down
8 changes: 4 additions & 4 deletions sdmetrics/single_table/privacy/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,8 +14,8 @@ class CategoricalType(Enum):
The value can be one-hot-encoded, or coded as class number.
"""

CLASS_NUM = "Class_num"
ONE_HOT = "One_hot"
CLASS_NUM = 'Class_num'
ONE_HOT = 'One_hot'


class CategoricalPrivacyMetric(SingleTableMetric):
Expand Down Expand Up @@ -320,7 +320,7 @@ def fit(self, synthetic_data, key_fields, sensitive_fields):
sensitive_fields(list[str]):
The names of the sensitive columns.
"""
raise NotImplementedError("Please implement fit method of attackers")
raise NotImplementedError('Please implement fit method of attackers')

def predict(self, key_data):
"""Make a prediction of the sensitive data given keys.
Expand All @@ -333,7 +333,7 @@ def predict(self, key_data):
tuple:
The predicted sensitive data.
"""
raise NotImplementedError("Please implement predict method of attackers")
raise NotImplementedError('Please implement predict method of attackers')

def score(self, key_data, sensitive_data):
"""Score based on the belief of the attacker, in the form P(sensitive_data|key|data).
Expand Down
1 change: 1 addition & 0 deletions setup.cfg
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@ universal = 1
[flake8]
max-line-length = 99
exclude = docs, .tox, .git, __pycache__, .ipynb_checkpoints
inline-quotes = single

[isort]
include_trailing_comment = True
Expand Down
1 change: 1 addition & 0 deletions setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,7 @@
'flake8>=3.7.7,<4',
'flake8-absolute-import>=1.0,<2',
'isort>=4.3.4,<5',
'flake8-quotes>=3.3.0,<4',

# fix style issues
'autoflake>=1.1,<2',
Expand Down
8 changes: 4 additions & 4 deletions tests/integration/single_column/statistical/test_cstest.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
from sdmetrics.single_column.statistical.cstest import CSTest


@pytest.mark.parametrize("array_like", [np.array, pd.Series])
@pytest.mark.parametrize('array_like', [np.array, pd.Series])
def test_max(array_like):
data = array_like(['a', 'b', 'b', 'c', 'c', 'c'] * 100)
output = CSTest.compute(data, data)
Expand All @@ -15,7 +15,7 @@ def test_max(array_like):
assert normalized == 1


@pytest.mark.parametrize("array_like", [np.array, pd.Series])
@pytest.mark.parametrize('array_like', [np.array, pd.Series])
def test_min(array_like):
real = array_like(['a', 'b', 'b', 'c', 'c', 'c'] * 100)
synth = array_like(['d', 'e', 'e', 'f', 'f', 'f'] * 100)
Expand All @@ -26,7 +26,7 @@ def test_min(array_like):
assert normalized == 0


@pytest.mark.parametrize("array_like", [np.array, pd.Series])
@pytest.mark.parametrize('array_like', [np.array, pd.Series])
def test_good(array_like):
real = array_like(['a', 'b', 'b', 'c', 'c', 'c'] * 100)
synth = array_like(['a', 'b', 'b', 'b', 'c', 'c'] * 100)
Expand All @@ -37,7 +37,7 @@ def test_good(array_like):
assert 0.5 < normalized <= 1.0


@pytest.mark.parametrize("array_like", [np.array, pd.Series])
@pytest.mark.parametrize('array_like', [np.array, pd.Series])
def test_bad(array_like):
real = array_like(['a', 'b', 'b', 'c', 'c', 'c'] * 100)
synth = array_like(['a', 'a', 'a', 'a', 'b', 'c'] * 100)
Expand Down
8 changes: 4 additions & 4 deletions tests/integration/single_column/statistical/test_kstest.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
from sdmetrics.single_column.statistical.kstest import KSTest


@pytest.mark.parametrize("array_like", [np.array, pd.Series])
@pytest.mark.parametrize('array_like', [np.array, pd.Series])
def test_max(array_like):
data = array_like(np.random.normal(size=1000))
output = KSTest.compute(data, data)
Expand All @@ -15,7 +15,7 @@ def test_max(array_like):
assert normalized == 1


@pytest.mark.parametrize("array_like", [np.array, pd.Series])
@pytest.mark.parametrize('array_like', [np.array, pd.Series])
def test_min(array_like):
real = array_like(np.random.normal(size=1000))
synth = array_like(np.random.normal(loc=1000, scale=10, size=1000))
Expand All @@ -26,7 +26,7 @@ def test_min(array_like):
assert normalized == 0


@pytest.mark.parametrize("array_like", [np.array, pd.Series])
@pytest.mark.parametrize('array_like', [np.array, pd.Series])
def test_good(array_like):
real = array_like(np.random.normal(size=1000))
synth = array_like(np.random.normal(loc=0.1, size=1000))
Expand All @@ -37,7 +37,7 @@ def test_good(array_like):
assert 0.5 < normalized <= 1.0


@pytest.mark.parametrize("array_like", [np.array, pd.Series])
@pytest.mark.parametrize('array_like', [np.array, pd.Series])
def test_bad(array_like):
real = array_like(np.random.normal(size=1000))
synth = array_like(np.random.normal(loc=3, scale=3, size=1000))
Expand Down

0 comments on commit 0a088f5

Please sign in to comment.