Format Python files with Black (#9742)
* Format Python files with black (exclude test data)
* Fix DS test
* More DS test fixing
kimadeline committed Jan 24, 2020
1 parent d788d18 commit a58eeff
Showing 46 changed files with 5,277 additions and 4,550 deletions.
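The commit applies Black's default style across the extension's Python files: double-quoted strings, normalized spacing, line wrapping at 88 characters, and two blank lines around top-level definitions. A minimal sketch of the same rewrite through Black's Python API (the snippet and its input string are illustrative, not part of the commit):

import black

# format_str applies the same rules the `black` CLI applies to files:
# quote normalization, spacing, and wrapping at the default 88 columns.
src = "print ( 'hello from dummy jupyter' )"
print(black.format_str(src, mode=black.FileMode()))
# -> print("hello from dummy jupyter")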
468 changes: 254 additions & 214 deletions pythonFiles/completion.py

Large diffs are not rendered by default.

4 changes: 4 additions & 0 deletions pythonFiles/datascience/daemon/__main__.py
@@ -14,6 +14,7 @@
 LOG_FORMAT = "%(asctime)s UTC - %(levelname)s - %(name)s - %(message)s"
 queue_handler = None
 
+
 def add_arguments(parser):
     parser.description = "Daemon"
 
@@ -47,16 +48,19 @@ class TemporaryQueueHandler(logging.Handler):
     Later the messages are pushed back to the RPC client as a notification.
     Once the RPC channel is up, we'll stop queuing messages and send them directly.
     """
+
     def __init__(self):
         logging.Handler.__init__(self)
         self.queue = []
         self.server = None
+
     def set_server(self, server):
         # Send everything that has been queued until now.
         self.server = server
         for msg in self.queue:
             self.server._endpoint.notify("log", msg)
         self.queue = []
+
     def emit(self, record):
         data = {"level": record.levelname, "msg": self.format(record)}
         # If we don't have the server, then queue it and send it later.
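The hunks above only add blank lines around TemporaryQueueHandler's methods, but the class's queue-then-flush behavior is worth seeing end to end. A hedged sketch (FakeEndpoint and FakeServer are stand-ins for the daemon's real JSON-RPC machinery, not part of this commit):

import logging

class FakeEndpoint:
    def notify(self, method, params):
        # The real endpoint sends a JSON-RPC notification to the client.
        print("notify:", method, params)

class FakeServer:
    def __init__(self):
        self._endpoint = FakeEndpoint()

handler = TemporaryQueueHandler()  # the class from the diff above
log = logging.getLogger("daemon.demo")
log.addHandler(handler)
log.error("logged before the RPC channel is up")  # queued, nothing sent yet
handler.set_server(FakeServer())  # flushes the queue as "log" notifications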
1 change: 0 additions & 1 deletion pythonFiles/datascience/daemon/daemon_output.py
@@ -128,4 +128,3 @@ def redirect_output(stdout_handler, stderr_handler):
     _stderr_redirector = sys.stderr = IORedirector(
         "stderr", sys.stderr, sys._vsc_err_buffer_, True
     )
-
4 changes: 3 additions & 1 deletion pythonFiles/datascience/daemon/daemon_python.py
@@ -67,7 +67,9 @@ class PythonDaemon(MethodDispatcher):
     """
 
     def __init__(self, rx, tx):
-        self.log = logging.getLogger("{0}.{1}".format(self.__class__.__module__,self.__class__.__name__))
+        self.log = logging.getLogger(
+            "{0}.{1}".format(self.__class__.__module__, self.__class__.__name__)
+        )
         self._jsonrpc_stream_reader = JsonRpcStreamReader(rx)
         self._jsonrpc_stream_writer = JsonRpcStreamWriter(tx)
         self._endpoint = Endpoint(
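The wrapped getLogger call still produces the conventional "module.ClassName" logger name. A small illustrative sketch of the pattern (the Demo class is hypothetical):

import logging

class Demo:
    def __init__(self):
        self.log = logging.getLogger(
            "{0}.{1}".format(self.__class__.__module__, self.__class__.__name__)
        )

print(Demo().log.name)  # e.g. "__main__.Demo" when run as a script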
25 changes: 14 additions & 11 deletions pythonFiles/datascience/dummyJupyter.py
@@ -4,22 +4,25 @@
 import argparse
 import time
 
+
 def main():
-    print('hello from dummy jupyter')
+    print("hello from dummy jupyter")
     parser = argparse.ArgumentParser()
-    parser.add_argument('--version', type=bool, default=False, const=True, nargs='?')
-    parser.add_argument('notebook', type=bool, default=False, const=True, nargs='?')
-    parser.add_argument('--no-browser', type=bool, default=False, const=True, nargs='?')
-    parser.add_argument('--notebook-dir', default='')
-    parser.add_argument('--config', default='')
+    parser.add_argument("--version", type=bool, default=False, const=True, nargs="?")
+    parser.add_argument("notebook", type=bool, default=False, const=True, nargs="?")
+    parser.add_argument("--no-browser", type=bool, default=False, const=True, nargs="?")
+    parser.add_argument("--notebook-dir", default="")
+    parser.add_argument("--config", default="")
     results = parser.parse_args()
-    if (results.version):
-        print('1.1.dummy')
+    if results.version:
+        print("1.1.dummy")
     else:
-        print('http://localhost:8888/?token=012f08663a68e279fe0a5335e0b5dfe44759ddcccf0b3a56')
+        print(
+            "http://localhost:8888/?token=012f08663a68e279fe0a5335e0b5dfe44759ddcccf0b3a56"
+        )
         time.sleep(5)
-        raise Exception('Dummy is dead')
+        raise Exception("Dummy is dead")
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
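One behavior worth noting in the flags above: with nargs="?" and const=True, passing the bare flag yields the const value directly (type=bool never sees it). A quick sketch mirroring the --version argument from the diff:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--version", type=bool, default=False, const=True, nargs="?")
print(parser.parse_args(["--version"]).version)  # True: a bare flag takes const
print(parser.parse_args([]).version)  # False: the default applies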
4 changes: 1 addition & 3 deletions pythonFiles/datascience/getJupyterKernels.py
@@ -7,9 +7,7 @@
 
 
 specs = jupyter_client.kernelspec.KernelSpecManager().get_all_specs()
-all_specs = {
-    "kernelspecs": specs
-}
+all_specs = {"kernelspecs": specs}
 
 sys.stdout.write(json.dumps(all_specs))
 sys.stdout.flush()
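For context, get_all_specs() maps each installed kernel's name to its resource directory and spec dictionary, so the script emits one nested JSON object. A sketch of the shape (assumes jupyter_client and at least one installed kernel):

import json
import jupyter_client

specs = jupyter_client.kernelspec.KernelSpecManager().get_all_specs()
# Roughly: {"python3": {"resource_dir": "/path/...", "spec": {"argv": [...], ...}}}
print(json.dumps({"kernelspecs": specs}, indent=2))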
41 changes: 21 additions & 20 deletions pythonFiles/datascience/getJupyterVariableDataFrameInfo.py
@@ -8,31 +8,31 @@
 
 # In IJupyterVariables.getValue this '_VSCode_JupyterTestValue' will be replaced with the json stringified value of the target variable
 # Indexes off of _VSCODE_targetVariable need to index types that are part of IJupyterVariable
-_VSCODE_targetVariable = _VSCODE_json.loads('_VSCode_JupyterTestValue')
+_VSCODE_targetVariable = _VSCODE_json.loads("""_VSCode_JupyterTestValue""")
 
 # First check to see if we are a supported type, this prevents us from adding types that are not supported
 # and also keeps our types in sync with what the variable explorer says that we support
-if _VSCODE_targetVariable['type'] not in _VSCode_supportsDataExplorer:
+if _VSCODE_targetVariable["type"] not in _VSCode_supportsDataExplorer:
     del _VSCode_supportsDataExplorer
     print(_VSCODE_json.dumps(_VSCODE_targetVariable))
     del _VSCODE_targetVariable
 else:
     del _VSCode_supportsDataExplorer
-    _VSCODE_evalResult = eval(_VSCODE_targetVariable['name'])
+    _VSCODE_evalResult = eval(_VSCODE_targetVariable["name"])
 
     # Figure out shape if not already there. Use the shape to compute the row count
-    if (hasattr(_VSCODE_evalResult, 'shape')):
+    if hasattr(_VSCODE_evalResult, "shape"):
         try:
             # Get a bit more restrictive with exactly what we want to count as a shape, since anything can define it
             if isinstance(_VSCODE_evalResult.shape, tuple):
-                _VSCODE_targetVariable['rowCount'] = _VSCODE_evalResult.shape[0]
+                _VSCODE_targetVariable["rowCount"] = _VSCODE_evalResult.shape[0]
         except TypeError:
-            _VSCODE_targetVariable['rowCount'] = 0
-    elif (hasattr(_VSCODE_evalResult, '__len__')):
+            _VSCODE_targetVariable["rowCount"] = 0
+    elif hasattr(_VSCODE_evalResult, "__len__"):
         try:
-            _VSCODE_targetVariable['rowCount'] = len(_VSCODE_evalResult)
+            _VSCODE_targetVariable["rowCount"] = len(_VSCODE_evalResult)
         except TypeError:
-            _VSCODE_targetVariable['rowCount'] = 0
+            _VSCODE_targetVariable["rowCount"] = 0
 
     # Turn the eval result into a df
     _VSCODE_df = _VSCODE_evalResult
@@ -43,16 +43,18 @@
     elif isinstance(_VSCODE_evalResult, dict):
         _VSCODE_evalResult = _VSCODE_pd.Series(_VSCODE_evalResult)
         _VSCODE_df = _VSCODE_pd.Series.to_frame(_VSCODE_evalResult)
-    elif _VSCODE_targetVariable['type'] == 'ndarray':
+    elif _VSCODE_targetVariable["type"] == "ndarray":
         _VSCODE_df = _VSCODE_pd.DataFrame(_VSCODE_evalResult)
 
     # If any rows, use pandas json to convert a single row to json. Extract
     # the column names and types from the json so we match what we'll fetch when
    # we ask for all of the rows
-    if _VSCODE_targetVariable['rowCount']:
+    if _VSCODE_targetVariable["rowCount"]:
         try:
             _VSCODE_row = _VSCODE_df.iloc[0:1]
-            _VSCODE_json_row = _VSCODE_pd_json.to_json(None, _VSCODE_row, date_format='iso')
+            _VSCODE_json_row = _VSCODE_pd_json.to_json(
+                None, _VSCODE_row, date_format="iso"
+            )
             _VSCODE_columnNames = list(_VSCODE_json.loads(_VSCODE_json_row))
             del _VSCODE_row
             del _VSCODE_json_row
@@ -62,24 +64,24 @@
         _VSCODE_columnNames = list(_VSCODE_df)
 
     # Compute the index column. It may have been renamed
-    _VSCODE_indexColumn = _VSCODE_df.index.name if _VSCODE_df.index.name else 'index'
+    _VSCODE_indexColumn = _VSCODE_df.index.name if _VSCODE_df.index.name else "index"
     _VSCODE_columnTypes = list(_VSCODE_df.dtypes)
     del _VSCODE_df
 
     # Make sure the index column exists
     if _VSCODE_indexColumn not in _VSCODE_columnNames:
         _VSCODE_columnNames.insert(0, _VSCODE_indexColumn)
-        _VSCODE_columnTypes.insert(0, 'int64')
+        _VSCODE_columnTypes.insert(0, "int64")
 
     # Then loop and generate our output json
     _VSCODE_columns = []
     for _VSCODE_n in range(0, len(_VSCODE_columnNames)):
         _VSCODE_column_type = _VSCODE_columnTypes[_VSCODE_n]
         _VSCODE_column_name = str(_VSCODE_columnNames[_VSCODE_n])
         _VSCODE_colobj = {}
-        _VSCODE_colobj['key'] = _VSCODE_column_name
-        _VSCODE_colobj['name'] = _VSCODE_column_name
-        _VSCODE_colobj['type'] = str(_VSCODE_column_type)
+        _VSCODE_colobj["key"] = _VSCODE_column_name
+        _VSCODE_colobj["name"] = _VSCODE_column_name
+        _VSCODE_colobj["type"] = str(_VSCODE_column_type)
         _VSCODE_columns.append(_VSCODE_colobj)
         del _VSCODE_column_name
         del _VSCODE_column_type
@@ -88,12 +90,11 @@
     del _VSCODE_columnTypes
 
     # Save this in our target
-    _VSCODE_targetVariable['columns'] = _VSCODE_columns
-    _VSCODE_targetVariable['indexColumn'] = _VSCODE_indexColumn
+    _VSCODE_targetVariable["columns"] = _VSCODE_columns
+    _VSCODE_targetVariable["indexColumn"] = _VSCODE_indexColumn
     del _VSCODE_columns
     del _VSCODE_indexColumn
 
-
     # Transform this back into a string
     print(_VSCODE_json.dumps(_VSCODE_targetVariable))
     del _VSCODE_targetVariable
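The quoting change on the json.loads line is the one edit here that goes beyond pure restyling: the '_VSCode_JupyterTestValue' placeholder is textually replaced with a JSON blob before the script runs, and that blob contains double quotes, which a triple-quoted literal tolerates. A rough sketch of the substitution (the replace step is an assumption about how the extension injects the value; the sample blob is illustrative):

import json

line = '_VSCODE_targetVariable = _VSCODE_json.loads("""_VSCode_JupyterTestValue""")'
blob = json.dumps({"name": "df", "type": "DataFrame", "rowCount": 0})
# The blob is full of double quotes, so only the triple-quoted
# literal still parses after the substitution.
print(line.replace("_VSCode_JupyterTestValue", blob))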
14 changes: 8 additions & 6 deletions pythonFiles/datascience/getJupyterVariableDataFrameRows.py
@@ -5,13 +5,13 @@
 
 # In IJupyterVariables.getValue this '_VSCode_JupyterTestValue' will be replaced with the json stringified value of the target variable
 # Indexes off of _VSCODE_targetVariable need to index types that are part of IJupyterVariable
-_VSCODE_targetVariable = _VSCODE_json.loads('_VSCode_JupyterTestValue')
-_VSCODE_evalResult = eval(_VSCODE_targetVariable['name'])
+_VSCODE_targetVariable = _VSCODE_json.loads("""_VSCode_JupyterTestValue""")
+_VSCODE_evalResult = eval(_VSCODE_targetVariable["name"])
 
 # _VSCode_JupyterStartRow and _VSCode_JupyterEndRow should be replaced dynamically with the literals
 # for our start and end rows
 _VSCODE_startRow = max(_VSCode_JupyterStartRow, 0)
-_VSCODE_endRow = min(_VSCode_JupyterEndRow, _VSCODE_targetVariable['rowCount'])
+_VSCODE_endRow = min(_VSCode_JupyterEndRow, _VSCODE_targetVariable["rowCount"])
 
 # Assume we have a dataframe. If not, turn our eval result into a dataframe
 _VSCODE_df = _VSCODE_evalResult
@@ -22,15 +22,17 @@
 elif isinstance(_VSCODE_evalResult, dict):
     _VSCODE_evalResult = _VSCODE_pd.Series(_VSCODE_evalResult)
     _VSCODE_df = _VSCODE_pd.Series.to_frame(_VSCODE_evalResult)
-elif _VSCODE_targetVariable['type'] == 'ndarray':
+elif _VSCODE_targetVariable["type"] == "ndarray":
     _VSCODE_df = _VSCODE_pd.DataFrame(_VSCODE_evalResult)
 # If not a known type, then just let pandas handle it.
-elif not (hasattr(_VSCODE_df, 'iloc')):
+elif not (hasattr(_VSCODE_df, "iloc")):
     _VSCODE_df = _VSCODE_pd.DataFrame(_VSCODE_evalResult)
 
 # Turn into JSON using pandas. We use pandas because it's about 3 orders of magnitude faster to turn into JSON
 _VSCODE_rows = _VSCODE_df.iloc[_VSCODE_startRow:_VSCODE_endRow]
-_VSCODE_result = _VSCODE_pd_json.to_json(None, _VSCODE_rows, orient='table', date_format='iso')
+_VSCODE_result = _VSCODE_pd_json.to_json(
+    None, _VSCODE_rows, orient="table", date_format="iso"
+)
 print(_VSCODE_result)
 
 # Cleanup our variables
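The _VSCODE_pd_json.to_json call corresponds to the public DataFrame API below; orient="table" emits a JSON Table Schema (column names and dtypes) alongside the row data. An illustrative sketch:

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})
# Same serialization the script performs, via the public API:
# a "schema" block plus a "data" block for the selected rows.
print(df.iloc[0:2].to_json(orient="table", date_format="iso"))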
12 changes: 10 additions & 2 deletions pythonFiles/datascience/getJupyterVariableList.py
@@ -8,13 +8,21 @@
 _VSCode_supportsDataExplorer = "['list', 'Series', 'dict', 'ndarray', 'DataFrame']"
 
 # who_ls is a Jupyter line magic to fetch currently defined vars
-_VSCode_JupyterVars = _VSCODE_get_ipython().run_line_magic('who_ls', '')
+_VSCode_JupyterVars = _VSCODE_get_ipython().run_line_magic("who_ls", "")
 
 _VSCode_output = []
 for _VSCode_var in _VSCode_JupyterVars:
     try:
         _VSCode_type = type(eval(_VSCode_var))
-        _VSCode_output.append({'name': _VSCode_var, 'type': _VSCode_type.__name__, 'size': _VSCODE_getsizeof(_VSCode_var), 'supportsDataExplorer': _VSCode_type.__name__ in _VSCode_supportsDataExplorer })
+        _VSCode_output.append(
+            {
+                "name": _VSCode_var,
+                "type": _VSCode_type.__name__,
+                "size": _VSCODE_getsizeof(_VSCode_var),
+                "supportsDataExplorer": _VSCode_type.__name__
+                in _VSCode_supportsDataExplorer,
+            }
+        )
         del _VSCode_type
         del _VSCode_var
     except:
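The same enumeration can be reproduced directly in an IPython session. A sketch (note the script above passes the variable's name string to getsizeof; this sketch sizes the value instead, which is likely the intent):

import sys
from IPython import get_ipython  # only meaningful inside IPython

# %who_ls returns the names of user-defined variables in the session
for name in get_ipython().run_line_magic("who_ls", ""):
    value = get_ipython().user_ns[name]
    print({"name": name, "type": type(value).__name__, "size": sys.getsizeof(value)})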
