From 816fdf0ad599a2ae4eac14b9057583dc6f96264d Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
<66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Mon, 2 Sep 2024 16:29:21 +0000
Subject: [PATCH 01/12] [pre-commit.ci] pre-commit autoupdate
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
updates:
- [github.com/astral-sh/ruff-pre-commit: v0.5.0 → v0.6.3](https://github.com/astral-sh/ruff-pre-commit/compare/v0.5.0...v0.6.3)
- [github.com/asottile/pyupgrade: v3.16.0 → v3.17.0](https://github.com/asottile/pyupgrade/compare/v3.16.0...v3.17.0)
---
.pre-commit-config.yaml | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index f6717dd503c9b..8161c96df8109 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -19,7 +19,7 @@ ci:
skip: [pyright, mypy]
repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.5.0
+ rev: v0.6.3
hooks:
- id: ruff
args: [--exit-non-zero-on-fix]
@@ -74,7 +74,7 @@ repos:
hooks:
- id: isort
- repo: https://github.com/asottile/pyupgrade
- rev: v3.16.0
+ rev: v3.17.0
hooks:
- id: pyupgrade
args: [--py310-plus]
From 1117c6bf659cd7ec46e7c8f0a813c34bdf5487aa Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
<66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Mon, 2 Sep 2024 16:30:44 +0000
Subject: [PATCH 02/12] [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
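Besides the mechanical reformatting, the updated hooks rewrite generator
annotations: `Generator[X, None, None]` becomes `Generator[X]`, relying on the
send and return type parameters defaulting to `None`. A minimal sketch of the
equivalence (illustration only, not part of the diff; `count_up` is a made-up
helper):

    from __future__ import annotations

    from collections.abc import Generator

    def count_up(n: int) -> Generator[int]:  # same as Generator[int, None, None]
        # Only the yield type is spelled out; send/return fall back to None.
        yield from range(n)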
---
doc/source/user_guide/style.ipynb | 651 +++++++++++++----------
pandas/_config/config.py | 4 +-
pandas/_config/localization.py | 2 +-
pandas/_testing/_warnings.py | 2 +-
pandas/_testing/contexts.py | 8 +-
pandas/compat/pickle_compat.py | 2 +-
pandas/core/_numba/kernels/min_max_.py | 6 +-
pandas/core/apply.py | 6 +-
pandas/core/arrays/datetimes.py | 2 +-
pandas/core/common.py | 2 +-
pandas/core/dtypes/cast.py | 8 +-
pandas/core/groupby/ops.py | 2 +-
pandas/core/indexes/multi.py | 2 +-
pandas/core/internals/blocks.py | 2 +-
pandas/core/internals/concat.py | 2 +-
pandas/core/internals/managers.py | 4 +-
pandas/core/methods/to_dict.py | 2 +-
pandas/io/excel/_odfreader.py | 3 +-
pandas/io/formats/css.py | 6 +-
pandas/io/formats/format.py | 2 +-
pandas/io/sql.py | 6 +-
pandas/plotting/_matplotlib/converter.py | 2 +-
pandas/plotting/_matplotlib/tools.py | 2 +-
pandas/plotting/_misc.py | 2 +-
pandas/tests/arithmetic/test_numeric.py | 4 +-
pandas/util/_exceptions.py | 4 +-
26 files changed, 407 insertions(+), 331 deletions(-)
diff --git a/doc/source/user_guide/style.ipynb b/doc/source/user_guide/style.ipynb
index daecfce6ecebc..5f99af1a0aee1 100644
--- a/doc/source/user_guide/style.ipynb
+++ b/doc/source/user_guide/style.ipynb
@@ -46,7 +46,6 @@
},
"outputs": [],
"source": [
- "import matplotlib.pyplot\n",
"# We have this here to trigger matplotlib's font cache stuff.\n",
"# This cell is hidden from the output"
]
@@ -78,17 +77,13 @@
"source": [
"import pandas as pd\n",
"import numpy as np\n",
- "import matplotlib as mpl\n",
"\n",
- "df = pd.DataFrame({\n",
- " \"strings\": [\"Adam\", \"Mike\"],\n",
- " \"ints\": [1, 3],\n",
- " \"floats\": [1.123, 1000.23]\n",
- "})\n",
- "df.style \\\n",
- " .format(precision=3, thousands=\".\", decimal=\",\") \\\n",
- " .format_index(str.upper, axis=1) \\\n",
- " .relabel_index([\"row 1\", \"row 2\"], axis=0)"
+ "df = pd.DataFrame(\n",
+ " {\"strings\": [\"Adam\", \"Mike\"], \"ints\": [1, 3], \"floats\": [1.123, 1000.23]}\n",
+ ")\n",
+ "df.style.format(precision=3, thousands=\".\", decimal=\",\").format_index(\n",
+ " str.upper, axis=1\n",
+ ").relabel_index([\"row 1\", \"row 2\"], axis=0)"
]
},
{
@@ -104,17 +99,21 @@
"metadata": {},
"outputs": [],
"source": [
- "weather_df = pd.DataFrame(np.random.rand(10,2)*5, \n",
- " index=pd.date_range(start=\"2021-01-01\", periods=10),\n",
- " columns=[\"Tokyo\", \"Beijing\"])\n",
+ "weather_df = pd.DataFrame(\n",
+ " np.random.rand(10, 2) * 5,\n",
+ " index=pd.date_range(start=\"2021-01-01\", periods=10),\n",
+ " columns=[\"Tokyo\", \"Beijing\"],\n",
+ ")\n",
"\n",
- "def rain_condition(v): \n",
+ "\n",
+ "def rain_condition(v):\n",
" if v < 1.75:\n",
" return \"Dry\"\n",
" elif v < 2.75:\n",
" return \"Rain\"\n",
" return \"Heavy Rain\"\n",
"\n",
+ "\n",
"def make_pretty(styler):\n",
" styler.set_caption(\"Weather Conditions\")\n",
" styler.format(rain_condition)\n",
@@ -122,6 +121,7 @@
" styler.background_gradient(axis=None, vmin=1, vmax=5, cmap=\"YlGnBu\")\n",
" return styler\n",
"\n",
+ "\n",
"weather_df"
]
},
@@ -158,9 +158,7 @@
"outputs": [],
"source": [
"df = pd.DataFrame(np.random.randn(5, 5))\n",
- "df.style \\\n",
- " .hide(subset=[0, 2, 4], axis=0) \\\n",
- " .hide(subset=[0, 2, 4], axis=1)"
+ "df.style.hide(subset=[0, 2, 4], axis=0).hide(subset=[0, 2, 4], axis=1)"
]
},
{
@@ -177,9 +175,9 @@
"outputs": [],
"source": [
"show = [0, 2, 4]\n",
- "df.style \\\n",
- " .hide([row for row in df.index if row not in show], axis=0) \\\n",
- " .hide([col for col in df.columns if col not in show], axis=1)"
+ "df.style.hide([row for row in df.index if row not in show], axis=0).hide(\n",
+ " [col for col in df.columns if col not in show], axis=1\n",
+ ")"
]
},
{
@@ -199,9 +197,9 @@
"metadata": {},
"outputs": [],
"source": [
- "summary_styler = df.agg([\"sum\", \"mean\"]).style \\\n",
- " .format(precision=3) \\\n",
- " .relabel_index([\"Sum\", \"Average\"])\n",
+ "summary_styler = (\n",
+ " df.agg([\"sum\", \"mean\"]).style.format(precision=3).relabel_index([\"Sum\", \"Average\"])\n",
+ ")\n",
"df.style.format(precision=1).concat(summary_styler)"
]
},
@@ -227,9 +225,16 @@
"metadata": {},
"outputs": [],
"source": [
- "df = pd.DataFrame([[38.0, 2.0, 18.0, 22.0, 21, np.nan],[19, 439, 6, 452, 226,232]], \n",
- " index=pd.Index(['Tumour (Positive)', 'Non-Tumour (Negative)'], name='Actual Label:'), \n",
- " columns=pd.MultiIndex.from_product([['Decision Tree', 'Regression', 'Random'],['Tumour', 'Non-Tumour']], names=['Model:', 'Predicted:']))\n",
+ "df = pd.DataFrame(\n",
+ " [[38.0, 2.0, 18.0, 22.0, 21, np.nan], [19, 439, 6, 452, 226, 232]],\n",
+ " index=pd.Index(\n",
+ " [\"Tumour (Positive)\", \"Non-Tumour (Negative)\"], name=\"Actual Label:\"\n",
+ " ),\n",
+ " columns=pd.MultiIndex.from_product(\n",
+ " [[\"Decision Tree\", \"Regression\", \"Random\"], [\"Tumour\", \"Non-Tumour\"]],\n",
+ " names=[\"Model:\", \"Predicted:\"],\n",
+ " ),\n",
+ ")\n",
"df.style"
]
},
@@ -242,63 +247,66 @@
"outputs": [],
"source": [
"# Hidden cell to just create the below example: code is covered throughout the guide.\n",
- "s = df.style\\\n",
- " .hide([('Random', 'Tumour'), ('Random', 'Non-Tumour')], axis='columns')\\\n",
- " .format('{:.0f}')\\\n",
- " .set_table_styles([{\n",
- " 'selector': '',\n",
- " 'props': 'border-collapse: separate;'\n",
- " },{\n",
- " 'selector': 'caption',\n",
- " 'props': 'caption-side: bottom; font-size:1.3em;'\n",
- " },{\n",
- " 'selector': '.index_name',\n",
- " 'props': 'font-style: italic; color: darkgrey; font-weight:normal;'\n",
- " },{\n",
- " 'selector': 'th:not(.index_name)',\n",
- " 'props': 'background-color: #000066; color: white;'\n",
- " },{\n",
- " 'selector': 'th.col_heading',\n",
- " 'props': 'text-align: center;'\n",
- " },{\n",
- " 'selector': 'th.col_heading.level0',\n",
- " 'props': 'font-size: 1.5em;'\n",
- " },{\n",
- " 'selector': 'th.col2',\n",
- " 'props': 'border-left: 1px solid white;'\n",
- " },{\n",
- " 'selector': '.col2',\n",
- " 'props': 'border-left: 1px solid #000066;'\n",
- " },{\n",
- " 'selector': 'td',\n",
- " 'props': 'text-align: center; font-weight:bold;'\n",
- " },{\n",
- " 'selector': '.true',\n",
- " 'props': 'background-color: #e6ffe6;'\n",
- " },{\n",
- " 'selector': '.false',\n",
- " 'props': 'background-color: #ffe6e6;'\n",
- " },{\n",
- " 'selector': '.border-red',\n",
- " 'props': 'border: 2px dashed red;'\n",
- " },{\n",
- " 'selector': '.border-green',\n",
- " 'props': 'border: 2px dashed green;'\n",
- " },{\n",
- " 'selector': 'td:hover',\n",
- " 'props': 'background-color: #ffffb3;'\n",
- " }])\\\n",
- " .set_td_classes(pd.DataFrame([['true border-green', 'false', 'true', 'false border-red', '', ''],\n",
- " ['false', 'true', 'false', 'true', '', '']], \n",
- " index=df.index, columns=df.columns))\\\n",
- " .set_caption(\"Confusion matrix for multiple cancer prediction models.\")\\\n",
- " .set_tooltips(pd.DataFrame([['This model has a very strong true positive rate', '', '', \"This model's total number of false negatives is too high\", '', ''],\n",
- " ['', '', '', '', '', '']], \n",
- " index=df.index, columns=df.columns),\n",
- " css_class='pd-tt', props=\n",
- " 'visibility: hidden; position: absolute; z-index: 1; border: 1px solid #000066;'\n",
- " 'background-color: white; color: #000066; font-size: 0.8em;' \n",
- " 'transform: translate(0px, -24px); padding: 0.6em; border-radius: 0.5em;')\n"
+ "s = (\n",
+ " df.style.hide([(\"Random\", \"Tumour\"), (\"Random\", \"Non-Tumour\")], axis=\"columns\")\n",
+ " .format(\"{:.0f}\")\n",
+ " .set_table_styles(\n",
+ " [\n",
+ " {\"selector\": \"\", \"props\": \"border-collapse: separate;\"},\n",
+ " {\"selector\": \"caption\", \"props\": \"caption-side: bottom; font-size:1.3em;\"},\n",
+ " {\n",
+ " \"selector\": \".index_name\",\n",
+ " \"props\": \"font-style: italic; color: darkgrey; font-weight:normal;\",\n",
+ " },\n",
+ " {\n",
+ " \"selector\": \"th:not(.index_name)\",\n",
+ " \"props\": \"background-color: #000066; color: white;\",\n",
+ " },\n",
+ " {\"selector\": \"th.col_heading\", \"props\": \"text-align: center;\"},\n",
+ " {\"selector\": \"th.col_heading.level0\", \"props\": \"font-size: 1.5em;\"},\n",
+ " {\"selector\": \"th.col2\", \"props\": \"border-left: 1px solid white;\"},\n",
+ " {\"selector\": \".col2\", \"props\": \"border-left: 1px solid #000066;\"},\n",
+ " {\"selector\": \"td\", \"props\": \"text-align: center; font-weight:bold;\"},\n",
+ " {\"selector\": \".true\", \"props\": \"background-color: #e6ffe6;\"},\n",
+ " {\"selector\": \".false\", \"props\": \"background-color: #ffe6e6;\"},\n",
+ " {\"selector\": \".border-red\", \"props\": \"border: 2px dashed red;\"},\n",
+ " {\"selector\": \".border-green\", \"props\": \"border: 2px dashed green;\"},\n",
+ " {\"selector\": \"td:hover\", \"props\": \"background-color: #ffffb3;\"},\n",
+ " ]\n",
+ " )\n",
+ " .set_td_classes(\n",
+ " pd.DataFrame(\n",
+ " [\n",
+ " [\"true border-green\", \"false\", \"true\", \"false border-red\", \"\", \"\"],\n",
+ " [\"false\", \"true\", \"false\", \"true\", \"\", \"\"],\n",
+ " ],\n",
+ " index=df.index,\n",
+ " columns=df.columns,\n",
+ " )\n",
+ " )\n",
+ " .set_caption(\"Confusion matrix for multiple cancer prediction models.\")\n",
+ " .set_tooltips(\n",
+ " pd.DataFrame(\n",
+ " [\n",
+ " [\n",
+ " \"This model has a very strong true positive rate\",\n",
+ " \"\",\n",
+ " \"\",\n",
+ " \"This model's total number of false negatives is too high\",\n",
+ " \"\",\n",
+ " \"\",\n",
+ " ],\n",
+ " [\"\", \"\", \"\", \"\", \"\", \"\"],\n",
+ " ],\n",
+ " index=df.index,\n",
+ " columns=df.columns,\n",
+ " ),\n",
+ " css_class=\"pd-tt\",\n",
+ " props=\"visibility: hidden; position: absolute; z-index: 1; border: 1px solid #000066;\"\n",
+ " \"background-color: white; color: #000066; font-size: 0.8em;\"\n",
+ " \"transform: translate(0px, -24px); padding: 0.6em; border-radius: 0.5em;\",\n",
+ " )\n",
+ ")"
]
},
{
@@ -325,7 +333,9 @@
"metadata": {},
"outputs": [],
"source": [
- "s = df.style.format('{:.0f}').hide([('Random', 'Tumour'), ('Random', 'Non-Tumour')], axis=\"columns\")\n",
+ "s = df.style.format(\"{:.0f}\").hide(\n",
+ " [(\"Random\", \"Tumour\"), (\"Random\", \"Non-Tumour\")], axis=\"columns\"\n",
+ ")\n",
"s"
]
},
@@ -337,8 +347,8 @@
},
"outputs": [],
"source": [
- "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting \n",
- "s.set_uuid('after_hide')"
+ "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting\n",
+ "s.set_uuid(\"after_hide\")"
]
},
{
@@ -395,16 +405,16 @@
"outputs": [],
"source": [
"cell_hover = { # for row hover use
instead of \n",
- " 'selector': 'td:hover',\n",
- " 'props': [('background-color', '#ffffb3')]\n",
+ " \"selector\": \"td:hover\",\n",
+ " \"props\": [(\"background-color\", \"#ffffb3\")],\n",
"}\n",
"index_names = {\n",
- " 'selector': '.index_name',\n",
- " 'props': 'font-style: italic; color: darkgrey; font-weight:normal;'\n",
+ " \"selector\": \".index_name\",\n",
+ " \"props\": \"font-style: italic; color: darkgrey; font-weight:normal;\",\n",
"}\n",
"headers = {\n",
- " 'selector': 'th:not(.index_name)',\n",
- " 'props': 'background-color: #000066; color: white;'\n",
+ " \"selector\": \"th:not(.index_name)\",\n",
+ " \"props\": \"background-color: #000066; color: white;\",\n",
"}\n",
"s.set_table_styles([cell_hover, index_names, headers])"
]
@@ -417,8 +427,8 @@
},
"outputs": [],
"source": [
- "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting \n",
- "s.set_uuid('after_tab_styles1')"
+ "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting\n",
+ "s.set_uuid(\"after_tab_styles1\")"
]
},
{
@@ -434,11 +444,14 @@
"metadata": {},
"outputs": [],
"source": [
- "s.set_table_styles([\n",
- " {'selector': 'th.col_heading', 'props': 'text-align: center;'},\n",
- " {'selector': 'th.col_heading.level0', 'props': 'font-size: 1.5em;'},\n",
- " {'selector': 'td', 'props': 'text-align: center; font-weight: bold;'},\n",
- "], overwrite=False)"
+ "s.set_table_styles(\n",
+ " [\n",
+ " {\"selector\": \"th.col_heading\", \"props\": \"text-align: center;\"},\n",
+ " {\"selector\": \"th.col_heading.level0\", \"props\": \"font-size: 1.5em;\"},\n",
+ " {\"selector\": \"td\", \"props\": \"text-align: center; font-weight: bold;\"},\n",
+ " ],\n",
+ " overwrite=False,\n",
+ ")"
]
},
{
@@ -449,8 +462,8 @@
},
"outputs": [],
"source": [
- "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting \n",
- "s.set_uuid('after_tab_styles2')"
+ "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting\n",
+ "s.set_uuid(\"after_tab_styles2\")"
]
},
{
@@ -468,10 +481,16 @@
"metadata": {},
"outputs": [],
"source": [
- "s.set_table_styles({\n",
- " ('Regression', 'Tumour'): [{'selector': 'th', 'props': 'border-left: 1px solid white'},\n",
- " {'selector': 'td', 'props': 'border-left: 1px solid #000066'}]\n",
- "}, overwrite=False, axis=0)"
+ "s.set_table_styles(\n",
+ " {\n",
+ " (\"Regression\", \"Tumour\"): [\n",
+ " {\"selector\": \"th\", \"props\": \"border-left: 1px solid white\"},\n",
+ " {\"selector\": \"td\", \"props\": \"border-left: 1px solid #000066\"},\n",
+ " ]\n",
+ " },\n",
+ " overwrite=False,\n",
+ " axis=0,\n",
+ ")"
]
},
{
@@ -482,8 +501,8 @@
},
"outputs": [],
"source": [
- "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting \n",
- "s.set_uuid('xyz01')"
+ "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting\n",
+ "s.set_uuid(\"xyz01\")"
]
},
{
@@ -508,7 +527,7 @@
"outputs": [],
"source": [
"out = s.set_table_attributes('class=\"my-table-cls\"').to_html()\n",
- "print(out[out.find(' -0.3) else None)\n",
+ "\n",
+ "\n",
+ "s2 = df2.style.map(style_negative, props=\"color:red;\").map(\n",
+ " lambda v: \"opacity: 20%;\" if (v < 0.3) and (v > -0.3) else None\n",
+ ")\n",
"s2"
]
},
@@ -612,8 +638,8 @@
},
"outputs": [],
"source": [
- "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting \n",
- "s2.set_uuid('after_applymap')"
+ "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting\n",
+ "s2.set_uuid(\"after_applymap\")"
]
},
{
@@ -629,9 +655,11 @@
"metadata": {},
"outputs": [],
"source": [
- "def highlight_max(s, props=''):\n",
- " return np.where(s == np.nanmax(s.values), props, '')\n",
- "s2.apply(highlight_max, props='color:white;background-color:darkblue', axis=0)"
+ "def highlight_max(s, props=\"\"):\n",
+ " return np.where(s == np.nanmax(s.values), props, \"\")\n",
+ "\n",
+ "\n",
+ "s2.apply(highlight_max, props=\"color:white;background-color:darkblue\", axis=0)"
]
},
{
@@ -642,8 +670,8 @@
},
"outputs": [],
"source": [
- "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting \n",
- "s2.set_uuid('after_apply')"
+ "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting\n",
+ "s2.set_uuid(\"after_apply\")"
]
},
{
@@ -659,8 +687,9 @@
"metadata": {},
"outputs": [],
"source": [
- "s2.apply(highlight_max, props='color:white;background-color:pink;', axis=1)\\\n",
- " .apply(highlight_max, props='color:white;background-color:purple', axis=None)"
+ "s2.apply(highlight_max, props=\"color:white;background-color:pink;\", axis=1).apply(\n",
+ " highlight_max, props=\"color:white;background-color:purple\", axis=None\n",
+ ")"
]
},
{
@@ -671,8 +700,8 @@
},
"outputs": [],
"source": [
- "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting \n",
- "s2.set_uuid('after_apply_again')"
+ "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting\n",
+ "s2.set_uuid(\"after_apply_again\")"
]
},
{
@@ -713,8 +742,10 @@
"metadata": {},
"outputs": [],
"source": [
- "s2.map_index(lambda v: \"color:pink;\" if v>4 else \"color:darkblue;\", axis=0)\n",
- "s2.apply_index(lambda s: np.where(s.isin([\"A\", \"B\"]), \"color:pink;\", \"color:darkblue;\"), axis=1)"
+ "s2.map_index(lambda v: \"color:pink;\" if v > 4 else \"color:darkblue;\", axis=0)\n",
+ "s2.apply_index(\n",
+ " lambda s: np.where(s.isin([\"A\", \"B\"]), \"color:pink;\", \"color:darkblue;\"), axis=1\n",
+ ")"
]
},
{
@@ -734,11 +765,12 @@
"metadata": {},
"outputs": [],
"source": [
- "s.set_caption(\"Confusion matrix for multiple cancer prediction models.\")\\\n",
- " .set_table_styles([{\n",
- " 'selector': 'caption',\n",
- " 'props': 'caption-side: bottom; font-size:1.25em;'\n",
- " }], overwrite=False)"
+ "s.set_caption(\n",
+ " \"Confusion matrix for multiple cancer prediction models.\"\n",
+ ").set_table_styles(\n",
+ " [{\"selector\": \"caption\", \"props\": \"caption-side: bottom; font-size:1.25em;\"}],\n",
+ " overwrite=False,\n",
+ ")"
]
},
{
@@ -749,8 +781,8 @@
},
"outputs": [],
"source": [
- "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting \n",
- "s.set_uuid('after_caption')"
+ "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting\n",
+ "s.set_uuid(\"after_caption\")"
]
},
{
@@ -768,12 +800,22 @@
"metadata": {},
"outputs": [],
"source": [
- "tt = pd.DataFrame([['This model has a very strong true positive rate', \n",
- " \"This model's total number of false negatives is too high\"]], \n",
- " index=['Tumour (Positive)'], columns=df.columns[[0,3]])\n",
- "s.set_tooltips(tt, props='visibility: hidden; position: absolute; z-index: 1; border: 1px solid #000066;'\n",
- " 'background-color: white; color: #000066; font-size: 0.8em;' \n",
- " 'transform: translate(0px, -24px); padding: 0.6em; border-radius: 0.5em;')"
+ "tt = pd.DataFrame(\n",
+ " [\n",
+ " [\n",
+ " \"This model has a very strong true positive rate\",\n",
+ " \"This model's total number of false negatives is too high\",\n",
+ " ]\n",
+ " ],\n",
+ " index=[\"Tumour (Positive)\"],\n",
+ " columns=df.columns[[0, 3]],\n",
+ ")\n",
+ "s.set_tooltips(\n",
+ " tt,\n",
+ " props=\"visibility: hidden; position: absolute; z-index: 1; border: 1px solid #000066;\"\n",
+ " \"background-color: white; color: #000066; font-size: 0.8em;\"\n",
+ " \"transform: translate(0px, -24px); padding: 0.6em; border-radius: 0.5em;\",\n",
+ ")"
]
},
{
@@ -784,8 +826,8 @@
},
"outputs": [],
"source": [
- "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting \n",
- "s.set_uuid('after_tooltips')"
+ "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting\n",
+ "s.set_uuid(\"after_tooltips\")"
]
},
{
@@ -801,14 +843,18 @@
"metadata": {},
"outputs": [],
"source": [
- "s.set_table_styles([ # create internal CSS classes\n",
- " {'selector': '.border-red', 'props': 'border: 2px dashed red;'},\n",
- " {'selector': '.border-green', 'props': 'border: 2px dashed green;'},\n",
- "], overwrite=False)\n",
- "cell_border = pd.DataFrame([['border-green ', ' ', ' ', 'border-red '], \n",
- " [' ', ' ', ' ', ' ']], \n",
- " index=df.index, \n",
- " columns=df.columns[:4])\n",
+ "s.set_table_styles(\n",
+ " [ # create internal CSS classes\n",
+ " {\"selector\": \".border-red\", \"props\": \"border: 2px dashed red;\"},\n",
+ " {\"selector\": \".border-green\", \"props\": \"border: 2px dashed green;\"},\n",
+ " ],\n",
+ " overwrite=False,\n",
+ ")\n",
+ "cell_border = pd.DataFrame(\n",
+ " [[\"border-green \", \" \", \" \", \"border-red \"], [\" \", \" \", \" \", \" \"]],\n",
+ " index=df.index,\n",
+ " columns=df.columns[:4],\n",
+ ")\n",
"s.set_td_classes(cell_color + cell_border)"
]
},
@@ -820,8 +866,8 @@
},
"outputs": [],
"source": [
- "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting \n",
- "s.set_uuid('after_borders')"
+ "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting\n",
+ "s.set_uuid(\"after_borders\")"
]
},
{
@@ -847,9 +893,11 @@
"metadata": {},
"outputs": [],
"source": [
- "df3 = pd.DataFrame(np.random.randn(4,4), \n",
- " pd.MultiIndex.from_product([['A', 'B'], ['r1', 'r2']]),\n",
- " columns=['c1','c2','c3','c4'])\n",
+ "df3 = pd.DataFrame(\n",
+ " np.random.randn(4, 4),\n",
+ " pd.MultiIndex.from_product([[\"A\", \"B\"], [\"r1\", \"r2\"]]),\n",
+ " columns=[\"c1\", \"c2\", \"c3\", \"c4\"],\n",
+ ")\n",
"df3"
]
},
@@ -866,9 +914,10 @@
"metadata": {},
"outputs": [],
"source": [
- "slice_ = ['c3', 'c4']\n",
- "df3.style.apply(highlight_max, props='color:red;', axis=0, subset=slice_)\\\n",
- " .set_properties(**{'background-color': '#ffffb3'}, subset=slice_)"
+ "slice_ = [\"c3\", \"c4\"]\n",
+ "df3.style.apply(\n",
+ " highlight_max, props=\"color:red;\", axis=0, subset=slice_\n",
+ ").set_properties(**{\"background-color\": \"#ffffb3\"}, subset=slice_)"
]
},
{
@@ -885,9 +934,10 @@
"outputs": [],
"source": [
"idx = pd.IndexSlice\n",
- "slice_ = idx[idx[:,'r1'], idx['c2':'c4']]\n",
- "df3.style.apply(highlight_max, props='color:red;', axis=0, subset=slice_)\\\n",
- " .set_properties(**{'background-color': '#ffffb3'}, subset=slice_)"
+ "slice_ = idx[idx[:, \"r1\"], idx[\"c2\":\"c4\"]]\n",
+ "df3.style.apply(\n",
+ " highlight_max, props=\"color:red;\", axis=0, subset=slice_\n",
+ ").set_properties(**{\"background-color\": \"#ffffb3\"}, subset=slice_)"
]
},
{
@@ -903,9 +953,10 @@
"metadata": {},
"outputs": [],
"source": [
- "slice_ = idx[idx[:,'r2'], :]\n",
- "df3.style.apply(highlight_max, props='color:red;', axis=1, subset=slice_)\\\n",
- " .set_properties(**{'background-color': '#ffffb3'}, subset=slice_)"
+ "slice_ = idx[idx[:, \"r2\"], :]\n",
+ "df3.style.apply(\n",
+ " highlight_max, props=\"color:red;\", axis=1, subset=slice_\n",
+ ").set_properties(**{\"background-color\": \"#ffffb3\"}, subset=slice_)"
]
},
{
@@ -923,9 +974,10 @@
"metadata": {},
"outputs": [],
"source": [
- "slice_ = idx[idx[(df3['c1'] + df3['c3']) < -2.0], ['c2', 'c4']]\n",
- "df3.style.apply(highlight_max, props='color:red;', axis=1, subset=slice_)\\\n",
- " .set_properties(**{'background-color': '#ffffb3'}, subset=slice_)"
+ "slice_ = idx[idx[(df3[\"c1\"] + df3[\"c3\"]) < -2.0], [\"c2\", \"c4\"]]\n",
+ "df3.style.apply(\n",
+ " highlight_max, props=\"color:red;\", axis=1, subset=slice_\n",
+ ").set_properties(**{\"background-color\": \"#ffffb3\"}, subset=slice_)"
]
},
{
@@ -981,7 +1033,7 @@
"metadata": {},
"outputs": [],
"source": [
- "df4 = pd.DataFrame([[1,2],[3,4]])\n",
+ "df4 = pd.DataFrame([[1, 2], [3, 4]])\n",
"s4 = df4.style"
]
},
@@ -1003,6 +1055,7 @@
"outputs": [],
"source": [
"from pandas.io.formats.style import Styler\n",
+ "\n",
"s4 = Styler(df4, uuid_len=0, cell_ids=False)"
]
},
@@ -1053,7 +1106,7 @@
"metadata": {},
"outputs": [],
"source": [
- "df4.style.set_table_styles([{'selector': 'td.col1', 'props': props}])"
+ "df4.style.set_table_styles([{\"selector\": \"td.col1\", \"props\": props}])"
]
},
{
@@ -1082,9 +1135,11 @@
"metadata": {},
"outputs": [],
"source": [
- "df2.style.apply(highlight_max, props='color:white;background-color:darkblue;', axis=0)\\\n",
- " .apply(highlight_max, props='color:white;background-color:pink;', axis=1)\\\n",
- " .apply(highlight_max, props='color:white;background-color:purple', axis=None)"
+ "df2.style.apply(\n",
+ " highlight_max, props=\"color:white;background-color:darkblue;\", axis=0\n",
+ ").apply(highlight_max, props=\"color:white;background-color:pink;\", axis=1).apply(\n",
+ " highlight_max, props=\"color:white;background-color:purple\", axis=None\n",
+ ")"
]
},
{
@@ -1105,14 +1160,18 @@
"outputs": [],
"source": [
"build = lambda x: pd.DataFrame(x, index=df2.index, columns=df2.columns)\n",
- "cls1 = build(df2.apply(highlight_max, props='cls-1 ', axis=0))\n",
- "cls2 = build(df2.apply(highlight_max, props='cls-2 ', axis=1, result_type='expand').values)\n",
- "cls3 = build(highlight_max(df2, props='cls-3 '))\n",
- "df2.style.set_table_styles([\n",
- " {'selector': '.cls-1', 'props': 'color:white;background-color:darkblue;'},\n",
- " {'selector': '.cls-2', 'props': 'color:white;background-color:pink;'},\n",
- " {'selector': '.cls-3', 'props': 'color:white;background-color:purple;'}\n",
- "]).set_td_classes(cls1 + cls2 + cls3)"
+ "cls1 = build(df2.apply(highlight_max, props=\"cls-1 \", axis=0))\n",
+ "cls2 = build(\n",
+ " df2.apply(highlight_max, props=\"cls-2 \", axis=1, result_type=\"expand\").values\n",
+ ")\n",
+ "cls3 = build(highlight_max(df2, props=\"cls-3 \"))\n",
+ "df2.style.set_table_styles(\n",
+ " [\n",
+ " {\"selector\": \".cls-1\", \"props\": \"color:white;background-color:darkblue;\"},\n",
+ " {\"selector\": \".cls-2\", \"props\": \"color:white;background-color:pink;\"},\n",
+ " {\"selector\": \".cls-3\", \"props\": \"color:white;background-color:purple;\"},\n",
+ " ]\n",
+ ").set_td_classes(cls1 + cls2 + cls3)"
]
},
{
@@ -1152,10 +1211,14 @@
" \"blank\": \"\",\n",
"}\n",
"html = Styler(df4, uuid_len=0, cell_ids=False)\n",
- "html.set_table_styles([{'selector': 'td', 'props': props},\n",
- " {'selector': '.c1', 'props': 'color:green;'},\n",
- " {'selector': '.l0', 'props': 'color:blue;'}],\n",
- " css_class_names=my_css)\n",
+ "html.set_table_styles(\n",
+ " [\n",
+ " {\"selector\": \"td\", \"props\": props},\n",
+ " {\"selector\": \".c1\", \"props\": \"color:green;\"},\n",
+ " {\"selector\": \".l0\", \"props\": \"color:blue;\"},\n",
+ " ],\n",
+ " css_class_names=my_css,\n",
+ ")\n",
"print(html.to_html())"
]
},
@@ -1213,9 +1276,9 @@
"metadata": {},
"outputs": [],
"source": [
- "df2.iloc[0,2] = np.nan\n",
- "df2.iloc[4,3] = np.nan\n",
- "df2.loc[:4].style.highlight_null(color='yellow')"
+ "df2.iloc[0, 2] = np.nan\n",
+ "df2.iloc[4, 3] = np.nan\n",
+ "df2.loc[:4].style.highlight_null(color=\"yellow\")"
]
},
{
@@ -1231,7 +1294,9 @@
"metadata": {},
"outputs": [],
"source": [
- "df2.loc[:4].style.highlight_max(axis=1, props='color:white; font-weight:bold; background-color:darkblue;')"
+ "df2.loc[:4].style.highlight_max(\n",
+ " axis=1, props=\"color:white; font-weight:bold; background-color:darkblue;\"\n",
+ ")"
]
},
{
@@ -1249,7 +1314,9 @@
"outputs": [],
"source": [
"left = pd.Series([1.0, 0.0, 1.0], index=[\"A\", \"B\", \"D\"])\n",
- "df2.loc[:4].style.highlight_between(left=left, right=1.5, axis=1, props='color:white; background-color:purple;')"
+ "df2.loc[:4].style.highlight_between(\n",
+ " left=left, right=1.5, axis=1, props=\"color:white; background-color:purple;\"\n",
+ ")"
]
},
{
@@ -1266,7 +1333,7 @@
"metadata": {},
"outputs": [],
"source": [
- "df2.loc[:4].style.highlight_quantile(q_left=0.85, axis=None, color='yellow')"
+ "df2.loc[:4].style.highlight_quantile(q_left=0.85, axis=None, color=\"yellow\")"
]
},
{
@@ -1290,6 +1357,7 @@
"outputs": [],
"source": [
"import seaborn as sns\n",
+ "\n",
"cm = sns.light_palette(\"green\", as_cmap=True)\n",
"\n",
"df2.style.background_gradient(cmap=cm)"
@@ -1329,9 +1397,9 @@
"metadata": {},
"outputs": [],
"source": [
- "df2.loc[:4].style.set_properties(**{'background-color': 'black',\n",
- " 'color': 'lawngreen',\n",
- " 'border-color': 'white'})"
+ "df2.loc[:4].style.set_properties(\n",
+ " **{\"background-color\": \"black\", \"color\": \"lawngreen\", \"border-color\": \"white\"}\n",
+ ")"
]
},
{
@@ -1354,7 +1422,7 @@
"metadata": {},
"outputs": [],
"source": [
- "df2.style.bar(subset=['A', 'B'], color='#d65f5f')"
+ "df2.style.bar(subset=[\"A\", \"B\"], color=\"#d65f5f\")"
]
},
{
@@ -1372,10 +1440,15 @@
"metadata": {},
"outputs": [],
"source": [
- "df2.style.format('{:.3f}', na_rep=\"\")\\\n",
- " .bar(align=0, vmin=-2.5, vmax=2.5, cmap=\"bwr\", height=50,\n",
- " width=60, props=\"width: 120px; border-right: 1px solid black;\")\\\n",
- " .text_gradient(cmap=\"bwr\", vmin=-2.5, vmax=2.5)"
+ "df2.style.format(\"{:.3f}\", na_rep=\"\").bar(\n",
+ " align=0,\n",
+ " vmin=-2.5,\n",
+ " vmax=2.5,\n",
+ " cmap=\"bwr\",\n",
+ " height=50,\n",
+ " width=60,\n",
+ " props=\"width: 120px; border-right: 1px solid black;\",\n",
+ ").text_gradient(cmap=\"bwr\", vmin=-2.5, vmax=2.5)"
]
},
{
@@ -1398,10 +1471,10 @@
"from IPython.display import HTML\n",
"\n",
"# Test series\n",
- "test1 = pd.Series([-100,-60,-30,-20], name='All Negative')\n",
- "test2 = pd.Series([-10,-5,0,90], name='Both Pos and Neg')\n",
- "test3 = pd.Series([10,20,50,100], name='All Positive')\n",
- "test4 = pd.Series([100, 103, 101, 102], name='Large Positive')\n",
+ "test1 = pd.Series([-100, -60, -30, -20], name=\"All Negative\")\n",
+ "test2 = pd.Series([-10, -5, 0, 90], name=\"Both Pos and Neg\")\n",
+ "test3 = pd.Series([10, 20, 50, 100], name=\"All Positive\")\n",
+ "test4 = pd.Series([100, 103, 101, 102], name=\"Large Positive\")\n",
"\n",
"\n",
"head = \"\"\"\n",
@@ -1417,19 +1490,22 @@
"\n",
"\"\"\"\n",
"\n",
- "aligns = ['left', 'right', 'zero', 'mid', 'mean', 99]\n",
+ "aligns = [\"left\", \"right\", \"zero\", \"mid\", \"mean\", 99]\n",
"for align in aligns:\n",
" row = \"| {} | \".format(align)\n",
- " for series in [test1,test2,test3, test4]:\n",
+ " for series in [test1, test2, test3, test4]:\n",
" s = series.copy()\n",
- " s.name=''\n",
- " row += \"{} | \".format(s.to_frame().style.hide(axis='index').bar(align=align, \n",
- " color=['#d65f5f', '#5fba7d'], \n",
- " width=100).to_html()) #testn['width']\n",
- " row += ' '\n",
+ " s.name = \"\"\n",
+ " row += \"{} | \".format(\n",
+ " s.to_frame()\n",
+ " .style.hide(axis=\"index\")\n",
+ " .bar(align=align, color=[\"#d65f5f\", \"#5fba7d\"], width=100)\n",
+ " .to_html()\n",
+ " ) # testn['width']\n",
+ " row += \"\"\n",
" head += row\n",
- " \n",
- "head+= \"\"\"\n",
+ "\n",
+ "head += \"\"\"\n",
"\n",
" \"\"\""
]
@@ -1463,11 +1539,12 @@
"metadata": {},
"outputs": [],
"source": [
- "style1 = df2.style\\\n",
- " .map(style_negative, props='color:red;')\\\n",
- " .map(lambda v: 'opacity: 20%;' if (v < 0.3) and (v > -0.3) else None)\\\n",
- " .set_table_styles([{\"selector\": \"th\", \"props\": \"color: blue;\"}])\\\n",
- " .hide(axis=\"index\")\n",
+ "style1 = (\n",
+ " df2.style.map(style_negative, props=\"color:red;\")\n",
+ " .map(lambda v: \"opacity: 20%;\" if (v < 0.3) and (v > -0.3) else None)\n",
+ " .set_table_styles([{\"selector\": \"th\", \"props\": \"color: blue;\"}])\n",
+ " .hide(axis=\"index\")\n",
+ ")\n",
"style1"
]
},
@@ -1526,11 +1603,14 @@
"outputs": [],
"source": [
"from ipywidgets import widgets\n",
+ "\n",
+ "\n",
"@widgets.interact\n",
- "def f(h_neg=(0, 359, 1), h_pos=(0, 359), s=(0., 99.9), l=(0., 99.9)):\n",
+ "def f(h_neg=(0, 359, 1), h_pos=(0, 359), s=(0.0, 99.9), l=(0.0, 99.9)):\n",
" return df2.style.background_gradient(\n",
- " cmap=sns.palettes.diverging_palette(h_neg=h_neg, h_pos=h_pos, s=s, l=l,\n",
- " as_cmap=True)\n",
+ " cmap=sns.palettes.diverging_palette(\n",
+ " h_neg=h_neg, h_pos=h_pos, s=s, l=l, as_cmap=True\n",
+ " )\n",
" )"
]
},
@@ -1548,16 +1628,15 @@
"outputs": [],
"source": [
"def magnify():\n",
- " return [dict(selector=\"th\",\n",
- " props=[(\"font-size\", \"4pt\")]),\n",
- " dict(selector=\"td\",\n",
- " props=[('padding', \"0em 0em\")]),\n",
- " dict(selector=\"th:hover\",\n",
- " props=[(\"font-size\", \"12pt\")]),\n",
- " dict(selector=\"tr:hover td:hover\",\n",
- " props=[('max-width', '200px'),\n",
- " ('font-size', '12pt')])\n",
- "]"
+ " return [\n",
+ " dict(selector=\"th\", props=[(\"font-size\", \"4pt\")]),\n",
+ " dict(selector=\"td\", props=[(\"padding\", \"0em 0em\")]),\n",
+ " dict(selector=\"th:hover\", props=[(\"font-size\", \"12pt\")]),\n",
+ " dict(\n",
+ " selector=\"tr:hover td:hover\",\n",
+ " props=[(\"max-width\", \"200px\"), (\"font-size\", \"12pt\")],\n",
+ " ),\n",
+ " ]"
]
},
{
@@ -1567,14 +1646,12 @@
"outputs": [],
"source": [
"np.random.seed(25)\n",
- "cmap = cmap=sns.diverging_palette(5, 250, as_cmap=True)\n",
+ "cmap = cmap = sns.diverging_palette(5, 250, as_cmap=True)\n",
"bigdf = pd.DataFrame(np.random.randn(20, 25)).cumsum()\n",
"\n",
- "bigdf.style.background_gradient(cmap, axis=1)\\\n",
- " .set_properties(**{'max-width': '80px', 'font-size': '1pt'})\\\n",
- " .set_caption(\"Hover to magnify\")\\\n",
- " .format(precision=2)\\\n",
- " .set_table_styles(magnify())"
+ "bigdf.style.background_gradient(cmap, axis=1).set_properties(\n",
+ " **{\"max-width\": \"80px\", \"font-size\": \"1pt\"}\n",
+ ").set_caption(\"Hover to magnify\").format(precision=2).set_table_styles(magnify())"
]
},
{
@@ -1611,8 +1688,8 @@
"metadata": {},
"outputs": [],
"source": [
- "bigdf.index = pd.MultiIndex.from_product([[\"A\",\"B\"],[0,1],[0,1,2,3]])\n",
- "bigdf.style.set_sticky(axis=\"index\", pixel_size=18, levels=[1,2])"
+ "bigdf.index = pd.MultiIndex.from_product([[\"A\", \"B\"], [0, 1], [0, 1, 2, 3]])\n",
+ "bigdf.style.set_sticky(axis=\"index\", pixel_size=18, levels=[1, 2])"
]
},
{
@@ -1632,7 +1709,7 @@
"metadata": {},
"outputs": [],
"source": [
- "df4 = pd.DataFrame([['', '\"&other\"', '']])\n",
+ "df4 = pd.DataFrame([[\"\", '\"&other\"', \"\"]])\n",
"df4.style"
]
},
@@ -1651,7 +1728,9 @@
"metadata": {},
"outputs": [],
"source": [
- "df4.style.format('{}', escape=\"html\")"
+ "df4.style.format(\n",
+ " '{}', escape=\"html\"\n",
+ ")"
]
},
{
@@ -1693,10 +1772,9 @@
"metadata": {},
"outputs": [],
"source": [
- "df2.style.\\\n",
- " map(style_negative, props='color:red;').\\\n",
- " highlight_max(axis=0).\\\n",
- " to_excel('styled.xlsx', engine='openpyxl')"
+ "df2.style.map(style_negative, props=\"color:red;\").highlight_max(axis=0).to_excel(\n",
+ " \"styled.xlsx\", engine=\"openpyxl\"\n",
+ ")"
]
},
{
@@ -1765,7 +1843,11 @@
"metadata": {},
"outputs": [],
"source": [
- "print(pd.DataFrame([[1,2],[3,4]], index=['i1', 'i2'], columns=['c1', 'c2']).style.to_html())"
+ "print(\n",
+ " pd.DataFrame(\n",
+ " [[1, 2], [3, 4]], index=[\"i1\", \"i2\"], columns=[\"c1\", \"c2\"]\n",
+ " ).style.to_html()\n",
+ ")"
]
},
{
@@ -1783,9 +1865,8 @@
"metadata": {},
"outputs": [],
"source": [
- "df4 = pd.DataFrame([['text']])\n",
- "df4.style.map(lambda x: 'color:green;')\\\n",
- " .map(lambda x: 'color:red;')"
+ "df4 = pd.DataFrame([[\"text\"]])\n",
+ "df4.style.map(lambda x: \"color:green;\").map(lambda x: \"color:red;\")"
]
},
{
@@ -1794,8 +1875,7 @@
"metadata": {},
"outputs": [],
"source": [
- "df4.style.map(lambda x: 'color:red;')\\\n",
- " .map(lambda x: 'color:green;')"
+ "df4.style.map(lambda x: \"color:red;\").map(lambda x: \"color:green;\")"
]
},
{
@@ -1820,9 +1900,9 @@
"metadata": {},
"outputs": [],
"source": [
- "df4.style.set_uuid('a_')\\\n",
- " .set_table_styles([{'selector': 'td', 'props': 'color:red;'}])\\\n",
- " .map(lambda x: 'color:green;')"
+ "df4.style.set_uuid(\"a_\").set_table_styles(\n",
+ " [{\"selector\": \"td\", \"props\": \"color:red;\"}]\n",
+ ").map(lambda x: \"color:green;\")"
]
},
{
@@ -1838,11 +1918,12 @@
"metadata": {},
"outputs": [],
"source": [
- "df4.style.set_uuid('b_')\\\n",
- " .set_table_styles([{'selector': 'td', 'props': 'color:red;'},\n",
- " {'selector': '.cls-1', 'props': 'color:blue;'}])\\\n",
- " .map(lambda x: 'color:green;')\\\n",
- " .set_td_classes(pd.DataFrame([['cls-1']]))"
+ "df4.style.set_uuid(\"b_\").set_table_styles(\n",
+ " [\n",
+ " {\"selector\": \"td\", \"props\": \"color:red;\"},\n",
+ " {\"selector\": \".cls-1\", \"props\": \"color:blue;\"},\n",
+ " ]\n",
+ ").map(lambda x: \"color:green;\").set_td_classes(pd.DataFrame([[\"cls-1\"]]))"
]
},
{
@@ -1858,12 +1939,13 @@
"metadata": {},
"outputs": [],
"source": [
- "df4.style.set_uuid('c_')\\\n",
- " .set_table_styles([{'selector': 'td', 'props': 'color:red;'},\n",
- " {'selector': '.cls-1', 'props': 'color:blue;'},\n",
- " {'selector': 'td.data', 'props': 'color:yellow;'}])\\\n",
- " .map(lambda x: 'color:green;')\\\n",
- " .set_td_classes(pd.DataFrame([['cls-1']]))"
+ "df4.style.set_uuid(\"c_\").set_table_styles(\n",
+ " [\n",
+ " {\"selector\": \"td\", \"props\": \"color:red;\"},\n",
+ " {\"selector\": \".cls-1\", \"props\": \"color:blue;\"},\n",
+ " {\"selector\": \"td.data\", \"props\": \"color:yellow;\"},\n",
+ " ]\n",
+ ").map(lambda x: \"color:green;\").set_td_classes(pd.DataFrame([[\"cls-1\"]]))"
]
},
{
@@ -1881,12 +1963,13 @@
"metadata": {},
"outputs": [],
"source": [
- "df4.style.set_uuid('d_')\\\n",
- " .set_table_styles([{'selector': 'td', 'props': 'color:red;'},\n",
- " {'selector': '.cls-1', 'props': 'color:blue;'},\n",
- " {'selector': 'td.data', 'props': 'color:yellow;'}])\\\n",
- " .map(lambda x: 'color:green !important;')\\\n",
- " .set_td_classes(pd.DataFrame([['cls-1']]))"
+ "df4.style.set_uuid(\"d_\").set_table_styles(\n",
+ " [\n",
+ " {\"selector\": \"td\", \"props\": \"color:red;\"},\n",
+ " {\"selector\": \".cls-1\", \"props\": \"color:blue;\"},\n",
+ " {\"selector\": \"td.data\", \"props\": \"color:yellow;\"},\n",
+ " ]\n",
+ ").map(lambda x: \"color:green !important;\").set_td_classes(pd.DataFrame([[\"cls-1\"]]))"
]
},
{
@@ -1960,10 +2043,12 @@
"source": [
"class MyStyler(Styler):\n",
" env = Environment(\n",
- " loader=ChoiceLoader([\n",
- " FileSystemLoader(\"templates\"), # contains ours\n",
- " Styler.loader, # the default\n",
- " ])\n",
+ " loader=ChoiceLoader(\n",
+ " [\n",
+ " FileSystemLoader(\"templates\"), # contains ours\n",
+ " Styler.loader, # the default\n",
+ " ]\n",
+ " )\n",
" )\n",
" template_html_table = env.get_template(\"myhtml.tpl\")"
]
@@ -2106,7 +2191,7 @@
"# from IPython.display import HTML\n",
"# with open(\"themes/nature_with_gtoc/static/nature.css_t\") as f:\n",
"# css = f.read()\n",
- " \n",
+ "\n",
"# HTML(''.format(css))"
]
}
diff --git a/pandas/_config/config.py b/pandas/_config/config.py
index 4ed2d4c3be692..25760df6bd7a4 100644
--- a/pandas/_config/config.py
+++ b/pandas/_config/config.py
@@ -411,7 +411,7 @@ def __dir__(self) -> list[str]:
@contextmanager
-def option_context(*args) -> Generator[None, None, None]:
+def option_context(*args) -> Generator[None]:
"""
Context manager to temporarily set options in a ``with`` statement.
@@ -718,7 +718,7 @@ def _build_option_description(k: str) -> str:
@contextmanager
-def config_prefix(prefix: str) -> Generator[None, None, None]:
+def config_prefix(prefix: str) -> Generator[None]:
"""
contextmanager for multiple invocations of API with a common prefix
diff --git a/pandas/_config/localization.py b/pandas/_config/localization.py
index 61d88c43f0e4a..6602633f20399 100644
--- a/pandas/_config/localization.py
+++ b/pandas/_config/localization.py
@@ -25,7 +25,7 @@
@contextmanager
def set_locale(
new_locale: str | tuple[str, str], lc_var: int = locale.LC_ALL
-) -> Generator[str | tuple[str, str], None, None]:
+) -> Generator[str | tuple[str, str]]:
"""
Context manager for temporarily setting a locale.
diff --git a/pandas/_testing/_warnings.py b/pandas/_testing/_warnings.py
index cd2e2b4141ffd..a752c8db90f38 100644
--- a/pandas/_testing/_warnings.py
+++ b/pandas/_testing/_warnings.py
@@ -35,7 +35,7 @@ def assert_produces_warning(
raise_on_extra_warnings: bool = True,
match: str | tuple[str | None, ...] | None = None,
must_find_all_warnings: bool = True,
-) -> Generator[list[warnings.WarningMessage], None, None]:
+) -> Generator[list[warnings.WarningMessage]]:
"""
Context manager for running code expected to either raise a specific warning,
multiple specific warnings, or not raise any warnings. Verifies that the code
diff --git a/pandas/_testing/contexts.py b/pandas/_testing/contexts.py
index 91b5d2a981bef..f041d8755bb64 100644
--- a/pandas/_testing/contexts.py
+++ b/pandas/_testing/contexts.py
@@ -29,7 +29,7 @@
@contextmanager
def decompress_file(
path: FilePath | BaseBuffer, compression: CompressionOptions
-) -> Generator[IO[bytes], None, None]:
+) -> Generator[IO[bytes]]:
"""
Open a compressed file and return a file object.
@@ -50,7 +50,7 @@ def decompress_file(
@contextmanager
-def set_timezone(tz: str) -> Generator[None, None, None]:
+def set_timezone(tz: str) -> Generator[None]:
"""
Context manager for temporarily setting a timezone.
@@ -91,7 +91,7 @@ def setTZ(tz) -> None:
@contextmanager
-def ensure_clean(filename=None) -> Generator[Any, None, None]:
+def ensure_clean(filename=None) -> Generator[Any]:
"""
Gets a temporary path and agrees to remove on close.
@@ -123,7 +123,7 @@ def ensure_clean(filename=None) -> Generator[Any, None, None]:
@contextmanager
-def with_csv_dialect(name: str, **kwargs) -> Generator[None, None, None]:
+def with_csv_dialect(name: str, **kwargs) -> Generator[None]:
"""
Context manager to temporarily register a CSV dialect for parsing CSV.
diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py
index 28985a1380bee..beaaa3f8ed3cc 100644
--- a/pandas/compat/pickle_compat.py
+++ b/pandas/compat/pickle_compat.py
@@ -131,7 +131,7 @@ def loads(
@contextlib.contextmanager
-def patch_pickle() -> Generator[None, None, None]:
+def patch_pickle() -> Generator[None]:
"""
Temporarily patch pickle to use our unpickler.
"""
diff --git a/pandas/core/_numba/kernels/min_max_.py b/pandas/core/_numba/kernels/min_max_.py
index 59d36732ebae6..6e57e62c13a6e 100644
--- a/pandas/core/_numba/kernels/min_max_.py
+++ b/pandas/core/_numba/kernels/min_max_.py
@@ -112,11 +112,9 @@ def grouped_min_max(
continue
if is_max:
- if val > output[lab]:
- output[lab] = val
+ output[lab] = max(val, output[lab])
else:
- if val < output[lab]:
- output[lab] = val
+ output[lab] = min(val, output[lab])
# Set labels that don't satisfy min_periods as np.nan
for lab, count in enumerate(nobs):
diff --git a/pandas/core/apply.py b/pandas/core/apply.py
index 5959156d11123..ac336713a70f3 100644
--- a/pandas/core/apply.py
+++ b/pandas/core/apply.py
@@ -806,7 +806,7 @@ def result_columns(self) -> Index:
@property
@abc.abstractmethod
- def series_generator(self) -> Generator[Series, None, None]:
+ def series_generator(self) -> Generator[Series]:
pass
@staticmethod
@@ -1131,7 +1131,7 @@ class FrameRowApply(FrameApply):
axis: AxisInt = 0
@property
- def series_generator(self) -> Generator[Series, None, None]:
+ def series_generator(self) -> Generator[Series]:
return (self.obj._ixs(i, axis=1) for i in range(len(self.columns)))
@staticmethod
@@ -1243,7 +1243,7 @@ def apply_broadcast(self, target: DataFrame) -> DataFrame:
return result.T
@property
- def series_generator(self) -> Generator[Series, None, None]:
+ def series_generator(self) -> Generator[Series]:
values = self.values
values = ensure_wrapped_if_datetimelike(values)
assert len(values) > 0
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 201c449185057..058145e34e455 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -2914,7 +2914,7 @@ def _generate_range(
offset: BaseOffset,
*,
unit: str,
-) -> Generator[Timestamp, None, None]:
+) -> Generator[Timestamp]:
"""
Generates a sequence of dates corresponding to the specified time
offset. Similar to dateutil.rrule except uses pandas DateOffset
diff --git a/pandas/core/common.py b/pandas/core/common.py
index ec0473a20458b..642f14442b53e 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -562,7 +562,7 @@ def convert_to_list_like(
@contextlib.contextmanager
def temp_setattr(
obj, attr: str, value, condition: bool = True
-) -> Generator[None, None, None]:
+) -> Generator[None]:
"""
Temporarily set attribute on an object.
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 6ba07b1761557..9b780e512a11d 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -683,9 +683,7 @@ def _maybe_promote(dtype: np.dtype, fill_value=np.nan):
elif dtype.kind == "f":
mst = np.min_scalar_type(fill_value)
- if mst > dtype:
- # e.g. mst is np.float64 and dtype is np.float32
- dtype = mst
+ dtype = max(mst, dtype)
elif dtype.kind == "c":
mst = np.min_scalar_type(fill_value)
@@ -718,9 +716,7 @@ def _maybe_promote(dtype: np.dtype, fill_value=np.nan):
elif dtype.kind == "c":
mst = np.min_scalar_type(fill_value)
- if mst > dtype:
- # e.g. mst is np.complex128 and dtype is np.complex64
- dtype = mst
+ dtype = max(mst, dtype)
else:
dtype = np.dtype(np.object_)
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index da80969b613cd..008fe9bdee5bd 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -865,7 +865,7 @@ def _unob_index_and_ids(
return unob_index, unob_ids
@final
- def get_group_levels(self) -> Generator[Index, None, None]:
+ def get_group_levels(self) -> Generator[Index]:
# Note: only called from _insert_inaxis_grouper, which
# is only called for BaseGrouper, never for BinGrouper
result_index = self.result_index
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index a4b92e70427ce..1c57e6fe1a952 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -2666,7 +2666,7 @@ def _reorder_ilevels(self, order) -> MultiIndex:
def _recode_for_new_levels(
self, new_levels, copy: bool = True
- ) -> Generator[np.ndarray, None, None]:
+ ) -> Generator[np.ndarray]:
if len(new_levels) > self.nlevels:
raise AssertionError(
f"Length of new_levels ({len(new_levels)}) "
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index dced92ba04520..d3ff7398864d5 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -388,7 +388,7 @@ def _split_op_result(self, result: ArrayLike) -> list[Block]:
return [nb]
@final
- def _split(self) -> Generator[Block, None, None]:
+ def _split(self) -> Generator[Block]:
"""
Split a block into a list of single-column blocks.
"""
diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py
index b96d5a59effce..2ee7d3948a70f 100644
--- a/pandas/core/internals/concat.py
+++ b/pandas/core/internals/concat.py
@@ -250,7 +250,7 @@ def _concat_homogeneous_fastpath(
def _get_combined_plan(
mgrs: list[BlockManager],
-) -> Generator[tuple[BlockPlacement, list[JoinUnit]], None, None]:
+) -> Generator[tuple[BlockPlacement, list[JoinUnit]]]:
max_len = mgrs[0].shape[0]
blknos_list = [mgr.blknos for mgr in mgrs]
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index aa4a785519051..a3738bb25f56c 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -856,7 +856,7 @@ def _slice_take_blocks_ax0(
*,
use_na_proxy: bool = False,
ref_inplace_op: bool = False,
- ) -> Generator[Block, None, None]:
+ ) -> Generator[Block]:
"""
Slice/take blocks along axis=0.
@@ -1731,7 +1731,7 @@ def unstack(self, unstacker, fill_value) -> BlockManager:
bm = BlockManager(new_blocks, [new_columns, new_index], verify_integrity=False)
return bm
- def to_iter_dict(self) -> Generator[tuple[str, Self], None, None]:
+ def to_iter_dict(self) -> Generator[tuple[str, Self]]:
"""
Yield a tuple of (str(dtype), BlockManager)
diff --git a/pandas/core/methods/to_dict.py b/pandas/core/methods/to_dict.py
index 84202a4fcc840..aea95e4684573 100644
--- a/pandas/core/methods/to_dict.py
+++ b/pandas/core/methods/to_dict.py
@@ -33,7 +33,7 @@
def create_data_for_split(
df: DataFrame, are_all_object_dtype_cols: bool, object_dtype_indices: list[int]
-) -> Generator[list, None, None]:
+) -> Generator[list]:
"""
Simple helper method to create data for to ``to_dict(orient="split")``
to create the main output data
diff --git a/pandas/io/excel/_odfreader.py b/pandas/io/excel/_odfreader.py
index f79417d11080d..62002e4844b32 100644
--- a/pandas/io/excel/_odfreader.py
+++ b/pandas/io/excel/_odfreader.py
@@ -142,8 +142,7 @@ def get_sheet_data(
empty_cells = 0
table_row.extend([value] * column_repeat)
- if max_row_len < len(table_row):
- max_row_len = len(table_row)
+ max_row_len = max(max_row_len, len(table_row))
row_repeat = self._get_row_repeat(sheet_row)
if len(table_row) == 0:
diff --git a/pandas/io/formats/css.py b/pandas/io/formats/css.py
index 0af04526ea96d..750c6dc1180e9 100644
--- a/pandas/io/formats/css.py
+++ b/pandas/io/formats/css.py
@@ -36,7 +36,7 @@ def _side_expander(prop_fmt: str) -> Callable:
def expand(
self: CSSResolver, prop: str, value: str
- ) -> Generator[tuple[str, str], None, None]:
+ ) -> Generator[tuple[str, str]]:
"""
Expand shorthand property into side-specific property (top, right, bottom, left)
@@ -83,7 +83,7 @@ def _border_expander(side: str = "") -> Callable:
def expand(
self: CSSResolver, prop: str, value: str
- ) -> Generator[tuple[str, str], None, None]:
+ ) -> Generator[tuple[str, str]]:
"""
Expand border into color, style, and width tuples
@@ -392,7 +392,7 @@ def _error() -> str:
size_fmt = f"{val:f}pt"
return size_fmt
- def atomize(self, declarations: Iterable) -> Generator[tuple[str, str], None, None]:
+ def atomize(self, declarations: Iterable) -> Generator[tuple[str, str]]:
for prop, value in declarations:
prop = prop.lower()
value = value.lower()
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index 9ad5ac83e9eae..5aecc6af712e5 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -1024,7 +1024,7 @@ def save_to_buffer(
@contextmanager
def _get_buffer(
buf: FilePath | WriteBuffer[str] | None, encoding: str | None = None
-) -> Generator[WriteBuffer[str], None, None] | Generator[StringIO, None, None]:
+) -> Generator[WriteBuffer[str]] | Generator[StringIO]:
"""
Context manager to open, yield and close buffer for filenames or Path-like
objects, otherwise yield buf unchanged.
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 99dd06568fa01..d424ba09804af 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -1119,7 +1119,7 @@ def _query_iterator(
coerce_float: bool = True,
parse_dates=None,
dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
- ) -> Generator[DataFrame, None, None]:
+ ) -> Generator[DataFrame]:
"""Return generator through chunked result set."""
has_read_data = False
with exit_stack:
@@ -1732,7 +1732,7 @@ def _query_iterator(
parse_dates=None,
dtype: DtypeArg | None = None,
dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
- ) -> Generator[DataFrame, None, None]:
+ ) -> Generator[DataFrame]:
"""Return generator through chunked result set"""
has_read_data = False
with exit_stack:
@@ -2682,7 +2682,7 @@ def _query_iterator(
parse_dates=None,
dtype: DtypeArg | None = None,
dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
- ) -> Generator[DataFrame, None, None]:
+ ) -> Generator[DataFrame]:
"""Return generator through chunked result set"""
has_read_data = False
while True:
diff --git a/pandas/plotting/_matplotlib/converter.py b/pandas/plotting/_matplotlib/converter.py
index fc63d65f1e160..29f9b3ee116fc 100644
--- a/pandas/plotting/_matplotlib/converter.py
+++ b/pandas/plotting/_matplotlib/converter.py
@@ -92,7 +92,7 @@ def wrapper(*args, **kwargs):
@contextlib.contextmanager
-def pandas_converters() -> Generator[None, None, None]:
+def pandas_converters() -> Generator[None]:
"""
Context manager registering pandas' converters for a plot.
diff --git a/pandas/plotting/_matplotlib/tools.py b/pandas/plotting/_matplotlib/tools.py
index f9c370b2486fd..d5624aecd1215 100644
--- a/pandas/plotting/_matplotlib/tools.py
+++ b/pandas/plotting/_matplotlib/tools.py
@@ -442,7 +442,7 @@ def handle_shared_axes(
_remove_labels_from_axis(ax.yaxis)
-def flatten_axes(axes: Axes | Iterable[Axes]) -> Generator[Axes, None, None]:
+def flatten_axes(axes: Axes | Iterable[Axes]) -> Generator[Axes]:
if not is_list_like(axes):
yield axes # type: ignore[misc]
elif isinstance(axes, (np.ndarray, ABCIndex)):
diff --git a/pandas/plotting/_misc.py b/pandas/plotting/_misc.py
index d8455f44ef0d1..1ebf1b16d419e 100644
--- a/pandas/plotting/_misc.py
+++ b/pandas/plotting/_misc.py
@@ -704,7 +704,7 @@ def _get_canonical_key(self, key: str) -> str:
return self._ALIASES.get(key, key)
@contextmanager
- def use(self, key, value) -> Generator[_Options, None, None]:
+ def use(self, key, value) -> Generator[_Options]:
"""
Temporarily set a parameter value using the with statement.
Aliasing allowed.
diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py
index d205569270705..d48a2d3c9c027 100644
--- a/pandas/tests/arithmetic/test_numeric.py
+++ b/pandas/tests/arithmetic/test_numeric.py
@@ -274,9 +274,7 @@ def test_numeric_arr_rdiv_tdscalar(self, three_days, numeric_idx, box_with_array
expected = TimedeltaIndex(["3 Days", "36 Hours"])
if isinstance(three_days, np.timedelta64):
dtype = three_days.dtype
- if dtype < np.dtype("m8[s]"):
- # i.e. resolution is lower -> use lowest supported resolution
- dtype = np.dtype("m8[s]")
+ dtype = max(dtype, np.dtype("m8[s]"))
expected = expected.astype(dtype)
elif type(three_days) is timedelta:
expected = expected.astype("m8[us]")
diff --git a/pandas/util/_exceptions.py b/pandas/util/_exceptions.py
index 5f50838d37315..f77f3f9083901 100644
--- a/pandas/util/_exceptions.py
+++ b/pandas/util/_exceptions.py
@@ -13,7 +13,7 @@
@contextlib.contextmanager
-def rewrite_exception(old_name: str, new_name: str) -> Generator[None, None, None]:
+def rewrite_exception(old_name: str, new_name: str) -> Generator[None]:
"""
Rewrite the message of an exception.
"""
@@ -66,7 +66,7 @@ def rewrite_warning(
target_category: type[Warning],
new_message: str,
new_category: type[Warning] | None = None,
-) -> Generator[None, None, None]:
+) -> Generator[None]:
"""
Rewrite the message of a warning.
From ba16174010f900a2114e998597307dc4c9e8fae1 Mon Sep 17 00:00:00 2001
From: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com>
Date: Tue, 3 Sep 2024 10:04:25 -0700
Subject: [PATCH 03/12] Try bumping mypy
---
environment.yml | 3 +--
requirements-dev.txt | 3 +--
2 files changed, 2 insertions(+), 4 deletions(-)
diff --git a/environment.yml b/environment.yml
index 34bc0591ca8df..78f32f5734b51 100644
--- a/environment.yml
+++ b/environment.yml
@@ -42,7 +42,6 @@ dependencies:
- numexpr>=2.8.4
- openpyxl>=3.1.0
- odfpy>=1.4.1
- - py
- psycopg2>=2.9.6
- pyarrow>=10.0.1
- pymysql>=1.0.2
@@ -77,7 +76,7 @@ dependencies:
# code checks
- flake8=6.1.0 # run in subprocess over docstring examples
- - mypy=1.9.0 # pre-commit uses locally installed mypy
+ - mypy=1.11.2 # pre-commit uses locally installed mypy
- tokenize-rt # scripts/check_for_inconsistent_pandas_namespace.py
- pre-commit>=3.6.0
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 52d2553fc4001..cfe40b14b75b7 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -31,7 +31,6 @@ numba>=0.56.4
numexpr>=2.8.4
openpyxl>=3.1.0
odfpy>=1.4.1
-py
psycopg2-binary>=2.9.6
pyarrow>=10.0.1
pymysql>=1.0.2
@@ -54,7 +53,7 @@ moto
flask
asv>=0.6.1
flake8==6.1.0
-mypy==1.9.0
+mypy==1.11.2
tokenize-rt
pre-commit>=3.6.0
gitpython
From 50481caa80b8c8d5487729577db68811fc7670a8 Mon Sep 17 00:00:00 2001
From: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com>
Date: Tue, 3 Sep 2024 10:27:29 -0700
Subject: [PATCH 04/12] Address new ruff checks
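Several notebook cells below also move from the legacy global-state np.random
calls to the numpy.random.Generator API. A minimal sketch of that substitution
(illustration only, not part of the diff):

    import numpy as np

    # Legacy API: module-level seeding plus np.random.randn(rows, cols)
    np.random.seed(25)
    legacy = np.random.randn(20, 25)

    # Generator API: an explicitly seeded rng; note that standard_normal
    # takes the output shape as a single tuple argument.
    rng = np.random.default_rng(2)
    modern = rng.standard_normal((20, 25))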
---
doc/source/user_guide/style.ipynb | 72 +++++++++++++++++-------------
pandas/core/arrays/categorical.py | 4 +-
pandas/core/arrays/datetimelike.py | 40 ++++++++---------
3 files changed, 64 insertions(+), 52 deletions(-)
diff --git a/doc/source/user_guide/style.ipynb b/doc/source/user_guide/style.ipynb
index 5f99af1a0aee1..bb8d76e98fac3 100644
--- a/doc/source/user_guide/style.ipynb
+++ b/doc/source/user_guide/style.ipynb
@@ -100,7 +100,7 @@
"outputs": [],
"source": [
"weather_df = pd.DataFrame(\n",
- " np.random.rand(10, 2) * 5,\n",
+ " np.random.default_rng(2).standard_normal(10, 2) * 5,\n",
" index=pd.date_range(start=\"2021-01-01\", periods=10),\n",
" columns=[\"Tokyo\", \"Beijing\"],\n",
")\n",
@@ -157,7 +157,7 @@
"metadata": {},
"outputs": [],
"source": [
- "df = pd.DataFrame(np.random.randn(5, 5))\n",
+ "df = pd.DataFrame(np.random.default_rng(2).standard_normal(5, 5))\n",
"df.style.hide(subset=[0, 2, 4], axis=0).hide(subset=[0, 2, 4], axis=1)"
]
},
@@ -302,9 +302,16 @@
" columns=df.columns,\n",
" ),\n",
" css_class=\"pd-tt\",\n",
- " props=\"visibility: hidden; position: absolute; z-index: 1; border: 1px solid #000066;\"\n",
- " \"background-color: white; color: #000066; font-size: 0.8em;\"\n",
- " \"transform: translate(0px, -24px); padding: 0.6em; border-radius: 0.5em;\",\n",
+ " props=\"visibility: hidden;\"\n",
+ " \"position: absolute;\"\n",
+ " \"z-index: 1;\"\n",
+ " \"border: 1px solid #000066;\"\n",
+ " \"background-color: white;\"\n",
+ " \"color: #000066;\"\n",
+ " \"font-size: 0.8em;\"\n",
+ " \"transform: translate(0px, -24px);\"\n",
+ " \"padding: 0.6em;\"\n",
+ " \"border-radius: 0.5em;\",\n",
" )\n",
")"
]
@@ -602,8 +609,9 @@
"metadata": {},
"outputs": [],
"source": [
- "np.random.seed(0)\n",
- "df2 = pd.DataFrame(np.random.randn(10, 4), columns=[\"A\", \"B\", \"C\", \"D\"])\n",
+ "df2 = pd.DataFrame(\n",
+ " np.random.default_rng(2).standard_normal(10, 4), columns=[\"A\", \"B\", \"C\", \"D\"]\n",
+ ")\n",
"df2.style"
]
},
@@ -812,9 +820,14 @@
")\n",
"s.set_tooltips(\n",
" tt,\n",
- " props=\"visibility: hidden; position: absolute; z-index: 1; border: 1px solid #000066;\"\n",
- " \"background-color: white; color: #000066; font-size: 0.8em;\"\n",
- " \"transform: translate(0px, -24px); padding: 0.6em; border-radius: 0.5em;\",\n",
+ " props=\"visibility: hidden;\"\n",
+ " \"position: absolute; z-index: 1;\"\n",
+ " \"border: 1px solid #000066;\"\n",
+ " \"background-color: white;\"\n",
+ " \"color: #000066;\"\n",
+ " \"font-size: 0.8em;\"\n",
+ " \"transform: translate(0px, -24px);\"\n",
+ " \"padding: 0.6em; border-radius: 0.5em;\",\n",
")"
]
},
@@ -894,7 +907,7 @@
"outputs": [],
"source": [
"df3 = pd.DataFrame(\n",
- " np.random.randn(4, 4),\n",
+ " np.random.default_rng(2).standard_normal(4, 4),\n",
" pd.MultiIndex.from_product([[\"A\", \"B\"], [\"r1\", \"r2\"]]),\n",
" columns=[\"c1\", \"c2\", \"c3\", \"c4\"],\n",
")\n",
@@ -1606,10 +1619,10 @@
"\n",
"\n",
"@widgets.interact\n",
- "def f(h_neg=(0, 359, 1), h_pos=(0, 359), s=(0.0, 99.9), l=(0.0, 99.9)):\n",
+ "def f(h_neg=(0, 359, 1), h_pos=(0, 359), s=(0.0, 99.9), l_var=(0.0, 99.9)):\n",
" return df2.style.background_gradient(\n",
" cmap=sns.palettes.diverging_palette(\n",
- " h_neg=h_neg, h_pos=h_pos, s=s, l=l, as_cmap=True\n",
+ " h_neg=h_neg, h_pos=h_pos, s=s, l=l_var, as_cmap=True\n",
" )\n",
" )"
]
@@ -1629,13 +1642,13 @@
"source": [
"def magnify():\n",
" return [\n",
- " dict(selector=\"th\", props=[(\"font-size\", \"4pt\")]),\n",
- " dict(selector=\"td\", props=[(\"padding\", \"0em 0em\")]),\n",
- " dict(selector=\"th:hover\", props=[(\"font-size\", \"12pt\")]),\n",
- " dict(\n",
- " selector=\"tr:hover td:hover\",\n",
- " props=[(\"max-width\", \"200px\"), (\"font-size\", \"12pt\")],\n",
- " ),\n",
+ " {\"selector\": \"th\", \"props\": [(\"font-size\", \"4pt\")]},\n",
+ " {\"selector\": \"td\", \"props\": [(\"padding\", \"0em 0em\")]},\n",
+ " {\"selector\": \"th:hover\", \"props\": [(\"font-size\", \"12pt\")]},\n",
+ " {\n",
+ " \"selector\": \"tr:hover td:hover\",\n",
+ " \"props\": [(\"max-width\", \"200px\"), (\"font-size\", \"12pt\")],\n",
+ " },\n",
" ]"
]
},
@@ -1645,9 +1658,8 @@
"metadata": {},
"outputs": [],
"source": [
- "np.random.seed(25)\n",
- "cmap = cmap = sns.diverging_palette(5, 250, as_cmap=True)\n",
- "bigdf = pd.DataFrame(np.random.randn(20, 25)).cumsum()\n",
+ "cmap = sns.diverging_palette(5, 250, as_cmap=True)\n",
+ "bigdf = pd.DataFrame(np.random.default_rng(2).standard_normal(20, 25)).cumsum()\n",
"\n",
"bigdf.style.background_gradient(cmap, axis=1).set_properties(\n",
" **{\"max-width\": \"80px\", \"font-size\": \"1pt\"}\n",
@@ -1671,7 +1683,7 @@
"metadata": {},
"outputs": [],
"source": [
- "bigdf = pd.DataFrame(np.random.randn(16, 100))\n",
+ "bigdf = pd.DataFrame(np.random.default_rng(2).standard_normal(16, 100))\n",
"bigdf.style.set_sticky(axis=\"index\")"
]
},
@@ -2023,8 +2035,8 @@
"metadata": {},
"outputs": [],
"source": [
- "with open(\"templates/myhtml.tpl\") as f:\n",
- " print(f.read())"
+ "with open(\"templates/myhtml.tpl\") as fle:\n",
+ " print(fle.read())"
]
},
{
@@ -2130,8 +2142,8 @@
},
"outputs": [],
"source": [
- "with open(\"templates/html_style_structure.html\") as f:\n",
- " style_structure = f.read()"
+ "with open(\"templates/html_style_structure.html\") as fl:\n",
+ " style_structure = fl.read()"
]
},
{
@@ -2158,8 +2170,8 @@
},
"outputs": [],
"source": [
- "with open(\"templates/html_table_structure.html\") as f:\n",
- " table_structure = f.read()"
+ "with open(\"templates/html_table_structure.html\") as f_tbl:\n",
+ " table_structure = f_tbl.read()"
]
},
{
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index c613a345686cc..223f3104464ad 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -569,8 +569,8 @@ def astype(self, dtype: AstypeArg, copy: bool = True) -> ArrayLike:
elif isinstance(dtype, CategoricalDtype):
# GH 10696/18593/18630
dtype = self.dtype.update_dtype(dtype)
- self = self.copy() if copy else self
- result = self._set_dtype(dtype)
+ result = self.copy() if copy else self
+ result = result._set_dtype(dtype)
elif isinstance(dtype, ExtensionDtype):
return super().astype(dtype, copy=copy)
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index fbe1677b95b33..1610a2b8e6cd1 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -454,7 +454,7 @@ def astype(self, dtype, copy: bool = True):
if dtype == object:
if self.dtype.kind == "M":
- self = cast("DatetimeArray", self)
+ self = cast("DatetimeArray", self) # noqa: PLW0642
# *much* faster than self._box_values
# for e.g. test_get_loc_tuple_monotonic_above_size_cutoff
i8data = self.asi8
@@ -776,7 +776,7 @@ def isin(self, values: ArrayLike) -> npt.NDArray[np.bool_]:
return np.zeros(self.shape, dtype=bool)
if self.dtype.kind in "mM":
- self = cast("DatetimeArray | TimedeltaArray", self)
+ self = cast("DatetimeArray | TimedeltaArray", self) # noqa: PLW0642
# error: "DatetimeLikeArrayMixin" has no attribute "as_unit"
values = values.as_unit(self.unit) # type: ignore[attr-defined]
@@ -977,7 +977,7 @@ def _cmp_method(self, other, op):
return result
if not isinstance(self.dtype, PeriodDtype):
- self = cast(TimelikeOps, self)
+ self = cast(TimelikeOps, self) # noqa: PLW0642
if self._creso != other._creso:
if not isinstance(other, type(self)):
# i.e. Timedelta/Timestamp, cast to ndarray and let
@@ -1063,7 +1063,7 @@ def _add_datetimelike_scalar(self, other) -> DatetimeArray:
f"cannot add {type(self).__name__} and {type(other).__name__}"
)
- self = cast("TimedeltaArray", self)
+ self = cast("TimedeltaArray", self) # noqa: PLW0642
from pandas.core.arrays import DatetimeArray
from pandas.core.arrays.datetimes import tz_to_dtype
@@ -1078,8 +1078,8 @@ def _add_datetimelike_scalar(self, other) -> DatetimeArray:
return DatetimeArray._simple_new(result, dtype=result.dtype)
other = Timestamp(other)
- self, other = self._ensure_matching_resos(other)
- self = cast("TimedeltaArray", self)
+ self, other = self._ensure_matching_resos(other) # noqa: PLW0642
+ self = cast("TimedeltaArray", self) # noqa: PLW0642
other_i8, o_mask = self._get_i8_values_and_mask(other)
result = add_overflowsafe(self.asi8, np.asarray(other_i8, dtype="i8"))
@@ -1107,7 +1107,7 @@ def _sub_datetimelike_scalar(
if self.dtype.kind != "M":
raise TypeError(f"cannot subtract a datelike from a {type(self).__name__}")
- self = cast("DatetimeArray", self)
+ self = cast("DatetimeArray", self) # noqa: PLW0642
# subtract a datetime from myself, yielding a ndarray[timedelta64[ns]]
if isna(other):
@@ -1116,7 +1116,7 @@ def _sub_datetimelike_scalar(
ts = Timestamp(other)
- self, ts = self._ensure_matching_resos(ts)
+ self, ts = self._ensure_matching_resos(ts) # noqa: PLW0642
return self._sub_datetimelike(ts)
@final
@@ -1127,14 +1127,14 @@ def _sub_datetime_arraylike(self, other: DatetimeArray) -> TimedeltaArray:
if len(self) != len(other):
raise ValueError("cannot add indices of unequal length")
- self = cast("DatetimeArray", self)
+ self = cast("DatetimeArray", self) # noqa: PLW0642
- self, other = self._ensure_matching_resos(other)
+ self, other = self._ensure_matching_resos(other) # noqa: PLW0642
return self._sub_datetimelike(other)
@final
def _sub_datetimelike(self, other: Timestamp | DatetimeArray) -> TimedeltaArray:
- self = cast("DatetimeArray", self)
+ self = cast("DatetimeArray", self) # noqa: PLW0642
from pandas.core.arrays import TimedeltaArray
@@ -1183,9 +1183,9 @@ def _add_timedeltalike_scalar(self, other):
return type(self)._simple_new(new_values, dtype=self.dtype)
# PeriodArray overrides, so we only get here with DTA/TDA
- self = cast("DatetimeArray | TimedeltaArray", self)
+ self = cast("DatetimeArray | TimedeltaArray", self) # noqa: PLW0642
other = Timedelta(other)
- self, other = self._ensure_matching_resos(other)
+ self, other = self._ensure_matching_resos(other) # noqa: PLW0642
return self._add_timedeltalike(other)
def _add_timedelta_arraylike(self, other: TimedeltaArray) -> Self:
@@ -1201,7 +1201,7 @@ def _add_timedelta_arraylike(self, other: TimedeltaArray) -> Self:
if len(self) != len(other):
raise ValueError("cannot add indices of unequal length")
- self, other = cast(
+ self, other = cast( # noqa: PLW0642
"DatetimeArray | TimedeltaArray", self
)._ensure_matching_resos(other)
return self._add_timedeltalike(other)
@@ -1258,7 +1258,7 @@ def _sub_nat(self) -> np.ndarray:
result.fill(iNaT)
if self.dtype.kind in "mM":
# We can retain unit in dtype
- self = cast("DatetimeArray| TimedeltaArray", self)
+ self = cast("DatetimeArray| TimedeltaArray", self) # noqa: PLW0642
return result.view(f"timedelta64[{self.unit}]")
else:
return result.view("timedelta64[ns]")
@@ -1272,7 +1272,7 @@ def _sub_periodlike(self, other: Period | PeriodArray) -> npt.NDArray[np.object_
f"cannot subtract {type(other).__name__} from {type(self).__name__}"
)
- self = cast("PeriodArray", self)
+ self = cast("PeriodArray", self) # noqa: PLW0642
self._check_compatible_with(other)
other_i8, o_mask = self._get_i8_values_and_mask(other)
@@ -1478,7 +1478,7 @@ def __rsub__(self, other):
# TODO: Can we simplify/generalize these cases at all?
raise TypeError(f"cannot subtract {type(self).__name__} from {other.dtype}")
elif lib.is_np_dtype(self.dtype, "m"):
- self = cast("TimedeltaArray", self)
+ self = cast("TimedeltaArray", self) # noqa: PLW0642
return (-self) + other
# We get here with e.g. datetime objects
@@ -1697,7 +1697,7 @@ def _groupby_op(
if isinstance(self.dtype, PeriodDtype):
raise TypeError("'std' and 'sem' are not valid for PeriodDtype")
- self = cast("DatetimeArray | TimedeltaArray", self)
+ self = cast("DatetimeArray | TimedeltaArray", self) # noqa: PLW0642
new_dtype = f"m8[{self.unit}]"
res_values = res_values.view(new_dtype)
return TimedeltaArray._simple_new(res_values, dtype=res_values.dtype)
@@ -2133,7 +2133,7 @@ def _ensure_matching_resos(self, other):
if self._creso != other._creso:
# Just as with Timestamp/Timedelta, we cast to the higher resolution
if self._creso < other._creso:
- self = self.as_unit(other.unit)
+ self = self.as_unit(other.unit) # noqa: PLW0642
else:
other = other.as_unit(self.unit)
return self, other
@@ -2155,7 +2155,7 @@ def _round(self, freq, mode, ambiguous, nonexistent):
# round the local times
if isinstance(self.dtype, DatetimeTZDtype):
# operate on naive timestamps, then convert back to aware
- self = cast("DatetimeArray", self)
+ self = cast("DatetimeArray", self) # noqa: PLW0642
naive = self.tz_localize(None)
result = naive._round(freq, mode, ambiguous, nonexistent)
return result.tz_localize(
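One caveat about the rng migration in the notebook hunks above: Generator.standard_normal takes the output shape as a single tuple, so a call spelled standard_normal(10, 2) passes 2 as the dtype argument and fails. A minimal sketch of the intended form, assuming a 10x2 draw is what the notebook wants:

    import numpy as np

    rng = np.random.default_rng(2)
    # The shape goes in as one tuple; rng.standard_normal(10, 2) would raise,
    # since the second positional argument is dtype, not a second dimension.
    sample = rng.standard_normal((10, 2))
    assert sample.shape == (10, 2)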
From 03a199864081f070b48445bdcb89d6fc3a61e211 Mon Sep 17 00:00:00 2001
From: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com>
Date: Tue, 3 Sep 2024 13:12:47 -0700
Subject: [PATCH 05/12] Revert "Address new ruff checks"
This reverts commit 50481caa80b8c8d5487729577db68811fc7670a8.
---
doc/source/user_guide/style.ipynb | 72 +++++++++++++-----------------
pandas/core/arrays/categorical.py | 4 +-
pandas/core/arrays/datetimelike.py | 40 ++++++++---------
3 files changed, 52 insertions(+), 64 deletions(-)
diff --git a/doc/source/user_guide/style.ipynb b/doc/source/user_guide/style.ipynb
index bb8d76e98fac3..5f99af1a0aee1 100644
--- a/doc/source/user_guide/style.ipynb
+++ b/doc/source/user_guide/style.ipynb
@@ -100,7 +100,7 @@
"outputs": [],
"source": [
"weather_df = pd.DataFrame(\n",
- " np.random.default_rng(2).standard_normal(10, 2) * 5,\n",
+ " np.random.rand(10, 2) * 5,\n",
" index=pd.date_range(start=\"2021-01-01\", periods=10),\n",
" columns=[\"Tokyo\", \"Beijing\"],\n",
")\n",
@@ -157,7 +157,7 @@
"metadata": {},
"outputs": [],
"source": [
- "df = pd.DataFrame(np.random.default_rng(2).standard_normal(5, 5))\n",
+ "df = pd.DataFrame(np.random.randn(5, 5))\n",
"df.style.hide(subset=[0, 2, 4], axis=0).hide(subset=[0, 2, 4], axis=1)"
]
},
@@ -302,16 +302,9 @@
" columns=df.columns,\n",
" ),\n",
" css_class=\"pd-tt\",\n",
- " props=\"visibility: hidden;\"\n",
- " \"position: absolute;\"\n",
- " \"z-index: 1;\"\n",
- " \"border: 1px solid #000066;\"\n",
- " \"background-color: white;\"\n",
- " \"color: #000066;\"\n",
- " \"font-size: 0.8em;\"\n",
- " \"transform: translate(0px, -24px);\"\n",
- " \"padding: 0.6em;\"\n",
- " \"border-radius: 0.5em;\",\n",
+ " props=\"visibility: hidden; position: absolute; z-index: 1; border: 1px solid #000066;\"\n",
+ " \"background-color: white; color: #000066; font-size: 0.8em;\"\n",
+ " \"transform: translate(0px, -24px); padding: 0.6em; border-radius: 0.5em;\",\n",
" )\n",
")"
]
@@ -609,9 +602,8 @@
"metadata": {},
"outputs": [],
"source": [
- "df2 = pd.DataFrame(\n",
- " np.random.default_rng(2).standard_normal(10, 4), columns=[\"A\", \"B\", \"C\", \"D\"]\n",
- ")\n",
+ "np.random.seed(0)\n",
+ "df2 = pd.DataFrame(np.random.randn(10, 4), columns=[\"A\", \"B\", \"C\", \"D\"])\n",
"df2.style"
]
},
@@ -820,14 +812,9 @@
")\n",
"s.set_tooltips(\n",
" tt,\n",
- " props=\"visibility: hidden;\"\n",
- " \"position: absolute; z-index: 1;\"\n",
- " \"border: 1px solid #000066;\"\n",
- " \"background-color: white;\"\n",
- " \"color: #000066;\"\n",
- " \"font-size: 0.8em;\"\n",
- " \"transform: translate(0px, -24px);\"\n",
- " \"padding: 0.6em; border-radius: 0.5em;\",\n",
+ " props=\"visibility: hidden; position: absolute; z-index: 1; border: 1px solid #000066;\"\n",
+ " \"background-color: white; color: #000066; font-size: 0.8em;\"\n",
+ " \"transform: translate(0px, -24px); padding: 0.6em; border-radius: 0.5em;\",\n",
")"
]
},
@@ -907,7 +894,7 @@
"outputs": [],
"source": [
"df3 = pd.DataFrame(\n",
- " np.random.default_rng(2).standard_normal(4, 4),\n",
+ " np.random.randn(4, 4),\n",
" pd.MultiIndex.from_product([[\"A\", \"B\"], [\"r1\", \"r2\"]]),\n",
" columns=[\"c1\", \"c2\", \"c3\", \"c4\"],\n",
")\n",
@@ -1619,10 +1606,10 @@
"\n",
"\n",
"@widgets.interact\n",
- "def f(h_neg=(0, 359, 1), h_pos=(0, 359), s=(0.0, 99.9), l_var=(0.0, 99.9)):\n",
+ "def f(h_neg=(0, 359, 1), h_pos=(0, 359), s=(0.0, 99.9), l=(0.0, 99.9)):\n",
" return df2.style.background_gradient(\n",
" cmap=sns.palettes.diverging_palette(\n",
- " h_neg=h_neg, h_pos=h_pos, s=s, l=l_var, as_cmap=True\n",
+ " h_neg=h_neg, h_pos=h_pos, s=s, l=l, as_cmap=True\n",
" )\n",
" )"
]
@@ -1642,13 +1629,13 @@
"source": [
"def magnify():\n",
" return [\n",
- " {\"selector\": \"th\", \"props\": [(\"font-size\", \"4pt\")]},\n",
- " {\"selector\": \"td\", \"props\": [(\"padding\", \"0em 0em\")]},\n",
- " {\"selector\": \"th:hover\", \"props\": [(\"font-size\", \"12pt\")]},\n",
- " {\n",
- " \"selector\": \"tr:hover td:hover\",\n",
- " \"props\": [(\"max-width\", \"200px\"), (\"font-size\", \"12pt\")],\n",
- " },\n",
+ " dict(selector=\"th\", props=[(\"font-size\", \"4pt\")]),\n",
+ " dict(selector=\"td\", props=[(\"padding\", \"0em 0em\")]),\n",
+ " dict(selector=\"th:hover\", props=[(\"font-size\", \"12pt\")]),\n",
+ " dict(\n",
+ " selector=\"tr:hover td:hover\",\n",
+ " props=[(\"max-width\", \"200px\"), (\"font-size\", \"12pt\")],\n",
+ " ),\n",
" ]"
]
},
@@ -1658,8 +1645,9 @@
"metadata": {},
"outputs": [],
"source": [
- "cmap = sns.diverging_palette(5, 250, as_cmap=True)\n",
- "bigdf = pd.DataFrame(np.random.default_rng(2).standard_normal(20, 25)).cumsum()\n",
+ "np.random.seed(25)\n",
+ "cmap = cmap = sns.diverging_palette(5, 250, as_cmap=True)\n",
+ "bigdf = pd.DataFrame(np.random.randn(20, 25)).cumsum()\n",
"\n",
"bigdf.style.background_gradient(cmap, axis=1).set_properties(\n",
" **{\"max-width\": \"80px\", \"font-size\": \"1pt\"}\n",
@@ -1683,7 +1671,7 @@
"metadata": {},
"outputs": [],
"source": [
- "bigdf = pd.DataFrame(np.random.default_rng(2).standard_normal(16, 100))\n",
+ "bigdf = pd.DataFrame(np.random.randn(16, 100))\n",
"bigdf.style.set_sticky(axis=\"index\")"
]
},
@@ -2035,8 +2023,8 @@
"metadata": {},
"outputs": [],
"source": [
- "with open(\"templates/myhtml.tpl\") as fle:\n",
- " print(fle.read())"
+ "with open(\"templates/myhtml.tpl\") as f:\n",
+ " print(f.read())"
]
},
{
@@ -2142,8 +2130,8 @@
},
"outputs": [],
"source": [
- "with open(\"templates/html_style_structure.html\") as fl:\n",
- " style_structure = fl.read()"
+ "with open(\"templates/html_style_structure.html\") as f:\n",
+ " style_structure = f.read()"
]
},
{
@@ -2170,8 +2158,8 @@
},
"outputs": [],
"source": [
- "with open(\"templates/html_table_structure.html\") as f_tbl:\n",
- " table_structure = f_tbl.read()"
+ "with open(\"templates/html_table_structure.html\") as f:\n",
+ " table_structure = f.read()"
]
},
{
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 223f3104464ad..c613a345686cc 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -569,8 +569,8 @@ def astype(self, dtype: AstypeArg, copy: bool = True) -> ArrayLike:
elif isinstance(dtype, CategoricalDtype):
# GH 10696/18593/18630
dtype = self.dtype.update_dtype(dtype)
- result = self.copy() if copy else self
- result = result._set_dtype(dtype)
+ self = self.copy() if copy else self
+ result = self._set_dtype(dtype)
elif isinstance(dtype, ExtensionDtype):
return super().astype(dtype, copy=copy)
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 1610a2b8e6cd1..fbe1677b95b33 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -454,7 +454,7 @@ def astype(self, dtype, copy: bool = True):
if dtype == object:
if self.dtype.kind == "M":
- self = cast("DatetimeArray", self) # noqa: PLW0642
+ self = cast("DatetimeArray", self)
# *much* faster than self._box_values
# for e.g. test_get_loc_tuple_monotonic_above_size_cutoff
i8data = self.asi8
@@ -776,7 +776,7 @@ def isin(self, values: ArrayLike) -> npt.NDArray[np.bool_]:
return np.zeros(self.shape, dtype=bool)
if self.dtype.kind in "mM":
- self = cast("DatetimeArray | TimedeltaArray", self) # noqa: PLW0642
+ self = cast("DatetimeArray | TimedeltaArray", self)
# error: "DatetimeLikeArrayMixin" has no attribute "as_unit"
values = values.as_unit(self.unit) # type: ignore[attr-defined]
@@ -977,7 +977,7 @@ def _cmp_method(self, other, op):
return result
if not isinstance(self.dtype, PeriodDtype):
- self = cast(TimelikeOps, self) # noqa: PLW0642
+ self = cast(TimelikeOps, self)
if self._creso != other._creso:
if not isinstance(other, type(self)):
# i.e. Timedelta/Timestamp, cast to ndarray and let
@@ -1063,7 +1063,7 @@ def _add_datetimelike_scalar(self, other) -> DatetimeArray:
f"cannot add {type(self).__name__} and {type(other).__name__}"
)
- self = cast("TimedeltaArray", self) # noqa: PLW0642
+ self = cast("TimedeltaArray", self)
from pandas.core.arrays import DatetimeArray
from pandas.core.arrays.datetimes import tz_to_dtype
@@ -1078,8 +1078,8 @@ def _add_datetimelike_scalar(self, other) -> DatetimeArray:
return DatetimeArray._simple_new(result, dtype=result.dtype)
other = Timestamp(other)
- self, other = self._ensure_matching_resos(other) # noqa: PLW0642
- self = cast("TimedeltaArray", self) # noqa: PLW0642
+ self, other = self._ensure_matching_resos(other)
+ self = cast("TimedeltaArray", self)
other_i8, o_mask = self._get_i8_values_and_mask(other)
result = add_overflowsafe(self.asi8, np.asarray(other_i8, dtype="i8"))
@@ -1107,7 +1107,7 @@ def _sub_datetimelike_scalar(
if self.dtype.kind != "M":
raise TypeError(f"cannot subtract a datelike from a {type(self).__name__}")
- self = cast("DatetimeArray", self) # noqa: PLW0642
+ self = cast("DatetimeArray", self)
# subtract a datetime from myself, yielding a ndarray[timedelta64[ns]]
if isna(other):
@@ -1116,7 +1116,7 @@ def _sub_datetimelike_scalar(
ts = Timestamp(other)
- self, ts = self._ensure_matching_resos(ts) # noqa: PLW0642
+ self, ts = self._ensure_matching_resos(ts)
return self._sub_datetimelike(ts)
@final
@@ -1127,14 +1127,14 @@ def _sub_datetime_arraylike(self, other: DatetimeArray) -> TimedeltaArray:
if len(self) != len(other):
raise ValueError("cannot add indices of unequal length")
- self = cast("DatetimeArray", self) # noqa: PLW0642
+ self = cast("DatetimeArray", self)
- self, other = self._ensure_matching_resos(other) # noqa: PLW0642
+ self, other = self._ensure_matching_resos(other)
return self._sub_datetimelike(other)
@final
def _sub_datetimelike(self, other: Timestamp | DatetimeArray) -> TimedeltaArray:
- self = cast("DatetimeArray", self) # noqa: PLW0642
+ self = cast("DatetimeArray", self)
from pandas.core.arrays import TimedeltaArray
@@ -1183,9 +1183,9 @@ def _add_timedeltalike_scalar(self, other):
return type(self)._simple_new(new_values, dtype=self.dtype)
# PeriodArray overrides, so we only get here with DTA/TDA
- self = cast("DatetimeArray | TimedeltaArray", self) # noqa: PLW0642
+ self = cast("DatetimeArray | TimedeltaArray", self)
other = Timedelta(other)
- self, other = self._ensure_matching_resos(other) # noqa: PLW0642
+ self, other = self._ensure_matching_resos(other)
return self._add_timedeltalike(other)
def _add_timedelta_arraylike(self, other: TimedeltaArray) -> Self:
@@ -1201,7 +1201,7 @@ def _add_timedelta_arraylike(self, other: TimedeltaArray) -> Self:
if len(self) != len(other):
raise ValueError("cannot add indices of unequal length")
- self, other = cast( # noqa: PLW0642
+ self, other = cast(
"DatetimeArray | TimedeltaArray", self
)._ensure_matching_resos(other)
return self._add_timedeltalike(other)
@@ -1258,7 +1258,7 @@ def _sub_nat(self) -> np.ndarray:
result.fill(iNaT)
if self.dtype.kind in "mM":
# We can retain unit in dtype
- self = cast("DatetimeArray| TimedeltaArray", self) # noqa: PLW0642
+ self = cast("DatetimeArray| TimedeltaArray", self)
return result.view(f"timedelta64[{self.unit}]")
else:
return result.view("timedelta64[ns]")
@@ -1272,7 +1272,7 @@ def _sub_periodlike(self, other: Period | PeriodArray) -> npt.NDArray[np.object_
f"cannot subtract {type(other).__name__} from {type(self).__name__}"
)
- self = cast("PeriodArray", self) # noqa: PLW0642
+ self = cast("PeriodArray", self)
self._check_compatible_with(other)
other_i8, o_mask = self._get_i8_values_and_mask(other)
@@ -1478,7 +1478,7 @@ def __rsub__(self, other):
# TODO: Can we simplify/generalize these cases at all?
raise TypeError(f"cannot subtract {type(self).__name__} from {other.dtype}")
elif lib.is_np_dtype(self.dtype, "m"):
- self = cast("TimedeltaArray", self) # noqa: PLW0642
+ self = cast("TimedeltaArray", self)
return (-self) + other
# We get here with e.g. datetime objects
@@ -1697,7 +1697,7 @@ def _groupby_op(
if isinstance(self.dtype, PeriodDtype):
raise TypeError("'std' and 'sem' are not valid for PeriodDtype")
- self = cast("DatetimeArray | TimedeltaArray", self) # noqa: PLW0642
+ self = cast("DatetimeArray | TimedeltaArray", self)
new_dtype = f"m8[{self.unit}]"
res_values = res_values.view(new_dtype)
return TimedeltaArray._simple_new(res_values, dtype=res_values.dtype)
@@ -2133,7 +2133,7 @@ def _ensure_matching_resos(self, other):
if self._creso != other._creso:
# Just as with Timestamp/Timedelta, we cast to the higher resolution
if self._creso < other._creso:
- self = self.as_unit(other.unit) # noqa: PLW0642
+ self = self.as_unit(other.unit)
else:
other = other.as_unit(self.unit)
return self, other
@@ -2155,7 +2155,7 @@ def _round(self, freq, mode, ambiguous, nonexistent):
# round the local times
if isinstance(self.dtype, DatetimeTZDtype):
# operate on naive timestamps, then convert back to aware
- self = cast("DatetimeArray", self) # noqa: PLW0642
+ self = cast("DatetimeArray", self)
naive = self.tz_localize(None)
result = naive._round(freq, mode, ambiguous, nonexistent)
return result.tz_localize(
From f7b6564624e7d14f9d0e94f3064fd48cd506fad3 Mon Sep 17 00:00:00 2001
From: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com>
Date: Tue, 3 Sep 2024 13:12:56 -0700
Subject: [PATCH 06/12] Revert "Try bumping mypy"
This reverts commit ba16174010f900a2114e998597307dc4c9e8fae1.
---
environment.yml | 3 ++-
requirements-dev.txt | 3 ++-
2 files changed, 4 insertions(+), 2 deletions(-)
diff --git a/environment.yml b/environment.yml
index 78f32f5734b51..34bc0591ca8df 100644
--- a/environment.yml
+++ b/environment.yml
@@ -42,6 +42,7 @@ dependencies:
- numexpr>=2.8.4
- openpyxl>=3.1.0
- odfpy>=1.4.1
+ - py
- psycopg2>=2.9.6
- pyarrow>=10.0.1
- pymysql>=1.0.2
@@ -76,7 +77,7 @@ dependencies:
# code checks
- flake8=6.1.0 # run in subprocess over docstring examples
- - mypy=1.11.2 # pre-commit uses locally installed mypy
+ - mypy=1.9.0 # pre-commit uses locally installed mypy
- tokenize-rt # scripts/check_for_inconsistent_pandas_namespace.py
- pre-commit>=3.6.0
diff --git a/requirements-dev.txt b/requirements-dev.txt
index cfe40b14b75b7..52d2553fc4001 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -31,6 +31,7 @@ numba>=0.56.4
numexpr>=2.8.4
openpyxl>=3.1.0
odfpy>=1.4.1
+py
psycopg2-binary>=2.9.6
pyarrow>=10.0.1
pymysql>=1.0.2
@@ -53,7 +54,7 @@ moto
flask
asv>=0.6.1
flake8==6.1.0
-mypy==1.11.2
+mypy==1.9.0
tokenize-rt
pre-commit>=3.6.0
gitpython
From 1f061d3d8146f445b708400c7bd170d038b56bda Mon Sep 17 00:00:00 2001
From: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com>
Date: Tue, 3 Sep 2024 13:27:40 -0700
Subject: [PATCH 07/12] Ignore renaming and equality changes
---
doc/source/user_guide/style.ipynb | 73 ++++++++++++++++++-------------
pyproject.toml | 4 ++
2 files changed, 47 insertions(+), 30 deletions(-)
diff --git a/doc/source/user_guide/style.ipynb b/doc/source/user_guide/style.ipynb
index 5f99af1a0aee1..03ea7065ae875 100644
--- a/doc/source/user_guide/style.ipynb
+++ b/doc/source/user_guide/style.ipynb
@@ -78,6 +78,8 @@
"import pandas as pd\n",
"import numpy as np\n",
"\n",
+ "rng = np.random.default_rng(2)\n",
+ "\n",
"df = pd.DataFrame(\n",
" {\"strings\": [\"Adam\", \"Mike\"], \"ints\": [1, 3], \"floats\": [1.123, 1000.23]}\n",
")\n",
@@ -100,7 +102,7 @@
"outputs": [],
"source": [
"weather_df = pd.DataFrame(\n",
- " np.random.rand(10, 2) * 5,\n",
+ " rng.standard_normal(10, 2) * 5,\n",
" index=pd.date_range(start=\"2021-01-01\", periods=10),\n",
" columns=[\"Tokyo\", \"Beijing\"],\n",
")\n",
@@ -157,7 +159,7 @@
"metadata": {},
"outputs": [],
"source": [
- "df = pd.DataFrame(np.random.randn(5, 5))\n",
+ "df = pd.DataFrame(rng.standard_normal(5, 5))\n",
"df.style.hide(subset=[0, 2, 4], axis=0).hide(subset=[0, 2, 4], axis=1)"
]
},
@@ -302,9 +304,16 @@
" columns=df.columns,\n",
" ),\n",
" css_class=\"pd-tt\",\n",
- " props=\"visibility: hidden; position: absolute; z-index: 1; border: 1px solid #000066;\"\n",
- " \"background-color: white; color: #000066; font-size: 0.8em;\"\n",
- " \"transform: translate(0px, -24px); padding: 0.6em; border-radius: 0.5em;\",\n",
+ " props=\"visibility: hidden;\"\n",
+ " \"position: absolute;\"\n",
+ " \"z-index: 1;\"\n",
+ " \"border: 1px solid #000066;\"\n",
+ " \"background-color: white;\"\n",
+ " \"color: #000066;\"\n",
+ " \"font-size: 0.8em;\"\n",
+ " \"transform: translate(0px, -24px);\"\n",
+ " \"padding: 0.6em;\"\n",
+ " \"border-radius: 0.5em;\",\n",
" )\n",
")"
]
@@ -602,8 +611,7 @@
"metadata": {},
"outputs": [],
"source": [
- "np.random.seed(0)\n",
- "df2 = pd.DataFrame(np.random.randn(10, 4), columns=[\"A\", \"B\", \"C\", \"D\"])\n",
+ "df2 = pd.DataFrame(rng.standard_normal(10, 4), columns=[\"A\", \"B\", \"C\", \"D\"])\n",
"df2.style"
]
},
@@ -812,9 +820,15 @@
")\n",
"s.set_tooltips(\n",
" tt,\n",
- " props=\"visibility: hidden; position: absolute; z-index: 1; border: 1px solid #000066;\"\n",
- " \"background-color: white; color: #000066; font-size: 0.8em;\"\n",
- " \"transform: translate(0px, -24px); padding: 0.6em; border-radius: 0.5em;\",\n",
+ " props=\"visibility: hidden;\"\n",
+ " \"position: absolute; z-index:\"\n",
+ " \"1; border: 1px solid #000066;\"\n",
+ " \"background-color: white;\"\n",
+ " \"color: #000066;\"\n",
+ " \"font-size: 0.8em;\"\n",
+ " \"transform: translate(0px, -24px);\"\n",
+ " \"padding: 0.6em;\"\n",
+ " \"border-radius: 0.5em;\",\n",
")"
]
},
@@ -894,7 +908,7 @@
"outputs": [],
"source": [
"df3 = pd.DataFrame(\n",
- " np.random.randn(4, 4),\n",
+ " rng.standard_normal(4, 4),\n",
" pd.MultiIndex.from_product([[\"A\", \"B\"], [\"r1\", \"r2\"]]),\n",
" columns=[\"c1\", \"c2\", \"c3\", \"c4\"],\n",
")\n",
@@ -1606,10 +1620,10 @@
"\n",
"\n",
"@widgets.interact\n",
- "def f(h_neg=(0, 359, 1), h_pos=(0, 359), s=(0.0, 99.9), l=(0.0, 99.9)):\n",
+ "def f(h_neg=(0, 359, 1), h_pos=(0, 359), s=(0.0, 99.9), l_var=(0.0, 99.9)):\n",
" return df2.style.background_gradient(\n",
" cmap=sns.palettes.diverging_palette(\n",
- " h_neg=h_neg, h_pos=h_pos, s=s, l=l, as_cmap=True\n",
+ " h_neg=h_neg, h_pos=h_pos, s=s, l=l_var, as_cmap=True\n",
" )\n",
" )"
]
@@ -1629,13 +1643,13 @@
"source": [
"def magnify():\n",
" return [\n",
- " dict(selector=\"th\", props=[(\"font-size\", \"4pt\")]),\n",
- " dict(selector=\"td\", props=[(\"padding\", \"0em 0em\")]),\n",
- " dict(selector=\"th:hover\", props=[(\"font-size\", \"12pt\")]),\n",
- " dict(\n",
- " selector=\"tr:hover td:hover\",\n",
- " props=[(\"max-width\", \"200px\"), (\"font-size\", \"12pt\")],\n",
- " ),\n",
+ " {\"selector\": \"th\", \"props\": [(\"font-size\", \"4pt\")]},\n",
+ " {\"selector\": \"td\", \"props\": [(\"padding\", \"0em 0em\")]},\n",
+ " {\"selector\": \"th:hover\", \"props\": [(\"font-size\", \"12pt\")]},\n",
+ " {\n",
+ " \"selector\": \"tr:hover td:hover\",\n",
+ " \"props\": [(\"max-width\", \"200px\"), (\"font-size\", \"12pt\")],\n",
+ " },\n",
" ]"
]
},
@@ -1645,9 +1659,8 @@
"metadata": {},
"outputs": [],
"source": [
- "np.random.seed(25)\n",
- "cmap = cmap = sns.diverging_palette(5, 250, as_cmap=True)\n",
- "bigdf = pd.DataFrame(np.random.randn(20, 25)).cumsum()\n",
+ "cmap = sns.diverging_palette(5, 250, as_cmap=True)\n",
+ "bigdf = pd.DataFrame(rng.standard_normal(20, 25)).cumsum()\n",
"\n",
"bigdf.style.background_gradient(cmap, axis=1).set_properties(\n",
" **{\"max-width\": \"80px\", \"font-size\": \"1pt\"}\n",
@@ -1671,7 +1684,7 @@
"metadata": {},
"outputs": [],
"source": [
- "bigdf = pd.DataFrame(np.random.randn(16, 100))\n",
+ "bigdf = pd.DataFrame(rng.standard_normal(16, 100))\n",
"bigdf.style.set_sticky(axis=\"index\")"
]
},
@@ -2023,8 +2036,8 @@
"metadata": {},
"outputs": [],
"source": [
- "with open(\"templates/myhtml.tpl\") as f:\n",
- " print(f.read())"
+ "with open(\"templates/myhtml.tpl\") as f_temp:\n",
+ " print(f_temp.read())"
]
},
{
@@ -2130,8 +2143,8 @@
},
"outputs": [],
"source": [
- "with open(\"templates/html_style_structure.html\") as f:\n",
- " style_structure = f.read()"
+ "with open(\"templates/html_style_structure.html\") as f_style:\n",
+ " style_structure = f_style.read()"
]
},
{
@@ -2158,8 +2171,8 @@
},
"outputs": [],
"source": [
- "with open(\"templates/html_table_structure.html\") as f:\n",
- " table_structure = f.read()"
+ "with open(\"templates/html_table_structure.html\") as f_table:\n",
+ " table_structure = f_table.read()"
]
},
{
diff --git a/pyproject.toml b/pyproject.toml
index 645ded35f3d18..835063d7ea87b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -334,6 +334,10 @@ ignore = [
"RUF012",
# type-comparison
"E721",
+ # self-or-cls-assignment
+ "PLW0642",
+ # repeated-equality-comparison
+ "PLR1714",
# Additional pylint rules
# literal-membership
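For context on the two rules ignored above: PLW0642 (self-or-cls-assignment) flags rebinding self or cls inside a method, the pattern the cast() calls in datetimelike.py rely on, while PLR1714 (repeated-equality-comparison) asks for a membership test instead of chained == checks. A short illustrative sketch; Box and is_weekend are made-up names:

    from typing import cast

    class Box:
        def narrow(self) -> None:
            self = cast("Box", self)  # PLW0642: assignment to `self`

    def is_weekend(day: str) -> bool:
        # PLR1714 would suggest: day in ("sat", "sun")
        return day == "sat" or day == "sun"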
From d568a1eb55b39d61a01252e11c0e0995e80ca66e Mon Sep 17 00:00:00 2001
From: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com>
Date: Tue, 3 Sep 2024 13:31:23 -0700
Subject: [PATCH 08/12] Revert "Ignore renaming and equality changes"
This reverts commit 1f061d3d8146f445b708400c7bd170d038b56bda.
---
doc/source/user_guide/style.ipynb | 73 +++++++++++++------------------
pyproject.toml | 4 --
2 files changed, 30 insertions(+), 47 deletions(-)
diff --git a/doc/source/user_guide/style.ipynb b/doc/source/user_guide/style.ipynb
index 03ea7065ae875..5f99af1a0aee1 100644
--- a/doc/source/user_guide/style.ipynb
+++ b/doc/source/user_guide/style.ipynb
@@ -78,8 +78,6 @@
"import pandas as pd\n",
"import numpy as np\n",
"\n",
- "rng = np.random.default_rng(2)\n",
- "\n",
"df = pd.DataFrame(\n",
" {\"strings\": [\"Adam\", \"Mike\"], \"ints\": [1, 3], \"floats\": [1.123, 1000.23]}\n",
")\n",
@@ -102,7 +100,7 @@
"outputs": [],
"source": [
"weather_df = pd.DataFrame(\n",
- " rng.standard_normal(10, 2) * 5,\n",
+ " np.random.rand(10, 2) * 5,\n",
" index=pd.date_range(start=\"2021-01-01\", periods=10),\n",
" columns=[\"Tokyo\", \"Beijing\"],\n",
")\n",
@@ -159,7 +157,7 @@
"metadata": {},
"outputs": [],
"source": [
- "df = pd.DataFrame(rng.standard_normal(5, 5))\n",
+ "df = pd.DataFrame(np.random.randn(5, 5))\n",
"df.style.hide(subset=[0, 2, 4], axis=0).hide(subset=[0, 2, 4], axis=1)"
]
},
@@ -304,16 +302,9 @@
" columns=df.columns,\n",
" ),\n",
" css_class=\"pd-tt\",\n",
- " props=\"visibility: hidden;\"\n",
- " \"position: absolute;\"\n",
- " \"z-index: 1;\"\n",
- " \"border: 1px solid #000066;\"\n",
- " \"background-color: white;\"\n",
- " \"color: #000066;\"\n",
- " \"font-size: 0.8em;\"\n",
- " \"transform: translate(0px, -24px);\"\n",
- " \"padding: 0.6em;\"\n",
- " \"border-radius: 0.5em;\",\n",
+ " props=\"visibility: hidden; position: absolute; z-index: 1; border: 1px solid #000066;\"\n",
+ " \"background-color: white; color: #000066; font-size: 0.8em;\"\n",
+ " \"transform: translate(0px, -24px); padding: 0.6em; border-radius: 0.5em;\",\n",
" )\n",
")"
]
@@ -611,7 +602,8 @@
"metadata": {},
"outputs": [],
"source": [
- "df2 = pd.DataFrame(rng.standard_normal(10, 4), columns=[\"A\", \"B\", \"C\", \"D\"])\n",
+ "np.random.seed(0)\n",
+ "df2 = pd.DataFrame(np.random.randn(10, 4), columns=[\"A\", \"B\", \"C\", \"D\"])\n",
"df2.style"
]
},
@@ -820,15 +812,9 @@
")\n",
"s.set_tooltips(\n",
" tt,\n",
- " props=\"visibility: hidden;\"\n",
- " \"position: absolute; z-index:\"\n",
- " \"1; border: 1px solid #000066;\"\n",
- " \"background-color: white;\"\n",
- " \"color: #000066;\"\n",
- " \"font-size: 0.8em;\"\n",
- " \"transform: translate(0px, -24px);\"\n",
- " \"padding: 0.6em;\"\n",
- " \"border-radius: 0.5em;\",\n",
+ " props=\"visibility: hidden; position: absolute; z-index: 1; border: 1px solid #000066;\"\n",
+ " \"background-color: white; color: #000066; font-size: 0.8em;\"\n",
+ " \"transform: translate(0px, -24px); padding: 0.6em; border-radius: 0.5em;\",\n",
")"
]
},
@@ -908,7 +894,7 @@
"outputs": [],
"source": [
"df3 = pd.DataFrame(\n",
- " rng.standard_normal(4, 4),\n",
+ " np.random.randn(4, 4),\n",
" pd.MultiIndex.from_product([[\"A\", \"B\"], [\"r1\", \"r2\"]]),\n",
" columns=[\"c1\", \"c2\", \"c3\", \"c4\"],\n",
")\n",
@@ -1620,10 +1606,10 @@
"\n",
"\n",
"@widgets.interact\n",
- "def f(h_neg=(0, 359, 1), h_pos=(0, 359), s=(0.0, 99.9), l_var=(0.0, 99.9)):\n",
+ "def f(h_neg=(0, 359, 1), h_pos=(0, 359), s=(0.0, 99.9), l=(0.0, 99.9)):\n",
" return df2.style.background_gradient(\n",
" cmap=sns.palettes.diverging_palette(\n",
- " h_neg=h_neg, h_pos=h_pos, s=s, l=l_var, as_cmap=True\n",
+ " h_neg=h_neg, h_pos=h_pos, s=s, l=l, as_cmap=True\n",
" )\n",
" )"
]
@@ -1643,13 +1629,13 @@
"source": [
"def magnify():\n",
" return [\n",
- " {\"selector\": \"th\", \"props\": [(\"font-size\", \"4pt\")]},\n",
- " {\"selector\": \"td\", \"props\": [(\"padding\", \"0em 0em\")]},\n",
- " {\"selector\": \"th:hover\", \"props\": [(\"font-size\", \"12pt\")]},\n",
- " {\n",
- " \"selector\": \"tr:hover td:hover\",\n",
- " \"props\": [(\"max-width\", \"200px\"), (\"font-size\", \"12pt\")],\n",
- " },\n",
+ " dict(selector=\"th\", props=[(\"font-size\", \"4pt\")]),\n",
+ " dict(selector=\"td\", props=[(\"padding\", \"0em 0em\")]),\n",
+ " dict(selector=\"th:hover\", props=[(\"font-size\", \"12pt\")]),\n",
+ " dict(\n",
+ " selector=\"tr:hover td:hover\",\n",
+ " props=[(\"max-width\", \"200px\"), (\"font-size\", \"12pt\")],\n",
+ " ),\n",
" ]"
]
},
@@ -1659,8 +1645,9 @@
"metadata": {},
"outputs": [],
"source": [
- "cmap = sns.diverging_palette(5, 250, as_cmap=True)\n",
- "bigdf = pd.DataFrame(rng.standard_normal(20, 25)).cumsum()\n",
+ "np.random.seed(25)\n",
+ "cmap = cmap = sns.diverging_palette(5, 250, as_cmap=True)\n",
+ "bigdf = pd.DataFrame(np.random.randn(20, 25)).cumsum()\n",
"\n",
"bigdf.style.background_gradient(cmap, axis=1).set_properties(\n",
" **{\"max-width\": \"80px\", \"font-size\": \"1pt\"}\n",
@@ -1684,7 +1671,7 @@
"metadata": {},
"outputs": [],
"source": [
- "bigdf = pd.DataFrame(rng.standard_normal(16, 100))\n",
+ "bigdf = pd.DataFrame(np.random.randn(16, 100))\n",
"bigdf.style.set_sticky(axis=\"index\")"
]
},
@@ -2036,8 +2023,8 @@
"metadata": {},
"outputs": [],
"source": [
- "with open(\"templates/myhtml.tpl\") as f_temp:\n",
- " print(f_temp.read())"
+ "with open(\"templates/myhtml.tpl\") as f:\n",
+ " print(f.read())"
]
},
{
@@ -2143,8 +2130,8 @@
},
"outputs": [],
"source": [
- "with open(\"templates/html_style_structure.html\") as f_style:\n",
- " style_structure = f_style.read()"
+ "with open(\"templates/html_style_structure.html\") as f:\n",
+ " style_structure = f.read()"
]
},
{
@@ -2171,8 +2158,8 @@
},
"outputs": [],
"source": [
- "with open(\"templates/html_table_structure.html\") as f_table:\n",
- " table_structure = f_table.read()"
+ "with open(\"templates/html_table_structure.html\") as f:\n",
+ " table_structure = f.read()"
]
},
{
diff --git a/pyproject.toml b/pyproject.toml
index 835063d7ea87b..645ded35f3d18 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -334,10 +334,6 @@ ignore = [
"RUF012",
# type-comparison
"E721",
- # self-or-cls-assignment
- "PLW0642",
- # repeated-equality-comparison
- "PLR1714",
# Additional pylint rules
# literal-membership
From 4118316b84d58a48478055d39a58503900eeeec2 Mon Sep 17 00:00:00 2001
From: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com>
Date: Tue, 3 Sep 2024 13:31:48 -0700
Subject: [PATCH 09/12] Revert "[pre-commit.ci] auto fixes from pre-commit.com
hooks"
This reverts commit 1117c6bf659cd7ec46e7c8f0a813c34bdf5487aa.
---
doc/source/user_guide/style.ipynb | 651 ++++++++++-------------
pandas/_config/config.py | 4 +-
pandas/_config/localization.py | 2 +-
pandas/_testing/_warnings.py | 2 +-
pandas/_testing/contexts.py | 8 +-
pandas/compat/pickle_compat.py | 2 +-
pandas/core/_numba/kernels/min_max_.py | 6 +-
pandas/core/apply.py | 6 +-
pandas/core/arrays/datetimes.py | 2 +-
pandas/core/common.py | 2 +-
pandas/core/dtypes/cast.py | 8 +-
pandas/core/groupby/ops.py | 2 +-
pandas/core/indexes/multi.py | 2 +-
pandas/core/internals/blocks.py | 2 +-
pandas/core/internals/concat.py | 2 +-
pandas/core/internals/managers.py | 4 +-
pandas/core/methods/to_dict.py | 2 +-
pandas/io/excel/_odfreader.py | 3 +-
pandas/io/formats/css.py | 6 +-
pandas/io/formats/format.py | 2 +-
pandas/io/sql.py | 6 +-
pandas/plotting/_matplotlib/converter.py | 2 +-
pandas/plotting/_matplotlib/tools.py | 2 +-
pandas/plotting/_misc.py | 2 +-
pandas/tests/arithmetic/test_numeric.py | 4 +-
pandas/util/_exceptions.py | 4 +-
26 files changed, 331 insertions(+), 407 deletions(-)
diff --git a/doc/source/user_guide/style.ipynb b/doc/source/user_guide/style.ipynb
index 5f99af1a0aee1..daecfce6ecebc 100644
--- a/doc/source/user_guide/style.ipynb
+++ b/doc/source/user_guide/style.ipynb
@@ -46,6 +46,7 @@
},
"outputs": [],
"source": [
+ "import matplotlib.pyplot\n",
"# We have this here to trigger matplotlib's font cache stuff.\n",
"# This cell is hidden from the output"
]
@@ -77,13 +78,17 @@
"source": [
"import pandas as pd\n",
"import numpy as np\n",
+ "import matplotlib as mpl\n",
"\n",
- "df = pd.DataFrame(\n",
- " {\"strings\": [\"Adam\", \"Mike\"], \"ints\": [1, 3], \"floats\": [1.123, 1000.23]}\n",
- ")\n",
- "df.style.format(precision=3, thousands=\".\", decimal=\",\").format_index(\n",
- " str.upper, axis=1\n",
- ").relabel_index([\"row 1\", \"row 2\"], axis=0)"
+ "df = pd.DataFrame({\n",
+ " \"strings\": [\"Adam\", \"Mike\"],\n",
+ " \"ints\": [1, 3],\n",
+ " \"floats\": [1.123, 1000.23]\n",
+ "})\n",
+ "df.style \\\n",
+ " .format(precision=3, thousands=\".\", decimal=\",\") \\\n",
+ " .format_index(str.upper, axis=1) \\\n",
+ " .relabel_index([\"row 1\", \"row 2\"], axis=0)"
]
},
{
@@ -99,21 +104,17 @@
"metadata": {},
"outputs": [],
"source": [
- "weather_df = pd.DataFrame(\n",
- " np.random.rand(10, 2) * 5,\n",
- " index=pd.date_range(start=\"2021-01-01\", periods=10),\n",
- " columns=[\"Tokyo\", \"Beijing\"],\n",
- ")\n",
+ "weather_df = pd.DataFrame(np.random.rand(10,2)*5, \n",
+ " index=pd.date_range(start=\"2021-01-01\", periods=10),\n",
+ " columns=[\"Tokyo\", \"Beijing\"])\n",
"\n",
- "\n",
- "def rain_condition(v):\n",
+ "def rain_condition(v): \n",
" if v < 1.75:\n",
" return \"Dry\"\n",
" elif v < 2.75:\n",
" return \"Rain\"\n",
" return \"Heavy Rain\"\n",
"\n",
- "\n",
"def make_pretty(styler):\n",
" styler.set_caption(\"Weather Conditions\")\n",
" styler.format(rain_condition)\n",
@@ -121,7 +122,6 @@
" styler.background_gradient(axis=None, vmin=1, vmax=5, cmap=\"YlGnBu\")\n",
" return styler\n",
"\n",
- "\n",
"weather_df"
]
},
@@ -158,7 +158,9 @@
"outputs": [],
"source": [
"df = pd.DataFrame(np.random.randn(5, 5))\n",
- "df.style.hide(subset=[0, 2, 4], axis=0).hide(subset=[0, 2, 4], axis=1)"
+ "df.style \\\n",
+ " .hide(subset=[0, 2, 4], axis=0) \\\n",
+ " .hide(subset=[0, 2, 4], axis=1)"
]
},
{
@@ -175,9 +177,9 @@
"outputs": [],
"source": [
"show = [0, 2, 4]\n",
- "df.style.hide([row for row in df.index if row not in show], axis=0).hide(\n",
- " [col for col in df.columns if col not in show], axis=1\n",
- ")"
+ "df.style \\\n",
+ " .hide([row for row in df.index if row not in show], axis=0) \\\n",
+ " .hide([col for col in df.columns if col not in show], axis=1)"
]
},
{
@@ -197,9 +199,9 @@
"metadata": {},
"outputs": [],
"source": [
- "summary_styler = (\n",
- " df.agg([\"sum\", \"mean\"]).style.format(precision=3).relabel_index([\"Sum\", \"Average\"])\n",
- ")\n",
+ "summary_styler = df.agg([\"sum\", \"mean\"]).style \\\n",
+ " .format(precision=3) \\\n",
+ " .relabel_index([\"Sum\", \"Average\"])\n",
"df.style.format(precision=1).concat(summary_styler)"
]
},
@@ -225,16 +227,9 @@
"metadata": {},
"outputs": [],
"source": [
- "df = pd.DataFrame(\n",
- " [[38.0, 2.0, 18.0, 22.0, 21, np.nan], [19, 439, 6, 452, 226, 232]],\n",
- " index=pd.Index(\n",
- " [\"Tumour (Positive)\", \"Non-Tumour (Negative)\"], name=\"Actual Label:\"\n",
- " ),\n",
- " columns=pd.MultiIndex.from_product(\n",
- " [[\"Decision Tree\", \"Regression\", \"Random\"], [\"Tumour\", \"Non-Tumour\"]],\n",
- " names=[\"Model:\", \"Predicted:\"],\n",
- " ),\n",
- ")\n",
+ "df = pd.DataFrame([[38.0, 2.0, 18.0, 22.0, 21, np.nan],[19, 439, 6, 452, 226,232]], \n",
+ " index=pd.Index(['Tumour (Positive)', 'Non-Tumour (Negative)'], name='Actual Label:'), \n",
+ " columns=pd.MultiIndex.from_product([['Decision Tree', 'Regression', 'Random'],['Tumour', 'Non-Tumour']], names=['Model:', 'Predicted:']))\n",
"df.style"
]
},
@@ -247,66 +242,63 @@
"outputs": [],
"source": [
"# Hidden cell to just create the below example: code is covered throughout the guide.\n",
- "s = (\n",
- " df.style.hide([(\"Random\", \"Tumour\"), (\"Random\", \"Non-Tumour\")], axis=\"columns\")\n",
- " .format(\"{:.0f}\")\n",
- " .set_table_styles(\n",
- " [\n",
- " {\"selector\": \"\", \"props\": \"border-collapse: separate;\"},\n",
- " {\"selector\": \"caption\", \"props\": \"caption-side: bottom; font-size:1.3em;\"},\n",
- " {\n",
- " \"selector\": \".index_name\",\n",
- " \"props\": \"font-style: italic; color: darkgrey; font-weight:normal;\",\n",
- " },\n",
- " {\n",
- " \"selector\": \"th:not(.index_name)\",\n",
- " \"props\": \"background-color: #000066; color: white;\",\n",
- " },\n",
- " {\"selector\": \"th.col_heading\", \"props\": \"text-align: center;\"},\n",
- " {\"selector\": \"th.col_heading.level0\", \"props\": \"font-size: 1.5em;\"},\n",
- " {\"selector\": \"th.col2\", \"props\": \"border-left: 1px solid white;\"},\n",
- " {\"selector\": \".col2\", \"props\": \"border-left: 1px solid #000066;\"},\n",
- " {\"selector\": \"td\", \"props\": \"text-align: center; font-weight:bold;\"},\n",
- " {\"selector\": \".true\", \"props\": \"background-color: #e6ffe6;\"},\n",
- " {\"selector\": \".false\", \"props\": \"background-color: #ffe6e6;\"},\n",
- " {\"selector\": \".border-red\", \"props\": \"border: 2px dashed red;\"},\n",
- " {\"selector\": \".border-green\", \"props\": \"border: 2px dashed green;\"},\n",
- " {\"selector\": \"td:hover\", \"props\": \"background-color: #ffffb3;\"},\n",
- " ]\n",
- " )\n",
- " .set_td_classes(\n",
- " pd.DataFrame(\n",
- " [\n",
- " [\"true border-green\", \"false\", \"true\", \"false border-red\", \"\", \"\"],\n",
- " [\"false\", \"true\", \"false\", \"true\", \"\", \"\"],\n",
- " ],\n",
- " index=df.index,\n",
- " columns=df.columns,\n",
- " )\n",
- " )\n",
- " .set_caption(\"Confusion matrix for multiple cancer prediction models.\")\n",
- " .set_tooltips(\n",
- " pd.DataFrame(\n",
- " [\n",
- " [\n",
- " \"This model has a very strong true positive rate\",\n",
- " \"\",\n",
- " \"\",\n",
- " \"This model's total number of false negatives is too high\",\n",
- " \"\",\n",
- " \"\",\n",
- " ],\n",
- " [\"\", \"\", \"\", \"\", \"\", \"\"],\n",
- " ],\n",
- " index=df.index,\n",
- " columns=df.columns,\n",
- " ),\n",
- " css_class=\"pd-tt\",\n",
- " props=\"visibility: hidden; position: absolute; z-index: 1; border: 1px solid #000066;\"\n",
- " \"background-color: white; color: #000066; font-size: 0.8em;\"\n",
- " \"transform: translate(0px, -24px); padding: 0.6em; border-radius: 0.5em;\",\n",
- " )\n",
- ")"
+ "s = df.style\\\n",
+ " .hide([('Random', 'Tumour'), ('Random', 'Non-Tumour')], axis='columns')\\\n",
+ " .format('{:.0f}')\\\n",
+ " .set_table_styles([{\n",
+ " 'selector': '',\n",
+ " 'props': 'border-collapse: separate;'\n",
+ " },{\n",
+ " 'selector': 'caption',\n",
+ " 'props': 'caption-side: bottom; font-size:1.3em;'\n",
+ " },{\n",
+ " 'selector': '.index_name',\n",
+ " 'props': 'font-style: italic; color: darkgrey; font-weight:normal;'\n",
+ " },{\n",
+ " 'selector': 'th:not(.index_name)',\n",
+ " 'props': 'background-color: #000066; color: white;'\n",
+ " },{\n",
+ " 'selector': 'th.col_heading',\n",
+ " 'props': 'text-align: center;'\n",
+ " },{\n",
+ " 'selector': 'th.col_heading.level0',\n",
+ " 'props': 'font-size: 1.5em;'\n",
+ " },{\n",
+ " 'selector': 'th.col2',\n",
+ " 'props': 'border-left: 1px solid white;'\n",
+ " },{\n",
+ " 'selector': '.col2',\n",
+ " 'props': 'border-left: 1px solid #000066;'\n",
+ " },{\n",
+ " 'selector': 'td',\n",
+ " 'props': 'text-align: center; font-weight:bold;'\n",
+ " },{\n",
+ " 'selector': '.true',\n",
+ " 'props': 'background-color: #e6ffe6;'\n",
+ " },{\n",
+ " 'selector': '.false',\n",
+ " 'props': 'background-color: #ffe6e6;'\n",
+ " },{\n",
+ " 'selector': '.border-red',\n",
+ " 'props': 'border: 2px dashed red;'\n",
+ " },{\n",
+ " 'selector': '.border-green',\n",
+ " 'props': 'border: 2px dashed green;'\n",
+ " },{\n",
+ " 'selector': 'td:hover',\n",
+ " 'props': 'background-color: #ffffb3;'\n",
+ " }])\\\n",
+ " .set_td_classes(pd.DataFrame([['true border-green', 'false', 'true', 'false border-red', '', ''],\n",
+ " ['false', 'true', 'false', 'true', '', '']], \n",
+ " index=df.index, columns=df.columns))\\\n",
+ " .set_caption(\"Confusion matrix for multiple cancer prediction models.\")\\\n",
+ " .set_tooltips(pd.DataFrame([['This model has a very strong true positive rate', '', '', \"This model's total number of false negatives is too high\", '', ''],\n",
+ " ['', '', '', '', '', '']], \n",
+ " index=df.index, columns=df.columns),\n",
+ " css_class='pd-tt', props=\n",
+ " 'visibility: hidden; position: absolute; z-index: 1; border: 1px solid #000066;'\n",
+ " 'background-color: white; color: #000066; font-size: 0.8em;' \n",
+ " 'transform: translate(0px, -24px); padding: 0.6em; border-radius: 0.5em;')\n"
]
},
{
@@ -333,9 +325,7 @@
"metadata": {},
"outputs": [],
"source": [
- "s = df.style.format(\"{:.0f}\").hide(\n",
- " [(\"Random\", \"Tumour\"), (\"Random\", \"Non-Tumour\")], axis=\"columns\"\n",
- ")\n",
+ "s = df.style.format('{:.0f}').hide([('Random', 'Tumour'), ('Random', 'Non-Tumour')], axis=\"columns\")\n",
"s"
]
},
@@ -347,8 +337,8 @@
},
"outputs": [],
"source": [
- "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting\n",
- "s.set_uuid(\"after_hide\")"
+ "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting \n",
+ "s.set_uuid('after_hide')"
]
},
{
@@ -405,16 +395,16 @@
"outputs": [],
"source": [
"cell_hover = { # for row hover use |
instead of \n",
- " \"selector\": \"td:hover\",\n",
- " \"props\": [(\"background-color\", \"#ffffb3\")],\n",
+ " 'selector': 'td:hover',\n",
+ " 'props': [('background-color', '#ffffb3')]\n",
"}\n",
"index_names = {\n",
- " \"selector\": \".index_name\",\n",
- " \"props\": \"font-style: italic; color: darkgrey; font-weight:normal;\",\n",
+ " 'selector': '.index_name',\n",
+ " 'props': 'font-style: italic; color: darkgrey; font-weight:normal;'\n",
"}\n",
"headers = {\n",
- " \"selector\": \"th:not(.index_name)\",\n",
- " \"props\": \"background-color: #000066; color: white;\",\n",
+ " 'selector': 'th:not(.index_name)',\n",
+ " 'props': 'background-color: #000066; color: white;'\n",
"}\n",
"s.set_table_styles([cell_hover, index_names, headers])"
]
@@ -427,8 +417,8 @@
},
"outputs": [],
"source": [
- "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting\n",
- "s.set_uuid(\"after_tab_styles1\")"
+ "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting \n",
+ "s.set_uuid('after_tab_styles1')"
]
},
{
@@ -444,14 +434,11 @@
"metadata": {},
"outputs": [],
"source": [
- "s.set_table_styles(\n",
- " [\n",
- " {\"selector\": \"th.col_heading\", \"props\": \"text-align: center;\"},\n",
- " {\"selector\": \"th.col_heading.level0\", \"props\": \"font-size: 1.5em;\"},\n",
- " {\"selector\": \"td\", \"props\": \"text-align: center; font-weight: bold;\"},\n",
- " ],\n",
- " overwrite=False,\n",
- ")"
+ "s.set_table_styles([\n",
+ " {'selector': 'th.col_heading', 'props': 'text-align: center;'},\n",
+ " {'selector': 'th.col_heading.level0', 'props': 'font-size: 1.5em;'},\n",
+ " {'selector': 'td', 'props': 'text-align: center; font-weight: bold;'},\n",
+ "], overwrite=False)"
]
},
{
@@ -462,8 +449,8 @@
},
"outputs": [],
"source": [
- "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting\n",
- "s.set_uuid(\"after_tab_styles2\")"
+ "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting \n",
+ "s.set_uuid('after_tab_styles2')"
]
},
{
@@ -481,16 +468,10 @@
"metadata": {},
"outputs": [],
"source": [
- "s.set_table_styles(\n",
- " {\n",
- " (\"Regression\", \"Tumour\"): [\n",
- " {\"selector\": \"th\", \"props\": \"border-left: 1px solid white\"},\n",
- " {\"selector\": \"td\", \"props\": \"border-left: 1px solid #000066\"},\n",
- " ]\n",
- " },\n",
- " overwrite=False,\n",
- " axis=0,\n",
- ")"
+ "s.set_table_styles({\n",
+ " ('Regression', 'Tumour'): [{'selector': 'th', 'props': 'border-left: 1px solid white'},\n",
+ " {'selector': 'td', 'props': 'border-left: 1px solid #000066'}]\n",
+ "}, overwrite=False, axis=0)"
]
},
{
@@ -501,8 +482,8 @@
},
"outputs": [],
"source": [
- "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting\n",
- "s.set_uuid(\"xyz01\")"
+ "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting \n",
+ "s.set_uuid('xyz01')"
]
},
{
@@ -527,7 +508,7 @@
"outputs": [],
"source": [
"out = s.set_table_attributes('class=\"my-table-cls\"').to_html()\n",
- "print(out[out.find(\" -0.3) else None\n",
- ")\n",
+ "s2 = df2.style.map(style_negative, props='color:red;')\\\n",
+ " .map(lambda v: 'opacity: 20%;' if (v < 0.3) and (v > -0.3) else None)\n",
"s2"
]
},
@@ -638,8 +612,8 @@
},
"outputs": [],
"source": [
- "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting\n",
- "s2.set_uuid(\"after_applymap\")"
+ "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting \n",
+ "s2.set_uuid('after_applymap')"
]
},
{
@@ -655,11 +629,9 @@
"metadata": {},
"outputs": [],
"source": [
- "def highlight_max(s, props=\"\"):\n",
- " return np.where(s == np.nanmax(s.values), props, \"\")\n",
- "\n",
- "\n",
- "s2.apply(highlight_max, props=\"color:white;background-color:darkblue\", axis=0)"
+ "def highlight_max(s, props=''):\n",
+ " return np.where(s == np.nanmax(s.values), props, '')\n",
+ "s2.apply(highlight_max, props='color:white;background-color:darkblue', axis=0)"
]
},
{
@@ -670,8 +642,8 @@
},
"outputs": [],
"source": [
- "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting\n",
- "s2.set_uuid(\"after_apply\")"
+ "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting \n",
+ "s2.set_uuid('after_apply')"
]
},
{
@@ -687,9 +659,8 @@
"metadata": {},
"outputs": [],
"source": [
- "s2.apply(highlight_max, props=\"color:white;background-color:pink;\", axis=1).apply(\n",
- " highlight_max, props=\"color:white;background-color:purple\", axis=None\n",
- ")"
+ "s2.apply(highlight_max, props='color:white;background-color:pink;', axis=1)\\\n",
+ " .apply(highlight_max, props='color:white;background-color:purple', axis=None)"
]
},
{
@@ -700,8 +671,8 @@
},
"outputs": [],
"source": [
- "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting\n",
- "s2.set_uuid(\"after_apply_again\")"
+ "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting \n",
+ "s2.set_uuid('after_apply_again')"
]
},
{
@@ -742,10 +713,8 @@
"metadata": {},
"outputs": [],
"source": [
- "s2.map_index(lambda v: \"color:pink;\" if v > 4 else \"color:darkblue;\", axis=0)\n",
- "s2.apply_index(\n",
- " lambda s: np.where(s.isin([\"A\", \"B\"]), \"color:pink;\", \"color:darkblue;\"), axis=1\n",
- ")"
+ "s2.map_index(lambda v: \"color:pink;\" if v>4 else \"color:darkblue;\", axis=0)\n",
+ "s2.apply_index(lambda s: np.where(s.isin([\"A\", \"B\"]), \"color:pink;\", \"color:darkblue;\"), axis=1)"
]
},
{
@@ -765,12 +734,11 @@
"metadata": {},
"outputs": [],
"source": [
- "s.set_caption(\n",
- " \"Confusion matrix for multiple cancer prediction models.\"\n",
- ").set_table_styles(\n",
- " [{\"selector\": \"caption\", \"props\": \"caption-side: bottom; font-size:1.25em;\"}],\n",
- " overwrite=False,\n",
- ")"
+ "s.set_caption(\"Confusion matrix for multiple cancer prediction models.\")\\\n",
+ " .set_table_styles([{\n",
+ " 'selector': 'caption',\n",
+ " 'props': 'caption-side: bottom; font-size:1.25em;'\n",
+ " }], overwrite=False)"
]
},
{
@@ -781,8 +749,8 @@
},
"outputs": [],
"source": [
- "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting\n",
- "s.set_uuid(\"after_caption\")"
+ "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting \n",
+ "s.set_uuid('after_caption')"
]
},
{
@@ -800,22 +768,12 @@
"metadata": {},
"outputs": [],
"source": [
- "tt = pd.DataFrame(\n",
- " [\n",
- " [\n",
- " \"This model has a very strong true positive rate\",\n",
- " \"This model's total number of false negatives is too high\",\n",
- " ]\n",
- " ],\n",
- " index=[\"Tumour (Positive)\"],\n",
- " columns=df.columns[[0, 3]],\n",
- ")\n",
- "s.set_tooltips(\n",
- " tt,\n",
- " props=\"visibility: hidden; position: absolute; z-index: 1; border: 1px solid #000066;\"\n",
- " \"background-color: white; color: #000066; font-size: 0.8em;\"\n",
- " \"transform: translate(0px, -24px); padding: 0.6em; border-radius: 0.5em;\",\n",
- ")"
+ "tt = pd.DataFrame([['This model has a very strong true positive rate', \n",
+ " \"This model's total number of false negatives is too high\"]], \n",
+ " index=['Tumour (Positive)'], columns=df.columns[[0,3]])\n",
+ "s.set_tooltips(tt, props='visibility: hidden; position: absolute; z-index: 1; border: 1px solid #000066;'\n",
+ " 'background-color: white; color: #000066; font-size: 0.8em;' \n",
+ " 'transform: translate(0px, -24px); padding: 0.6em; border-radius: 0.5em;')"
]
},
{
@@ -826,8 +784,8 @@
},
"outputs": [],
"source": [
- "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting\n",
- "s.set_uuid(\"after_tooltips\")"
+ "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting \n",
+ "s.set_uuid('after_tooltips')"
]
},
{
@@ -843,18 +801,14 @@
"metadata": {},
"outputs": [],
"source": [
- "s.set_table_styles(\n",
- " [ # create internal CSS classes\n",
- " {\"selector\": \".border-red\", \"props\": \"border: 2px dashed red;\"},\n",
- " {\"selector\": \".border-green\", \"props\": \"border: 2px dashed green;\"},\n",
- " ],\n",
- " overwrite=False,\n",
- ")\n",
- "cell_border = pd.DataFrame(\n",
- " [[\"border-green \", \" \", \" \", \"border-red \"], [\" \", \" \", \" \", \" \"]],\n",
- " index=df.index,\n",
- " columns=df.columns[:4],\n",
- ")\n",
+ "s.set_table_styles([ # create internal CSS classes\n",
+ " {'selector': '.border-red', 'props': 'border: 2px dashed red;'},\n",
+ " {'selector': '.border-green', 'props': 'border: 2px dashed green;'},\n",
+ "], overwrite=False)\n",
+ "cell_border = pd.DataFrame([['border-green ', ' ', ' ', 'border-red '], \n",
+ " [' ', ' ', ' ', ' ']], \n",
+ " index=df.index, \n",
+ " columns=df.columns[:4])\n",
"s.set_td_classes(cell_color + cell_border)"
]
},
@@ -866,8 +820,8 @@
},
"outputs": [],
"source": [
- "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting\n",
- "s.set_uuid(\"after_borders\")"
+ "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting \n",
+ "s.set_uuid('after_borders')"
]
},
{
@@ -893,11 +847,9 @@
"metadata": {},
"outputs": [],
"source": [
- "df3 = pd.DataFrame(\n",
- " np.random.randn(4, 4),\n",
- " pd.MultiIndex.from_product([[\"A\", \"B\"], [\"r1\", \"r2\"]]),\n",
- " columns=[\"c1\", \"c2\", \"c3\", \"c4\"],\n",
- ")\n",
+ "df3 = pd.DataFrame(np.random.randn(4,4), \n",
+ " pd.MultiIndex.from_product([['A', 'B'], ['r1', 'r2']]),\n",
+ " columns=['c1','c2','c3','c4'])\n",
"df3"
]
},
@@ -914,10 +866,9 @@
"metadata": {},
"outputs": [],
"source": [
- "slice_ = [\"c3\", \"c4\"]\n",
- "df3.style.apply(\n",
- " highlight_max, props=\"color:red;\", axis=0, subset=slice_\n",
- ").set_properties(**{\"background-color\": \"#ffffb3\"}, subset=slice_)"
+ "slice_ = ['c3', 'c4']\n",
+ "df3.style.apply(highlight_max, props='color:red;', axis=0, subset=slice_)\\\n",
+ " .set_properties(**{'background-color': '#ffffb3'}, subset=slice_)"
]
},
{
@@ -934,10 +885,9 @@
"outputs": [],
"source": [
"idx = pd.IndexSlice\n",
- "slice_ = idx[idx[:, \"r1\"], idx[\"c2\":\"c4\"]]\n",
- "df3.style.apply(\n",
- " highlight_max, props=\"color:red;\", axis=0, subset=slice_\n",
- ").set_properties(**{\"background-color\": \"#ffffb3\"}, subset=slice_)"
+ "slice_ = idx[idx[:,'r1'], idx['c2':'c4']]\n",
+ "df3.style.apply(highlight_max, props='color:red;', axis=0, subset=slice_)\\\n",
+ " .set_properties(**{'background-color': '#ffffb3'}, subset=slice_)"
]
},
{
@@ -953,10 +903,9 @@
"metadata": {},
"outputs": [],
"source": [
- "slice_ = idx[idx[:, \"r2\"], :]\n",
- "df3.style.apply(\n",
- " highlight_max, props=\"color:red;\", axis=1, subset=slice_\n",
- ").set_properties(**{\"background-color\": \"#ffffb3\"}, subset=slice_)"
+ "slice_ = idx[idx[:,'r2'], :]\n",
+ "df3.style.apply(highlight_max, props='color:red;', axis=1, subset=slice_)\\\n",
+ " .set_properties(**{'background-color': '#ffffb3'}, subset=slice_)"
]
},
{
@@ -974,10 +923,9 @@
"metadata": {},
"outputs": [],
"source": [
- "slice_ = idx[idx[(df3[\"c1\"] + df3[\"c3\"]) < -2.0], [\"c2\", \"c4\"]]\n",
- "df3.style.apply(\n",
- " highlight_max, props=\"color:red;\", axis=1, subset=slice_\n",
- ").set_properties(**{\"background-color\": \"#ffffb3\"}, subset=slice_)"
+ "slice_ = idx[idx[(df3['c1'] + df3['c3']) < -2.0], ['c2', 'c4']]\n",
+ "df3.style.apply(highlight_max, props='color:red;', axis=1, subset=slice_)\\\n",
+ " .set_properties(**{'background-color': '#ffffb3'}, subset=slice_)"
]
},
{
@@ -1033,7 +981,7 @@
"metadata": {},
"outputs": [],
"source": [
- "df4 = pd.DataFrame([[1, 2], [3, 4]])\n",
+ "df4 = pd.DataFrame([[1,2],[3,4]])\n",
"s4 = df4.style"
]
},
@@ -1055,7 +1003,6 @@
"outputs": [],
"source": [
"from pandas.io.formats.style import Styler\n",
- "\n",
"s4 = Styler(df4, uuid_len=0, cell_ids=False)"
]
},
@@ -1106,7 +1053,7 @@
"metadata": {},
"outputs": [],
"source": [
- "df4.style.set_table_styles([{\"selector\": \"td.col1\", \"props\": props}])"
+ "df4.style.set_table_styles([{'selector': 'td.col1', 'props': props}])"
]
},
{
@@ -1135,11 +1082,9 @@
"metadata": {},
"outputs": [],
"source": [
- "df2.style.apply(\n",
- " highlight_max, props=\"color:white;background-color:darkblue;\", axis=0\n",
- ").apply(highlight_max, props=\"color:white;background-color:pink;\", axis=1).apply(\n",
- " highlight_max, props=\"color:white;background-color:purple\", axis=None\n",
- ")"
+ "df2.style.apply(highlight_max, props='color:white;background-color:darkblue;', axis=0)\\\n",
+ " .apply(highlight_max, props='color:white;background-color:pink;', axis=1)\\\n",
+ " .apply(highlight_max, props='color:white;background-color:purple', axis=None)"
]
},
{
@@ -1160,18 +1105,14 @@
"outputs": [],
"source": [
"build = lambda x: pd.DataFrame(x, index=df2.index, columns=df2.columns)\n",
- "cls1 = build(df2.apply(highlight_max, props=\"cls-1 \", axis=0))\n",
- "cls2 = build(\n",
- " df2.apply(highlight_max, props=\"cls-2 \", axis=1, result_type=\"expand\").values\n",
- ")\n",
- "cls3 = build(highlight_max(df2, props=\"cls-3 \"))\n",
- "df2.style.set_table_styles(\n",
- " [\n",
- " {\"selector\": \".cls-1\", \"props\": \"color:white;background-color:darkblue;\"},\n",
- " {\"selector\": \".cls-2\", \"props\": \"color:white;background-color:pink;\"},\n",
- " {\"selector\": \".cls-3\", \"props\": \"color:white;background-color:purple;\"},\n",
- " ]\n",
- ").set_td_classes(cls1 + cls2 + cls3)"
+ "cls1 = build(df2.apply(highlight_max, props='cls-1 ', axis=0))\n",
+ "cls2 = build(df2.apply(highlight_max, props='cls-2 ', axis=1, result_type='expand').values)\n",
+ "cls3 = build(highlight_max(df2, props='cls-3 '))\n",
+ "df2.style.set_table_styles([\n",
+ " {'selector': '.cls-1', 'props': 'color:white;background-color:darkblue;'},\n",
+ " {'selector': '.cls-2', 'props': 'color:white;background-color:pink;'},\n",
+ " {'selector': '.cls-3', 'props': 'color:white;background-color:purple;'}\n",
+ "]).set_td_classes(cls1 + cls2 + cls3)"
]
},
{
@@ -1211,14 +1152,10 @@
" \"blank\": \"\",\n",
"}\n",
"html = Styler(df4, uuid_len=0, cell_ids=False)\n",
- "html.set_table_styles(\n",
- " [\n",
- " {\"selector\": \"td\", \"props\": props},\n",
- " {\"selector\": \".c1\", \"props\": \"color:green;\"},\n",
- " {\"selector\": \".l0\", \"props\": \"color:blue;\"},\n",
- " ],\n",
- " css_class_names=my_css,\n",
- ")\n",
+ "html.set_table_styles([{'selector': 'td', 'props': props},\n",
+ " {'selector': '.c1', 'props': 'color:green;'},\n",
+ " {'selector': '.l0', 'props': 'color:blue;'}],\n",
+ " css_class_names=my_css)\n",
"print(html.to_html())"
]
},
@@ -1276,9 +1213,9 @@
"metadata": {},
"outputs": [],
"source": [
- "df2.iloc[0, 2] = np.nan\n",
- "df2.iloc[4, 3] = np.nan\n",
- "df2.loc[:4].style.highlight_null(color=\"yellow\")"
+ "df2.iloc[0,2] = np.nan\n",
+ "df2.iloc[4,3] = np.nan\n",
+ "df2.loc[:4].style.highlight_null(color='yellow')"
]
},
{
@@ -1294,9 +1231,7 @@
"metadata": {},
"outputs": [],
"source": [
- "df2.loc[:4].style.highlight_max(\n",
- " axis=1, props=\"color:white; font-weight:bold; background-color:darkblue;\"\n",
- ")"
+ "df2.loc[:4].style.highlight_max(axis=1, props='color:white; font-weight:bold; background-color:darkblue;')"
]
},
{
@@ -1314,9 +1249,7 @@
"outputs": [],
"source": [
"left = pd.Series([1.0, 0.0, 1.0], index=[\"A\", \"B\", \"D\"])\n",
- "df2.loc[:4].style.highlight_between(\n",
- " left=left, right=1.5, axis=1, props=\"color:white; background-color:purple;\"\n",
- ")"
+ "df2.loc[:4].style.highlight_between(left=left, right=1.5, axis=1, props='color:white; background-color:purple;')"
]
},
{
@@ -1333,7 +1266,7 @@
"metadata": {},
"outputs": [],
"source": [
- "df2.loc[:4].style.highlight_quantile(q_left=0.85, axis=None, color=\"yellow\")"
+ "df2.loc[:4].style.highlight_quantile(q_left=0.85, axis=None, color='yellow')"
]
},
{
@@ -1357,7 +1290,6 @@
"outputs": [],
"source": [
"import seaborn as sns\n",
- "\n",
"cm = sns.light_palette(\"green\", as_cmap=True)\n",
"\n",
"df2.style.background_gradient(cmap=cm)"
@@ -1397,9 +1329,9 @@
"metadata": {},
"outputs": [],
"source": [
- "df2.loc[:4].style.set_properties(\n",
- " **{\"background-color\": \"black\", \"color\": \"lawngreen\", \"border-color\": \"white\"}\n",
- ")"
+ "df2.loc[:4].style.set_properties(**{'background-color': 'black',\n",
+ " 'color': 'lawngreen',\n",
+ " 'border-color': 'white'})"
]
},
{
@@ -1422,7 +1354,7 @@
"metadata": {},
"outputs": [],
"source": [
- "df2.style.bar(subset=[\"A\", \"B\"], color=\"#d65f5f\")"
+ "df2.style.bar(subset=['A', 'B'], color='#d65f5f')"
]
},
{
@@ -1440,15 +1372,10 @@
"metadata": {},
"outputs": [],
"source": [
- "df2.style.format(\"{:.3f}\", na_rep=\"\").bar(\n",
- " align=0,\n",
- " vmin=-2.5,\n",
- " vmax=2.5,\n",
- " cmap=\"bwr\",\n",
- " height=50,\n",
- " width=60,\n",
- " props=\"width: 120px; border-right: 1px solid black;\",\n",
- ").text_gradient(cmap=\"bwr\", vmin=-2.5, vmax=2.5)"
+ "df2.style.format('{:.3f}', na_rep=\"\")\\\n",
+ " .bar(align=0, vmin=-2.5, vmax=2.5, cmap=\"bwr\", height=50,\n",
+ " width=60, props=\"width: 120px; border-right: 1px solid black;\")\\\n",
+ " .text_gradient(cmap=\"bwr\", vmin=-2.5, vmax=2.5)"
]
},
{
@@ -1471,10 +1398,10 @@
"from IPython.display import HTML\n",
"\n",
"# Test series\n",
- "test1 = pd.Series([-100, -60, -30, -20], name=\"All Negative\")\n",
- "test2 = pd.Series([-10, -5, 0, 90], name=\"Both Pos and Neg\")\n",
- "test3 = pd.Series([10, 20, 50, 100], name=\"All Positive\")\n",
- "test4 = pd.Series([100, 103, 101, 102], name=\"Large Positive\")\n",
+ "test1 = pd.Series([-100,-60,-30,-20], name='All Negative')\n",
+ "test2 = pd.Series([-10,-5,0,90], name='Both Pos and Neg')\n",
+ "test3 = pd.Series([10,20,50,100], name='All Positive')\n",
+ "test4 = pd.Series([100, 103, 101, 102], name='Large Positive')\n",
"\n",
"\n",
"head = \"\"\"\n",
@@ -1490,22 +1417,19 @@
"\n",
"\"\"\"\n",
"\n",
- "aligns = [\"left\", \"right\", \"zero\", \"mid\", \"mean\", 99]\n",
+ "aligns = ['left', 'right', 'zero', 'mid', 'mean', 99]\n",
"for align in aligns:\n",
" row = \"| {} | \".format(align)\n",
- " for series in [test1, test2, test3, test4]:\n",
+ " for series in [test1,test2,test3, test4]:\n",
" s = series.copy()\n",
- " s.name = \"\"\n",
- " row += \"{} | \".format(\n",
- " s.to_frame()\n",
- " .style.hide(axis=\"index\")\n",
- " .bar(align=align, color=[\"#d65f5f\", \"#5fba7d\"], width=100)\n",
- " .to_html()\n",
- " ) # testn['width']\n",
- " row += \" \"\n",
+ " s.name=''\n",
+ " row += \"{} | \".format(s.to_frame().style.hide(axis='index').bar(align=align, \n",
+ " color=['#d65f5f', '#5fba7d'], \n",
+ " width=100).to_html()) #testn['width']\n",
+ " row += ''\n",
" head += row\n",
- "\n",
- "head += \"\"\"\n",
+ " \n",
+ "head+= \"\"\"\n",
"\n",
" \"\"\""
]
@@ -1539,12 +1463,11 @@
"metadata": {},
"outputs": [],
"source": [
- "style1 = (\n",
- " df2.style.map(style_negative, props=\"color:red;\")\n",
- " .map(lambda v: \"opacity: 20%;\" if (v < 0.3) and (v > -0.3) else None)\n",
- " .set_table_styles([{\"selector\": \"th\", \"props\": \"color: blue;\"}])\n",
- " .hide(axis=\"index\")\n",
- ")\n",
+ "style1 = df2.style\\\n",
+ " .map(style_negative, props='color:red;')\\\n",
+ " .map(lambda v: 'opacity: 20%;' if (v < 0.3) and (v > -0.3) else None)\\\n",
+ " .set_table_styles([{\"selector\": \"th\", \"props\": \"color: blue;\"}])\\\n",
+ " .hide(axis=\"index\")\n",
"style1"
]
},
@@ -1603,14 +1526,11 @@
"outputs": [],
"source": [
"from ipywidgets import widgets\n",
- "\n",
- "\n",
"@widgets.interact\n",
- "def f(h_neg=(0, 359, 1), h_pos=(0, 359), s=(0.0, 99.9), l=(0.0, 99.9)):\n",
+ "def f(h_neg=(0, 359, 1), h_pos=(0, 359), s=(0., 99.9), l=(0., 99.9)):\n",
" return df2.style.background_gradient(\n",
- " cmap=sns.palettes.diverging_palette(\n",
- " h_neg=h_neg, h_pos=h_pos, s=s, l=l, as_cmap=True\n",
- " )\n",
+ " cmap=sns.palettes.diverging_palette(h_neg=h_neg, h_pos=h_pos, s=s, l=l,\n",
+ " as_cmap=True)\n",
" )"
]
},
@@ -1628,15 +1548,16 @@
"outputs": [],
"source": [
"def magnify():\n",
- " return [\n",
- " dict(selector=\"th\", props=[(\"font-size\", \"4pt\")]),\n",
- " dict(selector=\"td\", props=[(\"padding\", \"0em 0em\")]),\n",
- " dict(selector=\"th:hover\", props=[(\"font-size\", \"12pt\")]),\n",
- " dict(\n",
- " selector=\"tr:hover td:hover\",\n",
- " props=[(\"max-width\", \"200px\"), (\"font-size\", \"12pt\")],\n",
- " ),\n",
- " ]"
+ " return [dict(selector=\"th\",\n",
+ " props=[(\"font-size\", \"4pt\")]),\n",
+ " dict(selector=\"td\",\n",
+ " props=[('padding', \"0em 0em\")]),\n",
+ " dict(selector=\"th:hover\",\n",
+ " props=[(\"font-size\", \"12pt\")]),\n",
+ " dict(selector=\"tr:hover td:hover\",\n",
+ " props=[('max-width', '200px'),\n",
+ " ('font-size', '12pt')])\n",
+ "]"
]
},
{
@@ -1646,12 +1567,14 @@
"outputs": [],
"source": [
"np.random.seed(25)\n",
- "cmap = cmap = sns.diverging_palette(5, 250, as_cmap=True)\n",
+ "cmap = cmap=sns.diverging_palette(5, 250, as_cmap=True)\n",
"bigdf = pd.DataFrame(np.random.randn(20, 25)).cumsum()\n",
"\n",
- "bigdf.style.background_gradient(cmap, axis=1).set_properties(\n",
- " **{\"max-width\": \"80px\", \"font-size\": \"1pt\"}\n",
- ").set_caption(\"Hover to magnify\").format(precision=2).set_table_styles(magnify())"
+ "bigdf.style.background_gradient(cmap, axis=1)\\\n",
+ " .set_properties(**{'max-width': '80px', 'font-size': '1pt'})\\\n",
+ " .set_caption(\"Hover to magnify\")\\\n",
+ " .format(precision=2)\\\n",
+ " .set_table_styles(magnify())"
]
},
{
@@ -1688,8 +1611,8 @@
"metadata": {},
"outputs": [],
"source": [
- "bigdf.index = pd.MultiIndex.from_product([[\"A\", \"B\"], [0, 1], [0, 1, 2, 3]])\n",
- "bigdf.style.set_sticky(axis=\"index\", pixel_size=18, levels=[1, 2])"
+ "bigdf.index = pd.MultiIndex.from_product([[\"A\",\"B\"],[0,1],[0,1,2,3]])\n",
+ "bigdf.style.set_sticky(axis=\"index\", pixel_size=18, levels=[1,2])"
]
},
{
@@ -1709,7 +1632,7 @@
"metadata": {},
"outputs": [],
"source": [
- "df4 = pd.DataFrame([[\"\", '\"&other\"', \"\"]])\n",
+ "df4 = pd.DataFrame([['', '\"&other\"', '']])\n",
"df4.style"
]
},
@@ -1728,9 +1651,7 @@
"metadata": {},
"outputs": [],
"source": [
- "df4.style.format(\n",
- " '{}', escape=\"html\"\n",
- ")"
+ "df4.style.format('{}', escape=\"html\")"
]
},
{
@@ -1772,9 +1693,10 @@
"metadata": {},
"outputs": [],
"source": [
- "df2.style.map(style_negative, props=\"color:red;\").highlight_max(axis=0).to_excel(\n",
- " \"styled.xlsx\", engine=\"openpyxl\"\n",
- ")"
+ "df2.style.\\\n",
+ " map(style_negative, props='color:red;').\\\n",
+ " highlight_max(axis=0).\\\n",
+ " to_excel('styled.xlsx', engine='openpyxl')"
]
},
{
@@ -1843,11 +1765,7 @@
"metadata": {},
"outputs": [],
"source": [
- "print(\n",
- " pd.DataFrame(\n",
- " [[1, 2], [3, 4]], index=[\"i1\", \"i2\"], columns=[\"c1\", \"c2\"]\n",
- " ).style.to_html()\n",
- ")"
+ "print(pd.DataFrame([[1,2],[3,4]], index=['i1', 'i2'], columns=['c1', 'c2']).style.to_html())"
]
},
{
@@ -1865,8 +1783,9 @@
"metadata": {},
"outputs": [],
"source": [
- "df4 = pd.DataFrame([[\"text\"]])\n",
- "df4.style.map(lambda x: \"color:green;\").map(lambda x: \"color:red;\")"
+ "df4 = pd.DataFrame([['text']])\n",
+ "df4.style.map(lambda x: 'color:green;')\\\n",
+ " .map(lambda x: 'color:red;')"
]
},
{
@@ -1875,7 +1794,8 @@
"metadata": {},
"outputs": [],
"source": [
- "df4.style.map(lambda x: \"color:red;\").map(lambda x: \"color:green;\")"
+ "df4.style.map(lambda x: 'color:red;')\\\n",
+ " .map(lambda x: 'color:green;')"
]
},
{
@@ -1900,9 +1820,9 @@
"metadata": {},
"outputs": [],
"source": [
- "df4.style.set_uuid(\"a_\").set_table_styles(\n",
- " [{\"selector\": \"td\", \"props\": \"color:red;\"}]\n",
- ").map(lambda x: \"color:green;\")"
+ "df4.style.set_uuid('a_')\\\n",
+ " .set_table_styles([{'selector': 'td', 'props': 'color:red;'}])\\\n",
+ " .map(lambda x: 'color:green;')"
]
},
{
@@ -1918,12 +1838,11 @@
"metadata": {},
"outputs": [],
"source": [
- "df4.style.set_uuid(\"b_\").set_table_styles(\n",
- " [\n",
- " {\"selector\": \"td\", \"props\": \"color:red;\"},\n",
- " {\"selector\": \".cls-1\", \"props\": \"color:blue;\"},\n",
- " ]\n",
- ").map(lambda x: \"color:green;\").set_td_classes(pd.DataFrame([[\"cls-1\"]]))"
+ "df4.style.set_uuid('b_')\\\n",
+ " .set_table_styles([{'selector': 'td', 'props': 'color:red;'},\n",
+ " {'selector': '.cls-1', 'props': 'color:blue;'}])\\\n",
+ " .map(lambda x: 'color:green;')\\\n",
+ " .set_td_classes(pd.DataFrame([['cls-1']]))"
]
},
{
@@ -1939,13 +1858,12 @@
"metadata": {},
"outputs": [],
"source": [
- "df4.style.set_uuid(\"c_\").set_table_styles(\n",
- " [\n",
- " {\"selector\": \"td\", \"props\": \"color:red;\"},\n",
- " {\"selector\": \".cls-1\", \"props\": \"color:blue;\"},\n",
- " {\"selector\": \"td.data\", \"props\": \"color:yellow;\"},\n",
- " ]\n",
- ").map(lambda x: \"color:green;\").set_td_classes(pd.DataFrame([[\"cls-1\"]]))"
+ "df4.style.set_uuid('c_')\\\n",
+ " .set_table_styles([{'selector': 'td', 'props': 'color:red;'},\n",
+ " {'selector': '.cls-1', 'props': 'color:blue;'},\n",
+ " {'selector': 'td.data', 'props': 'color:yellow;'}])\\\n",
+ " .map(lambda x: 'color:green;')\\\n",
+ " .set_td_classes(pd.DataFrame([['cls-1']]))"
]
},
{
@@ -1963,13 +1881,12 @@
"metadata": {},
"outputs": [],
"source": [
- "df4.style.set_uuid(\"d_\").set_table_styles(\n",
- " [\n",
- " {\"selector\": \"td\", \"props\": \"color:red;\"},\n",
- " {\"selector\": \".cls-1\", \"props\": \"color:blue;\"},\n",
- " {\"selector\": \"td.data\", \"props\": \"color:yellow;\"},\n",
- " ]\n",
- ").map(lambda x: \"color:green !important;\").set_td_classes(pd.DataFrame([[\"cls-1\"]]))"
+ "df4.style.set_uuid('d_')\\\n",
+ " .set_table_styles([{'selector': 'td', 'props': 'color:red;'},\n",
+ " {'selector': '.cls-1', 'props': 'color:blue;'},\n",
+ " {'selector': 'td.data', 'props': 'color:yellow;'}])\\\n",
+ " .map(lambda x: 'color:green !important;')\\\n",
+ " .set_td_classes(pd.DataFrame([['cls-1']]))"
]
},
{
@@ -2043,12 +1960,10 @@
"source": [
"class MyStyler(Styler):\n",
" env = Environment(\n",
- " loader=ChoiceLoader(\n",
- " [\n",
- " FileSystemLoader(\"templates\"), # contains ours\n",
- " Styler.loader, # the default\n",
- " ]\n",
- " )\n",
+ " loader=ChoiceLoader([\n",
+ " FileSystemLoader(\"templates\"), # contains ours\n",
+ " Styler.loader, # the default\n",
+ " ])\n",
" )\n",
" template_html_table = env.get_template(\"myhtml.tpl\")"
]
@@ -2191,7 +2106,7 @@
"# from IPython.display import HTML\n",
"# with open(\"themes/nature_with_gtoc/static/nature.css_t\") as f:\n",
"# css = f.read()\n",
- "\n",
+ " \n",
"# HTML(''.format(css))"
]
}
diff --git a/pandas/_config/config.py b/pandas/_config/config.py
index 25760df6bd7a4..4ed2d4c3be692 100644
--- a/pandas/_config/config.py
+++ b/pandas/_config/config.py
@@ -411,7 +411,7 @@ def __dir__(self) -> list[str]:
@contextmanager
-def option_context(*args) -> Generator[None]:
+def option_context(*args) -> Generator[None, None, None]:
"""
Context manager to temporarily set options in a ``with`` statement.
@@ -718,7 +718,7 @@ def _build_option_description(k: str) -> str:
@contextmanager
-def config_prefix(prefix: str) -> Generator[None]:
+def config_prefix(prefix: str) -> Generator[None, None, None]:
"""
contextmanager for multiple invocations of API with a common prefix
diff --git a/pandas/_config/localization.py b/pandas/_config/localization.py
index 6602633f20399..61d88c43f0e4a 100644
--- a/pandas/_config/localization.py
+++ b/pandas/_config/localization.py
@@ -25,7 +25,7 @@
@contextmanager
def set_locale(
new_locale: str | tuple[str, str], lc_var: int = locale.LC_ALL
-) -> Generator[str | tuple[str, str]]:
+) -> Generator[str | tuple[str, str], None, None]:
"""
Context manager for temporarily setting a locale.
diff --git a/pandas/_testing/_warnings.py b/pandas/_testing/_warnings.py
index a752c8db90f38..cd2e2b4141ffd 100644
--- a/pandas/_testing/_warnings.py
+++ b/pandas/_testing/_warnings.py
@@ -35,7 +35,7 @@ def assert_produces_warning(
raise_on_extra_warnings: bool = True,
match: str | tuple[str | None, ...] | None = None,
must_find_all_warnings: bool = True,
-) -> Generator[list[warnings.WarningMessage]]:
+) -> Generator[list[warnings.WarningMessage], None, None]:
"""
Context manager for running code expected to either raise a specific warning,
multiple specific warnings, or not raise any warnings. Verifies that the code
diff --git a/pandas/_testing/contexts.py b/pandas/_testing/contexts.py
index f041d8755bb64..91b5d2a981bef 100644
--- a/pandas/_testing/contexts.py
+++ b/pandas/_testing/contexts.py
@@ -29,7 +29,7 @@
@contextmanager
def decompress_file(
path: FilePath | BaseBuffer, compression: CompressionOptions
-) -> Generator[IO[bytes]]:
+) -> Generator[IO[bytes], None, None]:
"""
Open a compressed file and return a file object.
@@ -50,7 +50,7 @@ def decompress_file(
@contextmanager
-def set_timezone(tz: str) -> Generator[None]:
+def set_timezone(tz: str) -> Generator[None, None, None]:
"""
Context manager for temporarily setting a timezone.
@@ -91,7 +91,7 @@ def setTZ(tz) -> None:
@contextmanager
-def ensure_clean(filename=None) -> Generator[Any]:
+def ensure_clean(filename=None) -> Generator[Any, None, None]:
"""
Gets a temporary path and agrees to remove on close.
@@ -123,7 +123,7 @@ def ensure_clean(filename=None) -> Generator[Any]:
@contextmanager
-def with_csv_dialect(name: str, **kwargs) -> Generator[None]:
+def with_csv_dialect(name: str, **kwargs) -> Generator[None, None, None]:
"""
Context manager to temporarily register a CSV dialect for parsing CSV.
diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py
index beaaa3f8ed3cc..28985a1380bee 100644
--- a/pandas/compat/pickle_compat.py
+++ b/pandas/compat/pickle_compat.py
@@ -131,7 +131,7 @@ def loads(
@contextlib.contextmanager
-def patch_pickle() -> Generator[None]:
+def patch_pickle() -> Generator[None, None, None]:
"""
Temporarily patch pickle to use our unpickler.
"""
diff --git a/pandas/core/_numba/kernels/min_max_.py b/pandas/core/_numba/kernels/min_max_.py
index 6e57e62c13a6e..59d36732ebae6 100644
--- a/pandas/core/_numba/kernels/min_max_.py
+++ b/pandas/core/_numba/kernels/min_max_.py
@@ -112,9 +112,11 @@ def grouped_min_max(
continue
if is_max:
- output[lab] = max(val, output[lab])
+ if val > output[lab]:
+ output[lab] = val
else:
- output[lab] = min(val, output[lab])
+ if val < output[lab]:
+ output[lab] = val
# Set labels that don't satisfy min_periods as np.nan
for lab, count in enumerate(nobs):
diff --git a/pandas/core/apply.py b/pandas/core/apply.py
index ac336713a70f3..5959156d11123 100644
--- a/pandas/core/apply.py
+++ b/pandas/core/apply.py
@@ -806,7 +806,7 @@ def result_columns(self) -> Index:
@property
@abc.abstractmethod
- def series_generator(self) -> Generator[Series]:
+ def series_generator(self) -> Generator[Series, None, None]:
pass
@staticmethod
@@ -1131,7 +1131,7 @@ class FrameRowApply(FrameApply):
axis: AxisInt = 0
@property
- def series_generator(self) -> Generator[Series]:
+ def series_generator(self) -> Generator[Series, None, None]:
return (self.obj._ixs(i, axis=1) for i in range(len(self.columns)))
@staticmethod
@@ -1243,7 +1243,7 @@ def apply_broadcast(self, target: DataFrame) -> DataFrame:
return result.T
@property
- def series_generator(self) -> Generator[Series]:
+ def series_generator(self) -> Generator[Series, None, None]:
values = self.values
values = ensure_wrapped_if_datetimelike(values)
assert len(values) > 0
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 058145e34e455..201c449185057 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -2914,7 +2914,7 @@ def _generate_range(
offset: BaseOffset,
*,
unit: str,
-) -> Generator[Timestamp]:
+) -> Generator[Timestamp, None, None]:
"""
Generates a sequence of dates corresponding to the specified time
offset. Similar to dateutil.rrule except uses pandas DateOffset
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 642f14442b53e..ec0473a20458b 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -562,7 +562,7 @@ def convert_to_list_like(
@contextlib.contextmanager
def temp_setattr(
obj, attr: str, value, condition: bool = True
-) -> Generator[None]:
+) -> Generator[None, None, None]:
"""
Temporarily set attribute on an object.
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 9b780e512a11d..6ba07b1761557 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -683,7 +683,9 @@ def _maybe_promote(dtype: np.dtype, fill_value=np.nan):
elif dtype.kind == "f":
mst = np.min_scalar_type(fill_value)
- dtype = max(mst, dtype)
+ if mst > dtype:
+ # e.g. mst is np.float64 and dtype is np.float32
+ dtype = mst
elif dtype.kind == "c":
mst = np.min_scalar_type(fill_value)
@@ -716,7 +718,9 @@ def _maybe_promote(dtype: np.dtype, fill_value=np.nan):
elif dtype.kind == "c":
mst = np.min_scalar_type(fill_value)
- dtype = max(mst, dtype)
+ if mst > dtype:
+ # e.g. mst is np.complex128 and dtype is np.complex64
+ dtype = mst
else:
dtype = np.dtype(np.object_)
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index 008fe9bdee5bd..da80969b613cd 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -865,7 +865,7 @@ def _unob_index_and_ids(
return unob_index, unob_ids
@final
- def get_group_levels(self) -> Generator[Index]:
+ def get_group_levels(self) -> Generator[Index, None, None]:
# Note: only called from _insert_inaxis_grouper, which
# is only called for BaseGrouper, never for BinGrouper
result_index = self.result_index
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 1c57e6fe1a952..a4b92e70427ce 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -2666,7 +2666,7 @@ def _reorder_ilevels(self, order) -> MultiIndex:
def _recode_for_new_levels(
self, new_levels, copy: bool = True
- ) -> Generator[np.ndarray]:
+ ) -> Generator[np.ndarray, None, None]:
if len(new_levels) > self.nlevels:
raise AssertionError(
f"Length of new_levels ({len(new_levels)}) "
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index d3ff7398864d5..dced92ba04520 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -388,7 +388,7 @@ def _split_op_result(self, result: ArrayLike) -> list[Block]:
return [nb]
@final
- def _split(self) -> Generator[Block]:
+ def _split(self) -> Generator[Block, None, None]:
"""
Split a block into a list of single-column blocks.
"""
diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py
index 2ee7d3948a70f..b96d5a59effce 100644
--- a/pandas/core/internals/concat.py
+++ b/pandas/core/internals/concat.py
@@ -250,7 +250,7 @@ def _concat_homogeneous_fastpath(
def _get_combined_plan(
mgrs: list[BlockManager],
-) -> Generator[tuple[BlockPlacement, list[JoinUnit]]]:
+) -> Generator[tuple[BlockPlacement, list[JoinUnit]], None, None]:
max_len = mgrs[0].shape[0]
blknos_list = [mgr.blknos for mgr in mgrs]
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index a3738bb25f56c..aa4a785519051 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -856,7 +856,7 @@ def _slice_take_blocks_ax0(
*,
use_na_proxy: bool = False,
ref_inplace_op: bool = False,
- ) -> Generator[Block]:
+ ) -> Generator[Block, None, None]:
"""
Slice/take blocks along axis=0.
@@ -1731,7 +1731,7 @@ def unstack(self, unstacker, fill_value) -> BlockManager:
bm = BlockManager(new_blocks, [new_columns, new_index], verify_integrity=False)
return bm
- def to_iter_dict(self) -> Generator[tuple[str, Self]]:
+ def to_iter_dict(self) -> Generator[tuple[str, Self], None, None]:
"""
Yield a tuple of (str(dtype), BlockManager)
diff --git a/pandas/core/methods/to_dict.py b/pandas/core/methods/to_dict.py
index aea95e4684573..84202a4fcc840 100644
--- a/pandas/core/methods/to_dict.py
+++ b/pandas/core/methods/to_dict.py
@@ -33,7 +33,7 @@
def create_data_for_split(
df: DataFrame, are_all_object_dtype_cols: bool, object_dtype_indices: list[int]
-) -> Generator[list]:
+) -> Generator[list, None, None]:
"""
Simple helper method to create data for to ``to_dict(orient="split")``
to create the main output data
diff --git a/pandas/io/excel/_odfreader.py b/pandas/io/excel/_odfreader.py
index 62002e4844b32..f79417d11080d 100644
--- a/pandas/io/excel/_odfreader.py
+++ b/pandas/io/excel/_odfreader.py
@@ -142,7 +142,8 @@ def get_sheet_data(
empty_cells = 0
table_row.extend([value] * column_repeat)
- max_row_len = max(max_row_len, len(table_row))
+ if max_row_len < len(table_row):
+ max_row_len = len(table_row)
row_repeat = self._get_row_repeat(sheet_row)
if len(table_row) == 0:
diff --git a/pandas/io/formats/css.py b/pandas/io/formats/css.py
index 750c6dc1180e9..0af04526ea96d 100644
--- a/pandas/io/formats/css.py
+++ b/pandas/io/formats/css.py
@@ -36,7 +36,7 @@ def _side_expander(prop_fmt: str) -> Callable:
def expand(
self: CSSResolver, prop: str, value: str
- ) -> Generator[tuple[str, str]]:
+ ) -> Generator[tuple[str, str], None, None]:
"""
Expand shorthand property into side-specific property (top, right, bottom, left)
@@ -83,7 +83,7 @@ def _border_expander(side: str = "") -> Callable:
def expand(
self: CSSResolver, prop: str, value: str
- ) -> Generator[tuple[str, str]]:
+ ) -> Generator[tuple[str, str], None, None]:
"""
Expand border into color, style, and width tuples
@@ -392,7 +392,7 @@ def _error() -> str:
size_fmt = f"{val:f}pt"
return size_fmt
- def atomize(self, declarations: Iterable) -> Generator[tuple[str, str]]:
+ def atomize(self, declarations: Iterable) -> Generator[tuple[str, str], None, None]:
for prop, value in declarations:
prop = prop.lower()
value = value.lower()
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index 5aecc6af712e5..9ad5ac83e9eae 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -1024,7 +1024,7 @@ def save_to_buffer(
@contextmanager
def _get_buffer(
buf: FilePath | WriteBuffer[str] | None, encoding: str | None = None
-) -> Generator[WriteBuffer[str]] | Generator[StringIO]:
+) -> Generator[WriteBuffer[str], None, None] | Generator[StringIO, None, None]:
"""
Context manager to open, yield and close buffer for filenames or Path-like
objects, otherwise yield buf unchanged.
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index d424ba09804af..99dd06568fa01 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -1119,7 +1119,7 @@ def _query_iterator(
coerce_float: bool = True,
parse_dates=None,
dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
- ) -> Generator[DataFrame]:
+ ) -> Generator[DataFrame, None, None]:
"""Return generator through chunked result set."""
has_read_data = False
with exit_stack:
@@ -1732,7 +1732,7 @@ def _query_iterator(
parse_dates=None,
dtype: DtypeArg | None = None,
dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
- ) -> Generator[DataFrame]:
+ ) -> Generator[DataFrame, None, None]:
"""Return generator through chunked result set"""
has_read_data = False
with exit_stack:
@@ -2682,7 +2682,7 @@ def _query_iterator(
parse_dates=None,
dtype: DtypeArg | None = None,
dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
- ) -> Generator[DataFrame]:
+ ) -> Generator[DataFrame, None, None]:
"""Return generator through chunked result set"""
has_read_data = False
while True:
diff --git a/pandas/plotting/_matplotlib/converter.py b/pandas/plotting/_matplotlib/converter.py
index 29f9b3ee116fc..fc63d65f1e160 100644
--- a/pandas/plotting/_matplotlib/converter.py
+++ b/pandas/plotting/_matplotlib/converter.py
@@ -92,7 +92,7 @@ def wrapper(*args, **kwargs):
@contextlib.contextmanager
-def pandas_converters() -> Generator[None]:
+def pandas_converters() -> Generator[None, None, None]:
"""
Context manager registering pandas' converters for a plot.
diff --git a/pandas/plotting/_matplotlib/tools.py b/pandas/plotting/_matplotlib/tools.py
index d5624aecd1215..f9c370b2486fd 100644
--- a/pandas/plotting/_matplotlib/tools.py
+++ b/pandas/plotting/_matplotlib/tools.py
@@ -442,7 +442,7 @@ def handle_shared_axes(
_remove_labels_from_axis(ax.yaxis)
-def flatten_axes(axes: Axes | Iterable[Axes]) -> Generator[Axes]:
+def flatten_axes(axes: Axes | Iterable[Axes]) -> Generator[Axes, None, None]:
if not is_list_like(axes):
yield axes # type: ignore[misc]
elif isinstance(axes, (np.ndarray, ABCIndex)):
diff --git a/pandas/plotting/_misc.py b/pandas/plotting/_misc.py
index 1ebf1b16d419e..d8455f44ef0d1 100644
--- a/pandas/plotting/_misc.py
+++ b/pandas/plotting/_misc.py
@@ -704,7 +704,7 @@ def _get_canonical_key(self, key: str) -> str:
return self._ALIASES.get(key, key)
@contextmanager
- def use(self, key, value) -> Generator[_Options]:
+ def use(self, key, value) -> Generator[_Options, None, None]:
"""
Temporarily set a parameter value using the with statement.
Aliasing allowed.
diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py
index d48a2d3c9c027..d205569270705 100644
--- a/pandas/tests/arithmetic/test_numeric.py
+++ b/pandas/tests/arithmetic/test_numeric.py
@@ -274,7 +274,9 @@ def test_numeric_arr_rdiv_tdscalar(self, three_days, numeric_idx, box_with_array
expected = TimedeltaIndex(["3 Days", "36 Hours"])
if isinstance(three_days, np.timedelta64):
dtype = three_days.dtype
- dtype = max(dtype, np.dtype("m8[s]"))
+ if dtype < np.dtype("m8[s]"):
+ # i.e. resolution is lower -> use lowest supported resolution
+ dtype = np.dtype("m8[s]")
expected = expected.astype(dtype)
elif type(three_days) is timedelta:
expected = expected.astype("m8[us]")
diff --git a/pandas/util/_exceptions.py b/pandas/util/_exceptions.py
index f77f3f9083901..5f50838d37315 100644
--- a/pandas/util/_exceptions.py
+++ b/pandas/util/_exceptions.py
@@ -13,7 +13,7 @@
@contextlib.contextmanager
-def rewrite_exception(old_name: str, new_name: str) -> Generator[None]:
+def rewrite_exception(old_name: str, new_name: str) -> Generator[None, None, None]:
"""
Rewrite the message of an exception.
"""
@@ -66,7 +66,7 @@ def rewrite_warning(
target_category: type[Warning],
new_message: str,
new_category: type[Warning] | None = None,
-) -> Generator[None]:
+) -> Generator[None, None, None]:
"""
Rewrite the message of a warning.
From 29f4f89beeb62a904315bb9ff226bc9d63d6d1cf Mon Sep 17 00:00:00 2001
From: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com>
Date: Tue, 3 Sep 2024 13:32:41 -0700
Subject: [PATCH 10/12] Ignore unnecessary-default-type-args
---
pyproject.toml | 2 ++
1 file changed, 2 insertions(+)
diff --git a/pyproject.toml b/pyproject.toml
index 645ded35f3d18..07b753b08fbc1 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -334,6 +334,8 @@ ignore = [
"RUF012",
# type-comparison
"E721",
+ # unnecessary-default-type-args
+ "UP043",
# Additional pylint rules
# literal-membership
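
Note on the rule being ignored above: ruff's UP043 ("unnecessary-default-type-args") flags generator annotations that spell out the send/return types which PEP 696 now defaults to None, and suggests the shorter one-argument form. The sketch below is illustrative only and is not part of the patch; it shows the two spellings this series chooses between, and older type checkers may not yet accept the short form, which is presumably why the explicit spelling is restored and the rule ignored.

    from collections.abc import Generator

    def count_up(n: int) -> Generator[int, None, None]:
        # explicit send/return types, accepted by all currently supported type checkers
        yield from range(n)

    def count_up_short(n: int) -> Generator[int]:
        # with PEP 696 default type args this is equivalent, but type checkers
        # without that support may reject the one-argument annotation
        yield from range(n)
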
From bba424200b30bbd7f9df1df9e342407be9cc0c1b Mon Sep 17 00:00:00 2001
From: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com>
Date: Tue, 3 Sep 2024 13:40:38 -0700
Subject: [PATCH 11/12] redo formatting
---
doc/source/user_guide/style.ipynb | 687 ++++++++++++++----------
pandas/core/_numba/kernels/min_max_.py | 6 +-
pandas/core/arrays/string_.py | 2 +-
pandas/core/dtypes/cast.py | 8 +-
pandas/io/excel/_odfreader.py | 3 +-
pandas/tests/arithmetic/test_numeric.py | 4 +-
pyproject.toml | 4 +
7 files changed, 404 insertions(+), 310 deletions(-)
diff --git a/doc/source/user_guide/style.ipynb b/doc/source/user_guide/style.ipynb
index daecfce6ecebc..a38e8cf928916 100644
--- a/doc/source/user_guide/style.ipynb
+++ b/doc/source/user_guide/style.ipynb
@@ -46,7 +46,6 @@
},
"outputs": [],
"source": [
- "import matplotlib.pyplot\n",
"# We have this here to trigger matplotlib's font cache stuff.\n",
"# This cell is hidden from the output"
]
@@ -78,17 +77,15 @@
"source": [
"import pandas as pd\n",
"import numpy as np\n",
- "import matplotlib as mpl\n",
"\n",
- "df = pd.DataFrame({\n",
- " \"strings\": [\"Adam\", \"Mike\"],\n",
- " \"ints\": [1, 3],\n",
- " \"floats\": [1.123, 1000.23]\n",
- "})\n",
- "df.style \\\n",
- " .format(precision=3, thousands=\".\", decimal=\",\") \\\n",
- " .format_index(str.upper, axis=1) \\\n",
- " .relabel_index([\"row 1\", \"row 2\"], axis=0)"
+ "rng = np.random.default_rng(2)\n",
+ "\n",
+ "df = pd.DataFrame(\n",
+ " {\"strings\": [\"Adam\", \"Mike\"], \"ints\": [1, 3], \"floats\": [1.123, 1000.23]}\n",
+ ")\n",
+ "df.style.format(precision=3, thousands=\".\", decimal=\",\").format_index(\n",
+ " str.upper, axis=1\n",
+ ").relabel_index([\"row 1\", \"row 2\"], axis=0)"
]
},
{
@@ -104,17 +101,21 @@
"metadata": {},
"outputs": [],
"source": [
- "weather_df = pd.DataFrame(np.random.rand(10,2)*5, \n",
- " index=pd.date_range(start=\"2021-01-01\", periods=10),\n",
- " columns=[\"Tokyo\", \"Beijing\"])\n",
+ "weather_df = pd.DataFrame(\n",
+ " rng.standard_normal(10, 2) * 5,\n",
+ " index=pd.date_range(start=\"2021-01-01\", periods=10),\n",
+ " columns=[\"Tokyo\", \"Beijing\"],\n",
+ ")\n",
+ "\n",
"\n",
- "def rain_condition(v): \n",
+ "def rain_condition(v):\n",
" if v < 1.75:\n",
" return \"Dry\"\n",
" elif v < 2.75:\n",
" return \"Rain\"\n",
" return \"Heavy Rain\"\n",
"\n",
+ "\n",
"def make_pretty(styler):\n",
" styler.set_caption(\"Weather Conditions\")\n",
" styler.format(rain_condition)\n",
@@ -122,6 +123,7 @@
" styler.background_gradient(axis=None, vmin=1, vmax=5, cmap=\"YlGnBu\")\n",
" return styler\n",
"\n",
+ "\n",
"weather_df"
]
},
@@ -157,10 +159,8 @@
"metadata": {},
"outputs": [],
"source": [
- "df = pd.DataFrame(np.random.randn(5, 5))\n",
- "df.style \\\n",
- " .hide(subset=[0, 2, 4], axis=0) \\\n",
- " .hide(subset=[0, 2, 4], axis=1)"
+ "df = pd.DataFrame(rng.standard_normal(5, 5))\n",
+ "df.style.hide(subset=[0, 2, 4], axis=0).hide(subset=[0, 2, 4], axis=1)"
]
},
{
@@ -177,9 +177,9 @@
"outputs": [],
"source": [
"show = [0, 2, 4]\n",
- "df.style \\\n",
- " .hide([row for row in df.index if row not in show], axis=0) \\\n",
- " .hide([col for col in df.columns if col not in show], axis=1)"
+ "df.style.hide([row for row in df.index if row not in show], axis=0).hide(\n",
+ " [col for col in df.columns if col not in show], axis=1\n",
+ ")"
]
},
{
@@ -199,9 +199,9 @@
"metadata": {},
"outputs": [],
"source": [
- "summary_styler = df.agg([\"sum\", \"mean\"]).style \\\n",
- " .format(precision=3) \\\n",
- " .relabel_index([\"Sum\", \"Average\"])\n",
+ "summary_styler = (\n",
+ " df.agg([\"sum\", \"mean\"]).style.format(precision=3).relabel_index([\"Sum\", \"Average\"])\n",
+ ")\n",
"df.style.format(precision=1).concat(summary_styler)"
]
},
@@ -227,9 +227,16 @@
"metadata": {},
"outputs": [],
"source": [
- "df = pd.DataFrame([[38.0, 2.0, 18.0, 22.0, 21, np.nan],[19, 439, 6, 452, 226,232]], \n",
- " index=pd.Index(['Tumour (Positive)', 'Non-Tumour (Negative)'], name='Actual Label:'), \n",
- " columns=pd.MultiIndex.from_product([['Decision Tree', 'Regression', 'Random'],['Tumour', 'Non-Tumour']], names=['Model:', 'Predicted:']))\n",
+ "df = pd.DataFrame(\n",
+ " [[38.0, 2.0, 18.0, 22.0, 21, np.nan], [19, 439, 6, 452, 226, 232]],\n",
+ " index=pd.Index(\n",
+ " [\"Tumour (Positive)\", \"Non-Tumour (Negative)\"], name=\"Actual Label:\"\n",
+ " ),\n",
+ " columns=pd.MultiIndex.from_product(\n",
+ " [[\"Decision Tree\", \"Regression\", \"Random\"], [\"Tumour\", \"Non-Tumour\"]],\n",
+ " names=[\"Model:\", \"Predicted:\"],\n",
+ " ),\n",
+ ")\n",
"df.style"
]
},
@@ -242,63 +249,73 @@
"outputs": [],
"source": [
"# Hidden cell to just create the below example: code is covered throughout the guide.\n",
- "s = df.style\\\n",
- " .hide([('Random', 'Tumour'), ('Random', 'Non-Tumour')], axis='columns')\\\n",
- " .format('{:.0f}')\\\n",
- " .set_table_styles([{\n",
- " 'selector': '',\n",
- " 'props': 'border-collapse: separate;'\n",
- " },{\n",
- " 'selector': 'caption',\n",
- " 'props': 'caption-side: bottom; font-size:1.3em;'\n",
- " },{\n",
- " 'selector': '.index_name',\n",
- " 'props': 'font-style: italic; color: darkgrey; font-weight:normal;'\n",
- " },{\n",
- " 'selector': 'th:not(.index_name)',\n",
- " 'props': 'background-color: #000066; color: white;'\n",
- " },{\n",
- " 'selector': 'th.col_heading',\n",
- " 'props': 'text-align: center;'\n",
- " },{\n",
- " 'selector': 'th.col_heading.level0',\n",
- " 'props': 'font-size: 1.5em;'\n",
- " },{\n",
- " 'selector': 'th.col2',\n",
- " 'props': 'border-left: 1px solid white;'\n",
- " },{\n",
- " 'selector': '.col2',\n",
- " 'props': 'border-left: 1px solid #000066;'\n",
- " },{\n",
- " 'selector': 'td',\n",
- " 'props': 'text-align: center; font-weight:bold;'\n",
- " },{\n",
- " 'selector': '.true',\n",
- " 'props': 'background-color: #e6ffe6;'\n",
- " },{\n",
- " 'selector': '.false',\n",
- " 'props': 'background-color: #ffe6e6;'\n",
- " },{\n",
- " 'selector': '.border-red',\n",
- " 'props': 'border: 2px dashed red;'\n",
- " },{\n",
- " 'selector': '.border-green',\n",
- " 'props': 'border: 2px dashed green;'\n",
- " },{\n",
- " 'selector': 'td:hover',\n",
- " 'props': 'background-color: #ffffb3;'\n",
- " }])\\\n",
- " .set_td_classes(pd.DataFrame([['true border-green', 'false', 'true', 'false border-red', '', ''],\n",
- " ['false', 'true', 'false', 'true', '', '']], \n",
- " index=df.index, columns=df.columns))\\\n",
- " .set_caption(\"Confusion matrix for multiple cancer prediction models.\")\\\n",
- " .set_tooltips(pd.DataFrame([['This model has a very strong true positive rate', '', '', \"This model's total number of false negatives is too high\", '', ''],\n",
- " ['', '', '', '', '', '']], \n",
- " index=df.index, columns=df.columns),\n",
- " css_class='pd-tt', props=\n",
- " 'visibility: hidden; position: absolute; z-index: 1; border: 1px solid #000066;'\n",
- " 'background-color: white; color: #000066; font-size: 0.8em;' \n",
- " 'transform: translate(0px, -24px); padding: 0.6em; border-radius: 0.5em;')\n"
+ "s = (\n",
+ " df.style.hide([(\"Random\", \"Tumour\"), (\"Random\", \"Non-Tumour\")], axis=\"columns\")\n",
+ " .format(\"{:.0f}\")\n",
+ " .set_table_styles(\n",
+ " [\n",
+ " {\"selector\": \"\", \"props\": \"border-collapse: separate;\"},\n",
+ " {\"selector\": \"caption\", \"props\": \"caption-side: bottom; font-size:1.3em;\"},\n",
+ " {\n",
+ " \"selector\": \".index_name\",\n",
+ " \"props\": \"font-style: italic; color: darkgrey; font-weight:normal;\",\n",
+ " },\n",
+ " {\n",
+ " \"selector\": \"th:not(.index_name)\",\n",
+ " \"props\": \"background-color: #000066; color: white;\",\n",
+ " },\n",
+ " {\"selector\": \"th.col_heading\", \"props\": \"text-align: center;\"},\n",
+ " {\"selector\": \"th.col_heading.level0\", \"props\": \"font-size: 1.5em;\"},\n",
+ " {\"selector\": \"th.col2\", \"props\": \"border-left: 1px solid white;\"},\n",
+ " {\"selector\": \".col2\", \"props\": \"border-left: 1px solid #000066;\"},\n",
+ " {\"selector\": \"td\", \"props\": \"text-align: center; font-weight:bold;\"},\n",
+ " {\"selector\": \".true\", \"props\": \"background-color: #e6ffe6;\"},\n",
+ " {\"selector\": \".false\", \"props\": \"background-color: #ffe6e6;\"},\n",
+ " {\"selector\": \".border-red\", \"props\": \"border: 2px dashed red;\"},\n",
+ " {\"selector\": \".border-green\", \"props\": \"border: 2px dashed green;\"},\n",
+ " {\"selector\": \"td:hover\", \"props\": \"background-color: #ffffb3;\"},\n",
+ " ]\n",
+ " )\n",
+ " .set_td_classes(\n",
+ " pd.DataFrame(\n",
+ " [\n",
+ " [\"true border-green\", \"false\", \"true\", \"false border-red\", \"\", \"\"],\n",
+ " [\"false\", \"true\", \"false\", \"true\", \"\", \"\"],\n",
+ " ],\n",
+ " index=df.index,\n",
+ " columns=df.columns,\n",
+ " )\n",
+ " )\n",
+ " .set_caption(\"Confusion matrix for multiple cancer prediction models.\")\n",
+ " .set_tooltips(\n",
+ " pd.DataFrame(\n",
+ " [\n",
+ " [\n",
+ " \"This model has a very strong true positive rate\",\n",
+ " \"\",\n",
+ " \"\",\n",
+ " \"This model's total number of false negatives is too high\",\n",
+ " \"\",\n",
+ " \"\",\n",
+ " ],\n",
+ " [\"\", \"\", \"\", \"\", \"\", \"\"],\n",
+ " ],\n",
+ " index=df.index,\n",
+ " columns=df.columns,\n",
+ " ),\n",
+ " css_class=\"pd-tt\",\n",
+ " props=\"visibility: hidden;\"\n",
+ " \"position: absolute;\"\n",
+ " \"z-index: 1;\"\n",
+ " \"border: 1px solid #000066;\"\n",
+ " \"background-color: white;\"\n",
+ " \"color: #000066;\"\n",
+ " \"font-size: 0.8em;\"\n",
+ " \"transform: translate(0px, -24px);\"\n",
+ " \"padding: 0.6em;\"\n",
+ " \"border-radius: 0.5em;\",\n",
+ " )\n",
+ ")"
]
},
{
@@ -325,7 +342,9 @@
"metadata": {},
"outputs": [],
"source": [
- "s = df.style.format('{:.0f}').hide([('Random', 'Tumour'), ('Random', 'Non-Tumour')], axis=\"columns\")\n",
+ "s = df.style.format(\"{:.0f}\").hide(\n",
+ " [(\"Random\", \"Tumour\"), (\"Random\", \"Non-Tumour\")], axis=\"columns\"\n",
+ ")\n",
"s"
]
},
@@ -337,8 +356,8 @@
},
"outputs": [],
"source": [
- "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting \n",
- "s.set_uuid('after_hide')"
+ "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting\n",
+ "s.set_uuid(\"after_hide\")"
]
},
{
@@ -395,16 +414,16 @@
"outputs": [],
"source": [
"cell_hover = { # for row hover use |
instead of \n",
- " 'selector': 'td:hover',\n",
- " 'props': [('background-color', '#ffffb3')]\n",
+ " \"selector\": \"td:hover\",\n",
+ " \"props\": [(\"background-color\", \"#ffffb3\")],\n",
"}\n",
"index_names = {\n",
- " 'selector': '.index_name',\n",
- " 'props': 'font-style: italic; color: darkgrey; font-weight:normal;'\n",
+ " \"selector\": \".index_name\",\n",
+ " \"props\": \"font-style: italic; color: darkgrey; font-weight:normal;\",\n",
"}\n",
"headers = {\n",
- " 'selector': 'th:not(.index_name)',\n",
- " 'props': 'background-color: #000066; color: white;'\n",
+ " \"selector\": \"th:not(.index_name)\",\n",
+ " \"props\": \"background-color: #000066; color: white;\",\n",
"}\n",
"s.set_table_styles([cell_hover, index_names, headers])"
]
@@ -417,8 +436,8 @@
},
"outputs": [],
"source": [
- "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting \n",
- "s.set_uuid('after_tab_styles1')"
+ "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting\n",
+ "s.set_uuid(\"after_tab_styles1\")"
]
},
{
@@ -434,11 +453,14 @@
"metadata": {},
"outputs": [],
"source": [
- "s.set_table_styles([\n",
- " {'selector': 'th.col_heading', 'props': 'text-align: center;'},\n",
- " {'selector': 'th.col_heading.level0', 'props': 'font-size: 1.5em;'},\n",
- " {'selector': 'td', 'props': 'text-align: center; font-weight: bold;'},\n",
- "], overwrite=False)"
+ "s.set_table_styles(\n",
+ " [\n",
+ " {\"selector\": \"th.col_heading\", \"props\": \"text-align: center;\"},\n",
+ " {\"selector\": \"th.col_heading.level0\", \"props\": \"font-size: 1.5em;\"},\n",
+ " {\"selector\": \"td\", \"props\": \"text-align: center; font-weight: bold;\"},\n",
+ " ],\n",
+ " overwrite=False,\n",
+ ")"
]
},
{
@@ -449,8 +471,8 @@
},
"outputs": [],
"source": [
- "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting \n",
- "s.set_uuid('after_tab_styles2')"
+ "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting\n",
+ "s.set_uuid(\"after_tab_styles2\")"
]
},
{
@@ -468,10 +490,16 @@
"metadata": {},
"outputs": [],
"source": [
- "s.set_table_styles({\n",
- " ('Regression', 'Tumour'): [{'selector': 'th', 'props': 'border-left: 1px solid white'},\n",
- " {'selector': 'td', 'props': 'border-left: 1px solid #000066'}]\n",
- "}, overwrite=False, axis=0)"
+ "s.set_table_styles(\n",
+ " {\n",
+ " (\"Regression\", \"Tumour\"): [\n",
+ " {\"selector\": \"th\", \"props\": \"border-left: 1px solid white\"},\n",
+ " {\"selector\": \"td\", \"props\": \"border-left: 1px solid #000066\"},\n",
+ " ]\n",
+ " },\n",
+ " overwrite=False,\n",
+ " axis=0,\n",
+ ")"
]
},
{
@@ -482,8 +510,8 @@
},
"outputs": [],
"source": [
- "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting \n",
- "s.set_uuid('xyz01')"
+ "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting\n",
+ "s.set_uuid(\"xyz01\")"
]
},
{
@@ -508,7 +536,7 @@
"outputs": [],
"source": [
"out = s.set_table_attributes('class=\"my-table-cls\"').to_html()\n",
- "print(out[out.find(' -0.3) else None)\n",
+ "\n",
+ "\n",
+ "s2 = df2.style.map(style_negative, props=\"color:red;\").map(\n",
+ " lambda v: \"opacity: 20%;\" if (v < 0.3) and (v > -0.3) else None\n",
+ ")\n",
"s2"
]
},
@@ -612,8 +646,8 @@
},
"outputs": [],
"source": [
- "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting \n",
- "s2.set_uuid('after_applymap')"
+ "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting\n",
+ "s2.set_uuid(\"after_applymap\")"
]
},
{
@@ -629,9 +663,11 @@
"metadata": {},
"outputs": [],
"source": [
- "def highlight_max(s, props=''):\n",
- " return np.where(s == np.nanmax(s.values), props, '')\n",
- "s2.apply(highlight_max, props='color:white;background-color:darkblue', axis=0)"
+ "def highlight_max(s, props=\"\"):\n",
+ " return np.where(s == np.nanmax(s.values), props, \"\")\n",
+ "\n",
+ "\n",
+ "s2.apply(highlight_max, props=\"color:white;background-color:darkblue\", axis=0)"
]
},
{
@@ -642,8 +678,8 @@
},
"outputs": [],
"source": [
- "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting \n",
- "s2.set_uuid('after_apply')"
+ "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting\n",
+ "s2.set_uuid(\"after_apply\")"
]
},
{
@@ -659,8 +695,9 @@
"metadata": {},
"outputs": [],
"source": [
- "s2.apply(highlight_max, props='color:white;background-color:pink;', axis=1)\\\n",
- " .apply(highlight_max, props='color:white;background-color:purple', axis=None)"
+ "s2.apply(highlight_max, props=\"color:white;background-color:pink;\", axis=1).apply(\n",
+ " highlight_max, props=\"color:white;background-color:purple\", axis=None\n",
+ ")"
]
},
{
@@ -671,8 +708,8 @@
},
"outputs": [],
"source": [
- "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting \n",
- "s2.set_uuid('after_apply_again')"
+ "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting\n",
+ "s2.set_uuid(\"after_apply_again\")"
]
},
{
@@ -713,8 +750,10 @@
"metadata": {},
"outputs": [],
"source": [
- "s2.map_index(lambda v: \"color:pink;\" if v>4 else \"color:darkblue;\", axis=0)\n",
- "s2.apply_index(lambda s: np.where(s.isin([\"A\", \"B\"]), \"color:pink;\", \"color:darkblue;\"), axis=1)"
+ "s2.map_index(lambda v: \"color:pink;\" if v > 4 else \"color:darkblue;\", axis=0)\n",
+ "s2.apply_index(\n",
+ " lambda s: np.where(s.isin([\"A\", \"B\"]), \"color:pink;\", \"color:darkblue;\"), axis=1\n",
+ ")"
]
},
{
@@ -734,11 +773,12 @@
"metadata": {},
"outputs": [],
"source": [
- "s.set_caption(\"Confusion matrix for multiple cancer prediction models.\")\\\n",
- " .set_table_styles([{\n",
- " 'selector': 'caption',\n",
- " 'props': 'caption-side: bottom; font-size:1.25em;'\n",
- " }], overwrite=False)"
+ "s.set_caption(\n",
+ " \"Confusion matrix for multiple cancer prediction models.\"\n",
+ ").set_table_styles(\n",
+ " [{\"selector\": \"caption\", \"props\": \"caption-side: bottom; font-size:1.25em;\"}],\n",
+ " overwrite=False,\n",
+ ")"
]
},
{
@@ -749,8 +789,8 @@
},
"outputs": [],
"source": [
- "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting \n",
- "s.set_uuid('after_caption')"
+ "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting\n",
+ "s.set_uuid(\"after_caption\")"
]
},
{
@@ -768,12 +808,29 @@
"metadata": {},
"outputs": [],
"source": [
- "tt = pd.DataFrame([['This model has a very strong true positive rate', \n",
- " \"This model's total number of false negatives is too high\"]], \n",
- " index=['Tumour (Positive)'], columns=df.columns[[0,3]])\n",
- "s.set_tooltips(tt, props='visibility: hidden; position: absolute; z-index: 1; border: 1px solid #000066;'\n",
- " 'background-color: white; color: #000066; font-size: 0.8em;' \n",
- " 'transform: translate(0px, -24px); padding: 0.6em; border-radius: 0.5em;')"
+ "tt = pd.DataFrame(\n",
+ " [\n",
+ " [\n",
+ " \"This model has a very strong true positive rate\",\n",
+ " \"This model's total number of false negatives is too high\",\n",
+ " ]\n",
+ " ],\n",
+ " index=[\"Tumour (Positive)\"],\n",
+ " columns=df.columns[[0, 3]],\n",
+ ")\n",
+ "s.set_tooltips(\n",
+ " tt,\n",
+ " props=\"visibility: hidden;\"\n",
+ " \"position: absolute;\"\n",
+ " \"z-index: 1;\"\n",
+ " \"border: 1px solid #000066;\"\n",
+ " \"background-color: white;\"\n",
+ " \"color: #000066;\"\n",
+ " \"font-size: 0.8em;\"\n",
+ " \"transform: translate(0px, -24px);\"\n",
+ " \"padding: 0.6em;\"\n",
+ " \"border-radius: 0.5em;\",\n",
+ ")"
]
},
{
@@ -784,8 +841,8 @@
},
"outputs": [],
"source": [
- "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting \n",
- "s.set_uuid('after_tooltips')"
+ "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting\n",
+ "s.set_uuid(\"after_tooltips\")"
]
},
{
@@ -801,14 +858,18 @@
"metadata": {},
"outputs": [],
"source": [
- "s.set_table_styles([ # create internal CSS classes\n",
- " {'selector': '.border-red', 'props': 'border: 2px dashed red;'},\n",
- " {'selector': '.border-green', 'props': 'border: 2px dashed green;'},\n",
- "], overwrite=False)\n",
- "cell_border = pd.DataFrame([['border-green ', ' ', ' ', 'border-red '], \n",
- " [' ', ' ', ' ', ' ']], \n",
- " index=df.index, \n",
- " columns=df.columns[:4])\n",
+ "s.set_table_styles(\n",
+ " [ # create internal CSS classes\n",
+ " {\"selector\": \".border-red\", \"props\": \"border: 2px dashed red;\"},\n",
+ " {\"selector\": \".border-green\", \"props\": \"border: 2px dashed green;\"},\n",
+ " ],\n",
+ " overwrite=False,\n",
+ ")\n",
+ "cell_border = pd.DataFrame(\n",
+ " [[\"border-green \", \" \", \" \", \"border-red \"], [\" \", \" \", \" \", \" \"]],\n",
+ " index=df.index,\n",
+ " columns=df.columns[:4],\n",
+ ")\n",
"s.set_td_classes(cell_color + cell_border)"
]
},
@@ -820,8 +881,8 @@
},
"outputs": [],
"source": [
- "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting \n",
- "s.set_uuid('after_borders')"
+ "# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting\n",
+ "s.set_uuid(\"after_borders\")"
]
},
{
@@ -847,9 +908,11 @@
"metadata": {},
"outputs": [],
"source": [
- "df3 = pd.DataFrame(np.random.randn(4,4), \n",
- " pd.MultiIndex.from_product([['A', 'B'], ['r1', 'r2']]),\n",
- " columns=['c1','c2','c3','c4'])\n",
+ "df3 = pd.DataFrame(\n",
+ " rng.standard_normal(4, 4),\n",
+ " pd.MultiIndex.from_product([[\"A\", \"B\"], [\"r1\", \"r2\"]]),\n",
+ " columns=[\"c1\", \"c2\", \"c3\", \"c4\"],\n",
+ ")\n",
"df3"
]
},
@@ -866,9 +929,10 @@
"metadata": {},
"outputs": [],
"source": [
- "slice_ = ['c3', 'c4']\n",
- "df3.style.apply(highlight_max, props='color:red;', axis=0, subset=slice_)\\\n",
- " .set_properties(**{'background-color': '#ffffb3'}, subset=slice_)"
+ "slice_ = [\"c3\", \"c4\"]\n",
+ "df3.style.apply(\n",
+ " highlight_max, props=\"color:red;\", axis=0, subset=slice_\n",
+ ").set_properties(**{\"background-color\": \"#ffffb3\"}, subset=slice_)"
]
},
{
@@ -885,9 +949,10 @@
"outputs": [],
"source": [
"idx = pd.IndexSlice\n",
- "slice_ = idx[idx[:,'r1'], idx['c2':'c4']]\n",
- "df3.style.apply(highlight_max, props='color:red;', axis=0, subset=slice_)\\\n",
- " .set_properties(**{'background-color': '#ffffb3'}, subset=slice_)"
+ "slice_ = idx[idx[:, \"r1\"], idx[\"c2\":\"c4\"]]\n",
+ "df3.style.apply(\n",
+ " highlight_max, props=\"color:red;\", axis=0, subset=slice_\n",
+ ").set_properties(**{\"background-color\": \"#ffffb3\"}, subset=slice_)"
]
},
{
@@ -903,9 +968,10 @@
"metadata": {},
"outputs": [],
"source": [
- "slice_ = idx[idx[:,'r2'], :]\n",
- "df3.style.apply(highlight_max, props='color:red;', axis=1, subset=slice_)\\\n",
- " .set_properties(**{'background-color': '#ffffb3'}, subset=slice_)"
+ "slice_ = idx[idx[:, \"r2\"], :]\n",
+ "df3.style.apply(\n",
+ " highlight_max, props=\"color:red;\", axis=1, subset=slice_\n",
+ ").set_properties(**{\"background-color\": \"#ffffb3\"}, subset=slice_)"
]
},
{
@@ -923,9 +989,10 @@
"metadata": {},
"outputs": [],
"source": [
- "slice_ = idx[idx[(df3['c1'] + df3['c3']) < -2.0], ['c2', 'c4']]\n",
- "df3.style.apply(highlight_max, props='color:red;', axis=1, subset=slice_)\\\n",
- " .set_properties(**{'background-color': '#ffffb3'}, subset=slice_)"
+ "slice_ = idx[idx[(df3[\"c1\"] + df3[\"c3\"]) < -2.0], [\"c2\", \"c4\"]]\n",
+ "df3.style.apply(\n",
+ " highlight_max, props=\"color:red;\", axis=1, subset=slice_\n",
+ ").set_properties(**{\"background-color\": \"#ffffb3\"}, subset=slice_)"
]
},
{
@@ -981,7 +1048,7 @@
"metadata": {},
"outputs": [],
"source": [
- "df4 = pd.DataFrame([[1,2],[3,4]])\n",
+ "df4 = pd.DataFrame([[1, 2], [3, 4]])\n",
"s4 = df4.style"
]
},
@@ -1003,6 +1070,7 @@
"outputs": [],
"source": [
"from pandas.io.formats.style import Styler\n",
+ "\n",
"s4 = Styler(df4, uuid_len=0, cell_ids=False)"
]
},
@@ -1053,7 +1121,7 @@
"metadata": {},
"outputs": [],
"source": [
- "df4.style.set_table_styles([{'selector': 'td.col1', 'props': props}])"
+ "df4.style.set_table_styles([{\"selector\": \"td.col1\", \"props\": props}])"
]
},
{
@@ -1082,9 +1150,11 @@
"metadata": {},
"outputs": [],
"source": [
- "df2.style.apply(highlight_max, props='color:white;background-color:darkblue;', axis=0)\\\n",
- " .apply(highlight_max, props='color:white;background-color:pink;', axis=1)\\\n",
- " .apply(highlight_max, props='color:white;background-color:purple', axis=None)"
+ "df2.style.apply(\n",
+ " highlight_max, props=\"color:white;background-color:darkblue;\", axis=0\n",
+ ").apply(highlight_max, props=\"color:white;background-color:pink;\", axis=1).apply(\n",
+ " highlight_max, props=\"color:white;background-color:purple\", axis=None\n",
+ ")"
]
},
{
@@ -1105,14 +1175,18 @@
"outputs": [],
"source": [
"build = lambda x: pd.DataFrame(x, index=df2.index, columns=df2.columns)\n",
- "cls1 = build(df2.apply(highlight_max, props='cls-1 ', axis=0))\n",
- "cls2 = build(df2.apply(highlight_max, props='cls-2 ', axis=1, result_type='expand').values)\n",
- "cls3 = build(highlight_max(df2, props='cls-3 '))\n",
- "df2.style.set_table_styles([\n",
- " {'selector': '.cls-1', 'props': 'color:white;background-color:darkblue;'},\n",
- " {'selector': '.cls-2', 'props': 'color:white;background-color:pink;'},\n",
- " {'selector': '.cls-3', 'props': 'color:white;background-color:purple;'}\n",
- "]).set_td_classes(cls1 + cls2 + cls3)"
+ "cls1 = build(df2.apply(highlight_max, props=\"cls-1 \", axis=0))\n",
+ "cls2 = build(\n",
+ " df2.apply(highlight_max, props=\"cls-2 \", axis=1, result_type=\"expand\").values\n",
+ ")\n",
+ "cls3 = build(highlight_max(df2, props=\"cls-3 \"))\n",
+ "df2.style.set_table_styles(\n",
+ " [\n",
+ " {\"selector\": \".cls-1\", \"props\": \"color:white;background-color:darkblue;\"},\n",
+ " {\"selector\": \".cls-2\", \"props\": \"color:white;background-color:pink;\"},\n",
+ " {\"selector\": \".cls-3\", \"props\": \"color:white;background-color:purple;\"},\n",
+ " ]\n",
+ ").set_td_classes(cls1 + cls2 + cls3)"
]
},
{
@@ -1152,10 +1226,14 @@
" \"blank\": \"\",\n",
"}\n",
"html = Styler(df4, uuid_len=0, cell_ids=False)\n",
- "html.set_table_styles([{'selector': 'td', 'props': props},\n",
- " {'selector': '.c1', 'props': 'color:green;'},\n",
- " {'selector': '.l0', 'props': 'color:blue;'}],\n",
- " css_class_names=my_css)\n",
+ "html.set_table_styles(\n",
+ " [\n",
+ " {\"selector\": \"td\", \"props\": props},\n",
+ " {\"selector\": \".c1\", \"props\": \"color:green;\"},\n",
+ " {\"selector\": \".l0\", \"props\": \"color:blue;\"},\n",
+ " ],\n",
+ " css_class_names=my_css,\n",
+ ")\n",
"print(html.to_html())"
]
},
@@ -1213,9 +1291,9 @@
"metadata": {},
"outputs": [],
"source": [
- "df2.iloc[0,2] = np.nan\n",
- "df2.iloc[4,3] = np.nan\n",
- "df2.loc[:4].style.highlight_null(color='yellow')"
+ "df2.iloc[0, 2] = np.nan\n",
+ "df2.iloc[4, 3] = np.nan\n",
+ "df2.loc[:4].style.highlight_null(color=\"yellow\")"
]
},
{
@@ -1231,7 +1309,9 @@
"metadata": {},
"outputs": [],
"source": [
- "df2.loc[:4].style.highlight_max(axis=1, props='color:white; font-weight:bold; background-color:darkblue;')"
+ "df2.loc[:4].style.highlight_max(\n",
+ " axis=1, props=\"color:white; font-weight:bold; background-color:darkblue;\"\n",
+ ")"
]
},
{
@@ -1249,7 +1329,9 @@
"outputs": [],
"source": [
"left = pd.Series([1.0, 0.0, 1.0], index=[\"A\", \"B\", \"D\"])\n",
- "df2.loc[:4].style.highlight_between(left=left, right=1.5, axis=1, props='color:white; background-color:purple;')"
+ "df2.loc[:4].style.highlight_between(\n",
+ " left=left, right=1.5, axis=1, props=\"color:white; background-color:purple;\"\n",
+ ")"
]
},
{
@@ -1266,7 +1348,7 @@
"metadata": {},
"outputs": [],
"source": [
- "df2.loc[:4].style.highlight_quantile(q_left=0.85, axis=None, color='yellow')"
+ "df2.loc[:4].style.highlight_quantile(q_left=0.85, axis=None, color=\"yellow\")"
]
},
{
@@ -1290,6 +1372,7 @@
"outputs": [],
"source": [
"import seaborn as sns\n",
+ "\n",
"cm = sns.light_palette(\"green\", as_cmap=True)\n",
"\n",
"df2.style.background_gradient(cmap=cm)"
@@ -1329,9 +1412,9 @@
"metadata": {},
"outputs": [],
"source": [
- "df2.loc[:4].style.set_properties(**{'background-color': 'black',\n",
- " 'color': 'lawngreen',\n",
- " 'border-color': 'white'})"
+ "df2.loc[:4].style.set_properties(\n",
+ " **{\"background-color\": \"black\", \"color\": \"lawngreen\", \"border-color\": \"white\"}\n",
+ ")"
]
},
{
@@ -1354,7 +1437,7 @@
"metadata": {},
"outputs": [],
"source": [
- "df2.style.bar(subset=['A', 'B'], color='#d65f5f')"
+ "df2.style.bar(subset=[\"A\", \"B\"], color=\"#d65f5f\")"
]
},
{
@@ -1372,10 +1455,15 @@
"metadata": {},
"outputs": [],
"source": [
- "df2.style.format('{:.3f}', na_rep=\"\")\\\n",
- " .bar(align=0, vmin=-2.5, vmax=2.5, cmap=\"bwr\", height=50,\n",
- " width=60, props=\"width: 120px; border-right: 1px solid black;\")\\\n",
- " .text_gradient(cmap=\"bwr\", vmin=-2.5, vmax=2.5)"
+ "df2.style.format(\"{:.3f}\", na_rep=\"\").bar(\n",
+ " align=0,\n",
+ " vmin=-2.5,\n",
+ " vmax=2.5,\n",
+ " cmap=\"bwr\",\n",
+ " height=50,\n",
+ " width=60,\n",
+ " props=\"width: 120px; border-right: 1px solid black;\",\n",
+ ").text_gradient(cmap=\"bwr\", vmin=-2.5, vmax=2.5)"
]
},
{
@@ -1398,10 +1486,10 @@
"from IPython.display import HTML\n",
"\n",
"# Test series\n",
- "test1 = pd.Series([-100,-60,-30,-20], name='All Negative')\n",
- "test2 = pd.Series([-10,-5,0,90], name='Both Pos and Neg')\n",
- "test3 = pd.Series([10,20,50,100], name='All Positive')\n",
- "test4 = pd.Series([100, 103, 101, 102], name='Large Positive')\n",
+ "test1 = pd.Series([-100, -60, -30, -20], name=\"All Negative\")\n",
+ "test2 = pd.Series([-10, -5, 0, 90], name=\"Both Pos and Neg\")\n",
+ "test3 = pd.Series([10, 20, 50, 100], name=\"All Positive\")\n",
+ "test4 = pd.Series([100, 103, 101, 102], name=\"Large Positive\")\n",
"\n",
"\n",
"head = \"\"\"\n",
@@ -1417,19 +1505,22 @@
"\n",
"\"\"\"\n",
"\n",
- "aligns = ['left', 'right', 'zero', 'mid', 'mean', 99]\n",
+ "aligns = [\"left\", \"right\", \"zero\", \"mid\", \"mean\", 99]\n",
"for align in aligns:\n",
" row = \"| {} | \".format(align)\n",
- " for series in [test1,test2,test3, test4]:\n",
+ " for series in [test1, test2, test3, test4]:\n",
" s = series.copy()\n",
- " s.name=''\n",
- " row += \"{} | \".format(s.to_frame().style.hide(axis='index').bar(align=align, \n",
- " color=['#d65f5f', '#5fba7d'], \n",
- " width=100).to_html()) #testn['width']\n",
- " row += ' '\n",
+ " s.name = \"\"\n",
+ " row += \"{} | \".format(\n",
+ " s.to_frame()\n",
+ " .style.hide(axis=\"index\")\n",
+ " .bar(align=align, color=[\"#d65f5f\", \"#5fba7d\"], width=100)\n",
+ " .to_html()\n",
+ " ) # testn['width']\n",
+ " row += \"\"\n",
" head += row\n",
- " \n",
- "head+= \"\"\"\n",
+ "\n",
+ "head += \"\"\"\n",
"\n",
" \"\"\""
]
@@ -1463,11 +1554,12 @@
"metadata": {},
"outputs": [],
"source": [
- "style1 = df2.style\\\n",
- " .map(style_negative, props='color:red;')\\\n",
- " .map(lambda v: 'opacity: 20%;' if (v < 0.3) and (v > -0.3) else None)\\\n",
- " .set_table_styles([{\"selector\": \"th\", \"props\": \"color: blue;\"}])\\\n",
- " .hide(axis=\"index\")\n",
+ "style1 = (\n",
+ " df2.style.map(style_negative, props=\"color:red;\")\n",
+ " .map(lambda v: \"opacity: 20%;\" if (v < 0.3) and (v > -0.3) else None)\n",
+ " .set_table_styles([{\"selector\": \"th\", \"props\": \"color: blue;\"}])\n",
+ " .hide(axis=\"index\")\n",
+ ")\n",
"style1"
]
},
@@ -1526,11 +1618,14 @@
"outputs": [],
"source": [
"from ipywidgets import widgets\n",
+ "\n",
+ "\n",
"@widgets.interact\n",
- "def f(h_neg=(0, 359, 1), h_pos=(0, 359), s=(0., 99.9), l=(0., 99.9)):\n",
+ "def f(h_neg=(0, 359, 1), h_pos=(0, 359), s=(0.0, 99.9), l_var=(0.0, 99.9)):\n",
" return df2.style.background_gradient(\n",
- " cmap=sns.palettes.diverging_palette(h_neg=h_neg, h_pos=h_pos, s=s, l=l,\n",
- " as_cmap=True)\n",
+ " cmap=sns.palettes.diverging_palette(\n",
+ " h_neg=h_neg, h_pos=h_pos, s=s, l=l_var, as_cmap=True\n",
+ " )\n",
" )"
]
},
@@ -1548,16 +1643,15 @@
"outputs": [],
"source": [
"def magnify():\n",
- " return [dict(selector=\"th\",\n",
- " props=[(\"font-size\", \"4pt\")]),\n",
- " dict(selector=\"td\",\n",
- " props=[('padding', \"0em 0em\")]),\n",
- " dict(selector=\"th:hover\",\n",
- " props=[(\"font-size\", \"12pt\")]),\n",
- " dict(selector=\"tr:hover td:hover\",\n",
- " props=[('max-width', '200px'),\n",
- " ('font-size', '12pt')])\n",
- "]"
+ " return [\n",
+ " {\"selector\": \"th\", \"props\": [(\"font-size\", \"4pt\")]},\n",
+ " {\"selector\": \"td\", \"props\": [(\"padding\", \"0em 0em\")]},\n",
+ " {\"selector\": \"th:hover\", \"props\": [(\"font-size\", \"12pt\")]},\n",
+ " {\n",
+ " \"selector\": \"tr:hover td:hover\",\n",
+ " \"props\": [(\"max-width\", \"200px\"), (\"font-size\", \"12pt\")],\n",
+ " },\n",
+ " ]"
]
},
{
@@ -1566,15 +1660,12 @@
"metadata": {},
"outputs": [],
"source": [
- "np.random.seed(25)\n",
- "cmap = cmap=sns.diverging_palette(5, 250, as_cmap=True)\n",
- "bigdf = pd.DataFrame(np.random.randn(20, 25)).cumsum()\n",
+ "cmap = sns.diverging_palette(5, 250, as_cmap=True)\n",
+ "bigdf = pd.DataFrame(rng.standard_normal(20, 25)).cumsum()\n",
"\n",
- "bigdf.style.background_gradient(cmap, axis=1)\\\n",
- " .set_properties(**{'max-width': '80px', 'font-size': '1pt'})\\\n",
- " .set_caption(\"Hover to magnify\")\\\n",
- " .format(precision=2)\\\n",
- " .set_table_styles(magnify())"
+ "bigdf.style.background_gradient(cmap, axis=1).set_properties(\n",
+ " **{\"max-width\": \"80px\", \"font-size\": \"1pt\"}\n",
+ ").set_caption(\"Hover to magnify\").format(precision=2).set_table_styles(magnify())"
]
},
{
@@ -1594,7 +1685,7 @@
"metadata": {},
"outputs": [],
"source": [
- "bigdf = pd.DataFrame(np.random.randn(16, 100))\n",
+ "bigdf = pd.DataFrame(rng.standard_normal(16, 100))\n",
"bigdf.style.set_sticky(axis=\"index\")"
]
},
@@ -1611,8 +1702,8 @@
"metadata": {},
"outputs": [],
"source": [
- "bigdf.index = pd.MultiIndex.from_product([[\"A\",\"B\"],[0,1],[0,1,2,3]])\n",
- "bigdf.style.set_sticky(axis=\"index\", pixel_size=18, levels=[1,2])"
+ "bigdf.index = pd.MultiIndex.from_product([[\"A\", \"B\"], [0, 1], [0, 1, 2, 3]])\n",
+ "bigdf.style.set_sticky(axis=\"index\", pixel_size=18, levels=[1, 2])"
]
},
{
@@ -1632,7 +1723,7 @@
"metadata": {},
"outputs": [],
"source": [
- "df4 = pd.DataFrame([['', '\"&other\"', '']])\n",
+ "df4 = pd.DataFrame([[\"\", '\"&other\"', \"\"]])\n",
"df4.style"
]
},
@@ -1651,7 +1742,9 @@
"metadata": {},
"outputs": [],
"source": [
- "df4.style.format('{}', escape=\"html\")"
+ "df4.style.format(\n",
+ " '{}', escape=\"html\"\n",
+ ")"
]
},
{
@@ -1693,10 +1786,9 @@
"metadata": {},
"outputs": [],
"source": [
- "df2.style.\\\n",
- " map(style_negative, props='color:red;').\\\n",
- " highlight_max(axis=0).\\\n",
- " to_excel('styled.xlsx', engine='openpyxl')"
+ "df2.style.map(style_negative, props=\"color:red;\").highlight_max(axis=0).to_excel(\n",
+ " \"styled.xlsx\", engine=\"openpyxl\"\n",
+ ")"
]
},
{
@@ -1765,7 +1857,11 @@
"metadata": {},
"outputs": [],
"source": [
- "print(pd.DataFrame([[1,2],[3,4]], index=['i1', 'i2'], columns=['c1', 'c2']).style.to_html())"
+ "print(\n",
+ " pd.DataFrame(\n",
+ " [[1, 2], [3, 4]], index=[\"i1\", \"i2\"], columns=[\"c1\", \"c2\"]\n",
+ " ).style.to_html()\n",
+ ")"
]
},
{
@@ -1783,9 +1879,8 @@
"metadata": {},
"outputs": [],
"source": [
- "df4 = pd.DataFrame([['text']])\n",
- "df4.style.map(lambda x: 'color:green;')\\\n",
- " .map(lambda x: 'color:red;')"
+ "df4 = pd.DataFrame([[\"text\"]])\n",
+ "df4.style.map(lambda x: \"color:green;\").map(lambda x: \"color:red;\")"
]
},
{
@@ -1794,8 +1889,7 @@
"metadata": {},
"outputs": [],
"source": [
- "df4.style.map(lambda x: 'color:red;')\\\n",
- " .map(lambda x: 'color:green;')"
+ "df4.style.map(lambda x: \"color:red;\").map(lambda x: \"color:green;\")"
]
},
{
@@ -1820,9 +1914,9 @@
"metadata": {},
"outputs": [],
"source": [
- "df4.style.set_uuid('a_')\\\n",
- " .set_table_styles([{'selector': 'td', 'props': 'color:red;'}])\\\n",
- " .map(lambda x: 'color:green;')"
+ "df4.style.set_uuid(\"a_\").set_table_styles(\n",
+ " [{\"selector\": \"td\", \"props\": \"color:red;\"}]\n",
+ ").map(lambda x: \"color:green;\")"
]
},
{
@@ -1838,11 +1932,12 @@
"metadata": {},
"outputs": [],
"source": [
- "df4.style.set_uuid('b_')\\\n",
- " .set_table_styles([{'selector': 'td', 'props': 'color:red;'},\n",
- " {'selector': '.cls-1', 'props': 'color:blue;'}])\\\n",
- " .map(lambda x: 'color:green;')\\\n",
- " .set_td_classes(pd.DataFrame([['cls-1']]))"
+ "df4.style.set_uuid(\"b_\").set_table_styles(\n",
+ " [\n",
+ " {\"selector\": \"td\", \"props\": \"color:red;\"},\n",
+ " {\"selector\": \".cls-1\", \"props\": \"color:blue;\"},\n",
+ " ]\n",
+ ").map(lambda x: \"color:green;\").set_td_classes(pd.DataFrame([[\"cls-1\"]]))"
]
},
{
@@ -1858,12 +1953,13 @@
"metadata": {},
"outputs": [],
"source": [
- "df4.style.set_uuid('c_')\\\n",
- " .set_table_styles([{'selector': 'td', 'props': 'color:red;'},\n",
- " {'selector': '.cls-1', 'props': 'color:blue;'},\n",
- " {'selector': 'td.data', 'props': 'color:yellow;'}])\\\n",
- " .map(lambda x: 'color:green;')\\\n",
- " .set_td_classes(pd.DataFrame([['cls-1']]))"
+ "df4.style.set_uuid(\"c_\").set_table_styles(\n",
+ " [\n",
+ " {\"selector\": \"td\", \"props\": \"color:red;\"},\n",
+ " {\"selector\": \".cls-1\", \"props\": \"color:blue;\"},\n",
+ " {\"selector\": \"td.data\", \"props\": \"color:yellow;\"},\n",
+ " ]\n",
+ ").map(lambda x: \"color:green;\").set_td_classes(pd.DataFrame([[\"cls-1\"]]))"
]
},
{
@@ -1881,12 +1977,13 @@
"metadata": {},
"outputs": [],
"source": [
- "df4.style.set_uuid('d_')\\\n",
- " .set_table_styles([{'selector': 'td', 'props': 'color:red;'},\n",
- " {'selector': '.cls-1', 'props': 'color:blue;'},\n",
- " {'selector': 'td.data', 'props': 'color:yellow;'}])\\\n",
- " .map(lambda x: 'color:green !important;')\\\n",
- " .set_td_classes(pd.DataFrame([['cls-1']]))"
+ "df4.style.set_uuid(\"d_\").set_table_styles(\n",
+ " [\n",
+ " {\"selector\": \"td\", \"props\": \"color:red;\"},\n",
+ " {\"selector\": \".cls-1\", \"props\": \"color:blue;\"},\n",
+ " {\"selector\": \"td.data\", \"props\": \"color:yellow;\"},\n",
+ " ]\n",
+ ").map(lambda x: \"color:green !important;\").set_td_classes(pd.DataFrame([[\"cls-1\"]]))"
]
},
{
@@ -1940,8 +2037,8 @@
"metadata": {},
"outputs": [],
"source": [
- "with open(\"templates/myhtml.tpl\") as f:\n",
- " print(f.read())"
+ "with open(\"templates/myhtml.tpl\") as f_tpl:\n",
+ " print(f_tpl.read())"
]
},
{
@@ -1960,10 +2057,12 @@
"source": [
"class MyStyler(Styler):\n",
" env = Environment(\n",
- " loader=ChoiceLoader([\n",
- " FileSystemLoader(\"templates\"), # contains ours\n",
- " Styler.loader, # the default\n",
- " ])\n",
+ " loader=ChoiceLoader(\n",
+ " [\n",
+ " FileSystemLoader(\"templates\"), # contains ours\n",
+ " Styler.loader, # the default\n",
+ " ]\n",
+ " )\n",
" )\n",
" template_html_table = env.get_template(\"myhtml.tpl\")"
]
@@ -2045,8 +2144,8 @@
},
"outputs": [],
"source": [
- "with open(\"templates/html_style_structure.html\") as f:\n",
- " style_structure = f.read()"
+ "with open(\"templates/html_style_structure.html\") as f_style:\n",
+ " style_structure = f_style.read()"
]
},
{
@@ -2073,8 +2172,8 @@
},
"outputs": [],
"source": [
- "with open(\"templates/html_table_structure.html\") as f:\n",
- " table_structure = f.read()"
+ "with open(\"templates/html_table_structure.html\") as f_table:\n",
+ " table_structure = f_table.read()"
]
},
{
@@ -2106,7 +2205,7 @@
"# from IPython.display import HTML\n",
"# with open(\"themes/nature_with_gtoc/static/nature.css_t\") as f:\n",
"# css = f.read()\n",
- " \n",
+ "\n",
"# HTML(''.format(css))"
]
}
diff --git a/pandas/core/_numba/kernels/min_max_.py b/pandas/core/_numba/kernels/min_max_.py
index 59d36732ebae6..6e57e62c13a6e 100644
--- a/pandas/core/_numba/kernels/min_max_.py
+++ b/pandas/core/_numba/kernels/min_max_.py
@@ -112,11 +112,9 @@ def grouped_min_max(
continue
if is_max:
- if val > output[lab]:
- output[lab] = val
+ output[lab] = max(val, output[lab])
else:
- if val < output[lab]:
- output[lab] = val
+ output[lab] = min(val, output[lab])
# Set labels that don't satisfy min_periods as np.nan
for lab, count in enumerate(nobs):
diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py
index 143a13c54dbbb..6b0e0bfc0318a 100644
--- a/pandas/core/arrays/string_.py
+++ b/pandas/core/arrays/string_.py
@@ -188,7 +188,7 @@ def __eq__(self, other: object) -> bool:
# cannot be checked with normal `==`
if isinstance(other, str):
# TODO should dtype == "string" work for the NaN variant?
- if other == "string" or other == self.name: # noqa: PLR1714
+ if other == "string" or other == self.name:
return True
try:
other = self.construct_from_string(other)
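The `# noqa: PLR1714` can be dropped here because that rule is added to the project-wide ignore list in the pyproject.toml hunk below. For context, PLR1714 (repeated-equality-comparison) flags chains of `==` against the same variable and suggests a membership test instead. A minimal sketch of the pattern and the suggested rewrite (illustrative only, not pandas code):

    name = "string"

    # flagged by PLR1714: the same variable compared against several literals with `or`
    if name == "string" or name == "str":
        print("matched")

    # the rewrite the rule suggests: a membership test
    if name in ("string", "str"):
        print("matched")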
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 6ba07b1761557..9b780e512a11d 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -683,9 +683,7 @@ def _maybe_promote(dtype: np.dtype, fill_value=np.nan):
elif dtype.kind == "f":
mst = np.min_scalar_type(fill_value)
- if mst > dtype:
- # e.g. mst is np.float64 and dtype is np.float32
- dtype = mst
+ dtype = max(mst, dtype)
elif dtype.kind == "c":
mst = np.min_scalar_type(fill_value)
@@ -718,9 +716,7 @@ def _maybe_promote(dtype: np.dtype, fill_value=np.nan):
elif dtype.kind == "c":
mst = np.min_scalar_type(fill_value)
- if mst > dtype:
- # e.g. mst is np.complex128 and dtype is np.complex64
- dtype = mst
+ dtype = max(mst, dtype)
else:
dtype = np.dtype(np.object_)
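The `max(mst, dtype)` form relies on NumPy dtypes supporting ordering comparisons: `mst > dtype` holds when `dtype` can be safely cast to the wider `mst`, so `max` keeps the wider dtype exactly as the removed `if` did (only within comparable kinds, as in these float/complex branches). A minimal standalone sketch, assuming NumPy's documented dtype comparison semantics:

    import numpy as np

    dtype = np.dtype(np.float32)
    mst = np.min_scalar_type(np.finfo(np.float64).max)  # float64: value too large for float32
    dtype = max(mst, dtype)                             # keeps the wider of the two dtypes
    print(dtype)  # float64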
diff --git a/pandas/io/excel/_odfreader.py b/pandas/io/excel/_odfreader.py
index f79417d11080d..62002e4844b32 100644
--- a/pandas/io/excel/_odfreader.py
+++ b/pandas/io/excel/_odfreader.py
@@ -142,8 +142,7 @@ def get_sheet_data(
empty_cells = 0
table_row.extend([value] * column_repeat)
- if max_row_len < len(table_row):
- max_row_len = len(table_row)
+ max_row_len = max(max_row_len, len(table_row))
row_repeat = self._get_row_repeat(sheet_row)
if len(table_row) == 0:
diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py
index d205569270705..d48a2d3c9c027 100644
--- a/pandas/tests/arithmetic/test_numeric.py
+++ b/pandas/tests/arithmetic/test_numeric.py
@@ -274,9 +274,7 @@ def test_numeric_arr_rdiv_tdscalar(self, three_days, numeric_idx, box_with_array
expected = TimedeltaIndex(["3 Days", "36 Hours"])
if isinstance(three_days, np.timedelta64):
dtype = three_days.dtype
- if dtype < np.dtype("m8[s]"):
- # i.e. resolution is lower -> use lowest supported resolution
- dtype = np.dtype("m8[s]")
+ dtype = max(dtype, np.dtype("m8[s]"))
expected = expected.astype(dtype)
elif type(three_days) is timedelta:
expected = expected.astype("m8[us]")
diff --git a/pyproject.toml b/pyproject.toml
index 07b753b08fbc1..02636196ea64a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -336,6 +336,10 @@ ignore = [
"E721",
# unnecessary-default-type-args
"UP043",
+ # repeated-equality-comparison
+ "PLR1714",
+ # self-or-cls-assignment
+ "PLW0642",
# Additional pylint rules
# literal-membership
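Of the two new ignores, PLR1714 is illustrated above; PLW0642 (self-or-cls-assignment) warns when a method rebinds `self` or `cls`, since that only changes the local name and never affects the caller's object. A minimal sketch of the flagged pattern (illustrative only, not code from this repository):

    class Box:
        def __init__(self, value):
            self.value = value

        def swap(self, other):
            # rebinding `self` is what PLW0642 flags: the caller still holds the
            # original object, so this rarely does what it looks like
            self = other
            return self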
From b5850eeb60290e052d20c1a55a24798eb6787a68 Mon Sep 17 00:00:00 2001
From: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com>
Date: Tue, 3 Sep 2024 14:18:12 -0700
Subject: [PATCH 12/12] Fix standard_normal call
---
doc/source/user_guide/style.ipynb | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/doc/source/user_guide/style.ipynb b/doc/source/user_guide/style.ipynb
index a38e8cf928916..0ea6d350b26d3 100644
--- a/doc/source/user_guide/style.ipynb
+++ b/doc/source/user_guide/style.ipynb
@@ -102,7 +102,7 @@
"outputs": [],
"source": [
"weather_df = pd.DataFrame(\n",
- " rng.standard_normal(10, 2) * 5,\n",
+ " rng.standard_normal((10, 2)) * 5,\n",
" index=pd.date_range(start=\"2021-01-01\", periods=10),\n",
" columns=[\"Tokyo\", \"Beijing\"],\n",
")\n",
@@ -159,7 +159,7 @@
"metadata": {},
"outputs": [],
"source": [
- "df = pd.DataFrame(rng.standard_normal(5, 5))\n",
+ "df = pd.DataFrame(rng.standard_normal((5, 5)))\n",
"df.style.hide(subset=[0, 2, 4], axis=0).hide(subset=[0, 2, 4], axis=1)"
]
},
@@ -611,7 +611,7 @@
"metadata": {},
"outputs": [],
"source": [
- "df2 = pd.DataFrame(rng.standard_normal(10, 4), columns=[\"A\", \"B\", \"C\", \"D\"])\n",
+ "df2 = pd.DataFrame(rng.standard_normal((10, 4)), columns=[\"A\", \"B\", \"C\", \"D\"])\n",
"df2.style"
]
},
@@ -909,7 +909,7 @@
"outputs": [],
"source": [
"df3 = pd.DataFrame(\n",
- " rng.standard_normal(4, 4),\n",
+ " rng.standard_normal((4, 4)),\n",
" pd.MultiIndex.from_product([[\"A\", \"B\"], [\"r1\", \"r2\"]]),\n",
" columns=[\"c1\", \"c2\", \"c3\", \"c4\"],\n",
")\n",
@@ -1661,7 +1661,7 @@
"outputs": [],
"source": [
"cmap = sns.diverging_palette(5, 250, as_cmap=True)\n",
- "bigdf = pd.DataFrame(rng.standard_normal(20, 25)).cumsum()\n",
+ "bigdf = pd.DataFrame(rng.standard_normal((20, 25))).cumsum()\n",
"\n",
"bigdf.style.background_gradient(cmap, axis=1).set_properties(\n",
" **{\"max-width\": \"80px\", \"font-size\": \"1pt\"}\n",
@@ -1685,7 +1685,7 @@
"metadata": {},
"outputs": [],
"source": [
- "bigdf = pd.DataFrame(rng.standard_normal(16, 100))\n",
+ "bigdf = pd.DataFrame(rng.standard_normal((16, 100)))\n",
"bigdf.style.set_sticky(axis=\"index\")"
]
},
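The shape tuple matters because `Generator.standard_normal` takes a single `size` argument (followed by `dtype`), unlike the legacy `np.random.randn(n, m)` calls these cells were ported from. A minimal sketch of the difference, assuming current NumPy behaviour:

    import numpy as np

    rng = np.random.default_rng(0)
    ok = rng.standard_normal((16, 100))   # size passed as one tuple -> shape (16, 100)
    print(ok.shape)

    try:
        rng.standard_normal(16, 100)      # 100 lands in the `dtype` slot
    except TypeError as err:
        print(err)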