Update pandas syntax to avoid deprecated behavior (#3040)
mwaskom committed Sep 22, 2022
1 parent d203329 commit f2fc4b5
Showing 4 changed files with 14 additions and 18 deletions.
4 changes: 2 additions & 2 deletions seaborn/categorical.py
@@ -289,7 +289,7 @@ def plot_strips(
jitter_move = jitterer(size=len(sub_data)) if len(sub_data) > 1 else 0

adjusted_data = sub_data[self.cat_axis] + dodge_move + jitter_move
-sub_data.loc[:, self.cat_axis] = adjusted_data
+sub_data[self.cat_axis] = adjusted_data

for var in "xy":
if self._log_scaled(var):
@@ -346,7 +346,7 @@ def plot_swarms(
dodge_move = offsets[sub_data["hue"].map(self._hue_map.levels.index)]

if not sub_data.empty:
-sub_data.loc[:, self.cat_axis] = sub_data[self.cat_axis] + dodge_move
+sub_data[self.cat_axis] = sub_data[self.cat_axis] + dodge_move

for var in "xy":
if self._log_scaled(var):
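Context for the categorical.py hunks above: newer pandas releases warn about whole-column assignment through an indexer (`.loc[:, col] = ...` / `.iloc[:, i] = ...`), whose inplace semantics are changing, and plain `df[col] = values` sidesteps the warning. A minimal sketch of the two patterns, with made-up data standing in for `sub_data` (not part of the commit):

import numpy as np
import pandas as pd

# Toy stand-in for the sub_data frame used in plot_strips; values are invented.
sub_data = pd.DataFrame({"x": [0.0, 0.0, 1.0], "y": [1.2, 3.4, 2.1]})
jitter_move = np.random.uniform(-0.1, 0.1, size=len(sub_data))

# Old pattern: assignment through an indexer, which recent pandas versions
# warn about because the inplace behavior is being phased out.
# sub_data.loc[:, "x"] = sub_data["x"] + jitter_move

# New pattern: plain column assignment, which always replaces the column.
sub_data["x"] = sub_data["x"] + jitter_move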
18 changes: 7 additions & 11 deletions seaborn/distributions.py
@@ -246,30 +246,26 @@ def _resolve_multiple(self, curves, multiple):

# Find column groups that are nested within col/row variables
column_groups = {}
-for i, keyd in enumerate(map(dict, curves.columns.tolist())):
+for i, keyd in enumerate(map(dict, curves.columns)):
facet_key = keyd.get("col", None), keyd.get("row", None)
column_groups.setdefault(facet_key, [])
column_groups[facet_key].append(i)

baselines = curves.copy()
-for cols in column_groups.values():
+for col_idxs in column_groups.values():
+    cols = curves.columns[col_idxs]

-norm_constant = curves.iloc[:, cols].sum(axis="columns")
+norm_constant = curves[cols].sum(axis="columns")

# Take the cumulative sum to stack
-curves.iloc[:, cols] = curves.iloc[:, cols].cumsum(axis="columns")
+curves[cols] = curves[cols].cumsum(axis="columns")

# Normalize by row sum to fill
if multiple == "fill":
-curves.iloc[:, cols] = (curves
-                        .iloc[:, cols]
-                        .div(norm_constant, axis="index"))
+curves[cols] = curves[cols].div(norm_constant, axis="index")

# Define where each segment starts
-baselines.iloc[:, cols] = (curves
-                           .iloc[:, cols]
-                           .shift(1, axis=1)
-                           .fillna(0))
+baselines[cols] = curves[cols].shift(1, axis=1).fillna(0)

if multiple == "dodge":

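The distributions.py hunk above swaps positional column assignment (`curves.iloc[:, cols] = ...`) for label-based assignment, converting the integer positions stored in `column_groups` to labels once via `curves.columns[col_idxs]`. A rough sketch of the pattern with an invented frame standing in for `curves` (column names are illustrative, not seaborn's real column structure):

import pandas as pd

curves = pd.DataFrame({"a": [1.0, 2.0], "b": [3.0, 4.0], "c": [5.0, 6.0]})
baselines = curves.copy()
col_idxs = [0, 1]                    # integer positions, as stored in column_groups
cols = curves.columns[col_idxs]      # convert positions to column labels once

norm_constant = curves[cols].sum(axis="columns")

# Cumulative sum across the group's columns stacks the curves.
curves[cols] = curves[cols].cumsum(axis="columns")

# For the "fill" variant, normalize by the row total.
curves[cols] = curves[cols].div(norm_constant, axis="index")

# Each segment starts where the previous one ends.
baselines[cols] = curves[cols].shift(1, axis=1).fillna(0)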
4 changes: 2 additions & 2 deletions tests/_stats/test_histogram.py
@@ -157,7 +157,7 @@ def test_common_norm_subset(self, long_df, triple_args):

h = Hist(stat="percent", common_norm=["a"])
out = h(long_df, *triple_args)
-for _, out_part in out.groupby(["a"]):
+for _, out_part in out.groupby("a"):
assert out_part["y"].sum() == pytest.approx(100)

def test_common_bins_default(self, long_df, triple_args):
@@ -183,7 +183,7 @@ def test_common_bins_subset(self, long_df, triple_args):
h = Hist(common_bins=False)
out = h(long_df, *triple_args)
bins = []
-for _, out_part in out.groupby(["a"]):
+for _, out_part in out.groupby("a"):
bins.append(tuple(out_part["x"]))
assert len(set(bins)) == out["a"].nunique()

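The test changes above reflect a pandas deprecation around iterating a groupby keyed by a length-1 list: newer pandas warns that such group keys will become length-1 tuples, whereas grouping by the column name directly keeps scalar keys. A small sketch with made-up data (not from the actual test suite):

import pandas as pd

df = pd.DataFrame({"a": ["x", "x", "y"], "y": [10, 20, 30]})

# df.groupby(["a"]) iterates with keys that pandas warns may become
# length-1 tuples; grouping by the column name keeps scalar keys.
for key, part in df.groupby("a"):
    assert isinstance(key, str)
    print(key, part["y"].sum())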
6 changes: 3 additions & 3 deletions tests/test_categorical.py
@@ -340,7 +340,7 @@ def test_longform_groupby(self):
p1 = cat._CategoricalPlotter()
p1.establish_variables(self.g, self.y, hue=self.h)
p2 = cat._CategoricalPlotter()
-p2.establish_variables(self.g, self.y[::-1], self.h)
+p2.establish_variables(self.g, self.y.iloc[::-1], self.h)
for i, (d1, d2) in enumerate(zip(p1.plot_data, p2.plot_data)):
assert np.array_equal(d1.sort_index(), d2.sort_index())

@@ -614,7 +614,7 @@ def test_nested_stats(self):
y.groupby([g, h]).mean().unstack())

for ci_g, (_, grp_y) in zip(p.confint, y.groupby(g)):
-for ci, hue_y in zip(ci_g, [grp_y[::2], grp_y[1::2]]):
+for ci, hue_y in zip(ci_g, [grp_y.iloc[::2], grp_y.iloc[1::2]]):
sem = hue_y.std() / np.sqrt(len(hue_y))
mean = hue_y.mean()
half_ci = _normal_quantile_func(.975) * sem
@@ -732,7 +732,7 @@ def test_nested_sd_error_bars(self):
y.groupby([g, h]).mean().unstack())

for ci_g, (_, grp_y) in zip(p.confint, y.groupby(g)):
-for ci, hue_y in zip(ci_g, [grp_y[::2], grp_y[1::2]]):
+for ci, hue_y in zip(ci_g, [grp_y.iloc[::2], grp_y.iloc[1::2]]):
mean = hue_y.mean()
half_ci = np.std(hue_y)
ci_want = mean - half_ci, mean + half_ci
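The test_categorical.py hunks replace integer slicing through `[]` (`y[::-1]`, `grp_y[::2]`) with the explicit positional accessor `.iloc`, avoiding the ambiguity between label-based and position-based slicing that pandas has been moving away from. A minimal illustration on a throwaway Series (the integer index here is invented to show why `[]` slicing can be ambiguous):

import pandas as pd

y = pd.Series([1.0, 2.0, 3.0, 4.0], index=[10, 20, 30, 40])

# .iloc is unambiguously positional, regardless of the index labels.
reversed_y = y.iloc[::-1]    # last element first
evens = y.iloc[::2]          # every second element by position
odds = y.iloc[1::2]          # the remaining elements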
