Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions awswrangler/s3.py
Original file line number Diff line number Diff line change
Expand Up @@ -677,6 +677,8 @@ def to_csv( # pylint: disable=too-many-arguments
raise exceptions.InvalidArgumentCombination("Please, pass dataset=True to be able to use partition_cols.")
if mode is not None:
raise exceptions.InvalidArgumentCombination("Please pass dataset=True to be able to use mode.")
if columns_comments:
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Good catch!

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thanks for the good document :p

raise exceptions.InvalidArgumentCombination("Please pass dataset=True to be able to use columns_comments.")
if any(arg is not None for arg in (database, table, description, parameters)):
raise exceptions.InvalidArgumentCombination(
"Please pass dataset=True to be able to use any one of these "
Expand Down
102 changes: 101 additions & 1 deletion testing/test_awswrangler/test_moto.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,11 +2,12 @@
import botocore
import mock
import moto
import pandas as pd
import pytest
from botocore.exceptions import ClientError

import awswrangler as wr

from awswrangler.exceptions import InvalidArgumentCombination, EmptyDataFrame
from ._utils import ensure_data_types, get_df_csv, get_df_list


Expand Down Expand Up @@ -217,6 +218,105 @@ def test_csv(s3):
assert len(df.columns) == 10


def test_to_csv_invalid_argument_combination_raise_when_dataset_false_succeed(s3):
    """to_csv must raise InvalidArgumentCombination when arguments that
    require dataset=True (database, table, partition_cols, mode,
    description, parameters, columns_comments) are passed while
    dataset is False (explicitly or by default).

    NOTE(review): the original test repeated the partition_cols case
    twice verbatim; the duplicate has been removed and the cases folded
    into a single data-driven loop.
    """
    path = "s3://bucket/test.csv"
    # Each kwargs set is only legal together with dataset=True.
    invalid_kwargs = (
        {"database": "foo"},  # dataset defaults to False
        {"table": "foo"},
        {"dataset": False, "partition_cols": ["par0", "par1"]},
        {"dataset": False, "mode": "append"},
        {"dataset": False, "database": "default", "table": "test"},
        {"dataset": False, "description": "raise exception"},
        {"dataset": False, "parameters": {"key": "value"}},
        {"dataset": False, "columns_comments": {"col0": "test"}},
    )
    for kwargs in invalid_kwargs:
        with pytest.raises(InvalidArgumentCombination):
            wr.s3.to_csv(df=get_df_csv(), path=path, index=False, **kwargs)


def test_to_csv_valid_argument_combination_when_dataset_true_succeed(s3):
    """to_csv must complete without raising for argument combinations
    that are valid: a plain write with defaults, and each dataset-only
    argument paired with dataset=True.
    """
    path = "s3://bucket/test.csv"
    # Plain write: no dataset-only arguments, defaults apply.
    wr.s3.to_csv(df=get_df_csv(), path=path, index=False)
    # Each of these extras is valid when combined with dataset=True.
    dataset_extras = (
        {"partition_cols": ["par0", "par1"]},
        {"mode": "append"},
        {"description": "raise exception"},
        {"parameters": {"key": "value"}},
        {"columns_comments": {"col0": "test"}},
    )
    for extra in dataset_extras:
        wr.s3.to_csv(df=get_df_csv(), path=path, index=False, dataset=True, **extra)


def test_to_csv_data_empty_raise_succeed(s3):
    """Writing an empty DataFrame must raise EmptyDataFrame."""
    empty_frame = pd.DataFrame()
    with pytest.raises(EmptyDataFrame):
        wr.s3.to_csv(df=empty_frame, path="s3://bucket/test.csv", index=False)


def test_parquet(s3):
path = "s3://bucket/test.parquet"
wr.s3.to_parquet(df=get_df_list(), path=path, index=False, dataset=True, partition_cols=["par0", "par1"])
Expand Down