diff --git a/.bumpversion.cfg b/.bumpversion.cfg index 41a844e08..f8f647f4d 100644 --- a/.bumpversion.cfg +++ b/.bumpversion.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 3.0.0rc1 +current_version = 3.0.0rc2 commit = False tag = False tag_name = {new_version} @@ -25,8 +25,6 @@ first_value = 1 [bumpversion:file:README.md] -[bumpversion:file:CONTRIBUTING.md] - [bumpversion:file:CONTRIBUTING_COMMON_ERRORS.md] [bumpversion:file:tests/unit/test_metadata.py] diff --git a/CONTRIBUTING_COMMON_ERRORS.md b/CONTRIBUTING_COMMON_ERRORS.md index e7a212e1c..041fbac34 100644 --- a/CONTRIBUTING_COMMON_ERRORS.md +++ b/CONTRIBUTING_COMMON_ERRORS.md @@ -13,9 +13,9 @@ Requirement already satisfied: pbr!=2.1.0,>=2.0.0 in ./.venv/lib/python3.7/site- Using legacy 'setup.py install' for python-Levenshtein, since package 'wheel' is not installed. Installing collected packages: awswrangler, python-Levenshtein Attempting uninstall: awswrangler - Found existing installation: awswrangler 3.0.0rc1 - Uninstalling awswrangler-3.0.0rc1: - Successfully uninstalled awswrangler-3.0.0rc1 + Found existing installation: awswrangler 3.0.0rc2 + Uninstalling awswrangler-3.0.0rc2: + Successfully uninstalled awswrangler-3.0.0rc2 Running setup.py develop for awswrangler Running setup.py install for python-Levenshtein ... error ERROR: Command errored out with exit status 1: diff --git a/README.md b/README.md index f75c9759f..34ca90ae4 100644 --- a/README.md +++ b/README.md @@ -11,7 +11,7 @@ Easy integration with Athena, Glue, Redshift, Timestream, OpenSearch, Neptune, Q > An [AWS Professional Service](https://aws.amazon.com/professional-services/) open source initiative | aws-proserve-opensource@amazon.com -[![Release](https://img.shields.io/badge/release-3.0.0rc1-brightgreen.svg)](https://pypi.org/project/awswrangler/) +[![Release](https://img.shields.io/badge/release-3.0.0rc2-brightgreen.svg)](https://pypi.org/project/awswrangler/) [![Python Version](https://img.shields.io/badge/python-3.7%20%7C%203.8%20%7C%203.9%20%7C%203.10-brightgreen.svg)](https://anaconda.org/conda-forge/awswrangler) [![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) @@ -26,7 +26,7 @@ Easy integration with Athena, Glue, Redshift, Timestream, OpenSearch, Neptune, Q | **[PyPi](https://pypi.org/project/awswrangler/)** | [![PyPI Downloads](https://pepy.tech/badge/awswrangler)](https://pypi.org/project/awswrangler/) | `pip install awswrangler` | | **[Conda](https://anaconda.org/conda-forge/awswrangler)** | [![Conda Downloads](https://img.shields.io/conda/dn/conda-forge/awswrangler.svg)](https://anaconda.org/conda-forge/awswrangler) | `conda install -c conda-forge awswrangler` | -> ⚠️ **For platforms without PyArrow 3 support (e.g. [EMR](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/install.html#emr-cluster), [Glue PySpark Job](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/install.html#aws-glue-pyspark-jobs), MWAA):**
+> ⚠️ **For platforms without PyArrow 3 support (e.g. [EMR](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/install.html#emr-cluster), [Glue PySpark Job](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/install.html#aws-glue-pyspark-jobs), MWAA):**
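The warning above, together with the ➡️ `pip install pyarrow==2 awswrangler` command just below it, pins PyArrow 2 on platforms whose runtimes cannot load PyArrow 3. A minimal post-install sanity check, as a sketch — the version-prefix test and the printed value are illustrative assumptions, not part of the README:

```py3
# Sketch: verify the pyarrow==2 pin took effect before importing awswrangler.
# The "2." prefix check below is an assumption for illustration only.
import pyarrow

if not pyarrow.__version__.startswith("2."):
    raise RuntimeError(f"expected the pyarrow==2 pin, found {pyarrow.__version__}")

import awswrangler as wr

print(wr.__version__)  # "3.0.0rc2" for this release candidate
```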
➡️ `pip install pyarrow==2 awswrangler` Powered By [](https://arrow.apache.org/powered_by/) @@ -45,7 +45,7 @@ Powered By [](http Installation command: `pip install awswrangler` -> ⚠️ **For platforms without PyArrow 3 support (e.g. [EMR](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/install.html#emr-cluster), [Glue PySpark Job](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/install.html#aws-glue-pyspark-jobs), MWAA):**
+> ⚠️ **For platforms without PyArrow 3 support (e.g. [EMR](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/install.html#emr-cluster), [Glue PySpark Job](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/install.html#aws-glue-pyspark-jobs), MWAA):**
➡️`pip install pyarrow==2 awswrangler` ```py3 @@ -102,7 +102,7 @@ AWS SDK for pandas can also run your workflows at scale by leveraging [modin](ht ### Installation ``` -pip install "awswrangler[modin,ray]==3.0.0rc1" +pip install "awswrangler[modin,ray]==3.0.0rc2" ``` As a result existing scripts can run on significantly larger datasets with no code rewrite. Supported APIs are parallelized across cores on a single machine or across multiple nodes on a cluster in the cloud. @@ -133,17 +133,17 @@ As a result existing scripts can run on significantly larger datasets with no co ## [Read The Docs](https://aws-sdk-pandas.readthedocs.io/) -- [**What is AWS SDK for pandas?**](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/what.html) -- [**Install**](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/install.html) - - [PyPi (pip)](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/install.html#pypi-pip) - - [Conda](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/install.html#conda) - - [AWS Lambda Layer](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/install.html#aws-lambda-layer) - - [AWS Glue Python Shell Jobs](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/install.html#aws-glue-python-shell-jobs) - - [AWS Glue PySpark Jobs](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/install.html#aws-glue-pyspark-jobs) - - [Amazon SageMaker Notebook](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/install.html#amazon-sagemaker-notebook) - - [Amazon SageMaker Notebook Lifecycle](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/install.html#amazon-sagemaker-notebook-lifecycle) - - [EMR](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/install.html#emr) - - [From source](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/install.html#from-source) +- [**What is AWS SDK for pandas?**](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/what.html) +- [**Install**](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/install.html) + - [PyPi (pip)](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/install.html#pypi-pip) + - [Conda](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/install.html#conda) + - [AWS Lambda Layer](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/install.html#aws-lambda-layer) + - [AWS Glue Python Shell Jobs](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/install.html#aws-glue-python-shell-jobs) + - [AWS Glue PySpark Jobs](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/install.html#aws-glue-pyspark-jobs) + - [Amazon SageMaker Notebook](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/install.html#amazon-sagemaker-notebook) + - [Amazon SageMaker Notebook Lifecycle](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/install.html#amazon-sagemaker-notebook-lifecycle) + - [EMR](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/install.html#emr) + - [From source](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/install.html#from-source) - [**Tutorials**](https://github.com/aws/aws-sdk-pandas/tree/main/tutorials) - [001 - Introduction](https://github.com/aws/aws-sdk-pandas/blob/main/tutorials/001%20-%20Introduction.ipynb) - [002 - Sessions](https://github.com/aws/aws-sdk-pandas/blob/main/tutorials/002%20-%20Sessions.ipynb) @@ -178,31 +178,31 @@ As a result existing scripts can run on significantly larger datasets with no co - [031 - OpenSearch](https://github.com/aws/aws-sdk-pandas/blob/main/tutorials/031%20-%20OpenSearch.ipynb) - [032 - Lake Formation Governed Tables](https://github.com/aws/aws-sdk-pandas/blob/main/tutorials/032%20-%20Lake%20Formation%20Governed%20Tables.ipynb) - 
[033 - Amazon Neptune](https://github.com/aws/aws-sdk-pandas/blob/main/tutorials/033%20-%20Amazon%20Neptune.ipynb) - - [034 - Distributing Calls Using Ray](https://github.com/aws/aws-sdk-pandas/blob/release-3.0.0/tutorials/034%20-%20Distributing%20Calls%20using%20Ray.ipynb) - - [035 - Distributing Calls on Ray Remote Cluster](https://github.com/aws/aws-sdk-pandas/blob/release-3.0.0/tutorials/035%20-%20Distributing%20Calls%20on%20Ray%20Remote%20Cluster.ipynb) -- [**API Reference**](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/api.html) - - [Amazon S3](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/api.html#amazon-s3) - - [AWS Glue Catalog](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/api.html#aws-glue-catalog) - - [Amazon Athena](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/api.html#amazon-athena) - - [AWS Lake Formation](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/api.html#aws-lake-formation) - - [Amazon Redshift](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/api.html#amazon-redshift) - - [PostgreSQL](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/api.html#postgresql) - - [MySQL](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/api.html#mysql) - - [SQL Server](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/api.html#sqlserver) - - [Oracle](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/api.html#oracle) - - [Data API Redshift](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/api.html#data-api-redshift) - - [Data API RDS](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/api.html#data-api-rds) - - [OpenSearch](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/api.html#opensearch) - - [Amazon Neptune](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/api.html#amazon-neptune) - - [DynamoDB](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/api.html#dynamodb) - - [Amazon Timestream](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/api.html#amazon-timestream) - - [Amazon EMR](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/api.html#amazon-emr) - - [Amazon CloudWatch Logs](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/api.html#amazon-cloudwatch-logs) - - [Amazon Chime](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/api.html#amazon-chime) - - [Amazon QuickSight](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/api.html#amazon-quicksight) - - [AWS STS](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/api.html#aws-sts) - - [AWS Secrets Manager](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/api.html#aws-secrets-manager) - - [Global Configurations](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/api.html#global-configurations) + - [034 - Distributing Calls Using Ray](https://github.com/aws/aws-sdk-pandas/blob/release-3.0.0rc2/tutorials/034%20-%20Distributing%20Calls%20using%20Ray.ipynb) + - [035 - Distributing Calls on Ray Remote Cluster](https://github.com/aws/aws-sdk-pandas/blob/release-3.0.0rc2/tutorials/035%20-%20Distributing%20Calls%20on%20Ray%20Remote%20Cluster.ipynb) +- [**API Reference**](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/api.html) + - [Amazon S3](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/api.html#amazon-s3) + - [AWS Glue Catalog](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/api.html#aws-glue-catalog) + - [Amazon Athena](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/api.html#amazon-athena) + - [AWS Lake Formation](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/api.html#aws-lake-formation) + - [Amazon Redshift](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/api.html#amazon-redshift) + - 
[PostgreSQL](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/api.html#postgresql) + - [MySQL](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/api.html#mysql) + - [SQL Server](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/api.html#sqlserver) + - [Oracle](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/api.html#oracle) + - [Data API Redshift](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/api.html#data-api-redshift) + - [Data API RDS](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/api.html#data-api-rds) + - [OpenSearch](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/api.html#opensearch) + - [Amazon Neptune](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/api.html#amazon-neptune) + - [DynamoDB](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/api.html#dynamodb) + - [Amazon Timestream](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/api.html#amazon-timestream) + - [Amazon EMR](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/api.html#amazon-emr) + - [Amazon CloudWatch Logs](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/api.html#amazon-cloudwatch-logs) + - [Amazon Chime](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/api.html#amazon-chime) + - [Amazon QuickSight](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/api.html#amazon-quicksight) + - [AWS STS](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/api.html#aws-sts) + - [AWS Secrets Manager](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/api.html#aws-secrets-manager) + - [Global Configurations](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/api.html#global-configurations) - [**License**](https://github.com/aws/aws-sdk-pandas/blob/main/LICENSE.txt) - [**Contributing**](https://github.com/aws/aws-sdk-pandas/blob/main/CONTRIBUTING.md) diff --git a/VERSION b/VERSION index f7997803d..97c4cc408 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -3.0.0rc1 \ No newline at end of file +3.0.0rc2 \ No newline at end of file diff --git a/awswrangler/__metadata__.py b/awswrangler/__metadata__.py index 55f96c1d6..056e11018 100644 --- a/awswrangler/__metadata__.py +++ b/awswrangler/__metadata__.py @@ -7,5 +7,5 @@ __title__: str = "awswrangler" __description__: str = "Pandas on AWS." -__version__: str = "3.0.0rc1" +__version__: str = "3.0.0rc2" __license__: str = "Apache License 2.0" diff --git a/awswrangler/athena/_read.py b/awswrangler/athena/_read.py index 19c063c94..5fd674382 100644 --- a/awswrangler/athena/_read.py +++ b/awswrangler/athena/_read.py @@ -689,11 +689,11 @@ def read_sql_query( **Related tutorial:** - - `Amazon Athena `_ - - `Athena Cache `_ - - `Global Configurations `_ **There are three approaches available through ctas_approach and unload_approach parameters:** @@ -757,7 +757,7 @@ def read_sql_query( /athena.html#Athena.Client.get_query_execution>`_ . For a practical example check out the - `related tutorial `_! @@ -1002,11 +1002,11 @@ def read_sql_table( **Related tutorial:** - - `Amazon Athena `_ - - `Athena Cache `_ - - `Global Configurations `_ **There are two approaches to be defined through ctas_approach parameter:** @@ -1051,7 +1051,7 @@ def read_sql_table( /athena.html#Athena.Client.get_query_execution>`_ . For a practical example check out the - `related tutorial `_! diff --git a/awswrangler/s3/_read_parquet.py b/awswrangler/s3/_read_parquet.py index 8059ea2c3..61a1fa184 100644 --- a/awswrangler/s3/_read_parquet.py +++ b/awswrangler/s3/_read_parquet.py @@ -410,7 +410,7 @@ def read_parquet( must return a bool, True to read the partition or False to ignore it. Ignored if `dataset=False`. 
E.g ``lambda x: True if x["year"] == "2020" and x["month"] == "1" else False`` - https://aws-data-wrangler.readthedocs.io/en/3.0.0rc1/tutorials/023%20-%20Flexible%20Partitions%20Filter.html + https://aws-data-wrangler.readthedocs.io/en/3.0.0rc2/tutorials/023%20-%20Flexible%20Partitions%20Filter.html columns : List[str], optional List of columns to read from the file(s). validate_schema : bool, default False @@ -622,7 +622,7 @@ def read_parquet_table( must return a bool, True to read the partition or False to ignore it. Ignored if `dataset=False`. E.g ``lambda x: True if x["year"] == "2020" and x["month"] == "1" else False`` - https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/tutorials/023%20-%20Flexible%20Partitions%20Filter.html + https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/tutorials/023%20-%20Flexible%20Partitions%20Filter.html columns : List[str], optional List of columns to read from the file(s). validate_schema : bool, default False diff --git a/awswrangler/s3/_read_text.py b/awswrangler/s3/_read_text.py index aafb0d82d..6cebab094 100644 --- a/awswrangler/s3/_read_text.py +++ b/awswrangler/s3/_read_text.py @@ -225,7 +225,7 @@ def read_csv( This function MUST return a bool, True to read the partition or False to ignore it. Ignored if `dataset=False`. E.g ``lambda x: True if x["year"] == "2020" and x["month"] == "1" else False`` - https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/tutorials/023%20-%20Flexible%20Partitions%20Filter.html + https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/tutorials/023%20-%20Flexible%20Partitions%20Filter.html parallelism : int, optional The requested parallelism of the read. Only used when `distributed` add-on is installed. Parallelism may be limited by the number of files of the dataset. 200 by default. @@ -378,7 +378,7 @@ def read_fwf( This function MUST return a bool, True to read the partition or False to ignore it. Ignored if `dataset=False`. E.g ``lambda x: True if x["year"] == "2020" and x["month"] == "1" else False`` - https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/tutorials/023%20-%20Flexible%20Partitions%20Filter.html + https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/tutorials/023%20-%20Flexible%20Partitions%20Filter.html parallelism : int, optional The requested parallelism of the read. Only used when `distributed` add-on is installed. Parallelism may be limited by the number of files of the dataset. 200 by default. @@ -535,7 +535,7 @@ def read_json( This function MUST return a bool, True to read the partition or False to ignore it. Ignored if `dataset=False`. E.g ``lambda x: True if x["year"] == "2020" and x["month"] == "1" else False`` - https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/tutorials/023%20-%20Flexible%20Partitions%20Filter.html + https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/tutorials/023%20-%20Flexible%20Partitions%20Filter.html parallelism : int, optional The requested parallelism of the read. Only used when `distributed` add-on is installed. Parallelism may be limited by the number of files of the dataset. 200 by default. diff --git a/awswrangler/s3/_write_parquet.py b/awswrangler/s3/_write_parquet.py index 32d78b8d4..3e474c59a 100644 --- a/awswrangler/s3/_write_parquet.py +++ b/awswrangler/s3/_write_parquet.py @@ -306,18 +306,18 @@ def to_parquet( # pylint: disable=too-many-arguments,too-many-locals,too-many-b concurrent_partitioning: bool If True will increase the parallelism level during the partitions writing. It will decrease the writing time and increase the memory usage. 
- https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/tutorials/022%20-%20Writing%20Partitions%20Concurrently.html + https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/tutorials/022%20-%20Writing%20Partitions%20Concurrently.html mode: str, optional ``append`` (Default), ``overwrite``, ``overwrite_partitions``. Only takes effect if dataset=True. For details check the related tutorial: - https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/tutorials/004%20-%20Parquet%20Datasets.html + https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/tutorials/004%20-%20Parquet%20Datasets.html catalog_versioning : bool If True and `mode="overwrite"`, creates an archived version of the table catalog before updating it. schema_evolution : bool If True allows schema evolution (new or missing columns), otherwise a exception will be raised. True by default. (Only considered if dataset=True and mode in ("append", "overwrite_partitions")) Related tutorial: - https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/tutorials/014%20-%20Schema%20Evolution.html + https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/tutorials/014%20-%20Schema%20Evolution.html database : str, optional Glue/Athena catalog: Database name. table : str, optional diff --git a/awswrangler/s3/_write_text.py b/awswrangler/s3/_write_text.py index 937759d07..40d0bfc73 100644 --- a/awswrangler/s3/_write_text.py +++ b/awswrangler/s3/_write_text.py @@ -178,18 +178,18 @@ def to_csv( # pylint: disable=too-many-arguments,too-many-locals,too-many-state concurrent_partitioning: bool If True will increase the parallelism level during the partitions writing. It will decrease the writing time and increase the memory usage. - https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/tutorials/022%20-%20Writing%20Partitions%20Concurrently.html + https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/tutorials/022%20-%20Writing%20Partitions%20Concurrently.html mode : str, optional ``append`` (Default), ``overwrite``, ``overwrite_partitions``. Only takes effect if dataset=True. For details check the related tutorial: - https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/stubs/awswrangler.s3.to_parquet.html#awswrangler.s3.to_parquet + https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/stubs/awswrangler.s3.to_parquet.html#awswrangler.s3.to_parquet catalog_versioning : bool If True and `mode="overwrite"`, creates an archived version of the table catalog before updating it. schema_evolution : bool If True allows schema evolution (new or missing columns), otherwise a exception will be raised. (Only considered if dataset=True and mode in ("append", "overwrite_partitions")). False by default. Related tutorial: - https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/tutorials/014%20-%20Schema%20Evolution.html + https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/tutorials/014%20-%20Schema%20Evolution.html database : str, optional Glue/Athena catalog: Database name. table : str, optional @@ -734,18 +734,18 @@ def to_json( # pylint: disable=too-many-arguments,too-many-locals,too-many-stat concurrent_partitioning: bool If True will increase the parallelism level during the partitions writing. It will decrease the writing time and increase the memory usage. - https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/tutorials/022%20-%20Writing%20Partitions%20Concurrently.html + https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/tutorials/022%20-%20Writing%20Partitions%20Concurrently.html mode : str, optional ``append`` (Default), ``overwrite``, ``overwrite_partitions``. Only takes effect if dataset=True. 
For details check the related tutorial: - https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/stubs/awswrangler.s3.to_parquet.html#awswrangler.s3.to_parquet + https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/stubs/awswrangler.s3.to_parquet.html#awswrangler.s3.to_parquet catalog_versioning : bool If True and `mode="overwrite"`, creates an archived version of the table catalog before updating it. schema_evolution : bool If True allows schema evolution (new or missing columns), otherwise a exception will be raised. (Only considered if dataset=True and mode in ("append", "overwrite_partitions")) Related tutorial: - https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/tutorials/014%20-%20Schema%20Evolution.html + https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/tutorials/014%20-%20Schema%20Evolution.html database : str, optional Glue/Athena catalog: Database name. table : str, optional diff --git a/docs/source/about.rst b/docs/source/about.rst index b2733d3b0..c3d2ff5da 100644 --- a/docs/source/about.rst +++ b/docs/source/about.rst @@ -6,6 +6,6 @@ An `AWS Professional Service `_ `o Easy integration with Athena, Glue, Redshift, Timestream, OpenSearch, Neptune, QuickSight, Chime, CloudWatchLogs, DynamoDB, EMR, SecretManager, PostgreSQL, MySQL, SQLServer and S3 (Parquet, CSV, JSON and EXCEL). -Built on top of other open-source projects like `Pandas `_, `Apache Arrow `_ and `Boto3 `_, it offers abstracted functions to execute your usual ETL tasks like load/unloading data from **Data Lakes**, **Data Warehouses** and **Databases**, even `at scale `_. +Built on top of other open-source projects like `Pandas `_, `Apache Arrow `_ and `Boto3 `_, it offers abstracted functions to execute your usual ETL tasks like load/unloading data from **Data Lakes**, **Data Warehouses** and **Databases**, even `at scale `_. -Check our `tutorials `_ or the `list of functionalities `_. +Check our `tutorials `_ or the `list of functionalities `_. diff --git a/docs/source/install.rst b/docs/source/install.rst index 39fa2b063..25505ebc0 100644 --- a/docs/source/install.rst +++ b/docs/source/install.rst @@ -27,7 +27,7 @@ At scale AWS SDK for pandas can also run your workflows at scale by leveraging `modin `_ and `ray `_. - >>> pip install "awswrangler[modin,ray]==3.0.0rc1" + >>> pip install "awswrangler[modin,ray]==3.0.0rc2" As a result existing scripts can run on significantly larger datasets with no code rewrite. @@ -156,7 +156,7 @@ Go to your Glue PySpark job and create a new *Job parameters* key/value: To install a specific version, set the value for the above Job parameter as follows: -* Value: ``cython==0.29.21,pg8000==1.21.0,pyarrow==2,pandas==1.3.0,awswrangler==3.0.0rc1`` +* Value: ``cython==0.29.21,pg8000==1.21.0,pyarrow==2,pandas==1.3.0,awswrangler==3.0.0rc2`` .. note:: Pyarrow 3 is not currently supported in Glue PySpark Jobs, which is why an installation of pyarrow 2 is required. @@ -175,7 +175,7 @@ Lambda zipped layers and Python wheels are stored in a publicly accessible S3 bu * Python wheel: ``awswrangler--py3-none-any.whl`` -For example: ``s3://aws-data-wrangler-public-artifacts/releases/3.0.0rc1/awswrangler-layer-3.0.0rc1-py3.8.zip`` +For example: ``s3://aws-data-wrangler-public-artifacts/releases/3.0.0rc2/awswrangler-layer-3.0.0rc2-py3.8.zip`` Amazon SageMaker Notebook ------------------------- @@ -265,7 +265,7 @@ Despite not being a distributed library, AWS SDK for pandas could be used to com sudo pip install pyarrow==2 awswrangler .. 
note:: Make sure to freeze the library version in the bootstrap for production - environments (e.g. awswrangler==3.0.0rc1) + environments (e.g. awswrangler==3.0.0rc2) .. note:: Pyarrow 3 is not currently supported in the default EMR image, which is why an installation of pyarrow 2 is required. diff --git a/docs/source/layers.rst b/docs/source/layers.rst index 900c1775c..ee18f798a 100644 --- a/docs/source/layers.rst +++ b/docs/source/layers.rst @@ -2,7 +2,7 @@ AWS Lambda Managed Layers ========================== -Version 3.0.0rc1 +Version 3.0.0rc2 ^^^^^^^^^^^^^^^^^ +----------------+--------+-------+-----------------------------------------------------------------------------------+ diff --git a/pyproject.toml b/pyproject.toml index f316b84d3..c61af13c4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "awswrangler" -version = "3.0.0rc1" +version = "3.0.0rc2" description = "Pandas on AWS." authors = ["Amazon Web Services"] license = "Apache License 2.0" diff --git a/test_infra/pyproject.toml b/test_infra/pyproject.toml index c4a6f789d..b5aeb76e4 100644 --- a/test_infra/pyproject.toml +++ b/test_infra/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "awswrangler - test infrastructure" -version = "3.0.0rc1" +version = "3.0.0rc2" description = "CDK test infrastructure for AWS" authors = ["Amazon Web Services"] license = "Apache License 2.0" diff --git a/tests/unit/test_metadata.py b/tests/unit/test_metadata.py index e150514f1..dc39924c8 100644 --- a/tests/unit/test_metadata.py +++ b/tests/unit/test_metadata.py @@ -2,7 +2,7 @@ def test_metadata(): - assert wr.__version__ == "3.0.0rc1" + assert wr.__version__ == "3.0.0rc2" assert wr.__title__ == "awswrangler" assert wr.__description__ == "Pandas on AWS." assert wr.__license__ == "Apache License 2.0" diff --git a/tutorials/001 - Introduction.ipynb b/tutorials/001 - Introduction.ipynb index a9237c331..82be87825 100644 --- a/tutorials/001 - Introduction.ipynb +++ b/tutorials/001 - Introduction.ipynb @@ -23,7 +23,7 @@ "\n", "Built on top of other open-source projects like [Pandas](https://github.com/pandas-dev/pandas), [Apache Arrow](https://github.com/apache/arrow) and [Boto3](https://github.com/boto/boto3), it offers abstracted functions to execute usual ETL tasks like load/unload data from **Data Lakes**, **Data Warehouses** and **Databases**.\n", "\n", - "Check our [list of functionalities](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/api.html)." + "Check our [list of functionalities](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/api.html)." 
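The introduction cell above ends by pointing readers at the API list; as a hedged companion to the "usual ETL tasks" it mentions, a quickstart might look like the sketch below. Every name in it (bucket, database, table) is a hypothetical placeholder, not a value taken from the tutorial:

```py3
# Sketch only: the bucket, database, and table names are placeholders.
import awswrangler as wr
import pandas as pd

df = pd.DataFrame({"id": [1, 2], "value": ["foo", "boo"]})

# Write a Parquet dataset to S3 and register it in the Glue catalog.
wr.s3.to_parquet(
    df=df,
    path="s3://my-example-bucket/dataset/",  # assumed bucket
    dataset=True,
    database="my_db",   # assumed Glue database
    table="my_table",   # assumed table name
)

# Read it back through Athena.
df2 = wr.athena.read_sql_query("SELECT * FROM my_table", database="my_db")
```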
], "metadata": { "collapsed": false, @@ -39,15 +39,15 @@ "\n", "awswrangler runs almost anywhere over Python 3.7, 3.8, 3.9 and 3.10, so there are several different ways to install it in the desired environment.\n", "\n", - " - [PyPi (pip)](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/install.html#pypi-pip)\n", - " - [Conda](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/install.html#conda)\n", - " - [AWS Lambda Layer](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/install.html#aws-lambda-layer)\n", - " - [AWS Glue Python Shell Jobs](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/install.html#aws-glue-python-shell-jobs)\n", - " - [AWS Glue PySpark Jobs](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/install.html#aws-glue-pyspark-jobs)\n", - " - [Amazon SageMaker Notebook](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/install.html#amazon-sagemaker-notebook)\n", - " - [Amazon SageMaker Notebook Lifecycle](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/install.html#amazon-sagemaker-notebook-lifecycle)\n", - " - [EMR Cluster](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/install.html#emr-cluster)\n", - " - [From source](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/install.html#from-source)\n", + " - [PyPi (pip)](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/install.html#pypi-pip)\n", + " - [Conda](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/install.html#conda)\n", + " - [AWS Lambda Layer](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/install.html#aws-lambda-layer)\n", + " - [AWS Glue Python Shell Jobs](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/install.html#aws-glue-python-shell-jobs)\n", + " - [AWS Glue PySpark Jobs](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/install.html#aws-glue-pyspark-jobs)\n", + " - [Amazon SageMaker Notebook](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/install.html#amazon-sagemaker-notebook)\n", + " - [Amazon SageMaker Notebook Lifecycle](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/install.html#amazon-sagemaker-notebook-lifecycle)\n", + " - [EMR Cluster](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/install.html#emr-cluster)\n", + " - [From source](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/install.html#from-source)\n", "\n", "Some good practices for most of the above methods are:\n", " - Use new and individual Virtual Environments for each project ([venv](https://docs.python.org/3/library/venv.html))\n", diff --git a/tutorials/007 - Redshift, MySQL, PostgreSQL, SQL Server, Oracle.ipynb b/tutorials/007 - Redshift, MySQL, PostgreSQL, SQL Server, Oracle.ipynb index 9fae9944b..52229f2e1 100644 --- a/tutorials/007 - Redshift, MySQL, PostgreSQL, SQL Server, Oracle.ipynb +++ b/tutorials/007 - Redshift, MySQL, PostgreSQL, SQL Server, Oracle.ipynb @@ -10,16 +10,16 @@ "\n", "[awswrangler](https://github.com/aws/aws-sdk-pandas)'s Redshift, MySQL and PostgreSQL have two basic functions in common that try to follow Pandas conventions, but add more data type consistency.\n", "\n", - "- [wr.redshift.to_sql()](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/stubs/awswrangler.redshift.to_sql.html)\n", - "- [wr.redshift.read_sql_query()](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/stubs/awswrangler.redshift.read_sql_query.html)\n", - "- [wr.mysql.to_sql()](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/stubs/awswrangler.mysql.to_sql.html)\n", - "- [wr.mysql.read_sql_query()](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/stubs/awswrangler.mysql.read_sql_query.html)\n", - "- 
[wr.postgresql.to_sql()](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/stubs/awswrangler.postgresql.to_sql.html)\n", - "- [wr.postgresql.read_sql_query()](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/stubs/awswrangler.postgresql.read_sql_query.html)\n", - "- [wr.sqlserver.to_sql()](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/stubs/awswrangler.sqlserver.to_sql.html)\n", - "- [wr.sqlserver.read_sql_query()](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/stubs/awswrangler.sqlserver.read_sql_query.html)", - "- [wr.oracle.to_sql()](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/stubs/awswrangler.oracle.to_sql.html)\n", - "- [wr.oracle.read_sql_query()](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/stubs/awswrangler.oracle.read_sql_query.html)" + "- [wr.redshift.to_sql()](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/stubs/awswrangler.redshift.to_sql.html)\n", + "- [wr.redshift.read_sql_query()](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/stubs/awswrangler.redshift.read_sql_query.html)\n", + "- [wr.mysql.to_sql()](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/stubs/awswrangler.mysql.to_sql.html)\n", + "- [wr.mysql.read_sql_query()](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/stubs/awswrangler.mysql.read_sql_query.html)\n", + "- [wr.postgresql.to_sql()](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/stubs/awswrangler.postgresql.to_sql.html)\n", + "- [wr.postgresql.read_sql_query()](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/stubs/awswrangler.postgresql.read_sql_query.html)\n", + "- [wr.sqlserver.to_sql()](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/stubs/awswrangler.sqlserver.to_sql.html)\n", + "- [wr.sqlserver.read_sql_query()](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/stubs/awswrangler.sqlserver.read_sql_query.html)", + "- [wr.oracle.to_sql()](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/stubs/awswrangler.oracle.to_sql.html)\n", + "- [wr.oracle.read_sql_query()](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/stubs/awswrangler.oracle.read_sql_query.html)" ] }, { @@ -43,11 +43,11 @@ "source": [ "## Connect using the Glue Catalog Connections\n", "\n", - "- [wr.redshift.connect()](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/stubs/awswrangler.redshift.connect.html)\n", - "- [wr.mysql.connect()](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/stubs/awswrangler.mysql.connect.html)\n", - "- [wr.postgresql.connect()](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/stubs/awswrangler.postgresql.connect.html)\n", - "- [wr.sqlserver.connect()](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/stubs/awswrangler.sqlserver.connect.html)", - "- [wr.oracle.connect()](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/stubs/awswrangler.oracle.connect.html)" + "- [wr.redshift.connect()](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/stubs/awswrangler.redshift.connect.html)\n", + "- [wr.mysql.connect()](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/stubs/awswrangler.mysql.connect.html)\n", + "- [wr.postgresql.connect()](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/stubs/awswrangler.postgresql.connect.html)\n", + "- [wr.sqlserver.connect()](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/stubs/awswrangler.sqlserver.connect.html)", + "- [wr.oracle.connect()](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/stubs/awswrangler.oracle.connect.html)" ] }, { diff --git a/tutorials/014 - Schema Evolution.ipynb b/tutorials/014 - Schema Evolution.ipynb index ce9fb39af..9b1ede83f 100644 --- a/tutorials/014 - Schema 
Evolution.ipynb +++ b/tutorials/014 - Schema Evolution.ipynb @@ -10,9 +10,9 @@ "\n", "awswrangler supports new **columns** on Parquet and CSV datasets through:\n", "\n", - "- [wr.s3.to_parquet()](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/stubs/awswrangler.s3.to_parquet.html#awswrangler.s3.to_parquet)\n", - "- [wr.s3.store_parquet_metadata()](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/stubs/awswrangler.s3.store_parquet_metadata.html#awswrangler.s3.store_parquet_metadata) i.e. \"Crawler\"\n", - "- [wr.s3.to_csv()](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/stubs/awswrangler.s3.to_csv.html#awswrangler.s3.to_csv)" + "- [wr.s3.to_parquet()](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/stubs/awswrangler.s3.to_parquet.html#awswrangler.s3.to_parquet)\n", + "- [wr.s3.store_parquet_metadata()](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/stubs/awswrangler.s3.store_parquet_metadata.html#awswrangler.s3.store_parquet_metadata) i.e. \"Crawler\"\n", + "- [wr.s3.to_csv()](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/stubs/awswrangler.s3.to_csv.html#awswrangler.s3.to_csv)" ] }, { diff --git a/tutorials/021 - Global Configurations.ipynb b/tutorials/021 - Global Configurations.ipynb index 1c0708fb3..f75724d50 100644 --- a/tutorials/021 - Global Configurations.ipynb +++ b/tutorials/021 - Global Configurations.ipynb @@ -13,7 +13,7 @@ "- **Environment variables**\n", "- **wr.config**\n", "\n", - "*P.S. Check the [function API doc](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/api.html) to see if your function has some argument that can be configured through Global configurations.*\n", + "*P.S. Check the [function API doc](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/api.html) to see if your function has some argument that can be configured through Global configurations.*\n", "\n", "*P.P.S. One exception to the above mentioned rules is the `botocore_config` property. It cannot be set through environment variables\n", "but only via `wr.config`. It will be used as the `botocore.config.Config` for all underlying `boto3` calls.\n", diff --git a/tutorials/022 - Writing Partitions Concurrently.ipynb b/tutorials/022 - Writing Partitions Concurrently.ipynb index 8d1ab9291..cd4d795fe 100644 --- a/tutorials/022 - Writing Partitions Concurrently.ipynb +++ b/tutorials/022 - Writing Partitions Concurrently.ipynb @@ -13,7 +13,7 @@ " If True will increase the parallelism level during the partitions writing. It will decrease the\n", " writing time and increase memory usage.\n", "\n", - "*P.S. Check the [function API doc](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/api.html) to see it has some argument that can be configured through Global configurations.*" + "*P.S. Check the [function API doc](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/api.html) to see it has some argument that can be configured through Global configurations.*" ] }, { diff --git a/tutorials/023 - Flexible Partitions Filter.ipynb b/tutorials/023 - Flexible Partitions Filter.ipynb index 700f49eff..dbbca97d0 100644 --- a/tutorials/023 - Flexible Partitions Filter.ipynb +++ b/tutorials/023 - Flexible Partitions Filter.ipynb @@ -16,7 +16,7 @@ " - Ignored if `dataset=False`.\n", " \n", "\n", - "*P.S. Check the [function API doc](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc1/api.html) to see it has some argument that can be configured through Global configurations.*" + "*P.S. 
Check the [function API doc](https://aws-sdk-pandas.readthedocs.io/en/3.0.0rc2/api.html) to see it has some argument that can be configured through Global configurations.*" ] }, { diff --git a/tutorials/034 - Distributing Calls using Ray.ipynb b/tutorials/034 - Distributing Calls using Ray.ipynb index 136dbcc69..70ea5b6d8 100644 --- a/tutorials/034 - Distributing Calls using Ray.ipynb +++ b/tutorials/034 - Distributing Calls using Ray.ipynb @@ -27,7 +27,7 @@ }, "outputs": [], "source": [ - "!pip install \"awswrangler[modin,ray]==3.0.0rc1\"" + "!pip install \"awswrangler[modin,ray]==3.0.0rc2\"" ] }, { diff --git a/tutorials/035 - Distributing Calls on Ray Remote Cluster.ipynb b/tutorials/035 - Distributing Calls on Ray Remote Cluster.ipynb index e111588d4..b24f42cbe 100644 --- a/tutorials/035 - Distributing Calls on Ray Remote Cluster.ipynb +++ b/tutorials/035 - Distributing Calls on Ray Remote Cluster.ipynb @@ -31,7 +31,7 @@ "outputs": [], "source": [ "\n", - "!pip install \"awswrangler[modin,ray]==3.0.0rc1\"" + "!pip install \"awswrangler[modin,ray]==3.0.0rc2\"" ] }, { @@ -128,7 +128,7 @@ "\n", "\n", "setup_commands:\n", - "- pip install \"awswrangler[modin,ray]==3.0.0rc1\"" + "- pip install \"awswrangler[modin,ray]==3.0.0rc2\"" ] }, {
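The docstring hunks above for `read_parquet`, `read_parquet_table`, `read_csv`, `read_fwf` and `read_json` all repeat the same `partition_filter` example. As a runnable sketch of that callable — the S3 path and partition values are assumptions, and the lambda is the docstrings' `True if ... else False` form simplified to a plain boolean expression:

```py3
# Hedged sketch of the partition_filter callable described in the docstrings.
# The S3 path and the year/month partition values are assumptions.
import awswrangler as wr

df = wr.s3.read_parquet(
    path="s3://my-example-bucket/dataset/",  # assumed partitioned dataset
    dataset=True,  # partition_filter is ignored when dataset=False
    partition_filter=lambda x: x["year"] == "2020" and x["month"] == "1",
)
```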
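Similarly, the `athena/_read.py` hunks bump the tutorial links for the `ctas_approach` and `unload_approach` parameters without showing usage. A hedged sketch of choosing between them — the database name and S3 staging path are assumptions:

```py3
import awswrangler as wr

# Default: CTAS approach (faster for large results, requires CTAS permissions).
df_ctas = wr.athena.read_sql_query(
    "SELECT * FROM my_table",
    database="my_db",  # assumed database
    ctas_approach=True,
)

# UNLOAD approach: stages the result as Parquet at the given S3 path instead.
df_unload = wr.athena.read_sql_query(
    "SELECT * FROM my_table",
    database="my_db",
    ctas_approach=False,
    unload_approach=True,
    s3_output="s3://my-example-bucket/unload/",  # assumed staging path
)
```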
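Finally, tutorials 034 and 035 above pin `awswrangler[modin,ray]==3.0.0rc2` for distributed runs. The sketch below shows the shape of such a call under stated assumptions (a reachable Ray cluster and an existing dataset path); it is not lifted from the notebooks:

```py3
# Assumptions: a Ray cluster is already running and reachable, and the S3
# path points at an existing Parquet dataset.
# Install first: pip install "awswrangler[modin,ray]==3.0.0rc2"
import ray

ray.init(address="auto")  # attach to the assumed running cluster

import awswrangler as wr  # with the modin/ray extras, supported calls distribute

frame = wr.s3.read_parquet(path="s3://my-example-bucket/big-dataset/", dataset=True)
print(type(frame))  # a modin DataFrame when distributed mode is active
```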