Skip to content
This repository has been archived by the owner on Apr 15, 2022. It is now read-only.

Commit

Permalink
Merge pull request #58 from splicemachine/add_drop_option
Browse files Browse the repository at this point in the history
drop_table function
  • Loading branch information
splicemaahs committed May 18, 2020
2 parents cde40f1 + acd3076 commit ec6433e
Showing 1 changed file with 7 additions and 3 deletions.
10 changes: 7 additions & 3 deletions splicemachine/spark/context.py
Expand Up @@ -288,18 +288,22 @@ def _dropTableIfExists(self, schema_table_name):
print(f'Droping table {schema_table_name}')
self.dropTable(schema_table_name)

def createTable(self, schema_table_name, dataframe, primary_keys=None, create_table_options=None, to_upper=False, drop_table=False):
    """
    Creates a schema.table from a dataframe

    :param schema_table_name: str The schema.table to create
    :param dataframe: The Spark DataFrame to base the table off
    :param primary_keys: List[str] the primary keys. Default None
    :param create_table_options: str The additional table-level SQL options default None
    :param to_upper: bool If the dataframe columns should be converted to uppercase before table creation.
        If False, the table will be created with lower case columns. Default False
    :param drop_table: bool whether to drop the table if it exists. Default False. If False and the table
        exists, the function will throw an exception.
    """
    if to_upper:
        dataframe = self.toUpper(dataframe)
    if drop_table:
        # Best-effort drop so the subsequent create does not fail on an existing table
        self._dropTableIfExists(schema_table_name)
    # Need to convert the Python list of primary keys to a Scala Seq for the JVM-side API
    keys_seq = self.jvm.PythonUtils.toSeq(primary_keys)
    self.context.createTable(schema_table_name, dataframe._jdf.schema(), keys_seq, create_table_options)

0 comments on commit ec6433e

Please sign in to comment.