Merge pull request #30 from CurtLH/update_cli
Update CLI
CurtLH committed Jan 13, 2021
2 parents 51080bd + 4fca55a commit a286b6f
Showing 1 changed file with 53 additions and 28 deletions.
81 changes: 53 additions & 28 deletions prism/cli.py
@@ -34,7 +34,7 @@ def main(ctx, base_url, tenant_name, client_id, client_secret, refresh_token):
     """CLI for interacting with Workday’s Prism API"""
 
     # initialize the prism class with your credentials
-    p = prism.Prism(base_url, tenant_name, client_id, client_secret, refresh_token)
+    p = prism.Prism(base_url, tenant_name, client_id, client_secret, refresh_token, version="v2")
 
     # create the bearer token
     p.create_bearer_token()
@@ -44,64 +44,89 @@ def main(ctx, base_url, tenant_name, client_id, client_secret, refresh_token):
 
 
 @main.command()
-@click.option("--id", default=None, type=str, help="The ID of the dataset to obtain details about")
+@click.option("--name", default=None, type=str, help="The name of the table to obtain details about")
 @click.pass_context
-def list(ctx, id):
-    """List all datasets of type API"""
+def list(ctx, name):
+    """List all tables of type API"""
 
     # get the initialized prism class
     p = ctx.obj["p"]
 
-    # list the datasets
-    status = p.list_dataset(dataset_id=id)
+    # list the tables
+    status = p.list_table(table_name=name)
 
     # print message
-    if id is None:
-        click.echo("There are {} API datasets".format(status["total"]))
+    if name is None:
+        click.echo("There are {} API tables".format(status["total"]))
         click.echo(json.dumps(status["data"], indent=2, sort_keys=True))
     else:
         click.echo(json.dumps(status, indent=2, sort_keys=True))
 
 
+@main.command()
+@click.argument("table_name", type=str)
+@click.argument("schema_path", type=click.Path())
+@click.pass_context
+def create(ctx, table_name, schema_path):
+    """Create a new Prism table TABLE_NAME with schema from SCHEMA_PATH
+    Example: prism create my_table /home/data/schema.json
+    """
+
+    # get the initialized prism class
+    p = ctx.obj["p"]
+
+    # read in your table schema
+    schema = prism.load_schema(schema_path)
+
+    # clean up the table name
+    table_name = table_name.replace(" ", "_")
+
+    # create an empty API table
+    table = p.create_table(table_name, schema=schema["fields"])
+
+    # print message
+    click.echo(json.dumps(table, indent=2, sort_keys=True))
+
+
 @main.command()
+@click.argument("gzip_file", type=click.Path())
+@click.argument("table_id", type=str)
 @click.option(
-    "--dataset_name",
-    type=str,
-    required=True,
-    help="The dataset name. The name must be unique and conform to the name validation rules",
+    "--operation",
+    type=click.Choice(["TruncateandInsert", "Insert", "Update", "Upsert", "Delete"]),
+    default="TruncateandInsert",
+    help="The Table load operation",
 )
-@click.option("--schema_path", type=click.Path(), required=True, help="The path to your schema file")
-@click.option("--data_path", type=click.Path(), required=True, help="The path to your gzip compressed data file")
 @click.pass_context
-def upload(ctx, dataset_name, schema_path, data_path):
-    """Upload a gzip CSV file"""
+def upload(ctx, gzip_file, table_id, operation):
+    """Upload GZIP_FILE to TABLE_ID
+    Example: prism upload /home/data/file.csv.gz bbab30e3018b01a723524ce18010811b
+    """
 
     # get the initialized prism class
     p = ctx.obj["p"]
 
-    # clean up the dataset name
-    dataset_name = dataset_name.replace(" ", "_")
+    # get the details about the newly created table
+    details = p.describe_table(table_id)
 
-    # create an empty API dataset
-    dataset = p.create_dataset(dataset_name)
-
-    # read in your dataset schema
-    schema = prism.load_schema(schema_path)
+    # convert the details to a bucket schema
+    bucket_schema = p.convert_describe_schema_to_bucket_schema(details)
 
     # create a new bucket to hold your file
-    bucket = p.create_bucket(schema, dataset["id"])
+    bucket = p.create_bucket(bucket_schema, table_id, operation=operation)
 
-    # add your file the bucket you just created
-    p.upload_file_to_bucket(bucket["id"], data_path)
+    # add your file to the bucket you just created
+    p.upload_file_to_bucket(bucket["id"], gzip_file)
 
     # complete the bucket and upload your file
     p.complete_bucket(bucket["id"])
 
-    # check the status of the dataset you just created
-    status = p.list_dataset(dataset["id"])
+    # check the status of the table you just created
+    status = p.list_table(table_id)
 
     # print message
-    click.echo("{} has successfully uploaded".format(dataset_name))
+    click.echo(json.dumps(status["data"], indent=2, sort_keys=True))

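A note for readers following along: the new create command reads SCHEMA_PATH with prism.load_schema and hands schema["fields"] to create_table, so the JSON file evidently carries a top-level "fields" key. Here is a minimal sketch of producing such a file from Python; the per-field layout is a guess for illustration only, since this diff does not show what Prism expects inside each field.

    import json

    # Hypothetical contents for "prism create my_table /home/data/schema.json".
    # Only the top-level "fields" key is implied by cli.py above; the
    # per-field structure below is a placeholder, not the confirmed format.
    schema = {
        "fields": [
            {"name": "employee_id", "type": {"descriptor": "Text"}},
            {"name": "hire_date", "type": {"descriptor": "Date"}},
        ]
    }

    with open("/home/data/schema.json", "w") as fh:
        json.dump(schema, fh, indent=2)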

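For completeness, a minimal sketch of the flow the reworked upload command now drives, stitched together from the prism calls visible in this diff. The credential strings are placeholders, and the table ID and file path are the ones from the docstring example.

    import json
    import prism

    # initialize the client against the v2 API and authenticate,
    # mirroring main() above (all credential values are placeholders)
    p = prism.Prism("base_url", "tenant_name", "client_id", "client_secret", "refresh_token", version="v2")
    p.create_bearer_token()

    table_id = "bbab30e3018b01a723524ce18010811b"  # from the docstring example

    # derive a bucket schema from the target table's definition
    details = p.describe_table(table_id)
    bucket_schema = p.convert_describe_schema_to_bucket_schema(details)

    # stage the gzip file in a new bucket, then complete the bucket to
    # kick off the load (TruncateandInsert is the CLI default operation)
    bucket = p.create_bucket(bucket_schema, table_id, operation="TruncateandInsert")
    p.upload_file_to_bucket(bucket["id"], "/home/data/file.csv.gz")
    p.complete_bucket(bucket["id"])

    # check the status of the table after the load
    status = p.list_table(table_id)
    print(json.dumps(status["data"], indent=2, sort_keys=True))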
