Abstracting data source idiosyncrasies so you can stop reading Q&A forums and start reading your data.
pip install spackl
from spackl import db, file
conf = db.Config()
pg = db.Postgres(**conf.default)
bq = db.BigQuery(**conf.bq_datalake)
csv = file.CSV('/path/to/file.csv')
# Same method for all sources
pg_results = pg.query('SELECT id FROM schema.some_table')
bq_results = bq.query('SELECT id FROM dataset.some_table')
csv_results = csv.query()
# Access query results (`results` here is any of the result objects above) by index
results[0]
# (1234,)
# By attribute
results.id
# (1234, 1235, 1236)
# By key
results['id']
# (1234, 1235, 1236)
# Index by index
results[0][0]
# 1234
# Attribute by index
results.id[0]
# 1234
# Key by index
results['id'][0]
# 1234
# Index by attribute
results[0].id
# 1234
# Index by key
results[0]['id']
# 1234
# Pandas DataFrame
results.df()
# JSON String
results.json()
# List of tuples
results.list()
# Vertical dictionary
results.dict()