
Commit

Merge branch 'master' into vliu_dashboard_fix
vera-liu committed Nov 12, 2016
2 parents f25d6d5 + fdbb2bb commit 9189e0b
Showing 8 changed files with 55 additions and 17 deletions.
1 change: 1 addition & 0 deletions superset/assets/package.json
@@ -55,6 +55,7 @@
     "font-awesome": "^4.6.3",
     "gridster": "^0.5.6",
     "immutability-helper": "^2.0.0",
+    "immutable": "^3.8.1",
     "jquery": "^2.2.1",
     "jquery-ui": "1.10.5",
     "mapbox-gl": "^0.26.0",
12 changes: 8 additions & 4 deletions superset/bin/superset
@@ -112,14 +112,18 @@ def load_examples(load_test_data):
         print("Loading [Unicode test data]")
         data.load_unicode_test_data()
 
-@manager.command
-def refresh_druid():
-    """Refresh all druid datasources"""
+@manager.option(
+    '-d', '--datasource',
+    help=(
+        "Specify which datasource name to load, if omitted, all "
+        "datasources will be refreshed"))
+def refresh_druid(datasource):
+    """Refresh druid datasources"""
     session = db.session()
     from superset import models
     for cluster in session.query(models.DruidCluster).all():
         try:
-            cluster.refresh_datasources()
+            cluster.refresh_datasources(datasource_name=datasource)
         except Exception as e:
             print(
                 "Error while processing cluster '{}'\n{}".format(
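The new -d/--datasource flag threads through to DruidCluster.refresh_datasources (see the superset/models.py hunk below). A minimal sketch of the equivalent programmatic call, assuming a hypothetical datasource named 'wikiticker':

    from superset import db, models

    session = db.session()
    for cluster in session.query(models.DruidCluster).all():
        # same effect as `superset refresh_druid -d wikiticker` on the CLI;
        # pass datasource_name=None (or omit it) to refresh everything
        cluster.refresh_datasources(datasource_name='wikiticker')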
1 change: 1 addition & 0 deletions superset/config.py
@@ -247,6 +247,7 @@ class CeleryConfig(object):
 
 try:
     from superset_config import * # noqa
+    print('Loaded your LOCAL configuration')
 except ImportError:
     pass
 
7 changes: 5 additions & 2 deletions superset/db_engine_specs.py
@@ -174,8 +174,11 @@ class PrestoEngineSpec(BaseEngineSpec):
 
     @classmethod
     def convert_dttm(cls, target_type, dttm):
-        if target_type.upper() in ('DATE', 'DATETIME'):
-            return "from_iso8601_date('{}')".format(dttm.isoformat())
+        tt = target_type.upper()
+        if tt == 'DATE':
+            return "from_iso8601_date('{}')".format(dttm.isoformat()[:10])
+        if tt == 'TIMESTAMP':
+            return "from_iso8601_timestamp('{}')".format(dttm.isoformat())
         return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S'))
 
     @classmethod
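The old code wrapped both DATE and DATETIME in from_iso8601_date() with a full ISO timestamp; the new code truncates DATE values to the date part and gives TIMESTAMP its own from_iso8601_timestamp() wrapper. A minimal sketch of the strings each branch now produces, assuming a naive datetime input:

    from datetime import datetime

    dttm = datetime(2016, 11, 12, 10, 30, 0)
    print("from_iso8601_date('{}')".format(dttm.isoformat()[:10]))
    # from_iso8601_date('2016-11-12')
    print("from_iso8601_timestamp('{}')".format(dttm.isoformat()))
    # from_iso8601_timestamp('2016-11-12T10:30:00')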
2 changes: 1 addition & 1 deletion superset/jinja_context.py
@@ -171,7 +171,7 @@ def latest_sub_partition(self, table_name, **kwargs):
         indexes = self.database.get_indexes(table_name, schema)
         part_fields = indexes[0]['column_names']
         for k in kwargs.keys():
-            if k not in k in part_field:
+            if k not in k in part_fields:
                 msg = "Field [{k}] is not part of the partionning key"
                 raise SupersetTemplateException(msg)
         if len(kwargs.keys()) != len(part_fields) - 1:
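Note that this hunk only fixes the part_field NameError; the guard is still a no-op because Python chains comparison operators, so the condition parses as (k not in k) and (k in part_fields), and a string is always a substring of itself. The intended check was presumably `if k not in part_fields:` (the message also never interpolates {k} and misspells "partitioning"). A quick illustration:

    k = 'ds'
    part_fields = ['ds', 'hour']
    print(k not in k in part_fields)  # False: 'ds' not in 'ds' is False
    print(k not in part_fields)       # False here, True for an unknown field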
42 changes: 34 additions & 8 deletions superset/models.py
@@ -1522,11 +1522,16 @@ def get_druid_version(self):
         ).format(obj=self)
         return json.loads(requests.get(endpoint).text)['version']
 
-    def refresh_datasources(self):
+    def refresh_datasources(self, datasource_name=None):
+        """Refresh metadata of all datasources in the cluster
+
+        If ``datasource_name`` is specified, only that datasource is updated
+        """
         self.druid_version = self.get_druid_version()
         for datasource in self.get_datasources():
             if datasource not in config.get('DRUID_DATA_SOURCE_BLACKLIST'):
-                DruidDatasource.sync_to_db(datasource, self)
+                if not datasource_name or datasource_name == datasource:
+                    DruidDatasource.sync_to_db(datasource, self)
 
     @property
     def perm(self):
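The added guard keeps the old refresh-everything behavior when no name is given. A minimal truth table for the predicate:

    for datasource_name, datasource in [(None, 'a'), ('a', 'a'), ('a', 'b')]:
        print(not datasource_name or datasource_name == datasource)
    # True (no filter: refresh all), True (name matches), False (skipped)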
@@ -1670,15 +1675,35 @@ def latest_metadata(self):
         # we need to set this interval to more than 1 day ago to exclude
         # realtime segments, which trigged a bug (fixed in druid 0.8.2).
         # https://groups.google.com/forum/#!topic/druid-user/gVCqqspHqOQ
-        start = (0 if self.version_higher(self.cluster.druid_version, '0.8.2') else 1)
-        intervals = (max_time - timedelta(days=7)).isoformat() + '/'
-        intervals += (max_time - timedelta(days=start)).isoformat()
-        segment_metadata = client.segment_metadata(
-            datasource=self.datasource_name,
-            intervals=intervals)
+        lbound = (max_time - timedelta(days=7)).isoformat()
+        rbound = max_time.isoformat()
+        if not self.version_higher(self.cluster.druid_version, '0.8.2'):
+            rbound = (max_time - timedelta(1)).isoformat()
+        segment_metadata = None
+        try:
+            segment_metadata = client.segment_metadata(
+                datasource=self.datasource_name,
+                intervals=lbound + '/' + rbound)
+        except Exception as e:
+            logging.warning("Failed first attempt to get latest segment")
+            logging.exception(e)
+        if not segment_metadata:
+            # if no segments in the past 7 days, look at all segments
+            lbound = datetime(1901, 1, 1).isoformat()[:10]
+            rbound = datetime(2050, 1, 1).isoformat()[:10]
+            if not self.version_higher(self.cluster.druid_version, '0.8.2'):
+                rbound = datetime.now().isoformat()[:10]
+            try:
+                segment_metadata = client.segment_metadata(
+                    datasource=self.datasource_name,
+                    intervals=lbound + '/' + rbound)
+            except Exception as e:
+                logging.warning("Failed 2nd attempt to get latest segment")
+                logging.exception(e)
         if segment_metadata:
             return segment_metadata[-1]['columns']
 
+
     def generate_metrics(self):
         for col in self.columns:
             col.generate_metrics()
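The rewritten lookup issues Druid interval strings of the form lbound/rbound, and falls back to a near-unbounded interval when the trailing-week query returns nothing. A sketch of the first attempt's interval, assuming a sample max_time:

    from datetime import datetime, timedelta

    max_time = datetime(2016, 11, 12, 8, 0, 0)
    lbound = (max_time - timedelta(days=7)).isoformat()
    rbound = max_time.isoformat()
    print(lbound + '/' + rbound)
    # 2016-11-05T08:00:00/2016-11-12T08:00:00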
@@ -1774,6 +1799,7 @@ def sync_to_db(cls, name, cluster):
 
         cols = datasource.latest_metadata()
         if not cols:
+            logging.error("Failed at fetching the latest segment")
             return
         for col in cols:
             col_obj = (
4 changes: 3 additions & 1 deletion superset/views.py
@@ -1307,7 +1307,9 @@ def import_dashboards(self):
     def explore(self, datasource_type, datasource_id):
         viz_type = request.args.get("viz_type")
         slice_id = request.args.get('slice_id')
-        slc = db.session.query(models.Slice).filter_by(id=slice_id).first()
+        slc = None
+        if slice_id:
+            slc = db.session.query(models.Slice).filter_by(id=slice_id).first()
 
         error_redirect = '/slicemodelview/list/'
         datasource_class = SourceRegistry.sources[datasource_type]
3 changes: 2 additions & 1 deletion superset/viz.py
@@ -441,7 +441,8 @@ def query_obj(self):
         if fd.get('all_columns'):
             d['columns'] = fd.get('all_columns')
             d['groupby'] = []
-        d['orderby'] = [json.loads(t) for t in fd.get('order_by_cols', [])]
+        order_by_cols = fd.get('order_by_cols', []) or []
+        d['orderby'] = [json.loads(t) for t in order_by_cols]
         return d
 
     def get_df(self, query_obj=None):
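The added `or []` matters because dict.get only falls back to its default when the key is missing, not when it is present with a None value, which form data can deliver. A quick illustration:

    fd = {'order_by_cols': None}
    print(fd.get('order_by_cols', []))        # None; iterating would raise TypeError
    print(fd.get('order_by_cols', []) or [])  # []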
