diff --git a/superset/assets/package.json b/superset/assets/package.json index 4f1400307495..33588b25f8f8 100644 --- a/superset/assets/package.json +++ b/superset/assets/package.json @@ -55,6 +55,7 @@ "font-awesome": "^4.6.3", "gridster": "^0.5.6", "immutability-helper": "^2.0.0", + "immutable": "^3.8.1", "jquery": "^2.2.1", "jquery-ui": "1.10.5", "mapbox-gl": "^0.26.0", diff --git a/superset/bin/superset b/superset/bin/superset index 3b5b52906ffd..3d711fc826ed 100755 --- a/superset/bin/superset +++ b/superset/bin/superset @@ -112,14 +112,18 @@ def load_examples(load_test_data): print("Loading [Unicode test data]") data.load_unicode_test_data() -@manager.command -def refresh_druid(): - """Refresh all druid datasources""" +@manager.option( + '-d', '--datasource', + help=( + "Specify which datasource name to load, if omitted, all " + "datasources will be refreshed")) +def refresh_druid(datasource): + """Refresh druid datasources""" session = db.session() from superset import models for cluster in session.query(models.DruidCluster).all(): try: - cluster.refresh_datasources() + cluster.refresh_datasources(datasource_name=datasource) except Exception as e: print( "Error while processing cluster '{}'\n{}".format( diff --git a/superset/config.py b/superset/config.py index b8dbd62f226d..dcd40c39b982 100644 --- a/superset/config.py +++ b/superset/config.py @@ -247,6 +247,7 @@ class CeleryConfig(object): try: from superset_config import * # noqa + print('Loaded your LOCAL configuration') except ImportError: pass diff --git a/superset/db_engine_specs.py b/superset/db_engine_specs.py index f5e19107b9e9..d1175352e9a6 100644 --- a/superset/db_engine_specs.py +++ b/superset/db_engine_specs.py @@ -174,8 +174,11 @@ class PrestoEngineSpec(BaseEngineSpec): @classmethod def convert_dttm(cls, target_type, dttm): - if target_type.upper() in ('DATE', 'DATETIME'): - return "from_iso8601_date('{}')".format(dttm.isoformat()) + tt = target_type.upper() + if tt == 'DATE': + return "from_iso8601_date('{}')".format(dttm.isoformat()[:10])
+ if tt == 'TIMESTAMP': + return "from_iso8601_timestamp('{}')".format(dttm.isoformat()) return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S')) @classmethod diff --git a/superset/jinja_context.py b/superset/jinja_context.py index 830466702178..94f56328b247 100644 --- a/superset/jinja_context.py +++ b/superset/jinja_context.py @@ -171,7 +171,7 @@ def latest_sub_partition(self, table_name, **kwargs): indexes = self.database.get_indexes(table_name, schema) part_fields = indexes[0]['column_names'] for k in kwargs.keys(): - if k not in k in part_field: + if k not in part_fields: msg = "Field [{k}] is not part of the partionning key" raise SupersetTemplateException(msg) if len(kwargs.keys()) != len(part_fields) - 1: diff --git a/superset/models.py b/superset/models.py index c91bfff3224d..4557317851c1 100644 --- a/superset/models.py +++ b/superset/models.py @@ -1522,11 +1522,16 @@ def get_druid_version(self): ).format(obj=self) return json.loads(requests.get(endpoint).text)['version'] - def refresh_datasources(self): + def refresh_datasources(self, datasource_name=None): + """Refresh metadata of all datasources in the cluster + + If ``datasource_name`` is specified, only that datasource is updated + """ self.druid_version = self.get_druid_version() for datasource in self.get_datasources(): if datasource not in config.get('DRUID_DATA_SOURCE_BLACKLIST'): - DruidDatasource.sync_to_db(datasource, self) + if not datasource_name or datasource_name == datasource: + DruidDatasource.sync_to_db(datasource, self) @property def perm(self): @@ -1670,15 +1675,35 @@ def latest_metadata(self): # we need to set this interval to more than 1 day ago to exclude # realtime segments, which trigged a bug (fixed in druid 0.8.2). 
# https://groups.google.com/forum/#!topic/druid-user/gVCqqspHqOQ - start = (0 if self.version_higher(self.cluster.druid_version, '0.8.2') else 1) - intervals = (max_time - timedelta(days=7)).isoformat() + '/' - intervals += (max_time - timedelta(days=start)).isoformat() - segment_metadata = client.segment_metadata( - datasource=self.datasource_name, - intervals=intervals) + lbound = (max_time - timedelta(days=7)).isoformat() + rbound = max_time.isoformat() + if not self.version_higher(self.cluster.druid_version, '0.8.2'): + rbound = (max_time - timedelta(1)).isoformat() + segment_metadata = None + try: + segment_metadata = client.segment_metadata( + datasource=self.datasource_name, + intervals=lbound + '/' + rbound) + except Exception as e: + logging.warning("Failed first attempt to get latest segment") + logging.exception(e) + if not segment_metadata: + # if no segments in the past 7 days, look at all segments + lbound = datetime(1901, 1, 1).isoformat()[:10] + rbound = datetime(2050, 1, 1).isoformat()[:10] + if not self.version_higher(self.cluster.druid_version, '0.8.2'): + rbound = datetime.now().isoformat()[:10] + try: + segment_metadata = client.segment_metadata( + datasource=self.datasource_name, + intervals=lbound + '/' + rbound) + except Exception as e: + logging.warning("Failed 2nd attempt to get latest segment") + logging.exception(e) if segment_metadata: return segment_metadata[-1]['columns'] + def generate_metrics(self): for col in self.columns: col.generate_metrics() @@ -1774,6 +1799,7 @@ def sync_to_db(cls, name, cluster): cols = datasource.latest_metadata() if not cols: + logging.error("Failed at fetching the latest segment") return for col in cols: col_obj = ( diff --git a/superset/views.py b/superset/views.py index 6ea81cb4f902..841c968b4376 100755 --- a/superset/views.py +++ b/superset/views.py @@ -1307,7 +1307,9 @@ def import_dashboards(self): def explore(self, datasource_type, datasource_id): viz_type = request.args.get("viz_type") slice_id = request.args.get('slice_id')
- slc = db.session.query(models.Slice).filter_by(id=slice_id).first() + slc = None + if slice_id: + slc = db.session.query(models.Slice).filter_by(id=slice_id).first() error_redirect = '/slicemodelview/list/' datasource_class = SourceRegistry.sources[datasource_type] diff --git a/superset/viz.py b/superset/viz.py index aca2ac740d46..ce4ddecd26ab 100755 --- a/superset/viz.py +++ b/superset/viz.py @@ -441,7 +441,8 @@ def query_obj(self): if fd.get('all_columns'): d['columns'] = fd.get('all_columns') d['groupby'] = [] - d['orderby'] = [json.loads(t) for t in fd.get('order_by_cols', [])] + order_by_cols = fd.get('order_by_cols', []) or [] + d['orderby'] = [json.loads(t) for t in order_by_cols] return d def get_df(self, query_obj=None):