Skip to content

Commit

Permalink
Merge pull request apache#52 from michellethomas/cherry_pick_annotations_empty_metrics
Browse files Browse the repository at this point in the history

Cherry pick annotations empty metrics
  • Loading branch information
michellethomas authored Jun 14, 2018
2 parents d59e564 + 6a70796 commit 8887517
Show file tree
Hide file tree
Showing 9 changed files with 99 additions and 20 deletions.
2 changes: 1 addition & 1 deletion superset/assets/.istanbul.yml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
verbose: true
instrumentation:
root: './javascripts'
root: './src'
extensions: ['.js', '.jsx']
excludes: [
'dist/**'
Expand Down
6 changes: 4 additions & 2 deletions superset/assets/src/explore/controls.jsx
Original file line number Diff line number Diff line change
Expand Up @@ -959,12 +959,14 @@ export const controls = {
},

timeseries_limit_metric: {
type: 'SelectControl',
type: 'MetricsControl',
label: t('Sort By'),
default: null,
description: t('Metric used to define the top series'),
mapStateToProps: state => ({
choices: (state.datasource) ? state.datasource.metrics_combo : [],
columns: state.datasource ? state.datasource.columns : [],
savedMetrics: state.datasource ? state.datasource.metrics : [],
datasourceType: state.datasource && state.datasource.type,
}),
},

Expand Down
3 changes: 2 additions & 1 deletion superset/assets/src/visualizations/heatmap.js
Original file line number Diff line number Diff line change
Expand Up @@ -177,11 +177,12 @@ function heatmapVis(slice, payload) {
const k = d3.mouse(this);
const m = Math.floor(scale[0].invert(k[0]));
const n = Math.floor(scale[1].invert(k[1]));
const metric = typeof fd.metric === 'object' ? fd.metric.label : fd.metric;
if (m in matrix && n in matrix[m]) {
const obj = matrix[m][n];
s += '<div><b>' + fd.all_columns_x + ': </b>' + obj.x + '<div>';
s += '<div><b>' + fd.all_columns_y + ': </b>' + obj.y + '<div>';
s += '<div><b>' + fd.metric + ': </b>' + valueFormatter(obj.v) + '<div>';
s += '<div><b>' + metric + ': </b>' + valueFormatter(obj.v) + '<div>';
if (fd.show_perc) {
s += '<div><b>%: </b>' + fp(fd.normalized ? obj.rank : obj.perc) + '<div>';
}
Expand Down
6 changes: 6 additions & 0 deletions superset/assets/src/visualizations/nvd3_vis.js
Original file line number Diff line number Diff line change
Expand Up @@ -784,6 +784,12 @@ export default function nvd3Vis(slice, payload) {
});
});
}

// rerender chart appended with annotation layer
svg.datum(data)
.attr('height', height)
.attr('width', width)
.call(chart);
}
}
return chart;
Expand Down
6 changes: 3 additions & 3 deletions superset/assets/src/visualizations/table.js
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ function tableVis(slice, payload) {
const data = payload.data;
const fd = slice.formData;

let metrics = fd.metrics || [];
let metrics = fd.metrics.map(m => m.label || m);
// Add percent metrics
metrics = metrics.concat((fd.percent_metrics || []).map(m => '%' + m));
// Removing metrics (aggregates) that are strings
Expand Down Expand Up @@ -187,15 +187,15 @@ function tableVis(slice, payload) {
let sortBy;
if (fd.timeseries_limit_metric) {
// Sort by as specified
sortBy = fd.timeseries_limit_metric;
sortBy = fd.timeseries_limit_metric.label || fd.timeseries_limit_metric;
} else if (metrics.length > 0) {
// If not specified, use the first metric from the list
sortBy = metrics[0];
}
if (sortBy) {
datatable.column(data.columns.indexOf(sortBy)).order(fd.order_desc ? 'desc' : 'asc');
}
if (fd.timeseries_limit_metric && metrics.indexOf(fd.timeseries_limit_metric) < 0) {
if (sortBy && metrics.indexOf(sortBy) < 0) {
// Hiding the sortBy column if not in the metrics list
datatable.column(data.columns.indexOf(sortBy)).visible(false);
}
Expand Down
23 changes: 16 additions & 7 deletions superset/connectors/druid/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -1098,6 +1098,18 @@ def _dimensions_to_values(dimensions):

return values

@staticmethod
def sanitize_metric_object(metric):
"""
Update a metric with the correct type if necessary.
:param dict metric: The metric to sanitize
"""
if (
utils.is_adhoc_metric(metric) and
metric['column']['type'].upper() == 'FLOAT'
):
metric['column']['type'] = 'DOUBLE'

def run_query( # noqa / druid
self,
groupby, metrics,
Expand Down Expand Up @@ -1141,11 +1153,8 @@ def run_query( # noqa / druid
LooseVersion(self.cluster.get_druid_version()) < LooseVersion('0.11.0')
):
for metric in metrics:
if (
utils.is_adhoc_metric(metric) and
metric['column']['type'].upper() == 'FLOAT'
):
metric['column']['type'] = 'DOUBLE'
self.sanitize_metric_object(metric)
self.sanitize_metric_object(timeseries_limit_metric)

aggregations, post_aggs = DruidDatasource.metrics_and_post_aggs(
metrics,
Expand Down Expand Up @@ -1201,7 +1210,7 @@ def run_query( # noqa / druid
logging.info('Running two-phase topn query for dimension [{}]'.format(dim))
pre_qry = deepcopy(qry)
if timeseries_limit_metric:
order_by = timeseries_limit_metric
order_by = utils.get_metric_name(timeseries_limit_metric)
aggs_dict, post_aggs_dict = DruidDatasource.metrics_and_post_aggs(
[timeseries_limit_metric],
metrics_dict)
Expand Down Expand Up @@ -1270,7 +1279,7 @@ def run_query( # noqa / druid
order_by = pre_qry_dims[0]

if timeseries_limit_metric:
order_by = timeseries_limit_metric
order_by = utils.get_metric_name(timeseries_limit_metric)
aggs_dict, post_aggs_dict = DruidDatasource.metrics_and_post_aggs(
[timeseries_limit_metric],
metrics_dict)
Expand Down
13 changes: 11 additions & 2 deletions superset/connectors/sqla/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -651,6 +651,8 @@ def get_sqla_query( # sqla

for col, ascending in orderby:
direction = asc if ascending else desc
if utils.is_adhoc_metric(col):
col = self.adhoc_metric_to_sa(col, cols)
qry = qry.order_by(direction(col))

if row_limit:
Expand All @@ -675,8 +677,15 @@ def get_sqla_query( # sqla

ob = inner_main_metric_expr
if timeseries_limit_metric:
timeseries_limit_metric = metrics_dict.get(timeseries_limit_metric)
ob = timeseries_limit_metric.sqla_col
if utils.is_adhoc_metric(timeseries_limit_metric):
ob = self.adhoc_metric_to_sa(timeseries_limit_metric, cols)
elif timeseries_limit_metric in metrics_dict:
timeseries_limit_metric = metrics_dict.get(
timeseries_limit_metric,
)
ob = timeseries_limit_metric.sqla_col
else:
raise Exception(_("Metric '{}' is not valid".format(m)))
direction = desc if order_desc else asc
subq = subq.order_by(direction(ob))
subq = subq.limit(timeseries_limit)
Expand Down
50 changes: 49 additions & 1 deletion superset/data/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -626,7 +626,8 @@ def load_birth_names():
'op': 'in',
'val': ['girl'],
}],
row_limit=50)),
row_limit=50,
timeseries_limit_metric='sum__num')),
Slice(
slice_name="Boys",
viz_type='table',
Expand Down Expand Up @@ -760,6 +761,53 @@ def load_birth_names():
},
viz_type="big_number_total",
granularity_sqla="ds")),
Slice(
slice_name='Top 10 California Names Timeseries',
viz_type='line',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
metrics=[{
'expressionType': 'SIMPLE',
'column': {
'column_name': 'num_california',
'expression': "CASE WHEN state = 'CA' THEN num ELSE 0 END",
},
'aggregate': 'SUM',
'label': 'SUM(num_california)',
}],
viz_type='line',
granularity_sqla='ds',
groupby=['name'],
timeseries_limit_metric={
'expressionType': 'SIMPLE',
'column': {
'column_name': 'num_california',
'expression': "CASE WHEN state = 'CA' THEN num ELSE 0 END",
},
'aggregate': 'SUM',
'label': 'SUM(num_california)',
},
limit='10')),
Slice(
slice_name="Names Sorted by Num in California",
viz_type='table',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
groupby=['name'],
row_limit=50,
timeseries_limit_metric={
'expressionType': 'SIMPLE',
'column': {
'column_name': 'num_california',
'expression': "CASE WHEN state = 'CA' THEN num ELSE 0 END",
},
'aggregate': 'SUM',
'label': 'SUM(num_california)',
})),
]
for slc in slices:
merge_slice(slc)
Expand Down
10 changes: 7 additions & 3 deletions superset/viz.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,7 @@ class BaseViz(object):
is_timeseries = False
default_fillna = 0
cache_type = 'df'
enforce_numerical_metrics = True

def __init__(self, datasource, form_data, force=False):
if not datasource:
Expand Down Expand Up @@ -205,7 +206,8 @@ def get_df(self, query_obj=None):
df[DTTM_ALIAS] += timedelta(hours=self.datasource.offset)
df[DTTM_ALIAS] += self.time_shift

self.df_metrics_to_num(df, query_obj.get('metrics') or [])
if self.enforce_numerical_metrics:
self.df_metrics_to_num(df, query_obj.get('metrics') or [])

df.replace([np.inf, -np.inf], np.nan)
fillna = self.get_fillna_for_columns(df.columns)
Expand Down Expand Up @@ -480,6 +482,7 @@ class TableViz(BaseViz):
verbose_name = _('Table View')
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
is_timeseries = False
enforce_numerical_metrics = False

def should_be_timeseries(self):
fd = self.form_data
Expand Down Expand Up @@ -510,7 +513,8 @@ def query_obj(self):
order_by_cols = fd.get('order_by_cols') or []
d['orderby'] = [json.loads(t) for t in order_by_cols]
elif sort_by:
if sort_by not in d['metrics']:
sort_by_label = utils.get_metric_name(sort_by)
if sort_by_label not in utils.get_metric_names(d['metrics']):
d['metrics'] += [sort_by]
d['orderby'] = [(sort_by, not fd.get('order_desc', True))]

Expand Down Expand Up @@ -552,7 +556,7 @@ def get_data(self, df):
m_name = '%' + m
df[m_name] = pd.Series(metric_percents[m], name=m_name)
# Remove metrics that are not in the main metrics list
metrics = fd.get('metrics', [])
metrics = fd.get('metrics') or []
metrics = [self.get_metric_label(m) for m in metrics]
for m in filter(
lambda m: m not in metrics and m in df.columns,
Expand Down

0 comments on commit 8887517

Please sign in to comment.