Merge pull request #5118 from michellethomas/add_metrics_control_sort_by

Adding the MetricsControl to the timeseries_limit_metric field
Commit 66ffcb665a, authored by timifasubaa on 2018-06-14 10:38:32 -07:00, committed by GitHub.
6 changed files with 85 additions and 16 deletions
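
Switching Sort By to a MetricsControl means the value stored in timeseries_limit_metric is no longer always a saved-metric name: it can also be an ad-hoc metric object. A minimal sketch of the two value shapes, modelled on the ad-hoc metric added to load_birth_names() further down (Python literals for illustration only):

# Sketch only: the two shapes the Sort By value can now take.

# 1. A saved metric, referenced by name:
saved_sort_by = 'sum__num'

# 2. An ad-hoc metric object built by MetricsControl:
adhoc_sort_by = {
    'expressionType': 'SIMPLE',
    'column': {
        'column_name': 'num_california',
        'expression': "CASE WHEN state = 'CA' THEN num ELSE 0 END",
    },
    'aggregate': 'SUM',
    'label': 'SUM(num_california)',  # downstream code sorts and hides by this label
}

The rest of the diff is about letting each datasource and visualization accept either shape.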

@@ -959,12 +959,14 @@ export const controls = {
   },
   timeseries_limit_metric: {
-    type: 'SelectControl',
+    type: 'MetricsControl',
     label: t('Sort By'),
     default: null,
     description: t('Metric used to define the top series'),
     mapStateToProps: state => ({
-      choices: (state.datasource) ? state.datasource.metrics_combo : [],
+      columns: state.datasource ? state.datasource.columns : [],
+      savedMetrics: state.datasource ? state.datasource.metrics : [],
+      datasourceType: state.datasource && state.datasource.type,
     }),
   },

@@ -17,7 +17,7 @@ function tableVis(slice, payload) {
   const data = payload.data;
   const fd = slice.formData;
-  let metrics = fd.metrics || [];
+  let metrics = fd.metrics.map(m => m.label || m);
   // Add percent metrics
   metrics = metrics.concat((fd.percent_metrics || []).map(m => '%' + m));
   // Removing metrics (aggregates) that are strings
@@ -187,7 +187,7 @@ function tableVis(slice, payload) {
   let sortBy;
   if (fd.timeseries_limit_metric) {
     // Sort by as specified
-    sortBy = fd.timeseries_limit_metric;
+    sortBy = fd.timeseries_limit_metric.label || fd.timeseries_limit_metric;
   } else if (metrics.length > 0) {
     // If not specified, use the first metric from the list
     sortBy = metrics[0];
@@ -195,7 +195,7 @@ function tableVis(slice, payload) {
   if (sortBy) {
     datatable.column(data.columns.indexOf(sortBy)).order(fd.order_desc ? 'desc' : 'asc');
   }
-  if (fd.timeseries_limit_metric && metrics.indexOf(fd.timeseries_limit_metric) < 0) {
+  if (sortBy && metrics.indexOf(sortBy) < 0) {
     // Hiding the sortBy column if not in the metrics list
     datatable.column(data.columns.indexOf(sortBy)).visible(false);
   }

@@ -1068,6 +1068,18 @@ class DruidDatasource(Model, BaseDatasource):
         return values

+    @staticmethod
+    def sanitize_metric_object(metric):
+        """
+        Update a metric with the correct type if necessary.
+        :param dict metric: The metric to sanitize
+        """
+        if (
+            utils.is_adhoc_metric(metric) and
+            metric['column']['type'].upper() == 'FLOAT'
+        ):
+            metric['column']['type'] = 'DOUBLE'
+
     def run_query(  # noqa / druid
             self,
             groupby, metrics,
@@ -1111,11 +1123,8 @@ class DruidDatasource(Model, BaseDatasource):
             LooseVersion(self.cluster.get_druid_version()) < LooseVersion('0.11.0')
         ):
             for metric in metrics:
-                if (
-                    utils.is_adhoc_metric(metric) and
-                    metric['column']['type'].upper() == 'FLOAT'
-                ):
-                    metric['column']['type'] = 'DOUBLE'
+                self.sanitize_metric_object(metric)
+            self.sanitize_metric_object(timeseries_limit_metric)

         aggregations, post_aggs = DruidDatasource.metrics_and_post_aggs(
             metrics,
@@ -1171,7 +1180,7 @@ class DruidDatasource(Model, BaseDatasource):
             logging.info('Running two-phase topn query for dimension [{}]'.format(dim))
             pre_qry = deepcopy(qry)
             if timeseries_limit_metric:
-                order_by = timeseries_limit_metric
+                order_by = utils.get_metric_name(timeseries_limit_metric)
                 aggs_dict, post_aggs_dict = DruidDatasource.metrics_and_post_aggs(
                     [timeseries_limit_metric],
                     metrics_dict)
@@ -1240,7 +1249,7 @@ class DruidDatasource(Model, BaseDatasource):
                 order_by = pre_qry_dims[0]

             if timeseries_limit_metric:
-                order_by = timeseries_limit_metric
+                order_by = utils.get_metric_name(timeseries_limit_metric)
                 aggs_dict, post_aggs_dict = DruidDatasource.metrics_and_post_aggs(
                     [timeseries_limit_metric],
                     metrics_dict)
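
Both Druid hunks now resolve the order-by column through utils.get_metric_name, and the FLOAT-to-DOUBLE workaround moves into the reusable sanitize_metric_object staticmethod so the sort-by metric gets the same treatment as the regular metrics list. A hedged sketch of that coercion, with utils.is_adhoc_metric reduced to a simple isinstance check (an assumption; the real helper inspects the dict more carefully):

# Sketch, not Superset code: Druid < 0.11.0 has no FLOAT aggregators, so
# ad-hoc metrics defined on FLOAT columns are coerced to DOUBLE in place.

def is_adhoc_metric(metric):                 # simplified stand-in
    return isinstance(metric, dict) and 'expressionType' in metric

def sanitize_metric_object(metric):
    if is_adhoc_metric(metric) and metric['column']['type'].upper() == 'FLOAT':
        metric['column']['type'] = 'DOUBLE'

sort_by = {
    'expressionType': 'SIMPLE',
    'column': {'column_name': 'value', 'type': 'FLOAT'},
    'aggregate': 'SUM',
    'label': 'SUM(value)',
}
sanitize_metric_object(sort_by)    # ad-hoc metric: FLOAT -> DOUBLE
sanitize_metric_object('count')    # saved metric name: left untouched
assert sort_by['column']['type'] == 'DOUBLE'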

@@ -651,6 +651,8 @@ class SqlaTable(Model, BaseDatasource):
         for col, ascending in orderby:
             direction = asc if ascending else desc
+            if utils.is_adhoc_metric(col):
+                col = self.adhoc_metric_to_sa(col, cols)
             qry = qry.order_by(direction(col))

         if row_limit:
@@ -675,8 +677,15 @@ class SqlaTable(Model, BaseDatasource):
                 ob = inner_main_metric_expr
                 if timeseries_limit_metric:
-                    timeseries_limit_metric = metrics_dict.get(timeseries_limit_metric)
-                    ob = timeseries_limit_metric.sqla_col
+                    if utils.is_adhoc_metric(timeseries_limit_metric):
+                        ob = self.adhoc_metric_to_sa(timeseries_limit_metric, cols)
+                    elif timeseries_limit_metric in metrics_dict:
+                        timeseries_limit_metric = metrics_dict.get(
+                            timeseries_limit_metric,
+                        )
+                        ob = timeseries_limit_metric.sqla_col
+                    else:
+                        raise Exception(_("Metric '{}' is not valid".format(m)))
                 direction = desc if order_desc else asc
                 subq = subq.order_by(direction(ob))
                 subq = subq.limit(timeseries_limit)
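
The SQLA path now orders the inner time-series subquery by either kind of metric: ad-hoc metrics are compiled to a SQLAlchemy expression via adhoc_metric_to_sa, saved metrics are looked up in metrics_dict, and anything else raises. A minimal sketch of that dispatch with stand-in names (SavedMetric and resolve_order_by are illustrative, and the ad-hoc compilation is heavily simplified):

# Sketch of the three-way dispatch: ad-hoc dict -> build an expression,
# saved metric name -> use its sqla_col, anything else -> error.
from sqlalchemy import Column, Integer, func, literal_column

class SavedMetric:
    """Stand-in for a saved SqlMetric exposing a .sqla_col expression."""
    def __init__(self, column_name):
        self.sqla_col = func.sum(Column(column_name, Integer))

metrics_dict = {'sum__num': SavedMetric('num')}

def resolve_order_by(timeseries_limit_metric):
    if isinstance(timeseries_limit_metric, dict):          # ad-hoc metric
        col = timeseries_limit_metric['column']
        # adhoc_metric_to_sa() would build this from the dict; simplified here
        return func.sum(literal_column(col.get('expression') or col['column_name']))
    elif timeseries_limit_metric in metrics_dict:           # saved metric
        return metrics_dict[timeseries_limit_metric].sqla_col
    raise ValueError("Metric '{}' is not valid".format(timeseries_limit_metric))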

@@ -629,7 +629,8 @@ def load_birth_names():
                     'op': 'in',
                     'val': ['girl'],
                 }],
-                row_limit=50)),
+                row_limit=50,
+                timeseries_limit_metric='sum__num')),
         Slice(
             slice_name="Boys",
             viz_type='table',
@@ -763,6 +764,53 @@ def load_birth_names():
                 },
                 viz_type="big_number_total",
                 granularity_sqla="ds")),
+        Slice(
+            slice_name='Top 10 California Names Timeseries',
+            viz_type='line',
+            datasource_type='table',
+            datasource_id=tbl.id,
+            params=get_slice_json(
+                defaults,
+                metrics=[{
+                    'expressionType': 'SIMPLE',
+                    'column': {
+                        'column_name': 'num_california',
+                        'expression': "CASE WHEN state = 'CA' THEN num ELSE 0 END",
+                    },
+                    'aggregate': 'SUM',
+                    'label': 'SUM(num_california)',
+                }],
+                viz_type='line',
+                granularity_sqla='ds',
+                groupby=['name'],
+                timeseries_limit_metric={
+                    'expressionType': 'SIMPLE',
+                    'column': {
+                        'column_name': 'num_california',
+                        'expression': "CASE WHEN state = 'CA' THEN num ELSE 0 END",
+                    },
+                    'aggregate': 'SUM',
+                    'label': 'SUM(num_california)',
+                },
+                limit='10')),
+        Slice(
+            slice_name="Names Sorted by Num in California",
+            viz_type='table',
+            datasource_type='table',
+            datasource_id=tbl.id,
+            params=get_slice_json(
+                defaults,
+                groupby=['name'],
+                row_limit=50,
+                timeseries_limit_metric={
+                    'expressionType': 'SIMPLE',
+                    'column': {
+                        'column_name': 'num_california',
+                        'expression': "CASE WHEN state = 'CA' THEN num ELSE 0 END",
+                    },
+                    'aggregate': 'SUM',
+                    'label': 'SUM(num_california)',
+                })),
     ]
     for slc in slices:
         merge_slice(slc)

@@ -516,7 +516,8 @@ class TableViz(BaseViz):
             order_by_cols = fd.get('order_by_cols') or []
             d['orderby'] = [json.loads(t) for t in order_by_cols]
         elif sort_by:
-            if sort_by not in d['metrics']:
+            sort_by_label = utils.get_metric_name(sort_by)
+            if sort_by_label not in utils.get_metric_names(d['metrics']):
                 d['metrics'] += [sort_by]
             d['orderby'] = [(sort_by, not fd.get('order_desc', True))]
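
TableViz now compares labels rather than raw values before appending the sort-by metric, since d['metrics'] can mix saved-metric strings with ad-hoc dicts. A hedged sketch of the behaviour assumed of utils.get_metric_name / utils.get_metric_names (an ad-hoc dict is identified by its label, a saved metric by its name string):

# Sketch of the assumed helpers; not the actual superset.utils implementation.

def get_metric_name(metric):
    return metric['label'] if isinstance(metric, dict) else metric

def get_metric_names(metrics):
    return [get_metric_name(m) for m in metrics]

adhoc = {'expressionType': 'SIMPLE', 'aggregate': 'SUM', 'label': 'SUM(num)',
         'column': {'column_name': 'num'}}

# Labels give both shapes a common identity before the membership check.
assert get_metric_names(['count', adhoc]) == ['count', 'SUM(num)']
assert get_metric_name(adhoc) in get_metric_names(['count', adhoc])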