[flake8] Fixing additional flake8 issue w/ the presence of ignore (#4474)

John Bodley 2018-02-23 14:46:26 -08:00 committed by Grace Guo
parent cacf53c92e
commit 8aac63e74c
7 changed files with 51 additions and 44 deletions
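Background for the title: pycodestyle only applies its built-in default ignore list when no ignore option is configured at all. Declaring ignore = in the flake8 config, even with an empty value, overrides that default, so every previously skipped check (E121/E123/E126 continuation-line indents, E704 one-line defs, W503 operator placement, among others) starts firing, and this commit cleans up the violations that surfaced. A quick way to inspect the default list, as a minimal sketch against the pycodestyle API of that era:

import pycodestyle

# The checks pycodestyle skips when no `ignore` option is supplied;
# in the releases current at the time this printed roughly
# 'E121,E123,E126,E226,E24,E704,W503'.
print(pycodestyle.DEFAULT_IGNORE)

# Any explicit ignore setting, even an empty one, replaces this list
# outright, which is why a bare `ignore =` surfaces new errors.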

.gitignore

@@ -26,6 +26,7 @@ app.db
 *.sqllite
 .vscode
 .python-version
+.tox
 
 # Node.js, webpack artifacts
 *.entry.js

superset/connectors/druid/models.py

@@ -29,7 +29,7 @@ from sqlalchemy.orm import backref, relationship
 from superset import conf, db, import_util, sm, utils
 from superset.connectors.base.models import BaseColumn, BaseDatasource, BaseMetric
 from superset.models.helpers import (
-        AuditMixinNullable, ImportMixin, QueryResult, set_perm,
+    AuditMixinNullable, ImportMixin, QueryResult, set_perm,
 )
 from superset.utils import (
     DimSelector, DTTM_ALIAS, flasher, MetricPermException,
@@ -582,11 +582,11 @@ class DruidDatasource(Model, BaseDatasource):
         v1nums = (v1nums + [0, 0, 0])[:3]
         v2nums = (v2nums + [0, 0, 0])[:3]
         return (
-                v1nums[0] > v2nums[0] or
-                (v1nums[0] == v2nums[0] and v1nums[1] > v2nums[1]) or
-                (v1nums[0] == v2nums[0] and v1nums[1] == v2nums[1] and
-                    v1nums[2] > v2nums[2])
-            )
+            v1nums[0] > v2nums[0] or
+            (v1nums[0] == v2nums[0] and v1nums[1] > v2nums[1]) or
+            (v1nums[0] == v2nums[0] and v1nums[1] == v2nums[1] and
+             v1nums[2] > v2nums[2])
+        )
 
     def latest_metadata(self):
         """Returns segment metadata from the latest segment"""
@@ -869,8 +869,8 @@
     def resolve_postagg(postagg, post_aggs, agg_names, visited_postaggs, metrics_dict):
         mconf = postagg.json_obj
         required_fields = set(
-            DruidDatasource.recursive_get_fields(mconf)
-            + mconf.get('fieldNames', []))
+            DruidDatasource.recursive_get_fields(mconf) +
+            mconf.get('fieldNames', []))
         # Check if the fields are already in aggs
         # or is a previous postagg
         required_fields = set([
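The second hunk above moves the + from the start of the continuation line to the end of the previous one. That is a W503 ("line break before binary operator") fix: W503 sits in pycodestyle's default ignore list, so it only began firing once the empty ignore = took effect. A minimal standalone illustration, with made-up field lists:

# Hypothetical inputs, for illustration only.
recursive_fields = ['f1', 'f2']
field_names = ['f2', 'f3']

# Old style: the binary operator opens the continuation line (W503).
required_fields = set(
    recursive_fields
    + field_names)

# New style, as in resolve_postagg above: the operator closes the
# first line instead, so W503 no longer fires.
required_fields = set(
    recursive_fields +
    field_names)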

superset/forms.py

@@ -35,8 +35,8 @@ class CsvToDatabaseForm(DynamicForm):
         validators=[
             FileRequired(), FileAllowed(['csv'], _('CSV Files Only!'))])
     con = QuerySelectField(
-            query_factory=all_db_items,
-            get_pk=lambda a: a.id, get_label=lambda a: a.database_name)
+        query_factory=all_db_items,
+        get_pk=lambda a: a.id, get_label=lambda a: a.database_name)
     sep = StringField(
         _('Delimiter'),
         description=_('Delimiter used by CSV file (for whitespace use \s+).'),

superset/models/helpers.py

@@ -61,8 +61,9 @@ class ImportMixin(object):
             if parent_ref:
                 parent_excludes = {c.name for c in parent_ref.local_columns}
 
-        def formatter(c): return ('{0} Default ({1})'.format(
-            str(c.type), c.default.arg) if c.default else str(c.type))
+        def formatter(c):
+            return ('{0} Default ({1})'.format(
+                str(c.type), c.default.arg) if c.default else str(c.type))
 
         schema = {c.name: formatter(c) for c in cls.__table__.columns
                   if (c.name in cls.export_fields and
@@ -96,7 +97,7 @@
             for p in parent_refs.keys():
                 if p not in dict_rep:
                     raise RuntimeError(
-                            '{0}: Missing field {1}'.format(cls.__name__, p))
+                        '{0}: Missing field {1}'.format(cls.__name__, p))
         else:
             # Set foreign keys to parent obj
             for k, v in parent_refs.items():
@@ -176,19 +177,22 @@
                     if (c.name in self.export_fields and
                         c.name not in parent_excludes and
                         (include_defaults or (
-                                getattr(self, c.name) is not None and
-                                (not c.default or
-                                    getattr(self, c.name) != c.default.arg))))
+                            getattr(self, c.name) is not None and
+                            (not c.default or
+                             getattr(self, c.name) != c.default.arg))))
         }
 
         if recursive:
             for c in self.export_children:
                 # sorting to make lists of children stable
-                dict_rep[c] = sorted([child.export_to_dict(
-                    recursive=recursive,
-                    include_parent_ref=include_parent_ref,
-                    include_defaults=include_defaults)
-                    for child in getattr(self, c)],
-                    key=lambda k: sorted(k.items()))
+                dict_rep[c] = sorted(
+                    [
+                        child.export_to_dict(
+                            recursive=recursive,
+                            include_parent_ref=include_parent_ref,
+                            include_defaults=include_defaults,
+                        ) for child in getattr(self, c)
+                    ],
+                    key=lambda k: sorted(k.items()))
 
         return dict_rep
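The formatter change in the first hunk above is an E704 fix ("statement on same line as def"), another check that is ignored by default and re-enabled here: the one-line def gets its body moved onto its own indented line. Reduced to a self-contained sketch:

# E704: the statement shares a line with the def header.
def double(x): return 2 * x

# Compliant form: same behavior, body on its own line.
def double_fixed(x):
    return 2 * x

print(double(21), double_fixed(21))  # 42 42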

superset/views/core.py

@@ -350,7 +350,7 @@ class CsvToDatabaseView(SimpleFormView):
             except OSError:
                 pass
             message = u'Table name {} already exists. Please pick another'.format(
-                    form.name.data) if isinstance(e, IntegrityError) else text_type(e)
+                form.name.data) if isinstance(e, IntegrityError) else text_type(e)
             flash(
                 message,
                 'danger')
@@ -993,10 +993,10 @@ class Superset(BaseSupersetView):
     def slice(self, slice_id):
         viz_obj = self.get_viz(slice_id)
         endpoint = '/superset/explore/{}/{}?form_data={}'.format(
-                viz_obj.datasource.type,
-                viz_obj.datasource.id,
-                parse.quote(json.dumps(viz_obj.form_data)),
-            )
+            viz_obj.datasource.type,
+            viz_obj.datasource.id,
+            parse.quote(json.dumps(viz_obj.form_data)),
+        )
         if request.args.get('standalone') == 'true':
             endpoint += '&standalone=true'
         return redirect(endpoint)
@@ -1098,9 +1098,9 @@
                                  'val': layer_id}]
         datasource = AnnotationDatasource()
         viz_obj = viz.viz_types['table'](
-                datasource,
-                form_data=form_data,
-                force=False,
+            datasource,
+            form_data=form_data,
+            force=False,
         )
         try:
             payload = viz_obj.get_payload()
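The two hunks in this file, like the QuerySelectField and dict_rep hunks elsewhere in the commit, change only leading whitespace, which is why the removed and added lines read almost identically: continuation arguments drop from a double to a single four-space hanging indent and closing brackets return to the indentation of the opening line, satisfying E126 and E123 once they are no longer ignored. A hypothetical call showing the pattern:

datasource_type, datasource_id = 'table', 42

# Over-indented hanging indent (E126), with the closing bracket left
# at the content's indentation (E123); both were tolerated only while
# those checks sat in the default ignore list.
endpoint = '/superset/explore/{}/{}'.format(
        datasource_type,
        datasource_id,
        )

# Compliant: four-space hanging indent, closing bracket back at the
# indentation of the line that opened it.
endpoint = '/superset/explore/{}/{}'.format(
    datasource_type,
    datasource_id,
)
print(endpoint)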

tests/dict_import_export_tests.py

@@ -11,7 +11,8 @@ import yaml
 
 from superset import db
 from superset.connectors.druid.models import (
-    DruidColumn, DruidDatasource, DruidMetric)
+    DruidColumn, DruidDatasource, DruidMetric,
+)
 from superset.connectors.sqla.models import SqlaTable, SqlMetric, TableColumn
 
 from .base_tests import SupersetTestCase
@@ -81,12 +82,12 @@ class DictImportExportTests(SupersetTestCase):
         cluster_name = 'druid_test'
         params = {DBREF: id, 'database_name': cluster_name}
         dict_rep = {
-                'cluster_name': cluster_name,
-                'datasource_name': name,
-                'id': id,
-                'params': json.dumps(params),
-                'columns': [{'column_name': c} for c in cols_names],
-                'metrics': [{'metric_name': c} for c in metric_names],
+            'cluster_name': cluster_name,
+            'datasource_name': name,
+            'id': id,
+            'params': json.dumps(params),
+            'columns': [{'column_name': c} for c in cols_names],
+            'metrics': [{'metric_name': c} for c in metric_names],
         }
 
         datasource = DruidDatasource(
@@ -180,12 +181,12 @@
         imported_table = SqlaTable.import_from_dict(db.session, dict_table)
         db.session.commit()
         table_over, dict_table_over = self.create_table(
-                'table_override', id=ID_PREFIX + 3,
-                cols_names=['new_col1', 'col2', 'col3'],
-                metric_names=['new_metric1'])
+            'table_override', id=ID_PREFIX + 3,
+            cols_names=['new_col1', 'col2', 'col3'],
+            metric_names=['new_metric1'])
         imported_over_table = SqlaTable.import_from_dict(
-                db.session,
-                dict_table_over)
+            db.session,
+            dict_table_over)
         db.session.commit()
 
         imported_over = self.get_table(imported_over_table.id)
@@ -289,8 +290,8 @@
             cols_names=['new_col1', 'col2', 'col3'],
             metric_names=['new_metric1'])
         imported_over_cluster = DruidDatasource.import_from_dict(
-                db.session,
-                table_over_dict)
+            db.session,
+            table_over_dict)
         db.session.commit()
         imported_over = self.get_datasource(imported_over_cluster.id)
         self.assertEquals(imported_cluster.id, imported_over.id)

tox.ini

@@ -16,6 +16,7 @@ exclude =
     superset/data
     superset/migrations
     superset/templates
+ignore =
 import-order-style = google
 max-line-length = 90
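This empty ignore = line is the trigger the commit title refers to: flake8 hands it to pycodestyle as an explicit option, wiping out the built-in default ignore list, so the checks fixed throughout this commit (E123, E126, E704, W503, and the rest) become part of the lint run. A minimal sketch of the same configuration driven through flake8's legacy Python API instead of tox.ini (the file path is illustrative):

from flake8.api import legacy as flake8

# Mirrors the tox.ini above: an explicit empty ignore list overrides
# pycodestyle's defaults, and 90 matches max-line-length.
style_guide = flake8.get_style_guide(ignore=[], max_line_length=90)
report = style_guide.check_files(['superset/models/helpers.py'])
print(report.total_errors)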