diff --git a/CHANGELOG.txt b/CHANGELOG.txt index 21a87c73948..3a117515ddf 100644 --- a/CHANGELOG.txt +++ b/CHANGELOG.txt @@ -1,6 +1,29 @@ CKAN CHANGELOG ++++++++++++++ +v1.8 +==== + +* [#2592,#2428] Ubuntu 12.04 Precise is now supported for CKAN source installs. + The source install instructions have been updated and simplified. + Some of CKAN's dependencies have been updated and some removed. +* Requirements have been updated; see doc/install-from-source.rst. + Users will need to do a new pip install (#2592) +* [#2304] New 'follow' feature. You'll now see a 'Followers' tab on user and + dataset pages, where you can see how many users are following that user or + dataset. If you're logged in, you'll see a 'Follow' button on the pages of + datasets and other users that you can click to follow them. There are also + API calls for the follow features; see the Action API reference + documentation. +* [#2305] New user dashboards, implemented by Sven R. Kunze + (https://github.com/kunsv) as part of his Master's thesis. When logged in, if + you go to your own user page you'll see a new 'Dashboard' tab where you can + see an activity stream of all the users and datasets that you're + following. +* [#2345] New action API reference docs. The documentation for CKAN's Action + API has been rewritten, with each function and its arguments and return + values now individually documented. + v1.7.1 2012-06-20 ================= diff --git a/ckan/config/deployment.ini_tmpl b/ckan/config/deployment.ini_tmpl index d3c0199687f..d66d6543248 100644 --- a/ckan/config/deployment.ini_tmpl +++ b/ckan/config/deployment.ini_tmpl @@ -24,7 +24,7 @@ app_instance_uuid = ${app_instance_uuid} # List the names of CKAN extensions to activate. # Note: This line is required to be here for packaging, even if it is empty. -ckan.plugins = stats synchronous_search +ckan.plugins = stats # If you'd like to fine-tune the individual locations of the cache data dirs # for the Cache data, or the Session saves, un-comment the desired settings @@ -112,6 +112,11 @@ ckan.gravatar_default = identicon ## Solr support #solr_url = http://127.0.0.1:8983/solr +## Automatic indexing. Make all changes immediately available via the search +## after editing or creating a dataset. Default is true. If for some reason +## you need the indexing to occur asynchronously, set this option to 0. +# ckan.search.automatic_indexing = 1 + ## An 'id' for the site (using, for example, when creating entries in a common search index) ## If not specified derived from the site_url # ckan.site_id = ckan.net diff --git a/ckan/config/environment.py b/ckan/config/environment.py index cc56477b79f..d388937ff33 100644 --- a/ckan/config/environment.py +++ b/ckan/config/environment.py @@ -19,10 +19,12 @@ import ckan.lib.search as search import ckan.lib.app_globals as app_globals +log = logging.getLogger(__name__) # Suppress benign warning 'Unbuilt egg for setuptools' warnings.simplefilter('ignore', UserWarning) + class _Helpers(object): ''' Helper object giving access to template helpers stopping missing functions from causing template exceptions. Useful if @@ -93,13 +95,16 @@ def load_environment(global_conf, app_conf): from pylons.wsgiapp import PylonsApp import pkg_resources find_controller_generic = PylonsApp.find_controller + # This is from pylons 1.0 source, will monkey-patch into 0.9.7 def find_controller(self, controller): if controller in self.controller_classes: return self.controller_classes[controller] # Check to see if its a dotted name if '.'
in controller or ':' in controller: - mycontroller = pkg_resources.EntryPoint.parse('x=%s' % controller).load(False) + mycontroller = pkg_resources \ + .EntryPoint \ + .parse('x=%s' % controller).load(False) self.controller_classes[controller] = mycontroller return mycontroller return find_controller_generic(self, controller) @@ -122,6 +127,13 @@ def find_controller(self, controller): # load all CKAN plugins p.load_all(config) + # Load the synchronous search plugin, unless already loaded or + # explicitly disabled + if not 'synchronous_search' in config.get('ckan.plugins') and \ + asbool(config.get('ckan.search.automatic_indexing', True)): + log.debug('Loading the synchronous search plugin') + p.load('synchronous_search') + for plugin in p.PluginImplementations(p.IConfigurer): # must do update in place as this does not work: # config = plugin.update_config(config) @@ -152,11 +164,13 @@ def find_controller(self, controller): config['pylons.app_globals'] = app_globals.Globals() # add helper functions - restrict_helpers = asbool(config.get('ckan.restrict_template_vars', 'true')) + restrict_helpers = asbool( + config.get('ckan.restrict_template_vars', 'true')) helpers = _Helpers(h, restrict_helpers) config['pylons.h'] = helpers - ## redo template setup to use genshi.search_path (so remove std template setup) + # Redo template setup to use genshi.search_path + # (so remove std template setup) template_paths = [paths['templates'][0]] extra_template_paths = config.get('extra_template_paths', '') if extra_template_paths: @@ -165,6 +179,7 @@ def find_controller(self, controller): # Translator (i18n) translator = Translator(pylons.translator) + def template_loaded(template): translator.setup(template) @@ -195,8 +210,6 @@ def template_loaded(template): # # ################################################################# - - ''' This code is based on Genshi code @@ -265,19 +278,29 @@ def genshi_lookup_attr(cls, obj, key): # Setup the SQLAlchemy database engine # Suppress a couple of sqlalchemy warnings - warnings.filterwarnings('ignore', '^Unicode type received non-unicode bind param value', sqlalchemy.exc.SAWarning) - warnings.filterwarnings('ignore', "^Did not recognize type 'BIGINT' of column 'size'", sqlalchemy.exc.SAWarning) - warnings.filterwarnings('ignore', "^Did not recognize type 'tsvector' of column 'search_vector'", sqlalchemy.exc.SAWarning) + msgs = ['^Unicode type received non-unicode bind param value', + "^Did not recognize type 'BIGINT' of column 'size'", + "^Did not recognize type 'tsvector' of column 'search_vector'" + ] + for msg in msgs: + warnings.filterwarnings('ignore', msg, sqlalchemy.exc.SAWarning) - ckan_db = os.environ.get('CKAN_DB') + ckan_db = os.environ.get('CKAN_DB') if ckan_db: config['sqlalchemy.url'] = ckan_db - engine = sqlalchemy.engine_from_config(config, 'sqlalchemy.') + + # for postgresql we want to enforce utf-8 + sqlalchemy_url = config.get('sqlalchemy.url', '') + if sqlalchemy_url.startswith('postgresql://'): + extras = {'client_encoding': 'utf8'} + else: + extras = {} + + engine = sqlalchemy.engine_from_config(config, 'sqlalchemy.', **extras) if not model.meta.engine: model.init_model(engine) for plugin in p.PluginImplementations(p.IConfigurable): plugin.configure(config) - diff --git a/ckan/config/middleware.py b/ckan/config/middleware.py index f0dca730d62..05b1887b096 100644 --- a/ckan/config/middleware.py +++ b/ckan/config/middleware.py @@ -20,7 +20,7 @@ from ckan.plugins import PluginImplementations from ckan.plugins.interfaces import IMiddleware -from 
ckan.lib.i18n import get_locales +from ckan.lib.i18n import get_locales_from_config from ckan.config.environment import load_environment @@ -145,7 +145,7 @@ class I18nMiddleware(object): def __init__(self, app, config): self.app = app self.default_locale = config.get('ckan.locale_default', 'en') - self.local_list = get_locales() + self.local_list = get_locales_from_config() def __call__(self, environ, start_response): # strip the language selector from the requested url diff --git a/ckan/config/routing.py b/ckan/config/routing.py index 3890032817f..b529d90eed7 100644 --- a/ckan/config/routing.py +++ b/ckan/config/routing.py @@ -252,6 +252,7 @@ def make_map(): m.connect('/user/edit', action='edit') # Note: openid users have slashes in their ids, so need the wildcard # in the route. + m.connect('/user/dashboard', action='dashboard') m.connect('/user/followers/{id:.*}', action='followers') m.connect('/user/edit/{id:.*}', action='edit') m.connect('/user/reset/{id:.*}', action='perform_reset') diff --git a/ckan/config/solr/CHANGELOG.txt b/ckan/config/solr/CHANGELOG.txt index 1e4e67f6ff6..eb600e042cc 100644 --- a/ckan/config/solr/CHANGELOG.txt +++ b/ckan/config/solr/CHANGELOG.txt @@ -8,6 +8,8 @@ v1.4 - (ckan>=1.7) * Add title_string so you can sort alphabetically on title. * Fields related to analytics, access and view counts. * Add data_dict field for the whole package_dict. +* Add vocab_* dynamic field so it is possible to facet by vocabulary tags +* Add copyField for text with source vocab_* v1.3 - (ckan>=1.5.1) -------------------- diff --git a/ckan/config/solr/schema-1.4.xml b/ckan/config/solr/schema-1.4.xml index 0409e71b14b..d98b9c56f5c 100644 --- a/ckan/config/solr/schema-1.4.xml +++ b/ckan/config/solr/schema-1.4.xml @@ -153,6 +153,7 @@ + @@ -165,6 +166,7 @@ + diff --git a/ckan/controllers/admin.py b/ckan/controllers/admin.py index 6c3d48a1fd2..1c14b351939 100644 --- a/ckan/controllers/admin.py +++ b/ckan/controllers/admin.py @@ -5,7 +5,8 @@ from ckan.model.authz import Role roles = Role.get_all() -role_tuples = [(x,x) for x in roles] +role_tuples = [(x, x) for x in roles] + def get_sysadmins(): q = model.Session.query(model.SystemRole).filter_by(role=model.Role.ADMIN) @@ -18,10 +19,9 @@ def __before__(self, action, **params): if not ckan.authz.Authorizer().is_sysadmin(unicode(c.user)): abort(401, _('Need to be system administrator to administer')) c.revision_change_state_allowed = ( - c.user and - self.authorizer.is_authorized(c.user, model.Action.CHANGE_STATE, - model.Revision) - ) + c.user and self.authorizer.is_authorized(c.user, + model.Action.CHANGE_STATE, + model.Revision)) def index(self): #now pass the list of sysadmins @@ -29,7 +29,6 @@ def index(self): return render('admin/index.html') - def authz(self): def action_save_form(users_or_authz_groups): # The permissions grid has been saved @@ -37,69 +36,82 @@ def action_save_form(users_or_authz_groups): rpi = request.params.items() # The grid passes us a list of the users/roles that were displayed - submitted = [ a for (a,b) in rpi if (b == u'submitted')] + submitted = [a for (a, b) in rpi if (b == u'submitted')] # and also those which were checked - checked = [ a for (a,b) in rpi if (b == u'on')] + checked = [a for (a, b) in rpi if (b == u'on')] - # from which we can deduce true/false for each user/role combination - # that was displayed in the form - table_dict={} + # from which we can deduce true/false for each user/role + # combination that was displayed in the form + table_dict = {} for a in submitted: - table_dict[a]=False + 
table_dict[a] = False for a in checked: - table_dict[a]=True + table_dict[a] = True - # now we'll split up the user$role strings to make a dictionary from - # (user,role) to True/False, which tells us what we need to do. - new_user_role_dict={} - for (ur,val) in table_dict.items(): - u,r = ur.split('$') - new_user_role_dict[(u,r)] = val + # now we'll split up the user$role strings to make a dictionary + # from (user,role) to True/False, which tells us what we need to + # do. + new_user_role_dict = {} + for (ur, val) in table_dict.items(): + u, r = ur.split('$') + new_user_role_dict[(u, r)] = val # we get the current user/role assignments # and make a dictionary of them current_uors = model.Session.query(model.SystemRole).all() - if users_or_authz_groups=='users': - current_users_roles = [( uor.user.name, uor.role) for uor in current_uors if uor.user] - elif users_or_authz_groups=='authz_groups': - current_users_roles = [( uor.authorized_group.name, uor.role) for uor in current_uors if uor.authorized_group] + if users_or_authz_groups == 'users': + current_users_roles = [(uor.user.name, uor.role) + for uor in current_uors + if uor.user] + elif users_or_authz_groups == 'authz_groups': + current_users_roles = [(uor.authorized_group.name, uor.role) + for uor in current_uors + if uor.authorized_group] else: assert False, "shouldn't be here" - current_user_role_dict={} - for (u,r) in current_users_roles: - current_user_role_dict[(u,r)]=True + current_user_role_dict = {} + for (u, r) in current_users_roles: + current_user_role_dict[(u, r)] = True # and now we can loop through our dictionary of desired states # checking whether a change needs to be made, and if so making it - # WORRY: Here it seems that we have to check whether someone is already assigned - # a role, in order to avoid assigning it twice, or attempting to delete it when - # it doesn't exist. Otherwise problems occur. However this doesn't affect the - # index page, which would seem to be prone to suffer the same effect. - # Why the difference? + # WORRY: Here it seems that we have to check whether someone is + # already assigned a role, in order to avoid assigning it twice, + # or attempting to delete it when it doesn't exist. Otherwise + # problems occur. However this doesn't affect the index page, + # which would seem to be prone to suffer the same effect. Why + # the difference? 
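# A small worked example of the deduction described in the comments above,
# using hypothetical form values (the names 'joe'/'ann' and their roles are
# invented for illustration). The grid posts a 'submitted' marker for every
# cell it displayed and an 'on' value for every ticked cell:
example_params = [(u'joe$admin', u'submitted'), (u'joe$admin', u'on'),
                  (u'ann$editor', u'submitted')]
example_table = {}
for a in [a for (a, b) in example_params if b == u'submitted']:
    example_table[a] = False
for a in [a for (a, b) in example_params if b == u'on']:
    example_table[a] = True
example_user_role = {}
for (ur, val) in example_table.items():
    u, r = ur.split('$')
    example_user_role[(u, r)] = val
# example_user_role == {(u'joe', u'admin'): True, (u'ann', u'editor'): False}
# i.e. grant 'admin' to joe if not already set; revoke 'editor' from ann if set.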
- if users_or_authz_groups=='users': - for ((u,r), val) in new_user_role_dict.items(): + if users_or_authz_groups == 'users': + for ((u, r), val) in new_user_role_dict.items(): if val: - if not ((u,r) in current_user_role_dict): - model.add_user_to_role(model.User.by_name(u),r,model.System()) + if not ((u, r) in current_user_role_dict): + model.add_user_to_role( + model.User.by_name(u), r, + model.System()) else: - if ((u,r) in current_user_role_dict): - model.remove_user_from_role(model.User.by_name(u),r,model.System()) - elif users_or_authz_groups=='authz_groups': - for ((u,r), val) in new_user_role_dict.items(): + if ((u, r) in current_user_role_dict): + model.remove_user_from_role( + model.User.by_name(u), r, + model.System()) + elif users_or_authz_groups == 'authz_groups': + for ((u, r), val) in new_user_role_dict.items(): if val: - if not ((u,r) in current_user_role_dict): - model.add_authorization_group_to_role(model.AuthorizationGroup.by_name(u),r,model.System()) + if not ((u, r) in current_user_role_dict): + model.add_authorization_group_to_role( + model.AuthorizationGroup.by_name(u), r, + model.System()) else: - if ((u,r) in current_user_role_dict): - model.remove_authorization_group_from_role(model.AuthorizationGroup.by_name(u),r,model.System()) + if ((u, r) in current_user_role_dict): + model.remove_authorization_group_from_role( + model.AuthorizationGroup.by_name(u), r, + model.System()) else: assert False, "shouldn't be here" - # finally commit the change to the database model.Session.commit() h.flash_success(_("Changes Saved")) @@ -110,77 +122,80 @@ def action_save_form(users_or_authz_groups): if ('authz_save' in request.POST): action_save_form('authz_groups') - - - def action_add_form(users_or_authz_groups): # The user is attempting to set new roles for a named user new_user = request.params.get('new_user_name') # this is the list of roles whose boxes were ticked - checked_roles = [ a for (a,b) in request.params.items() if (b == u'on')] - # this is the list of all the roles that were in the submitted form - submitted_roles = [ a for (a,b) in request.params.items() if (b == u'submitted')] + checked_roles = [a for (a, b) in request.params.items() + if (b == u'on')] + # this is the list of all the roles that were in the submitted + # form + submitted_roles = [a for (a, b) in request.params.items() + if (b == u'submitted')] # from this we can make a dictionary of the desired states # i.e. true for the ticked boxes, false for the unticked desired_roles = {} for r in submitted_roles: - desired_roles[r]=False + desired_roles[r] = False for r in checked_roles: - desired_roles[r]=True + desired_roles[r] = True - # again, in order to avoid either creating a role twice or deleting one which is - # non-existent, we need to get the users' current roles (if any) + # again, in order to avoid either creating a role twice or + # deleting one which is non-existent, we need to get the users' + # current roles (if any) current_uors = model.Session.query(model.SystemRole).all() - if users_or_authz_groups=='users': - current_roles = [uor.role for uor in current_uors if ( uor.user and uor.user.name == new_user )] + if users_or_authz_groups == 'users': + current_roles = [uor.role for uor in current_uors + if (uor.user and uor.user.name == new_user)] user_object = model.User.by_name(new_user) - if user_object==None: - # The submitted user does not exist. Bail with flash message - h.flash_error(_('unknown user:') + str (new_user)) + if user_object is None: + # The submitted user does not exist. 
Bail with flash + # message + h.flash_error(_('unknown user:') + str(new_user)) else: - # Whenever our desired state is different from our current state, change it. - for (r,val) in desired_roles.items(): + # Whenever our desired state is different from our + # current state, change it. + for (r, val) in desired_roles.items(): if val: if (r not in current_roles): - model.add_user_to_role(user_object, r, model.System()) + model.add_user_to_role(user_object, r, + model.System()) else: if (r in current_roles): - model.remove_user_from_role(user_object, r, model.System()) + model.remove_user_from_role(user_object, r, + model.System()) h.flash_success(_("User Added")) - elif users_or_authz_groups=='authz_groups': - current_roles = [uor.role for uor in current_uors if ( uor.authorized_group and uor.authorized_group.name == new_user )] + elif users_or_authz_groups == 'authz_groups': + current_roles = [uor.role for uor in current_uors + if (uor.authorized_group and + uor.authorized_group.name == new_user)] user_object = model.AuthorizationGroup.by_name(new_user) - if user_object==None: - # The submitted user does not exist. Bail with flash message - h.flash_error(_('unknown authorization group:') + str (new_user)) + if user_object is None: + # The submitted user does not exist. Bail with flash + # message + h.flash_error(_('unknown authorization group:') + + str(new_user)) else: - # Whenever our desired state is different from our current state, change it. - for (r,val) in desired_roles.items(): + # Whenever our desired state is different from our + # current state, change it. + for (r, val) in desired_roles.items(): if val: if (r not in current_roles): - model.add_authorization_group_to_role(user_object, r, model.System()) + model.add_authorization_group_to_role( + user_object, r, model.System()) else: if (r in current_roles): - model.remove_authorization_group_from_role(user_object, r, model.System()) + model.remove_authorization_group_from_role( + user_object, r, model.System()) h.flash_success(_("Authorization Group Added")) - else: assert False, "shouldn't be here" - - - - - - - - - # and finally commit all these changes to the database model.Session.commit() @@ -189,41 +204,40 @@ def action_add_form(users_or_authz_groups): if 'authz_add' in request.POST: action_add_form('authz_groups') - # ================= # Display the page - - # Find out all the possible roles. For the system object that's just all of them. + # Find out all the possible roles. For the system object that's just + # all of them. 
possible_roles = Role.get_all() # get the list of users who have roles on the System, with their roles uors = model.Session.query(model.SystemRole).all() # uniquify and sort users = sorted(list(set([uor.user.name for uor in uors if uor.user]))) - authz_groups = sorted(list(set([uor.authorized_group.name for uor in uors if uor.authorized_group]))) + authz_groups = sorted(list(set([uor.authorized_group.name + for uor in uors if uor.authorized_group]))) # make a dictionary from (user, role) to True, False - users_roles = [( uor.user.name, uor.role) for uor in uors if uor.user] - user_role_dict={} + users_roles = [(uor.user.name, uor.role) for uor in uors if uor.user] + user_role_dict = {} for u in users: for r in possible_roles: - if (u,r) in users_roles: - user_role_dict[(u,r)]=True + if (u, r) in users_roles: + user_role_dict[(u, r)] = True else: - user_role_dict[(u,r)]=False - + user_role_dict[(u, r)] = False - # and similarly make a dictionary from (authz_group, role) to True, False - authz_groups_roles = [( uor.authorized_group.name, uor.role) for uor in uors if uor.authorized_group] - authz_groups_role_dict={} + # and similarly make a dictionary from (authz_group, role) to + # True, False + authz_groups_roles = [(uor.authorized_group.name, uor.role) + for uor in uors if uor.authorized_group] + authz_groups_role_dict = {} for u in authz_groups: for r in possible_roles: - if (u,r) in authz_groups_roles: - authz_groups_role_dict[(u,r)]=True + if (u, r) in authz_groups_roles: + authz_groups_role_dict[(u, r)] = True else: - authz_groups_role_dict[(u,r)]=False - - + authz_groups_role_dict[(u, r)] = False # pass these variables to the template for rendering c.roles = possible_roles @@ -234,16 +248,18 @@ def action_add_form(users_or_authz_groups): c.authz_groups = authz_groups c.authz_groups_role_dict = authz_groups_role_dict - c.are_any_authz_groups = bool(model.Session.query(model.AuthorizationGroup).count()) + count = model.Session.query(model.AuthorizationGroup).count() + c.are_any_authz_groups = bool(count) return render('admin/authz.html') def trash(self): c.deleted_revisions = model.Session.query( - model.Revision).filter_by(state=model.State.DELETED) + model.Revision).filter_by(state=model.State.DELETED) c.deleted_packages = model.Session.query( - model.Package).filter_by(state=model.State.DELETED) - if not request.params or (len(request.params) == 1 and '__no_cache__' in request.params): + model.Package).filter_by(state=model.State.DELETED) + if not request.params or (len(request.params) == 1 and '__no_cache__' + in request.params): return render('admin/trash.html') else: # NB: we repeat retrieval of of revisions @@ -252,38 +268,44 @@ def trash(self): # purge packages) of form: "this object already exists in the # session" msgs = [] - if ('purge-packages' in request.params) or ('purge-revisions' in request.params): + if ('purge-packages' in request.params) or ('purge-revisions' in + request.params): if 'purge-packages' in request.params: revs_to_purge = [] for pkg in c.deleted_packages: - revisions = [ x[0] for x in pkg.all_related_revisions ] - # ensure no accidental purging of other(non-deleted) packages - # initially just avoided purging revisions where - # non-deleted packages were affected + revisions = [x[0] for x in pkg.all_related_revisions] + # ensure no accidental purging of other(non-deleted) + # packages initially just avoided purging revisions + # where non-deleted packages were affected # however this lead to confusing outcomes e.g. 
- # we succesfully deleted revision in which package was deleted (so package - # now active again) but no other revisions + # we succesfully deleted revision in which package + # was deleted (so package now active again) but no + # other revisions problem = False for r in revisions: - affected_pkgs = set(r.packages).difference(set(c.deleted_packages)) + affected_pkgs = set(r.packages).\ + difference(set(c.deleted_packages)) if affected_pkgs: msg = _('Cannot purge package %s as ' - 'associated revision %s includes non-deleted packages %s') - msg = msg % (pkg.id, r.id, [pkg.id for r in affected_pkgs]) + 'associated revision %s includes ' + 'non-deleted packages %s') + msg = msg % (pkg.id, r.id, [pkg.id for r + in affected_pkgs]) msgs.append(msg) problem = True break if not problem: - revs_to_purge += [ r.id for r in revisions ] + revs_to_purge += [r.id for r in revisions] model.Session.remove() else: - revs_to_purge = [ rev.id for rev in c.deleted_revisions ] + revs_to_purge = [rev.id for rev in c.deleted_revisions] revs_to_purge = list(set(revs_to_purge)) for id in revs_to_purge: revision = model.Session.query(model.Revision).get(id) try: - # TODO deleting the head revision corrupts the edit page - # Ensure that whatever 'head' pointer is used gets moved down to the next revision + # TODO deleting the head revision corrupts the edit + # page Ensure that whatever 'head' pointer is used + # gets moved down to the next revision model.repo.purge_revision(revision, leave_record=False) except Exception, inst: msg = _('Problem purging revision %s: %s') % (id, inst) @@ -295,4 +317,3 @@ def trash(self): for msg in msgs: h.flash_error(msg) h.redirect_to(h.url_for('ckanadmin', action='trash')) - diff --git a/ckan/controllers/api.py b/ckan/controllers/api.py index ad6d8410e19..66754fdb1f2 100644 --- a/ckan/controllers/api.py +++ b/ckan/controllers/api.py @@ -34,7 +34,9 @@ 'text': 'text/plain;charset=utf-8', 'html': 'text/html;charset=utf-8', 'json': 'application/json;charset=utf-8', - } +} + + class ApiController(base.BaseController): _actions = {} @@ -50,10 +52,11 @@ def __call__(self, environ, start_response): self._identify_user() try: - context = {'model':model,'user': c.user or c.author} - logic.check_access('site_read',context) + context = {'model': model, 'user': c.user or c.author} + logic.check_access('site_read', context) except NotAuthorized: - response_msg = self._finish(403, _('Not authorized to see this page')) + response_msg = self._finish(403, + _('Not authorized to see this page')) # Call start_response manually instead of the parent __call__ # because we want to end the request instead of continuing. response_msg = response_msg.encode('utf8') @@ -83,9 +86,9 @@ def _finish(self, status_int, response_data=None, else: response_msg = response_data # Support "JSONP" callback. 
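# A rough sketch of the JSONP behaviour referred to above; the exact body of
# _wrap_jsonp is not shown here, so treat this as an assumption rather than
# the controller's real helper. The JSON response is wrapped in a call to the
# client-supplied callback (HTML-escaped first), so a GET such as
# /api/action/package_list?callback=cb comes back as cb({...});
import cgi

def wrap_jsonp_sketch(callback, json_body):
    # Strip '<', '&' and '>' from the callback name before embedding it.
    return '%s(%s);' % (cgi.escape(callback), json_body)

# wrap_jsonp_sketch('cb', '{"success": true, "result": []}')
# -> 'cb({"success": true, "result": []});'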
- if status_int==200 and request.params.has_key('callback') and \ - (request.method == 'GET' or \ - c.logic_function and request.method == 'POST'): + if status_int == 200 and 'callback' in request.params and \ + (request.method == 'GET' or + c.logic_function and request.method == 'POST'): # escape callback to remove '<', '&', '>' chars callback = cgi.escape(request.params['callback']) response_msg = self._wrap_jsonp(callback, response_msg) @@ -133,8 +136,9 @@ def _set_response_header(self, name, value): try: value = str(value) except Exception, inst: - msg = "Couldn't convert '%s' header value '%s' to string: %s" % (name, value, inst) - raise Exception, msg + msg = "Couldn't convert '%s' header value '%s' to string: %s" % \ + (name, value, inst) + raise Exception(msg) response.headers[name] = value def get_api(self, ver=None): @@ -143,19 +147,21 @@ def get_api(self, ver=None): return self._finish_ok(response_data) def action(self, logic_function, ver=None): - function = get_action(logic_function) - if not function: + try: + function = get_action(logic_function) + except KeyError: log.error('Can\'t find logic function: %s' % logic_function) return self._finish_bad_request( gettext('Action name not known: %s') % str(logic_function)) context = {'model': model, 'session': model.Session, 'user': c.user, - 'api_version':ver} + 'api_version': ver} model.Session()._context = context return_dict = {'help': function.__doc__} try: side_effect_free = getattr(function, 'side_effect_free', False) - request_data = self._get_request_data(try_url_params=side_effect_free) + request_data = self._get_request_data(try_url_params= + side_effect_free) except ValueError, inst: log.error('Bad request data: %s' % str(inst)) return self._finish_bad_request( @@ -164,8 +170,8 @@ def action(self, logic_function, ver=None): # this occurs if request_data is blank log.error('Bad request data - not dict: %r' % request_data) return self._finish_bad_request( - gettext('Bad request data: %s') % \ - 'Request data JSON decoded to %r but ' \ + gettext('Bad request data: %s') % + 'Request data JSON decoded to %r but ' 'it needs to be a dictionary.' 
% request_data) try: result = function(context, request_data) @@ -174,8 +180,8 @@ def action(self, logic_function, ver=None): except DataError, e: log.error('Format incorrect: %s - %s' % (e.error, request_data)) #TODO make better error message - return self._finish(400, _(u'Integrity Error') + \ - ': %s - %s' % (e.error, request_data)) + return self._finish(400, _(u'Integrity Error') + + ': %s - %s' % (e.error, request_data)) except NotAuthorized: return_dict['error'] = {'__type': 'Authorization Error', 'message': _('Access denied')} @@ -197,25 +203,27 @@ def action(self, logic_function, ver=None): return self._finish(409, return_dict, content_type='json') except logic.ParameterError, e: return_dict['error'] = {'__type': 'Parameter Error', - 'message': '%s: %s' % \ + 'message': '%s: %s' % (_('Parameter Error'), e.extra_msg)} return_dict['success'] = False log.error('Parameter error: %r' % e.extra_msg) return self._finish(409, return_dict, content_type='json') except search.SearchQueryError, e: return_dict['error'] = {'__type': 'Search Query Error', - 'message': 'Search Query is invalid: %r' % e.args } + 'message': 'Search Query is invalid: %r' % + e.args} return_dict['success'] = False return self._finish(400, return_dict, content_type='json') except search.SearchError, e: return_dict['error'] = {'__type': 'Search Error', - 'message': 'Search error: %r' % e.args } + 'message': 'Search error: %r' % e.args} return_dict['success'] = False return self._finish(409, return_dict, content_type='json') return self._finish_ok(return_dict) def _get_action_from_map(self, action_map, register, subregister): - # Helper function to get the action function specified in the action map + ''' Helper function to get the action function specified in + the action map''' # translate old package calls to use dataset if register == 'package': @@ -243,6 +251,7 @@ def list(self, ver=None, register=None, subregister=None, id=None): ('dataset', 'activity'): 'package_activity_list', ('group', 'activity'): 'group_activity_list', ('user', 'activity'): 'user_activity_list', + ('user', 'dashboard_activity'): 'dashboard_activity_list', ('activity', 'details'): 'activity_detail_list', } @@ -258,7 +267,8 @@ def list(self, ver=None, register=None, subregister=None, id=None): except NotAuthorized: return self._finish_not_authz() - def show(self, ver=None, register=None, subregister=None, id=None, id2=None): + def show(self, ver=None, register=None, subregister=None, + id=None, id2=None): action_map = { 'revision': 'revision_show', 'group': 'group_show_rest', @@ -289,15 +299,17 @@ def show(self, ver=None, register=None, subregister=None, id=None, id2=None): return self._finish_not_authz() def _represent_package(self, package): - return package.as_dict(ref_package_by=self.ref_package_by, ref_group_by=self.ref_group_by) + return package.as_dict(ref_package_by=self.ref_package_by, + ref_group_by=self.ref_group_by) - def create(self, ver=None, register=None, subregister=None, id=None, id2=None): + def create(self, ver=None, register=None, subregister=None, + id=None, id2=None): action_map = { - 'group': 'group_create_rest', - 'dataset': 'package_create_rest', - 'rating': 'rating_create', - 'related': 'related_create', + 'group': 'group_create_rest', + 'dataset': 'package_create_rest', + 'rating': 'rating_create', + 'related': 'related_create', ('dataset', 'relationships'): 'package_relationship_create_rest', } for type in model.PackageRelationship.get_all_types(): @@ -317,14 +329,15 @@ def create(self, ver=None, register=None, 
subregister=None, id=None, id2=None): action = self._get_action_from_map(action_map, register, subregister) if not action: return self._finish_bad_request( - gettext('Cannot create new entity of this type: %s %s') % \ + gettext('Cannot create new entity of this type: %s %s') % (register, subregister)) try: response_data = action(context, data_dict) location = None if "id" in data_dict: - location = str('%s/%s' % (request.path.replace('package', 'dataset'), + location = str('%s/%s' % (request.path.replace('package', + 'dataset'), data_dict.get("id"))) return self._finish_ok(response_data, resource_location=location) @@ -339,19 +352,23 @@ def create(self, ver=None, register=None, subregister=None, id=None, id2=None): except DataError, e: log.error('Format incorrect: %s - %s' % (e.error, request_data)) #TODO make better error message - return self._finish(400, _(u'Integrity Error') + \ - ': %s - %s' % (e.error, request_data)) + return self._finish(400, _(u'Integrity Error') + + ': %s - %s' % (e.error, request_data)) except search.SearchIndexError: - log.error('Unable to add package to search index: %s' % request_data) - return self._finish(500, _(u'Unable to add package to search index') % request_data) + log.error('Unable to add package to search index: %s' % + request_data) + return self._finish(500, + _(u'Unable to add package to search index') % + request_data) except: model.Session.rollback() raise - def update(self, ver=None, register=None, subregister=None, id=None, id2=None): + def update(self, ver=None, register=None, subregister=None, + id=None, id2=None): action_map = { - 'dataset': 'package_update_rest', - 'group': 'group_update_rest', + 'dataset': 'package_update_rest', + 'group': 'group_update_rest', ('dataset', 'relationships'): 'package_relationship_update_rest', } for type in model.PackageRelationship.get_all_types(): @@ -371,8 +388,8 @@ def update(self, ver=None, register=None, subregister=None, id=None, id2=None): action = self._get_action_from_map(action_map, register, subregister) if not action: return self._finish_bad_request( - gettext('Cannot update entity of this type: %s') % \ - register.encode('utf-8')) + gettext('Cannot update entity of this type: %s') % + register.encode('utf-8')) try: response_data = action(context, data_dict) return self._finish_ok(response_data) @@ -387,17 +404,19 @@ def update(self, ver=None, register=None, subregister=None, id=None, id2=None): except DataError, e: log.error('Format incorrect: %s - %s' % (e.error, request_data)) #TODO make better error message - return self._finish(400, _(u'Integrity Error') + \ - ': %s - %s' % (e.error, request_data)) + return self._finish(400, _(u'Integrity Error') + + ': %s - %s' % (e.error, request_data)) except search.SearchIndexError: log.error('Unable to update search index: %s' % request_data) - return self._finish(500, _(u'Unable to update search index') % request_data) + return self._finish(500, _(u'Unable to update search index') % + request_data) - def delete(self, ver=None, register=None, subregister=None, id=None, id2=None): + def delete(self, ver=None, register=None, subregister=None, + id=None, id2=None): action_map = { - 'group': 'group_delete', - 'dataset': 'package_delete', - 'related': 'related_delete', + 'group': 'group_delete', + 'dataset': 'package_delete', + 'related': 'related_delete', ('dataset', 'relationships'): 'package_relationship_delete_rest', } for type in model.PackageRelationship.get_all_types(): @@ -413,7 +432,7 @@ def delete(self, ver=None, register=None, subregister=None, 
id=None, id2=None): action = self._get_action_from_map(action_map, register, subregister) if not action: return self._finish_bad_request( - gettext('Cannot delete entity of this type: %s %s') %\ + gettext('Cannot delete entity of this type: %s %s') % (register, subregister or '')) try: response_data = action(context, data_dict) @@ -432,7 +451,7 @@ def search(self, ver=None, register=None): log.debug('search %s params: %r' % (register, request.params)) if register == 'revision': since_time = None - if request.params.has_key('since_id'): + if 'since_id' in request.params: id = request.params['since_id'] if not id: return self._finish_bad_request( @@ -442,7 +461,7 @@ def search(self, ver=None, register=None): return self._finish_not_found( gettext(u'There is no revision with id: %s') % id) since_time = rev.timestamp - elif request.params.has_key('since_time'): + elif 'since_time' in request.params: since_time_str = request.params['since_time'] try: since_time = h.date_str_to_datetime(since_time_str) @@ -450,8 +469,10 @@ def search(self, ver=None, register=None): return self._finish_bad_request('ValueError: %s' % inst) else: return self._finish_bad_request( - gettext("Missing search term ('since_id=UUID' or 'since_time=TIMESTAMP')")) - revs = model.Session.query(model.Revision).filter(model.Revision.timestamp>since_time) + gettext("Missing search term ('since_id=UUID' or " + + " 'since_time=TIMESTAMP')")) + revs = model.Session.query(model.Revision).\ + filter(model.Revision.timestamp > since_time) return self._finish_ok([rev.id for rev in revs]) elif register in ['dataset', 'package', 'resource']: try: @@ -482,7 +503,7 @@ def search(self, ver=None, register=None): for field, value in params.items(): field = field.strip() if field in search.DEFAULT_OPTIONS.keys() or \ - field in IGNORE_FIELDS: + field in IGNORE_FIELDS: continue values = [value] if isinstance(value, list): @@ -491,14 +512,18 @@ def search(self, ver=None, register=None): query_fields.add(field, v) results = query.run( - query=params.get('q'), fields=query_fields, options=options + query=params.get('q'), + fields=query_fields, + options=options ) else: # For package searches in API v3 and higher, we can pass # parameters straight to Solr. if ver in [1, 2]: - # Otherwise, put all unrecognised ones into the q parameter - params = search.convert_legacy_parameters_to_solr(params) + # Otherwise, put all unrecognised ones into the q + # parameter + params = search.\ + convert_legacy_parameters_to_solr(params) query = search.query_for(model.Package) results = query.run(params) return self._finish_ok(results) @@ -512,20 +537,23 @@ def search(self, ver=None, register=None): @classmethod def _get_search_params(cls, request_params): - if request_params.has_key('qjson'): + if 'qjson' in request_params: try: params = h.json.loads(request_params['qjson'], encoding='utf8') except ValueError, e: - raise ValueError, gettext('Malformed qjson value') + ': %r' % e + raise ValueError(gettext('Malformed qjson value') + ': %r' + % e) elif len(request_params) == 1 and \ - len(request_params.values()[0]) < 2 and \ - request_params.keys()[0].startswith('{'): + len(request_params.values()[0]) < 2 and \ + request_params.keys()[0].startswith('{'): # e.g. 
{some-json}='1' or {some-json}='' params = h.json.loads(request_params.keys()[0], encoding='utf8') else: params = request_params if not isinstance(params, (UnicodeMultiDict, dict)): - raise ValueError, _('Request params must be in form of a json encoded dictionary.') + msg = _('Request params must be in form ' + + 'of a json encoded dictionary.') + raise ValueError(msg) return params def markdown(self, ver=None): @@ -559,7 +587,8 @@ def _calc_throughput(self, ver=None): for t in range(0, period): expr = '%s/%s*' % ( timing_cache_path, - (datetime.datetime.now() - datetime.timedelta(0,t)).isoformat()[0:19], + (datetime.datetime.now() - + datetime.timedelta(0, t)).isoformat()[0:19], ) call_count += len(glob.glob(expr)) # Todo: Clear old records. @@ -574,9 +603,9 @@ def user_autocomplete(self): context = {'model': model, 'session': model.Session, 'user': c.user or c.author} - data_dict = {'q':q,'limit':limit} + data_dict = {'q': q, 'limit': limit} - user_list = get_action('user_autocomplete')(context,data_dict) + user_list = get_action('user_autocomplete')(context, data_dict) return user_list @jsonp.jsonpify @@ -591,16 +620,17 @@ def group_autocomplete(self): limit = min(50, limit) query = model.Group.search_by_name_or_title(q, t) + def convert_to_dict(user): out = {} for k in ['id', 'name', 'title']: out[k] = getattr(user, k) return out + query = query.limit(limit) out = map(convert_to_dict, query.all()) return out - @jsonp.jsonpify def authorizationgroup_autocomplete(self): q = request.params.get('q', '') @@ -612,11 +642,13 @@ def authorizationgroup_autocomplete(self): limit = min(50, limit) query = model.AuthorizationGroup.search(q) + def convert_to_dict(user): out = {} for k in ['id', 'name']: out[k] = getattr(user, k) return out + query = query.limit(limit) out = map(convert_to_dict, query.all()) return out @@ -626,13 +658,13 @@ def is_slug_valid(self): slugtype = request.params.get('type') or '' # TODO: We need plugins to be able to register new disallowed names disallowed = ['new', 'edit', 'search'] - if slugtype==u'package': + if slugtype == u'package': response_data = dict(valid=not bool(common.package_exists(slug) - or slug in disallowed )) + or slug in disallowed)) return self._finish_ok(response_data) - if slugtype==u'group': + if slugtype == u'group': response_data = dict(valid=not bool(common.group_exists(slug) or - slug in disallowed )) + slug in disallowed)) return self._finish_ok(response_data) return self._finish_bad_request('Bad slug type: %s' % slugtype) @@ -647,7 +679,8 @@ def dataset_autocomplete(self): data_dict = {'q': q, 'limit': limit} - package_dicts = get_action('package_autocomplete')(context, data_dict) + package_dicts = get_action('package_autocomplete')(context, + data_dict) resultSet = {'ResultSet': {'Result': package_dicts}} return self._finish_ok(resultSet) @@ -706,9 +739,9 @@ def munge_tag(self): def format_icon(self): f = request.params.get('format') out = { - 'format' : f, - 'icon' : h.icon_url(h.format_icon(f)) - } + 'format': f, + 'icon': h.icon_url(h.format_icon(f)) + } return self._finish_ok(out) def status(self): diff --git a/ckan/controllers/authorization_group.py b/ckan/controllers/authorization_group.py index fa1b91aafb6..e1b4b846ac2 100644 --- a/ckan/controllers/authorization_group.py +++ b/ckan/controllers/authorization_group.py @@ -8,20 +8,22 @@ from ckan.lib.helpers import Page from ckan.logic import NotAuthorized, check_access + class AuthorizationGroupController(BaseController): - + def __init__(self): BaseController.__init__(self) - + def 
index(self): from ckan.lib.helpers import Page try: - context = {'model':model,'user': c.user or c.author} - check_access('site_read',context) + context = {'model': model, 'user': c.user or c.author} + check_access('site_read', context) except NotAuthorized: abort(401, _('Not authorized to see this page')) - query = ckan.authz.Authorizer().authorized_query(c.user, model.AuthorizationGroup) + query = ckan.authz.Authorizer().authorized_query( + c.user, model.AuthorizationGroup) query = query.options(eagerload_all('users')) c.page = Page( collection=query, @@ -32,19 +34,20 @@ def index(self): def _get_authgroup_by_name_or_id(self, id): return model.AuthorizationGroup.by_name(id) or\ - model.Session.query(model.AuthorizationGroup).get(id) + model.Session.query(model.AuthorizationGroup).get(id) def read(self, id): c.authorization_group = self._get_authgroup_by_name_or_id(id) if c.authorization_group is None: abort(404) - auth_for_read = self.authorizer.am_authorized(c, model.Action.READ, + auth_for_read = self.authorizer.am_authorized(c, model.Action.READ, c.authorization_group) if not auth_for_read: abort(401, _('Not authorized to read %s') % id.encode('utf8')) - + import ckan.misc - c.authorization_group_admins = self.authorizer.get_admins(c.authorization_group) + c.authorization_group_admins = self.authorizer.get_admins( + c.authorization_group) c.page = Page( collection=c.authorization_group.users, @@ -56,16 +59,17 @@ def read(self, id): def new(self): record = model.AuthorizationGroup c.error = '' - - auth_for_create = self.authorizer.am_authorized(c, model.Action.AUTHZ_GROUP_CREATE, model.System()) + + auth_for_create = self.authorizer.am_authorized( + c, model.Action.AUTHZ_GROUP_CREATE, model.System()) if not auth_for_create: abort(401, _('Unauthorized to create a group')) - + is_admin = self.authorizer.is_sysadmin(c.user) - + fs = ckan.forms.get_authorization_group_fieldset(is_admin=is_admin) - if request.params.has_key('save'): + if 'save' in request.params: # needed because request is nested # multidict which is read only params = dict(request.params) @@ -78,41 +82,48 @@ def new(self): return render('authorization_group/edit.html') # do not use groupname from id as may have changed c.authzgroupname = c.fs.name.value - authorization_group = model.AuthorizationGroup.by_name(c.authzgroupname) + authorization_group = model.AuthorizationGroup.by_name( + c.authzgroupname) assert authorization_group user = model.User.by_name(c.user) model.setup_default_user_roles(authorization_group, [user]) - users = [model.User.by_name(name) for name in \ + users = [model.User.by_name(name) for name in request.params.getall('AuthorizationGroup-users-current')] authorization_group.users = list(set(users)) - usernames = request.params.getall('AuthorizationGroupUser--user_name') + usernames = request.params.getall( + 'AuthorizationGroupUser--user_name') for username in usernames: if username: usr = model.User.by_name(username) if usr and usr not in authorization_group.users: - model.add_user_to_authorization_group(usr, authorization_group, model.Role.READER) + model.add_user_to_authorization_group( + usr, authorization_group, model.Role.READER) model.repo.commit_and_remove() - h.redirect_to(controller='authorization_group', action='read', id=c.authzgroupname) + h.redirect_to(controller='authorization_group', action='read', + id=c.authzgroupname) c.form = self._render_edit_form(fs) return render('authorization_group/new.html') - def edit(self, id=None): # allow id=None to allow posting + def edit(self, 
id=None): + # allow id=None to allow posting c.error = '' authorization_group = self._get_authgroup_by_name_or_id(id) if authorization_group is None: abort(404, '404 Not Found') - am_authz = self.authorizer.am_authorized(c, model.Action.EDIT, authorization_group) + am_authz = self.authorizer.am_authorized(c, model.Action.EDIT, + authorization_group) if not am_authz: abort(401, _('User %r not authorized to edit %r') % (c.user, id)) - + is_admin = self.authorizer.is_sysadmin(c.user) - + if not 'save' in request.params: c.authorization_group = authorization_group c.authorization_group_name = authorization_group.name - - fs = ckan.forms.get_authorization_group_fieldset(is_admin=is_admin).bind(authorization_group) + + fs = ckan.forms.get_authorization_group_fieldset( + is_admin=is_admin).bind(authorization_group) c.form = self._render_edit_form(fs) return render('authorization_group/edit.html') else: @@ -133,17 +144,20 @@ def edit(self, id=None): # allow id=None to allow posting c.form = self._render_edit_form(fs) return render('authorization_group/edit.html') user = model.User.by_name(c.user) - users = [model.User.by_name(name) for name in \ + users = [model.User.by_name(name) for name in request.params.getall('AuthorizationGroup-users-current')] authorization_group.users = list(set(users)) - usernames = request.params.getall('AuthorizationGroupUser--user_name') + usernames = request.params.\ + getall('AuthorizationGroupUser--user_name') for username in usernames: if username: usr = model.User.by_name(username) if usr and usr not in authorization_group.users: - model.add_user_to_authorization_group(usr, authorization_group, model.Role.READER) + model.add_user_to_authorization_group( + usr, authorization_group, model.Role.READER) model.repo.commit_and_remove() - h.redirect_to(controller='authorization_group', action='read', id=c.authorization_group_name) + h.redirect_to(controller='authorization_group', action='read', + id=c.authorization_group_name) def authz(self, id): authorization_group = self._get_authgroup_by_name_or_id(id) @@ -153,16 +167,17 @@ def authz(self, id): c.authorization_group_name = authorization_group.name c.authorization_group = authorization_group - c.authz_editable = self.authorizer.am_authorized(c, model.Action.EDIT_PERMISSIONS, - authorization_group) + c.authz_editable = self.authorizer.am_authorized( + c, model.Action.EDIT_PERMISSIONS, authorization_group) if not c.authz_editable: - abort(401, gettext('User %r not authorized to edit %s authorizations') % (c.user, id)) + abort(401, + gettext('User %r not authorized to edit %s authorizations') + % (c.user, id)) roles = self._handle_update_of_authz(authorization_group) self._prepare_authz_info_for_render(roles) return render('authorization_group/authz.html') - def _render_edit_form(self, fs): # errors arrive in c.error and fs.errors c.fieldset = fs diff --git a/ckan/controllers/datastore.py b/ckan/controllers/datastore.py index bd5e2d124e2..b8199c8f7ff 100644 --- a/ckan/controllers/datastore.py +++ b/ckan/controllers/datastore.py @@ -1,9 +1,10 @@ from ckan.lib.base import BaseController, abort, _, c, response, request, g import ckan.model as model -from ckan.lib.helpers import json from ckan.lib.jsonp import jsonpify from ckan.logic import get_action, check_access -from ckan.logic import NotFound, NotAuthorized, ValidationError +from ckan.logic import NotFound, NotAuthorized + + class DatastoreController(BaseController): def _make_redirect(self, id, url=''): @@ -20,10 +21,6 @@ def read(self, id, url=''): try: resource = 
get_action('resource_show')(context, {'id': id}) - if not resource.get('webstore_url', ''): - return { - 'error': 'DataStore is disabled for this resource' - } self._make_redirect(id, url) return '' except NotFound: @@ -36,19 +33,15 @@ def write(self, id, url): context = {'model': model, 'session': model.Session, 'user': c.user or c.author} try: - resource = model.Resource.get(id) - if not resource: - abort(404, _('Resource not found')) - if not resource.webstore_url: - return { - 'error': 'DataStore is disabled for this resource' - } - context["resource"] = resource check_access('resource_update', context, {'id': id}) + resource_dict = get_action('resource_show')(context,{'id':id}) + if not resource_dict['webstore_url']: + resource_dict['webstore_url'] = u'active' + get_action('resource_update')(context,resource_dict) + self._make_redirect(id, url) return '' except NotFound: abort(404, _('Resource not found')) except NotAuthorized: abort(401, _('Unauthorized to read resource %s') % id) - diff --git a/ckan/controllers/error.py b/ckan/controllers/error.py index 675e3f049d2..db210b5eae3 100644 --- a/ckan/controllers/error.py +++ b/ckan/controllers/error.py @@ -9,6 +9,7 @@ from ckan.lib.base import BaseController from ckan.lib.base import render + class ErrorController(BaseController): """Generates error documents as and when they are required. @@ -33,9 +34,11 @@ def document(self): if original_request and original_request.path.startswith('/api'): return original_response.body # Otherwise, decorate original response with error template. - c.content = literal(original_response.unicode_body) or cgi.escape(request.GET.get('message', '')) - c.prefix=request.environ.get('SCRIPT_NAME', ''), - c.code=cgi.escape(request.GET.get('code', str(original_response.status_int))), + c.content = literal(original_response.unicode_body) or \ + cgi.escape(request.GET.get('message', '')) + c.prefix = request.environ.get('SCRIPT_NAME', ''), + c.code = cgi.escape(request.GET.get('code', + str(original_response.status_int))), return render('error_document_template.html') def img(self, id): diff --git a/ckan/controllers/feed.py b/ckan/controllers/feed.py index 92bd4fe8056..c39ffcc3628 100644 --- a/ckan/controllers/feed.py +++ b/ckan/controllers/feed.py @@ -36,6 +36,7 @@ log = logging.getLogger(__name__) + def _package_search(data_dict): """ Helper method that wraps the package_search action. @@ -53,10 +54,11 @@ def _package_search(data_dict): data_dict['rows'] = ITEMS_LIMIT # package_search action modifies the data_dict, so keep our copy intact. - query = get_action('package_search')(context,data_dict.copy()) + query = get_action('package_search')(context, data_dict.copy()) return query['count'], query['results'] + def _create_atom_id(resource_path, authority_name=None, date_string=None): """ Helper method that creates an atom id for a feed or entry. @@ -84,11 +86,12 @@ def _create_atom_id(resource_path, authority_name=None, date_string=None): The domain name or email address of the publisher of the feed. See [3] for more details. If ``None`` then the domain name is taken from the config file. First trying ``ckan.feeds.authority_name``, and failing - that, it uses ``ckan.site_url``. Again, this should not change over time. + that, it uses ``ckan.site_url``. Again, this should not change over + time. date_string - A string representing a date on which the authority_name is owned by the - publisher of the feed. + A string representing a date on which the authority_name is owned by + the publisher of the feed. e.g. 
``"2012-03-22"`` @@ -99,7 +102,8 @@ def _create_atom_id(resource_path, authority_name=None, date_string=None): then the date_string is not used in the generation of the atom id. Following the methods outlined in [1], [2] and [3], this function produces - tagURIs like: ``"tag:thedatahub.org,2012:/group/933f3857-79fd-4beb-a835-c0349e31ce76"``. + tagURIs like: + ``"tag:thedatahub.org,2012:/group/933f3857-79fd-4beb-a835-c0349e31ce76"``. If not enough information is provide to produce a valid tagURI, then only the resource_path is used, e.g.: :: @@ -110,10 +114,11 @@ def _create_atom_id(resource_path, authority_name=None, date_string=None): "/group/933f3857-79fd-4beb-a835-c0349e31ce76" - The latter of which is only used if no site_url is available. And it should - be noted will result in an invalid feed. + The latter of which is only used if no site_url is available. And it + should be noted will result in an invalid feed. - [1] http://web.archive.org/web/20110514113830/http://diveintomark.org/archives/2004/05/28/howto-atom-id + [1] http://web.archive.org/web/20110514113830/http://diveintomark.org/\ + archives/2004/05/28/howto-atom-id [2] http://www.taguri.org/ [3] http://tools.ietf.org/html/rfc4151#section-2.1 [4] http://www.ietf.org/rfc/rfc4287 @@ -144,6 +149,7 @@ def _create_atom_id(resource_path, authority_name=None, date_string=None): tagging_entity = ','.join([authority_name, date_string]) return ':'.join(['tag', tagging_entity, resource_path]) + class FeedController(BaseController): base_url = config.get('ckan.site_url') @@ -160,14 +166,13 @@ def _alternate_url(self, params, **kwargs): controller='package', action='search') - def group(self,id): - + def group(self, id): try: context = {'model': model, 'session': model.Session, - 'user': c.user or c.author} - group_dict = get_action('group_show')(context,{'id':id}) + 'user': c.user or c.author} + group_dict = get_action('group_show')(context, {'id': id}) except NotFound: - abort(404,'Group not found') + abort(404, 'Group not found') data_dict, params = self._parse_url_params() data_dict['fq'] = 'groups:"%s"' % id @@ -189,16 +194,18 @@ def group(self,id): alternate_url = self._alternate_url(params, groups=id) return self.output_feed(results, - feed_title = u'%s - Group: "%s"' % (g.site_title, group_dict['title']), - feed_description = u'Recently created or updated datasets on %s by group: "%s"' % \ - (g.site_title,group_dict['title']), - feed_link = alternate_url, - feed_guid = _create_atom_id(u'/feeds/groups/%s.atom' % id), - feed_url = feed_url, - navigation_urls = navigation_urls, - ) - - def tag(self,id): + feed_title=u'%s - Group: "%s"' % (g.site_title, + group_dict['title']), + feed_description=u'Recently created or ' + 'updated datasets on %s by group: "%s"' % + (g.site_title, group_dict['title']), + feed_link=alternate_url, + feed_guid=_create_atom_id( + u'/feeds/groups/%s.atom' % id), + feed_url=feed_url, + navigation_urls=navigation_urls) + + def tag(self, id): data_dict, params = self._parse_url_params() data_dict['fq'] = 'tags:"%s"' % id @@ -220,14 +227,16 @@ def tag(self,id): alternate_url = self._alternate_url(params, tags=id) return self.output_feed(results, - feed_title = u'%s - Tag: "%s"' % (g.site_title, id), - feed_description = u'Recently created or updated datasets on %s by tag: "%s"' % \ - (g.site_title, id), - feed_link = alternate_url, - feed_guid = _create_atom_id(u'/feeds/tag/%s.atom' % id), - feed_url = feed_url, - navigation_urls = navigation_urls, - ) + feed_title=u'%s - Tag: "%s"' % + (g.site_title, id), + 
feed_description=u'Recently created or ' + 'updated datasets on %s by tag: "%s"' % + (g.site_title, id), + feed_link=alternate_url, + feed_guid=_create_atom_id( + u'/feeds/tag/%s.atom' % id), + feed_url=feed_url, + navigation_urls=navigation_urls) def general(self): data_dict, params = self._parse_url_params() @@ -248,13 +257,14 @@ def general(self): alternate_url = self._alternate_url(params) return self.output_feed(results, - feed_title = g.site_title, - feed_description = u'Recently created or updated datasets on %s' % g.site_title, - feed_link = alternate_url, - feed_guid = _create_atom_id(u'/feeds/dataset.atom'), - feed_url = feed_url, - navigation_urls = navigation_urls, - ) + feed_title=g.site_title, + feed_description=u'Recently created or ' + 'updated datasets on %s' % g.site_title, + feed_link=alternate_url, + feed_guid=_create_atom_id( + u'/feeds/dataset.atom'), + feed_url=feed_url, + navigation_urls=navigation_urls) # TODO check search params def custom(self): @@ -278,7 +288,7 @@ def custom(self): data_dict = { 'q': q, 'fq': fq, - 'start': (page-1) * limit, + 'start': (page - 1) * limit, 'rows': limit, 'sort': request.params.get('sort', None), } @@ -298,26 +308,23 @@ def custom(self): alternate_url = self._alternate_url(request.params) return self.output_feed(results, - feed_title = u'%s - Custom query' % g.site_title, - feed_description = u'Recently created or updated datasets on %s. Custom query: \'%s\'' % (g.site_title, q), - feed_link = alternate_url, - feed_guid = _create_atom_id(u'/feeds/custom.atom?%s' % search_url_params), - feed_url = feed_url, - navigation_urls = navigation_urls, - ) - - def output_feed(self, results, - feed_title, - feed_description, - feed_link, - feed_url, - navigation_urls, - feed_guid): + feed_title=u'%s - Custom query' % g.site_title, + feed_description=u'Recently created or updated' + ' datasets on %s. 
Custom query: \'%s\'' % + (g.site_title, q), + feed_link=alternate_url, + feed_guid=_create_atom_id( + u'/feeds/custom.atom?%s' % search_url_params), + feed_url=feed_url, + navigation_urls=navigation_urls) + + def output_feed(self, results, feed_title, feed_description, + feed_link, feed_url, navigation_urls, feed_guid): author_name = config.get('ckan.feeds.author_name', '').strip() or \ - config.get('ckan.site_id', '').strip() + config.get('ckan.site_id', '').strip() author_link = config.get('ckan.feeds.author_link', '').strip() or \ - config.get('ckan.site_url', '').strip() + config.get('ckan.site_url', '').strip() # TODO language feed = _FixedAtom1Feed( @@ -333,25 +340,30 @@ def output_feed(self, results, next_page=navigation_urls['next'], first_page=navigation_urls['first'], last_page=navigation_urls['last'], - ) + ) for pkg in results: feed.add_item( - title = pkg.get('title', ''), - link = self.base_url + url_for(controller='package', action='read', id=pkg['id']), - description = pkg.get('notes', ''), - updated = date_str_to_datetime(pkg.get('metadata_modified')), - published = date_str_to_datetime(pkg.get('metadata_created')), - unique_id = _create_atom_id(u'/dataset/%s' % pkg['id']), - author_name = pkg.get('author', ''), - author_email = pkg.get('author_email', ''), - categories = [t['name'] for t in pkg.get('tags', [])], - enclosure=webhelpers.feedgenerator.Enclosure( - self.base_url + url_for(controller='api', register='package', action='show', id=pkg['name'], ver='2'), - unicode(len(json.dumps(pkg))), # TODO fix this - u'application/json' - ) - ) + title=pkg.get('title', ''), + link=self.base_url + url_for(controller='package', + action='read', + id=pkg['id']), + description=pkg.get('notes', ''), + updated=date_str_to_datetime(pkg.get('metadata_modified')), + published=date_str_to_datetime(pkg.get('metadata_created')), + unique_id=_create_atom_id(u'/dataset/%s' % pkg['id']), + author_name=pkg.get('author', ''), + author_email=pkg.get('author_email', ''), + categories=[t['name'] for t in pkg.get('tags', [])], + enclosure=webhelpers.feedgenerator.Enclosure( + self.base_url + url_for(controller='api', + register='package', + action='show', + id=pkg['name'], + ver='2'), + unicode(len(json.dumps(pkg))), # TODO fix this + u'application/json') + ) response.content_type = feed.mime_type return feed.writeString('utf-8') @@ -359,46 +371,53 @@ def output_feed(self, results, def _feed_url(self, query, controller, action, **kwargs): """ - Constructs the url for the given action. Encoding the query parameters. + Constructs the url for the given action. Encoding the query + parameters. """ path = url_for(controller=controller, action=action, **kwargs) - query = [(k, v.encode('utf-8') if isinstance(v, basestring) else str(v)) \ - for k, v in query.items()] + query = [(k, v.encode('utf-8') if isinstance(v, basestring) + else str(v)) for k, v in query.items()] - return self.base_url + path + u'?' + urlencode(query) # a trailing '?' is valid. + # a trailing '?' is valid. + return self.base_url + path + u'?' 
+ urlencode(query) - def _navigation_urls(self, query, controller, action, item_count, limit, **kwargs): + def _navigation_urls(self, query, controller, action, + item_count, limit, **kwargs): """ Constructs and returns first, last, prev and next links for paging """ - urls = dict( (rel, None) for rel in 'previous next first last'.split() ) + urls = dict((rel, None) for rel in 'previous next first last'.split()) page = int(query.get('page', 1)) # first: remove any page parameter first_query = query.copy() first_query.pop('page', None) - urls['first'] = self._feed_url(first_query, controller, action, **kwargs) + urls['first'] = self._feed_url(first_query, controller, + action, **kwargs) # last: add last page parameter last_page = (item_count / limit) + min(1, item_count % limit) last_query = query.copy() last_query['page'] = last_page - urls['last'] = self._feed_url(last_query, controller, action, **kwargs) + urls['last'] = self._feed_url(last_query, controller, + action, **kwargs) # previous if page > 1: previous_query = query.copy() - previous_query['page'] = page-1 - urls['previous'] = self._feed_url(previous_query, controller, action, **kwargs) + previous_query['page'] = page - 1 + urls['previous'] = self._feed_url(previous_query, controller, + action, **kwargs) else: urls['previous'] = None # next if page < last_page: next_query = query.copy() - next_query['page'] = page+1 - urls['next'] = self._feed_url(next_query, controller, action, **kwargs) + next_query['page'] = page + 1 + urls['next'] = self._feed_url(next_query, controller, + action, **kwargs) else: urls['next'] = None @@ -408,7 +427,8 @@ def _parse_url_params(self): """ Constructs a search-query dict from the URL query parameters. - Returns the constructed search-query dict, and the valid URL query parameters. + Returns the constructed search-query dict, and the valid URL + query parameters. """ try: @@ -418,16 +438,17 @@ def _parse_url_params(self): limit = ITEMS_LIMIT data_dict = { - 'start': (page-1)*limit, + 'start': (page - 1) * limit, 'rows': limit } # Filter ignored query parameters valid_params = ['page'] - params = dict( (p,request.params.get(p)) for p in valid_params \ - if p in request.params ) + params = dict((p, request.params.get(p)) for p in valid_params + if p in request.params) return data_dict, params + # TODO paginated feed class _FixedAtom1Feed(webhelpers.feedgenerator.Atom1Feed): """ @@ -443,7 +464,8 @@ class _FixedAtom1Feed(webhelpers.feedgenerator.Atom1Feed): * In Atom1Feed, the feed description is not used. So this class uses the <subtitle> field to publish that. - [1] https://bitbucket.org/bbangert/webhelpers/src/f5867a319abf/webhelpers/feedgenerator.py#cl-373 + [1] https://bitbucket.org/bbangert/webhelpers/src/f5867a319abf/\ + webhelpers/feedgenerator.py#cl-373 """ def add_item(self, *args, **kwargs): @@ -461,22 +483,28 @@ def latest_post_date(self): Calculates the latest post date from the 'updated' fields, rather than the 'pubdate' fields. """ - updates = [ item['updated'] for item in self.items if item['updated'] is not None ] - if not len(updates): # delegate to parent for default behaviour + updates = [item['updated'] for item in self.items + if item['updated'] is not None] + if not len(updates): # delegate to parent for default behaviour return super(_FixedAtom1Feed, self).latest_post_date() return max(updates) def add_item_elements(self, handler, item): """ - Add the <updated> and <published> fields to each entry that's written to the handler. + Add the <updated> and <published> fields to each entry that's written + to the handler. 
""" super(_FixedAtom1Feed, self).add_item_elements(handler, item) + dfunc = webhelpers.feedgenerator.rfc3339_date + if(item['updated']): - handler.addQuickElement(u'updated', webhelpers.feedgenerator.rfc3339_date(item['updated']).decode('utf-8')) + handler.addQuickElement(u'updated', + dfunc(item['updated']).decode('utf-8')) if(item['published']): - handler.addQuickElement(u'published', webhelpers.feedgenerator.rfc3339_date(item['published']).decode('utf-8')) + handler.addQuickElement(u'published', + dfunc(item['published']).decode('utf-8')) def add_root_elements(self, handler): """ @@ -490,7 +518,8 @@ def add_root_elements(self, handler): handler.addQuickElement(u'subtitle', self.feed['description']) for page in ['previous', 'next', 'first', 'last']: - if self.feed.get(page+'_page', None): + if self.feed.get(page + '_page', None): handler.addQuickElement(u'link', u'', - {'rel': page, 'href': self.feed.get(page+'_page')}) - + {'rel': page, + 'href': + self.feed.get(page + '_page')}) diff --git a/ckan/controllers/home.py b/ckan/controllers/home.py index f748e8cb9cf..e885f3bda9e 100644 --- a/ckan/controllers/home.py +++ b/ckan/controllers/home.py @@ -10,14 +10,15 @@ CACHE_PARAMETER = '__cache' + class HomeController(BaseController): repo = model.repo def __before__(self, action, **env): try: BaseController.__before__(self, action, **env) - context = {'model':model,'user': c.user or c.author} - ckan.logic.check_access('site_read',context) + context = {'model': model, 'user': c.user or c.author} + ckan.logic.check_access('site_read', context) except ckan.logic.NotAuthorized: abort(401, _('Not authorized to see this page')) except (sqlalchemy.exc.ProgrammingError, @@ -25,33 +26,34 @@ def __before__(self, action, **env): # postgres and sqlite errors for missing tables msg = str(e) if ('relation' in msg and 'does not exist' in msg) or \ - ('no such table' in msg) : + ('no such table' in msg): # table missing, major database problem - abort(503, _('This site is currently off-line. Database is not initialised.')) + abort(503, _('This site is currently off-line. Database ' + 'is not initialised.')) # TODO: send an email to the admin person (#1285) else: raise - def index(self): try: # package search context = {'model': model, 'session': model.Session, 'user': c.user or c.author} data_dict = { - 'q':'*:*', - 'facet.field':g.facets, - 'rows':0, - 'start':0, + 'q': '*:*', + 'facet.field': g.facets, + 'rows': 0, + 'start': 0, 'fq': 'capacity:"public"' } - query = ckan.logic.get_action('package_search')(context,data_dict) + query = ckan.logic.get_action('package_search')( + context, data_dict) c.package_count = query['count'] c.facets = query['facets'] data_dict = {'order_by': 'packages', 'all_fields': 1} - #only give the terms to group dictize that are returned in the facets - #as full results take a lot longer + # only give the terms to group dictize that are returned in the + # facets as full results take a lot longer if 'groups' in c.facets: data_dict['groups'] = c.facets['groups'].keys() c.groups = ckan.logic.get_action('group_list')(context, data_dict) @@ -63,28 +65,30 @@ def index(self): msg = None url = url_for(controller='user', action='edit') is_google_id = \ - c.userobj.name.startswith('https://www.google.com/accounts/o8/id') - if not c.userobj.email and (is_google_id and not c.userobj.fullname): - msg = _('Please update your profile' - ' and add your email address and your full name. 
') % url + \ - _('%s uses your email address' - ' if you need to reset your password.') \ - % g.site_title + c.userobj.name.startswith( + 'https://www.google.com/accounts/o8/id') + if not c.userobj.email and (is_google_id and + not c.userobj.fullname): + msg = _('Please <a href="{link}">update your profile</a>' + ' and add your email address and your full name. ' + '{site} uses your email address' + ' if you need to reset your password.').format(link=url, + site=g.site_title) elif not c.userobj.email: msg = _('Please <a href="%s">update your profile</a>' ' and add your email address. ') % url + \ - _('%s uses your email address' - ' if you need to reset your password.') \ - % g.site_title + _('%s uses your email address' + ' if you need to reset your password.') \ + % g.site_title elif is_google_id and not c.userobj.fullname: msg = _('Please <a href="%s">update your profile</a>' - ' and add your full name.') % (url) + ' and add your full name.') % (url) if msg: h.flash_notice(msg, allow_html=True) c.recently_changed_packages_activity_stream = \ ckan.logic.action.get.recently_changed_packages_activity_list_html( - context, {}) + context, {}) return render('home/index.html', cache_force=True) @@ -106,4 +110,3 @@ def cache(self, id): def cors_options(self, url=None): # just return 200 OK and empty data return '' - diff --git a/ckan/controllers/package.py b/ckan/controllers/package.py index dffb2bece31..8b5010ccc76 100644 --- a/ckan/controllers/package.py +++ b/ckan/controllers/package.py @@ -9,13 +9,20 @@ from ckan.logic import get_action, check_access from ckan.lib.helpers import date_str_to_datetime -from ckan.lib.base import request, c, BaseController, model, abort, h, g, render +from ckan.lib.base import (request, + render, + BaseController, + model, + abort, h, g, c) from ckan.lib.base import response, redirect, gettext from ckan.lib.package_saver import PackageSaver, ValidationException from ckan.lib.navl.dictization_functions import DataError, unflatten, validate from ckan.lib.helpers import json from ckan.logic import NotFound, NotAuthorized, ValidationError -from ckan.logic import tuplize_dict, clean_dict, parse_params, flatten_to_string_key +from ckan.logic import (tuplize_dict, + clean_dict, + parse_params, + flatten_to_string_key) from ckan.lib.i18n import get_lang import ckan.forms import ckan.authz @@ -28,14 +35,17 @@ log = logging.getLogger(__name__) + def _encode_params(params): - return [(k, v.encode('utf-8') if isinstance(v, basestring) else str(v)) \ - for k, v in params] + return [(k, v.encode('utf-8') if isinstance(v, basestring) else str(v)) + for k, v in params] + def url_with_params(url, params): params = _encode_params(params) return url + u'?' 
+ urlencode(params) + def search_url(params): url = h.url_for(controller='package', action='search') return url_with_params(url, params) @@ -60,7 +70,8 @@ def _check_data_dict(self, data_dict, package_type=None): return lookup_package_plugin(package_type).check_data_dict(data_dict) def _setup_template_variables(self, context, data_dict, package_type=None): - return lookup_package_plugin(package_type).setup_template_variables(context, data_dict) + return lookup_package_plugin(package_type).\ + setup_template_variables(context, data_dict) def _new_template(self, package_type): return lookup_package_plugin(package_type).new_template() @@ -102,12 +113,13 @@ def search(self): package_type = self._guess_package_type() try: - context = {'model':model,'user': c.user or c.author} - check_access('site_read',context) + context = {'model': model, 'user': c.user or c.author} + check_access('site_read', context) except NotAuthorized: abort(401, _('Not authorized to see this page')) - q = c.q = request.params.get('q', u'') # unicode format (decoded from utf8) + # unicode format (decoded from utf8) + q = c.q = request.params.get('q', u'') c.query_error = False try: page = int(request.params.get('page', 1)) @@ -116,7 +128,8 @@ def search(self): limit = g.datasets_per_page # most search operations should reset the page counter: - params_nopage = [(k, v) for k,v in request.params.items() if k != 'page'] + params_nopage = [(k, v) for k, v in request.params.items() + if k != 'page'] def drill_down_url(alternative_url=None, **by): return h.add_url_param(alternative_url=alternative_url, @@ -132,7 +145,7 @@ def remove_field(key, value=None, replace=None): c.remove_field = remove_field sort_by = request.params.get('sort', None) - params_nosort = [(k, v) for k,v in params_nopage if k != 'sort'] + params_nosort = [(k, v) for k, v in params_nopage if k != 'sort'] def _sort_by(fields): """ @@ -147,7 +160,7 @@ def _sort_by(fields): params = params_nosort[:] if fields: - sort_string = ', '.join( '%s %s' % f for f in fields ) + sort_string = ', '.join('%s %s' % f for f in fields) params.append(('sort', sort_string)) return search_url(params) @@ -155,7 +168,8 @@ def _sort_by(fields): if sort_by is None: c.sort_by_fields = [] else: - c.sort_by_fields = [ field.split()[0] for field in sort_by.split(',') ] + c.sort_by_fields = [field.split()[0] + for field in sort_by.split(',')] def pager_url(q=None, page=None): params = list(params_nopage) @@ -181,16 +195,16 @@ def pager_url(q=None, page=None): 'user': c.user or c.author, 'for_view': True} data_dict = { - 'q':q, - 'fq':fq, - 'facet.field':g.facets, - 'rows':limit, - 'start':(page-1)*limit, + 'q': q, + 'fq': fq, + 'facet.field': g.facets, + 'rows': limit, + 'start': (page - 1) * limit, 'sort': sort_by, - 'extras':search_extras + 'extras': search_extras } - query = get_action('package_search')(context,data_dict) + query = get_action('package_search')(context, data_dict) c.page = h.Page( collection=query['results'], @@ -208,13 +222,13 @@ def pager_url(q=None, page=None): c.facets = {} c.page = h.Page(collection=[]) - return render( self._search_template(package_type) ) + return render(self._search_template(package_type)) def _content_type_from_extension(self, ext): - ct,mu,ext = accept.parse_extension(ext) + ct, mu, ext = accept.parse_extension(ext) if not ct: return None, None, None, - return ct, ext, (NewTextTemplate,MarkupTemplate)[mu] + return ct, ext, (NewTextTemplate, MarkupTemplate)[mu] def _content_type_from_accept(self): """ @@ -223,20 +237,21 @@ def 
_content_type_from_accept(self): it accurately. TextTemplate must be used for non-xml templates whilst all that are some sort of XML should use MarkupTemplate. """ - ct,mu,ext = accept.parse_header(request.headers.get('Accept', '')) - return ct, ext, (NewTextTemplate,MarkupTemplate)[mu] - + ct, mu, ext = accept.parse_header(request.headers.get('Accept', '')) + return ct, ext, (NewTextTemplate, MarkupTemplate)[mu] def read(self, id, format='html'): if not format == 'html': - ctype,extension,loader = self._content_type_from_extension(format) + ctype, extension, loader = \ + self._content_type_from_extension(format) if not ctype: # An unknown format, we'll carry on in case it is a # revision specifier and re-constitute the original id id = "%s.%s" % (id, format) - ctype, format, loader = "text/html; charset=utf-8", "html", MarkupTemplate + ctype, format, loader = "text/html; charset=utf-8", "html", \ + MarkupTemplate else: - ctype,extension,loader = self._content_type_from_accept() + ctype, extension, loader = self._content_type_from_accept() response.headers['Content-Type'] = ctype @@ -261,13 +276,14 @@ def read(self, id, format='html'): except ValueError, e: abort(400, _('Invalid revision format: %r') % e.args) elif len(split) > 2: - abort(400, _('Invalid revision format: %r') % 'Too many "@" symbols') + abort(400, _('Invalid revision format: %r') % + 'Too many "@" symbols') #check if package exists try: c.pkg_dict = get_action('package_show')(context, data_dict) c.pkg = context['package'] - c.resources_json = json.dumps(c.pkg_dict.get('resources',[])) + c.resources_json = json.dumps(c.pkg_dict.get('resources', [])) except NotFound: abort(404, _('Dataset not found')) except NotAuthorized: @@ -281,25 +297,24 @@ def read(self, id, format='html'): # template context for the package/read.html template to retrieve # later. 
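# Illustrative sketch (not part of the patch): read() above maps a requested
# extension such as ".rdf" or ".n3" to a content type and Genshi loader, and
# folds an unrecognised suffix back into the id in case it is a revision
# specifier (e.g. a timestamp containing a dot). A minimal stand-alone sketch
# of that dispatch; FORMATS, resolve_read_format and the sample ids below are
# hypothetical, and loader classes are stood in for by name strings.
FORMATS = {
    'html': ('text/html; charset=utf-8', 'MarkupTemplate'),
    'rdf': ('application/rdf+xml; charset=utf-8', 'MarkupTemplate'),
    'n3': ('text/n3; charset=utf-8', 'NewTextTemplate'),
}

def resolve_read_format(id, fmt):
    '''Return (id, content type, loader name) for a requested format suffix.'''
    if fmt in FORMATS:
        ctype, loader = FORMATS[fmt]
        return id, ctype, loader
    # Unknown suffix: reconstitute the original id and fall back to HTML.
    ctype, loader = FORMATS['html']
    return '%s.%s' % (id, fmt), ctype, loader

# resolve_read_format('annakarenina', 'rdf')
#   -> ('annakarenina', 'application/rdf+xml; charset=utf-8', 'MarkupTemplate')
# resolve_read_format('annakarenina@2012-06-12T16:33:31', '530478')
#   -> ('annakarenina@2012-06-12T16:33:31.530478',
#       'text/html; charset=utf-8', 'MarkupTemplate')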
c.package_activity_stream = \ - get_action('package_activity_list_html')(context, - {'id': c.current_package_id}) + get_action('package_activity_list_html')( + context, {'id': c.current_package_id}) PackageSaver().render_package(c.pkg_dict, context) - template = self._read_template( package_type ) - template = template[:template.index('.')+1] + format - - return render( template, loader_class=loader) + template = self._read_template(package_type) + template = template[:template.index('.') + 1] + format + return render(template, loader_class=loader) def comments(self, id): package_type = self._get_package_type(id) context = {'model': model, 'session': model.Session, - 'user': c.user or c.author, 'extras_as_string': True,} + 'user': c.user or c.author, 'extras_as_string': True} #check if package exists try: - c.pkg_dict = get_action('package_show')(context, {'id':id}) + c.pkg_dict = get_action('package_show')(context, {'id': id}) c.pkg = context['package'] except NotFound: abort(404, _('Dataset not found')) @@ -311,33 +326,34 @@ def comments(self, id): #render the package PackageSaver().render_package(c.pkg_dict) - return render( self._comments_template( package_type ) ) - + return render(self._comments_template(package_type)) def history(self, id): package_type = self._get_package_type(id.split('@')[0]) if 'diff' in request.params or 'selected1' in request.params: try: - params = {'id':request.params.getone('pkg_name'), - 'diff':request.params.getone('selected1'), - 'oldid':request.params.getone('selected2'), + params = {'id': request.params.getone('pkg_name'), + 'diff': request.params.getone('selected1'), + 'oldid': request.params.getone('selected2'), } except KeyError, e: - if dict(request.params).has_key('pkg_name'): + if 'pkg_name' in dict(request.params): id = request.params.getone('pkg_name') - c.error = _('Select two revisions before doing the comparison.') + c.error = \ + _('Select two revisions before doing the comparison.') else: params['diff_entity'] = 'package' h.redirect_to(controller='revision', action='diff', **params) context = {'model': model, 'session': model.Session, 'user': c.user or c.author, - 'extras_as_string': True,} - data_dict = {'id':id} + 'extras_as_string': True} + data_dict = {'id': id} try: c.pkg_dict = get_action('package_show')(context, data_dict) - c.pkg_revisions = get_action('package_revision_list')(context, data_dict) + c.pkg_revisions = get_action('package_revision_list')(context, + data_dict) #TODO: remove # Still necessary for the authz check in group/layout.html c.pkg = context['package'] @@ -353,12 +369,15 @@ def history(self, id): from webhelpers.feedgenerator import Atom1Feed feed = Atom1Feed( title=_(u'CKAN Dataset Revision History'), - link=h.url_for(controller='revision', action='read', id=c.pkg_dict['name']), - description=_(u'Recent changes to CKAN Dataset: ') + (c.pkg_dict['title'] or ''), + link=h.url_for(controller='revision', action='read', + id=c.pkg_dict['name']), + description=_(u'Recent changes to CKAN Dataset: ') + + (c.pkg_dict['title'] or ''), language=unicode(get_lang()), ) for revision_dict in c.pkg_revisions: - revision_date = h.date_str_to_datetime(revision_dict['timestamp']) + revision_date = h.date_str_to_datetime( + revision_dict['timestamp']) try: dayHorizon = int(request.params.get('days')) except: @@ -367,10 +386,12 @@ def history(self, id): if dayAge >= dayHorizon: break if revision_dict['message']: - item_title = u'%s' % revision_dict['message'].split('\n')[0] + item_title = u'%s' % revision_dict['message'].\ + 
split('\n')[0] else: item_title = u'%s' % revision_dict['id'] - item_link = h.url_for(controller='revision', action='read', id=revision_dict['id']) + item_link = h.url_for(controller='revision', action='read', + id=revision_dict['id']) item_description = _('Log message: ') item_description += '%s' % (revision_dict['message'] or '') item_author_name = revision_dict['author'] @@ -386,19 +407,20 @@ def history(self, id): return feed.writeString('utf-8') c.related_count = len(c.pkg.related) - return render( self._history_template(c.pkg_dict.get('type',package_type))) + return render(self._history_template(c.pkg_dict.get('type', + package_type))) def new(self, data=None, errors=None, error_summary=None): package_type = self._guess_package_type(True) context = {'model': model, 'session': model.Session, 'user': c.user or c.author, 'extras_as_string': True, - 'save': 'save' in request.params,} + 'save': 'save' in request.params} - # Package needs to have a organization group in the call to check_access - # and also to save it + # Package needs to have a organization group in the call to + # check_access and also to save it try: - check_access('package_create',context) + check_access('package_create', context) except NotAuthorized: abort(401, _('Unauthorized to create a package')) @@ -407,23 +429,24 @@ def new(self, data=None, errors=None, error_summary=None): data = data or clean_dict(unflatten(tuplize_dict(parse_params( request.params, ignore_keys=[CACHE_PARAMETER])))) - c.resources_json = json.dumps(data.get('resources',[])) + c.resources_json = json.dumps(data.get('resources', [])) errors = errors or {} error_summary = error_summary or {} - vars = {'data': data, 'errors': errors, 'error_summary': error_summary} + vars = {'data': data, 'errors': errors, + 'error_summary': error_summary} c.errors_json = json.dumps(errors) self._setup_template_variables(context, {'id': id}) - # TODO: This check is to maintain backwards compatibility with the old way of creating - # custom forms. This behaviour is now deprecated. + # TODO: This check is to maintain backwards compatibility with the + # old way of creating custom forms. This behaviour is now deprecated. if hasattr(self, 'package_form'): c.form = render(self.package_form, extra_vars=vars) else: - c.form = render(self._package_form(package_type=package_type), extra_vars=vars) - return render( self._new_template(package_type)) - + c.form = render(self._package_form(package_type=package_type), + extra_vars=vars) + return render(self._new_template(package_type)) def edit(self, id, data=None, errors=None, error_summary=None): package_type = self._get_package_type(id) @@ -431,14 +454,14 @@ def edit(self, id, data=None, errors=None, error_summary=None): 'user': c.user or c.author, 'extras_as_string': True, 'save': 'save' in request.params, 'moderated': config.get('moderated'), - 'pending': True,} + 'pending': True} if context['save'] and not data: return self._save_edit(id, context) try: - c.pkg_dict = get_action('package_show')(context, {'id':id}) + c.pkg_dict = get_action('package_show')(context, {'id': id}) context['for_edit'] = True - old_data = get_action('package_show')(context, {'id':id}) + old_data = get_action('package_show')(context, {'id': id}) # old data is from the database and data is passed from the # user if there is a validation error. Use users data if there. 
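# Illustrative sketch (not part of the patch): history() above and the feeds
# controller both build Atom output with webhelpers.feedgenerator. A minimal,
# self-contained example of that pattern; the titles, links and dates are
# made-up values.
from datetime import datetime

import webhelpers.feedgenerator

feed = webhelpers.feedgenerator.Atom1Feed(
    title=u'CKAN Dataset Revision History',
    link=u'http://example.com/revision/read/example-dataset',
    description=u'Recent changes to an example dataset',
    language=u'en',
)
feed.add_item(
    title=u'Example revision message',
    link=u'http://example.com/revision/read/some-revision-id',
    description=u'Log message: example',
    pubdate=datetime(2012, 6, 20, 12, 0, 0),
)
print feed.writeString('utf-8')  # serialises the feed as Atom 1.0 XML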
data = data or old_data @@ -448,42 +471,46 @@ def edit(self, id, data=None, errors=None, error_summary=None): abort(404, _('Dataset not found')) c.pkg = context.get("package") - c.resources_json = json.dumps(data.get('resources',[])) + c.resources_json = json.dumps(data.get('resources', [])) try: - check_access('package_update',context) + check_access('package_update', context) except NotAuthorized, e: abort(401, _('User %r not authorized to edit %s') % (c.user, id)) errors = errors or {} - vars = {'data': data, 'errors': errors, 'error_summary': error_summary} + vars = {'data': data, 'errors': errors, + 'error_summary': error_summary} c.errors_json = json.dumps(errors) - self._setup_template_variables(context, {'id': id}, package_type=package_type) + self._setup_template_variables(context, {'id': id}, + package_type=package_type) c.related_count = len(c.pkg.related) - # TODO: This check is to maintain backwards compatibility with the old way of creating - # custom forms. This behaviour is now deprecated. + # TODO: This check is to maintain backwards compatibility with the + # old way of creating custom forms. This behaviour is now deprecated. if hasattr(self, 'package_form'): c.form = render(self.package_form, extra_vars=vars) else: - c.form = render(self._package_form(package_type=package_type), extra_vars=vars) + c.form = render(self._package_form(package_type=package_type), + extra_vars=vars) if (c.action == u'editresources'): - return render('package/editresources.html') + return render('package/editresources.html') else: - return render('package/edit.html') + return render('package/edit.html') def editresources(self, id, data=None, errors=None, error_summary=None): '''Hook method made available for routing purposes.''' - return self.edit(id,data,errors,error_summary) + return self.edit(id, data, errors, error_summary) def read_ajax(self, id, revision=None): - package_type=self._get_package_type(id) + package_type = self._get_package_type(id) context = {'model': model, 'session': model.Session, 'user': c.user or c.author, 'extras_as_string': True, - 'schema': self._form_to_db_schema(package_type=package_type), + 'schema': self._form_to_db_schema(package_type= + package_type), 'revision_id': revision} try: data = get_action('package_show')(context, {'id': id}) @@ -496,7 +523,8 @@ def read_ajax(self, id, revision=None): abort(404, _('Dataset not found')) ## hack as db_to_form schema should have this - data['tag_string'] = ', '.join([tag['name'] for tag in data.get('tags', [])]) + data['tag_string'] = ', '.join([tag['name'] for tag + in data.get('tags', [])]) data.pop('tags') data = flatten_to_string_key(data) response.headers['Content-Type'] = 'application/json;charset=utf-8' @@ -506,16 +534,16 @@ def history_ajax(self, id): context = {'model': model, 'session': model.Session, 'user': c.user or c.author, - 'extras_as_string': True,} - data_dict = {'id':id} + 'extras_as_string': True} + data_dict = {'id': id} try: - pkg_revisions = get_action('package_revision_list')(context, data_dict) + pkg_revisions = get_action('package_revision_list')( + context, data_dict) except NotAuthorized: abort(401, _('Unauthorized to read package %s') % '') except NotFound: abort(404, _('Dataset not found')) - data = [] approved = False for num, revision in enumerate(pkg_revisions): @@ -563,7 +591,7 @@ def _save_new(self, context, package_type=None): except DataError: abort(400, _(u'Integrity Error')) except SearchIndexError, e: - abort(500, _(u'Unable to add package to search index.') + repr(e.args)) + 
abort(500, _(u'Unable to add package to search index.')) except ValidationError, e: errors = e.error_dict error_summary = e.error_summary @@ -582,7 +610,8 @@ def _save_edit(self, name_or_id, context): data_dict['id'] = name_or_id pkg = get_action('package_update')(context, data_dict) if request.params.get('save', '') == 'Approve': - get_action('make_latest_pending_package_active')(context, data_dict) + get_action('make_latest_pending_package_active')( + context, data_dict) c.pkg = context['package'] c.pkg_dict = pkg @@ -594,7 +623,7 @@ def _save_edit(self, name_or_id, context): except DataError: abort(400, _(u'Integrity Error')) except SearchIndexError, e: - abort(500, _(u'Unable to update search index.') + repr(e.args)) + abort(500, _(u'Unable to update search index.')) except ValidationError, e: errors = e.error_dict error_summary = e.error_summary @@ -609,7 +638,7 @@ def _form_save_redirect(self, pkgname, action): ''' assert action in ('new', 'edit') url = request.params.get('return_to') or \ - config.get('package_%s_return_url' % action) + config.get('package_%s_return_url' % action) if url: url = url.replace('', pkgname) else: @@ -630,18 +659,21 @@ def authz(self, id): pkg = model.Package.get(id) if pkg is None: abort(404, gettext('Dataset not found')) - c.pkg = pkg # needed to add in the tab bar to the top of the auth page + # needed to add in the tab bar to the top of the auth page + c.pkg = pkg c.pkgname = pkg.name c.pkgtitle = pkg.title try: - context = {'model':model,'user':c.user or c.author, 'package':pkg} - check_access('package_edit_permissions',context) + context = {'model': model, 'user': c.user or c.author, + 'package': pkg} + check_access('package_edit_permissions', context) c.authz_editable = True c.pkg_dict = get_action('package_show')(context, {'id': id}) except NotAuthorized: c.authz_editable = False if not c.authz_editable: - abort(401, gettext('User %r not authorized to edit %s authorizations') % (c.user, id)) + abort(401, gettext('User %r not authorized to edit %s ' + 'authorizations') % (c.user, id)) roles = self._handle_update_of_authz(pkg) self._prepare_authz_info_for_render(roles) @@ -659,13 +691,13 @@ def autocomplete(self): context = {'model': model, 'session': model.Session, 'user': c.user or c.author} - data_dict = {'q':q} - - packages = get_action('package_autocomplete')(context,data_dict) + data_dict = {'q': q} + packages = get_action('package_autocomplete')(context, data_dict) pkg_list = [] for pkg in packages: - pkg_list.append('%s|%s' % (pkg['match_displayed'].replace('|', ' '), pkg['name'])) + pkg_list.append('%s|%s' % (pkg['match_displayed']. + replace('|', ' '), pkg['name'])) return '\n'.join(pkg_list) def _render_edit_form(self, fs, params={}, clear_session=False): @@ -684,13 +716,15 @@ def _render_edit_form(self, fs, params={}, clear_session=False): # with the log comments to find out. 
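# Illustrative sketch (not part of the patch): autocomplete() above returns
# one "displayed|name" pair per line, replacing any literal '|' in the
# displayed text so the format stays parseable. format_autocomplete and the
# sample data below are made up.
def format_autocomplete(packages):
    '''Serialise package matches in the pipe-separated autocomplete format.'''
    pkg_list = []
    for pkg in packages:
        displayed = pkg['match_displayed'].replace('|', ' ')
        pkg_list.append('%s|%s' % (displayed, pkg['name']))
    return '\n'.join(pkg_list)

# format_autocomplete([{'match_displayed': 'Gold|Prices (gold-prices)',
#                       'name': 'gold-prices'}])
#   returns 'Gold Prices (gold-prices)|gold-prices'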
if clear_session: # log to see if clearing the session is ever required - if model.Session.new or model.Session.dirty or model.Session.deleted: + if model.Session.new or model.Session.dirty or \ + model.Session.deleted: log.warn('Expunging session changes which were not expected: ' '%r %r %r', (model.Session.new, model.Session.dirty, model.Session.deleted)) try: model.Session.rollback() - except AttributeError: # older SQLAlchemy versions + except AttributeError: + # older SQLAlchemy versions model.Session.clear() edit_form_html = fs.render() c.form = h.literal(edit_form_html) @@ -714,7 +748,8 @@ def resource_read(self, id, resource_id): 'user': c.user or c.author} try: - c.resource = get_action('resource_show')(context, {'id': resource_id}) + c.resource = get_action('resource_show')(context, + {'id': resource_id}) c.package = get_action('package_show')(context, {'id': id}) # required for nav menu c.pkg = context['package'] @@ -727,11 +762,12 @@ def resource_read(self, id, resource_id): # get package license info license_id = c.package.get('license_id') try: - c.package['isopen'] = model.Package.get_license_register()[license_id].isopen() + c.package['isopen'] = model.Package.\ + get_license_register()[license_id].isopen() except KeyError: c.package['isopen'] = False c.datastore_api = h.url_for('datastore_read', id=c.resource.get('id'), - qualified=True) + qualified=True) c.related_count = len(c.pkg.related) return render('package/resource_read.html') @@ -785,12 +821,14 @@ def resource_embedded_dataviewer(self, id, resource_id, 'user': c.user or c.author} try: - c.resource = get_action('resource_show')(context, {'id': resource_id}) + c.resource = get_action('resource_show')(context, + {'id': resource_id}) c.package = get_action('package_show')(context, {'id': id}) c.resource_json = json.dumps(c.resource) # double check that the resource belongs to the specified package - if not c.resource['id'] in [ r['id'] for r in c.package['resources'] ]: + if not c.resource['id'] in [r['id'] + for r in c.package['resources']]: raise NotFound except NotFound: @@ -802,7 +840,8 @@ def resource_embedded_dataviewer(self, id, resource_id, state_version = int(request.params.get('state_version', '1')) recline_state = self._parse_recline_state(request.params) if recline_state is None: - abort(400, ('"state" parameter must be a valid recline state (version %d)' % state_version)) + abort(400, ('"state" parameter must be a valid recline ' + 'state (version %d)' % state_version)) c.recline_state = json.dumps(recline_state) @@ -818,7 +857,7 @@ def _parse_recline_state(self, params): return None recline_state = {} - for k,v in request.params.items(): + for k, v in request.params.items(): try: v = json.loads(v) except ValueError: @@ -830,9 +869,11 @@ def _parse_recline_state(self, params): recline_state['readOnly'] = True # Ensure only the currentView is available + # default to grid view if none specified if not recline_state.get('currentView', None): - recline_state['currentView'] = 'grid' # default to grid view if none specified + recline_state['currentView'] = 'grid' for k in recline_state.keys(): - if k.startswith('view-') and not k.endswith(recline_state['currentView']): + if k.startswith('view-') and \ + not k.endswith(recline_state['currentView']): recline_state.pop(k) return recline_state diff --git a/ckan/controllers/related.py b/ckan/controllers/related.py index f2aeff0448f..e90b4be4938 100644 --- a/ckan/controllers/related.py +++ b/ckan/controllers/related.py @@ -7,6 +7,7 @@ c = base.c + class 
RelatedController(base.BaseController): def list(self, id): @@ -26,13 +27,12 @@ def list(self, id): try: c.pkg_dict = logic.get_action('package_show')(context, data_dict) c.pkg = context['package'] - c.resources_json = h.json.dumps(c.pkg_dict.get('resources',[])) + c.resources_json = h.json.dumps(c.pkg_dict.get('resources', [])) except logic.NotFound: base.abort(404, base._('Dataset not found')) except logic.NotAuthorized: base.abort(401, base._('Unauthorized to read package %s') % id) c.related_count = len(c.pkg.related) - - return base.render( "package/related_list.html") - + c.action = 'related' + return base.render("package/related_list.html") diff --git a/ckan/controllers/revision.py b/ckan/controllers/revision.py index 71f837ff475..e2f9af549c1 100644 --- a/ckan/controllers/revision.py +++ b/ckan/controllers/revision.py @@ -9,22 +9,23 @@ from ckan.lib.helpers import Page import ckan.authz + class RevisionController(BaseController): def __before__(self, action, **env): BaseController.__before__(self, action, **env) - context = {'model':model,'user': c.user or c.author} + context = {'model': model, 'user': c.user or c.author} if c.user: try: - check_access('revision_change_state',context) + check_access('revision_change_state', context) c.revision_change_state_allowed = True except NotAuthorized: c.revision_change_state_allowed = False else: c.revision_change_state_allowed = False try: - check_access('site_read',context) + check_access('site_read', context) except NotAuthorized: abort(401, _('Not authorized to see this page')) @@ -53,14 +54,15 @@ def list(self): since_when = datetime.now() + ourtimedelta revision_query = model.repo.history() revision_query = revision_query.filter( - model.Revision.timestamp>=since_when).filter( - model.Revision.id!=None) + model.Revision.timestamp >= since_when).filter( + model.Revision.id != None) revision_query = revision_query.limit(maxresults) for revision in revision_query: package_indications = [] revision_changes = model.repo.list_changes(revision) resource_revisions = revision_changes[model.Resource] - resource_group_revisions = revision_changes[model.ResourceGroup] + resource_group_revisions = \ + revision_changes[model.ResourceGroup] package_extra_revisions = revision_changes[model.PackageExtra] for package in revision.packages: number = len(package.all_revisions) @@ -71,23 +73,29 @@ def list(self): if pr.revision.id == revision.id: package_revision = pr break - if package_revision and package_revision.state == model.State.DELETED: + if package_revision and package_revision.state == \ + model.State.DELETED: transition = 'deleted' elif package_revision and count == number: transition = 'created' else: transition = 'updated' for resource_revision in resource_revisions: - if resource_revision.continuity.resource_group.package_id == package.id: + if resource_revision.continuity.resource_group.\ + package_id == package.id: transition += ':resources' break - for resource_group_revision in resource_group_revisions: - if resource_group_revision.package_id == package.id: + for resource_group_revision in \ + resource_group_revisions: + if resource_group_revision.package_id == \ + package.id: transition += ':resource_group' break for package_extra_revision in package_extra_revisions: - if package_extra_revision.package_id == package.id: - if package_extra_revision.key == 'date_updated': + if package_extra_revision.package_id == \ + package.id: + if package_extra_revision.key == \ + 'date_updated': transition += ':date_updated' break indication = "%s:%s" 
% (package.name, transition) @@ -127,13 +135,16 @@ def read(self, id=None): c.revision = model.Session.query(model.Revision).get(id) if c.revision is None: abort(404) - - pkgs = model.Session.query(model.PackageRevision).filter_by(revision=c.revision) - c.packages = [ pkg.continuity for pkg in pkgs ] - pkgtags = model.Session.query(model.PackageTagRevision).filter_by(revision=c.revision) - c.pkgtags = [ pkgtag.continuity for pkgtag in pkgtags ] - grps = model.Session.query(model.GroupRevision).filter_by(revision=c.revision) - c.groups = [ grp.continuity for grp in grps ] + + pkgs = model.Session.query(model.PackageRevision).\ + filter_by(revision=c.revision) + c.packages = [pkg.continuity for pkg in pkgs] + pkgtags = model.Session.query(model.PackageTagRevision).\ + filter_by(revision=c.revision) + c.pkgtags = [pkgtag.continuity for pkgtag in pkgtags] + grps = model.Session.query(model.GroupRevision).\ + filter_by(revision=c.revision) + c.groups = [grp.continuity for grp in grps] return render('revision/read.html') def diff(self, id=None): @@ -143,7 +154,7 @@ def diff(self, id=None): request.params.getone('oldid')) c.revision_to = model.Session.query(model.Revision).get( request.params.getone('diff')) - + c.diff_entity = request.params.get('diff_entity') if c.diff_entity == 'package': c.pkg = model.Package.by_name(id) @@ -153,7 +164,7 @@ def diff(self, id=None): diff = c.group.diff(c.revision_to, c.revision_from) else: abort(400) - + c.diff = diff.items() c.diff.sort() return render('revision/diff.html') @@ -176,6 +187,4 @@ def edit(self, id=None): model.Session.commit() h.flash_success(_('Revision updated')) h.redirect_to( - h.url_for(controller='revision', action='read', id=id) - ) - + h.url_for(controller='revision', action='read', id=id)) diff --git a/ckan/controllers/storage.py b/ckan/controllers/storage.py index 594d91594bc..3f605117650 100644 --- a/ckan/controllers/storage.py +++ b/ckan/controllers/storage.py @@ -34,6 +34,8 @@ key_prefix = config.get('ckan.storage.key_prefix', 'file/') _eq_re = re.compile(r"^(.*)(=[0-9]*)$") + + def fix_stupid_pylons_encoding(data): """ Fix an apparent encoding problem when calling request.body @@ -46,6 +48,7 @@ def fix_stupid_pylons_encoding(data): data = m.groups()[0] return data + def create_pairtree_marker(folder): """ Creates the pairtree marker for tests if it doesn't exist """ if not folder[:-1] == '/': @@ -59,7 +62,7 @@ def create_pairtree_marker(folder): if os.path.exists(target): return - open( target, 'wb').close() + open(target, 'wb').close() def get_ofs(): @@ -67,14 +70,14 @@ def get_ofs(): """ storage_backend = config['ofs.impl'] kw = {} - for k,v in config.items(): + for k, v in config.items(): if not k.startswith('ofs.') or k == 'ofs.impl': continue kw[k[4:]] = v # Make sure we have created the marker file to avoid pairtree issues if storage_backend == 'pairtree' and 'storage_dir' in kw: - create_pairtree_marker( kw['storage_dir'] ) + create_pairtree_marker(kw['storage_dir']) ofs = get_impl(storage_backend)(**kw) return ofs @@ -93,13 +96,14 @@ def authorize(method, bucket, key, user, ofs): abort(409) # now check user stuff username = user.name if user else '' - is_authorized = authz.Authorizer.is_authorized(username, 'file-upload', model.System()) + is_authorized = authz.Authorizer.is_authorized(username, + 'file-upload', + model.System()) if not is_authorized: h.flash_error('Not authorized to upload files.') abort(401) - class StorageController(BaseController): '''Upload to storage backend. 
''' @@ -111,7 +115,6 @@ def ofs(self): StorageController._ofs_impl = get_ofs() return StorageController._ofs_impl - def upload(self): label = key_prefix + request.params.get('filepath', str(uuid.uuid4())) c.data = { @@ -141,19 +144,19 @@ def upload_handle(self): params['uploaded-by'] = c.userobj.name if c.userobj else "" self.ofs.put_stream(bucket_id, label, stream.file, params) - success_action_redirect = h.url_for('storage_upload_success', qualified=True, - bucket=BUCKET, label=label) + success_action_redirect = h.url_for('storage_upload_success', + qualified=True, + bucket=BUCKET, label=label) # Do not redirect here as it breaks js file uploads (get infinite loop # in FF and crash in Chrome) return self.success(label) def success(self, label=None): - label=request.params.get('label', label) + label = request.params.get('label', label) h.flash_success('Upload successful') c.file_url = h.url_for('storage_file', - label=label, - qualified=True - ) + label=label, + qualified=True) c.upload_url = h.url_for('storage_upload') return render('storage/success.html') @@ -169,7 +172,7 @@ def file(self, label): label = label[:-1] # This may be best being cached_url until we have moved it into # permanent storage - file_url = h.url_for( 'storage_file', label=label ) + file_url = h.url_for('storage_file', label=label) h.redirect_to(file_url) else: abort(404) @@ -180,14 +183,13 @@ def file(self, label): filepath = file_url[len("file://"):] headers = { # 'Content-Disposition':'attachment; filename="%s"' % label, - 'Content-Type':metadata.get('_format', 'text/plain')} + 'Content-Type': metadata.get('_format', 'text/plain')} fapp = FileApp(filepath, headers=None, **headers) return fapp(request.environ, self.start_response) else: h.redirect_to(file_url) - class StorageAPIController(BaseController): _ofs_impl = None @@ -202,20 +204,18 @@ def ofs(self): def index(self): info = { 'metadata/{label}': { - 'description': 'Get or set metadata for this item in storage', - }, + 'description': 'Get or set metadata for this ' + 'item in storage', }, 'auth/request/{label}': { - 'description': self.auth_request.__doc__, - }, + 'description': self.auth_request.__doc__, }, 'auth/form/{label}': { - 'description': self.auth_form.__doc__, - } - } + 'description': self.auth_form.__doc__, }} return info def set_metadata(self, label): bucket = BUCKET - if not label.startswith("/"): label = "/" + label + if not label.startswith("/"): + label = "/" + label try: data = fix_stupid_pylons_encoding(request.body) @@ -262,12 +262,13 @@ def get_metadata(self, label): if storage_backend in ['google', 's3']: if not label.startswith("/"): label = "/" + label - url = "https://%s/%s%s" % (self.ofs.conn.server_name(), bucket, label) + url = "https://%s/%s%s" % (self.ofs.conn.server_name(), + bucket, label) else: url = h.url_for('storage_file', - label=label, - qualified=False - ) + label=label, + qualified=False + ) if not self.ofs.exists(bucket, label): abort(404) metadata = self.ofs.get_metadata(bucket, label) @@ -319,32 +320,30 @@ def auth_request(self, label): authorize(method, bucket, label, c.userobj, self.ofs) http_request = self.ofs.authenticate_request(method, bucket, label, - headers) + headers) return { 'host': http_request.host, 'method': http_request.method, 'path': http_request.path, - 'headers': http_request.headers - } + 'headers': http_request.headers} def _get_remote_form_data(self, label): method = 'POST' - content_length_range = int( - config.get('ckan.storage.max_content_length', - 50000000)) + content_length_range = \ + 
int(config.get('ckan.storage.max_content_length', 50000000)) acl = 'public-read' - fields = [ { - 'name': self.ofs.conn.provider.metadata_prefix + 'uploaded-by', - 'value': c.userobj.id - }] - conditions = [ '{"%s": "%s"}' % (x['name'], x['value']) for x in - fields ] + fields = [{ + 'name': self.ofs.conn.provider.metadata_prefix + 'uploaded-by', + 'value': c.userobj.id}] + conditions = ['{"%s": "%s"}' % (x['name'], x['value']) for x in + fields] # In FF redirect to this breaks js upload as FF attempts to open file # (presumably because mimetype = javascript) and this stops js - # success_action_redirect = h.url_for('storage_api_get_metadata', qualified=True, - # label=label) - success_action_redirect = h.url_for('storage_upload_success_empty', qualified=True, - label=label) + # success_action_redirect = h.url_for('storage_api_get_metadata', + # qualified=True, label=label) + success_action_redirect = h.url_for('storage_upload_success_empty', + qualified=True, + label=label) data = self.ofs.conn.build_post_form_args( BUCKET, label, @@ -354,11 +353,11 @@ def _get_remote_form_data(self, label): acl=acl, fields=fields, conditions=conditions - ) + ) # HACK: fix up some broken stuff from boto # e.g. should not have content-length-range in list of fields! storage_backend = config['ofs.impl'] - for idx,field in enumerate(data['fields']): + for idx, field in enumerate(data['fields']): if storage_backend == 'google': if field['name'] == 'AWSAccessKeyId': field['name'] = 'GoogleAccessId' @@ -408,4 +407,3 @@ def auth_form(self, label): authorize(method, bucket, label, c.userobj, self.ofs) data = self._get_form_data(label) return data - diff --git a/ckan/controllers/tag.py b/ckan/controllers/tag.py index 06bccd768ca..ef3ff5721c5 100644 --- a/ckan/controllers/tag.py +++ b/ckan/controllers/tag.py @@ -11,13 +11,14 @@ LIMIT = 25 + class TagController(BaseController): def __before__(self, action, **env): BaseController.__before__(self, action, **env) try: - context = {'model':model,'user': c.user or c.author} - check_access('site_read',context) + context = {'model': model, 'user': c.user or c.author} + check_access('site_read', context) except NotAuthorized: abort(401, _('Not authorized to see this page')) @@ -33,18 +34,18 @@ def index(self): page = int(request.params.get('page', 1)) data_dict['q'] = c.q data_dict['limit'] = LIMIT - data_dict['offset'] = (page-1)*LIMIT + data_dict['offset'] = (page - 1) * LIMIT data_dict['return_objects'] = True - - results = get_action('tag_list')(context,data_dict) - + + results = get_action('tag_list')(context, data_dict) + if c.q: c.page = h.Page( - collection=results, - page=page, - item_count=len(results), - items_per_page=LIMIT - ) + collection=results, + page=page, + item_count=len(results), + items_per_page=LIMIT + ) c.page.items = results else: c.page = AlphaPage( @@ -53,18 +54,17 @@ def index(self): alpha_attribute='name', other_text=_('Other'), ) - + return render('tag/index.html') def read(self, id): context = {'model': model, 'session': model.Session, - 'user': c.user or c.author, 'for_view': True} - - data_dict = {'id':id} + 'user': c.user or c.author, 'for_view': True} + + data_dict = {'id': id} try: - c.tag = get_action('tag_show')(context,data_dict) + c.tag = get_action('tag_show')(context, data_dict) except NotFound: abort(404, _('Tag not found')) return render('tag/read.html') - diff --git a/ckan/controllers/template.py b/ckan/controllers/template.py index 04468f7d68b..41e8a49f0d3 100644 --- a/ckan/controllers/template.py +++ 
b/ckan/controllers/template.py @@ -1,6 +1,7 @@ from ckan.lib.base import * from genshi.template.loader import TemplateNotFound + class TemplateController(BaseController): def view(self, url): @@ -35,4 +36,3 @@ def view(self, url): return render(url) except TemplateNotFound: abort(404) - diff --git a/ckan/controllers/user.py b/ckan/controllers/user.py index be659ad88d9..4ba31b02fcf 100644 --- a/ckan/controllers/user.py +++ b/ckan/controllers/user.py @@ -5,6 +5,7 @@ from urllib import quote import ckan.misc +import ckan.lib.i18n from ckan.lib.base import * from ckan.lib import mailer from ckan.authz import Authorizer @@ -17,15 +18,16 @@ log = logging.getLogger(__name__) + class UserController(BaseController): def __before__(self, action, **env): BaseController.__before__(self, action, **env) try: - context = {'model':model,'user': c.user or c.author} - check_access('site_read',context) + context = {'model': model, 'user': c.user or c.author} + check_access('site_read', context) except NotAuthorized: - if c.action not in ('login','request_reset','perform_reset',): + if c.action not in ('login', 'request_reset', 'perform_reset',): abort(401, _('Not authorized to see this page')) ## hooks for subclasses @@ -62,27 +64,28 @@ def _setup_template_variables(self, context, data_dict): def _get_repoze_handler(self, handler_name): '''Returns the URL that repoze.who will respond to and perform a login or logout.''' - return getattr(request.environ['repoze.who.plugins']['friendlyform'], handler_name) - + return getattr(request.environ['repoze.who.plugins']['friendlyform'], + handler_name) + def index(self): LIMIT = 20 page = int(request.params.get('page', 1)) - c.q = request.params.get('q', '') + c.q = request.params.get('q', '') c.order_by = request.params.get('order_by', 'name') context = {'model': model, 'user': c.user or c.author, 'return_query': True} - data_dict = {'q':c.q, - 'order_by':c.order_by} + data_dict = {'q': c.q, + 'order_by': c.order_by} try: - check_access('user_list',context, data_dict) + check_access('user_list', context, data_dict) except NotAuthorized: abort(401, _('Not authorized to see this page')) - users_list = get_action('user_list')(context,data_dict) + users_list = get_action('user_list')(context, data_dict) c.page = h.Page( collection=users_list, @@ -95,11 +98,11 @@ def index(self): def read(self, id=None): context = {'model': model, 'session': model.Session, - 'user': c.user or c.author, 'for_view': True} - data_dict = {'id':id, - 'user_obj':c.userobj} + 'user': c.user or c.author, 'for_view': True} + data_dict = {'id': id, + 'user_obj': c.userobj} try: - check_access('user_show',context, data_dict) + check_access('user_show', context, data_dict) except NotAuthorized: abort(401, _('Not authorized to see this page')) @@ -107,14 +110,16 @@ def read(self, id=None): c.about_formatted = self._format_about(c.user_dict['about']) c.user_activity_stream = get_action('user_activity_list_html')( - context, {'id':c.user_dict['id']}) + context, {'id': c.user_dict['id']}) return render('user/read.html') def me(self, locale=None): if not c.user: - h.redirect_to(locale=locale, controller='user', action='login', id=None) + h.redirect_to(locale=locale, controller='user', + action='login', id=None) user_ref = c.userobj.get_reference_preferred_for_uri() - h.redirect_to(locale=locale, controller='user', action='read', id=user_ref) + h.redirect_to(locale=locale, controller='user', action='dashboard', + id=user_ref) def register(self, data=None, errors=None, error_summary=None): return 
self.new(data, errors, error_summary) @@ -129,7 +134,7 @@ def new(self, data=None, errors=None, error_summary=None): 'save': 'save' in request.params} try: - check_access('user_create',context) + check_access('user_create', context) except NotAuthorized: abort(401, _('Unauthorized to create a user')) @@ -171,7 +176,8 @@ def _save_new(self, context): error_summary = e.error_summary return self.new(data_dict, errors, error_summary) if not c.user: - # Redirect to a URL picked up by repoze.who which performs the login + # Redirect to a URL picked up by repoze.who which performs the + # login login_url = self._get_repoze_handler('login_handler_path') h.redirect_to('%s?login=%s&password=%s' % ( login_url, @@ -180,7 +186,9 @@ def _save_new(self, context): else: # #1799 User has managed to register whilst logged in - warn user # they are not re-logged in as new user. - h.flash_success(_('User "%s" is now registered but you are still logged in as "%s" from before') % (data_dict['name'], c.user)) + h.flash_success(_('User "%s" is now registered but you are still ' + 'logged in as "%s" from before') % + (data_dict['name'], c.user)) return render('user/logout_first.html') def edit(self, id=None, data=None, errors=None, error_summary=None): @@ -218,14 +226,18 @@ def edit(self, id=None, data=None, errors=None, error_summary=None): user_obj = context.get('user_obj') - if not (ckan.authz.Authorizer().is_sysadmin(unicode(c.user)) or c.user == user_obj.name): - abort(401, _('User %s not authorized to edit %s') % (str(c.user), id)) + if not (ckan.authz.Authorizer().is_sysadmin(unicode(c.user)) + or c.user == user_obj.name): + abort(401, _('User %s not authorized to edit %s') % + (str(c.user), id)) errors = errors or {} vars = {'data': data, 'errors': errors, 'error_summary': error_summary} self._setup_template_variables({'model': model, - 'session': model.Session, 'user': c.user or c.author}, data_dict) + 'session': model.Session, + 'user': c.user or c.author}, + data_dict) c.is_myself = True c.form = render(self.edit_user_form, extra_vars=vars) @@ -252,12 +264,12 @@ def _save_edit(self, id, context): error_summary = e.error_summary return self.edit(id, data_dict, errors, error_summary) - def login(self): lang = session.pop('lang', None) if lang: session.save() - return h.redirect_to(locale=str(lang), controller='user', action='login') + return h.redirect_to(locale=str(lang), controller='user', + action='login') if 'error' in request.params: h.flash_error(request.params['error']) @@ -267,7 +279,8 @@ def login(self): g.openid_enabled = False if not c.user: - c.login_handler = h.url_for(self._get_repoze_handler('login_handler_path')) + c.login_handler = h.url_for( + self._get_repoze_handler('login_handler_path')) return render('user/login.html') else: return render('user/logout_first.html') @@ -276,20 +289,27 @@ def logged_in(self): # we need to set the language via a redirect lang = session.pop('lang', None) session.save() + + # we need to set the language explicitly here or the flash + # messages will not be translated. + ckan.lib.i18n.set_lang(lang) + if c.user: context = {'model': model, 'user': c.user} - data_dict = {'id':c.user} + data_dict = {'id': c.user} - user_dict = get_action('user_show')(context,data_dict) + user_dict = get_action('user_show')(context, data_dict) - h.flash_success(_("%s is now logged in") % user_dict['display_name']) + h.flash_success(_("%s is now logged in") % + user_dict['display_name']) return self.me(locale=lang) else: err = _('Login failed. 
Bad username or password.') if g.openid_enabled: - err += _(' (Or if using OpenID, it hasn\'t been associated with a user account.)') + err += _(' (Or if using OpenID, it hasn\'t been associated ' + 'with a user account.)') h.flash_error(err) h.redirect_to(locale=lang, controller='user', action='login') @@ -322,10 +342,10 @@ def request_reset(self): context = {'model': model, 'user': c.user} - data_dict = {'id':id} + data_dict = {'id': id} user_obj = None try: - user_dict = get_action('user_show')(context,data_dict) + user_dict = get_action('user_show')(context, data_dict) user_obj = context['user_obj'] except NotFound: # Try searching the user @@ -333,13 +353,14 @@ def request_reset(self): data_dict['q'] = id if id and len(id) > 2: - user_list = get_action('user_list')(context,data_dict) + user_list = get_action('user_list')(context, data_dict) if len(user_list) == 1: - # This is ugly, but we need the user object for the mailer, + # This is ugly, but we need the user object for the + # mailer, # and user_list does not return them del data_dict['q'] data_dict['id'] = user_list[0]['id'] - user_dict = get_action('user_show')(context,data_dict) + user_dict = get_action('user_show')(context, data_dict) user_obj = context['user_obj'] elif len(user_list) > 1: h.flash_error(_('"%s" matched several users') % (id)) @@ -351,20 +372,22 @@ def request_reset(self): if user_obj: try: mailer.send_reset_link(user_obj) - h.flash_success(_('Please check your inbox for a reset code.')) + h.flash_success(_('Please check your inbox for ' + 'a reset code.')) h.redirect_to('/') except mailer.MailerException, e: - h.flash_error(_('Could not send reset link: %s') % unicode(e)) + h.flash_error(_('Could not send reset link: %s') % + unicode(e)) return render('user/request_reset.html') def perform_reset(self, id): context = {'model': model, 'session': model.Session, 'user': c.user} - data_dict = {'id':id} + data_dict = {'id': id} try: - user_dict = get_action('user_show')(context,data_dict) + user_dict = get_action('user_show')(context, data_dict) user_obj = context['user_obj'] except NotFound, e: abort(404, _('User not found')) @@ -391,7 +414,7 @@ def perform_reset(self, id): except DataError: h.flash_error(_(u'Integrity Error')) except ValidationError, e: - h.flash_error(u'%r'% e.error_dict) + h.flash_error(u'%r' % e.error_dict) except ValueError, ve: h.flash_error(unicode(ve)) @@ -403,7 +426,8 @@ def _format_about(self, about): try: html = genshi.HTML(about_formatted) except genshi.ParseError, e: - log.error('Could not print "about" field Field: %r Error: %r', about, e) + log.error('Could not print "about" field Field: %r Error: %r', + about, e) html = _('Error: Could not parse About text') return html @@ -412,16 +436,25 @@ def _get_form_password(self): password2 = request.params.getone('password2') if (password1 is not None and password1 != ''): if not len(password1) >= 4: - raise ValueError(_("Your password must be 4 characters or longer.")) + raise ValueError(_('Your password must be 4 ' + 'characters or longer.')) elif not password1 == password2: - raise ValueError(_("The passwords you entered do not match.")) + raise ValueError(_('The passwords you entered' + ' do not match.')) return password1 def followers(self, id=None): context = {'model': model, 'session': model.Session, - 'user': c.user or c.author, 'for_view': True} - data_dict = {'id':id, 'user_obj':c.userobj} + 'user': c.user or c.author, 'for_view': True} + data_dict = {'id': id, 'user_obj': c.userobj} self._setup_template_variables(context, 
data_dict) - c.followers = get_action('user_follower_list')(context, - {'id':c.user_dict['id']}) + f = get_action('user_follower_list') + c.followers = f(context, {'id': c.user_dict['id']}) return render('user/followers.html') + + def dashboard(self, id=None): + context = {'model': model, 'session': model.Session, + 'user': c.user or c.author, 'for_view': True} + data_dict = {'id': id, 'user_obj': c.userobj} + self._setup_template_variables(context, data_dict) + return render('user/dashboard.html') diff --git a/ckan/lib/accept.py b/ckan/lib/accept.py index dcd07490510..0817e7d5b0c 100644 --- a/ckan/lib/accept.py +++ b/ckan/lib/accept.py @@ -9,29 +9,30 @@ accept_re = re.compile("^(?P[^;]+)[ \t]*(;[ \t]*q=(?P[0-9.]+)){0,1}$") accept_types = { -# Name : ContentType, Is Markup?, Extension - "text/html" : ("text/html; charset=utf-8", True, 'html'), - "text/n3" : ("text/n3; charset=utf-8", False, 'n3'), - "text/plain" : ("text/plain; charset=utf-8", False, 'txt'), - "application/rdf+xml" : ("application/rdf+xml; charset=utf-8", True, 'rdf'), + # Name : ContentType, Is Markup?, Extension + "text/html": ("text/html; charset=utf-8", True, 'html'), + "text/n3": ("text/n3; charset=utf-8", False, 'n3'), + "text/plain": ("text/plain; charset=utf-8", False, 'txt'), + "application/rdf+xml": ("application/rdf+xml; charset=utf-8", True, 'rdf'), } accept_by_extension = { "rdf": "application/rdf+xml", - "n3" : "text/n3" + "n3": "text/n3" } -def parse_extension( file_ext ): + +def parse_extension(file_ext): """ If provided an extension, this function will return the details for that extension, if we know about it. """ - ext = accept_by_extension.get(file_ext,None) + ext = accept_by_extension.get(file_ext, None) if ext: return accept_types[ext] - return (None,None,None,) + return (None, None, None,) -def parse_header( accept_header='' ): +def parse_header(accept_header=''): """ Parses the supplied accept header and tries to determine which content types we can provide the response in that will keep the @@ -54,8 +55,8 @@ def parse_header( accept_header='' ): acceptable[key] = float(qscore) for ctype in sorted(acceptable.iteritems(), - key=operator.itemgetter(1), - reverse=True): + key=operator.itemgetter(1), + reverse=True): if ctype[0] in accept_types: return accept_types[ctype[0]] diff --git a/ckan/lib/celery_app.py b/ckan/lib/celery_app.py index 27d2951d10b..d7c8d2efb49 100644 --- a/ckan/lib/celery_app.py +++ b/ckan/lib/celery_app.py @@ -15,7 +15,7 @@ config_file = os.environ.get('CKAN_CONFIG') if not config_file: - config_file = os.path.join( + config_file = os.path.join( os.path.dirname(os.path.abspath(__file__)), '../../development.ini') config.read(config_file) @@ -25,14 +25,14 @@ sqlalchemy_url = config.get('app:main', 'sqlalchemy.url') -default_config = dict( - BROKER_BACKEND = 'sqlalchemy', - BROKER_HOST = sqlalchemy_url, - CELERY_RESULT_DBURI = sqlalchemy_url, - CELERY_RESULT_BACKEND = 'database', - CELERY_RESULT_SERIALIZER = 'json', - CELERY_TASK_SERIALIZER = 'json', - CELERY_IMPORTS = [], +default_config = dict( + BROKER_BACKEND='sqlalchemy', + BROKER_HOST=sqlalchemy_url, + CELERY_RESULT_DBURI=sqlalchemy_url, + CELERY_RESULT_BACKEND='database', + CELERY_RESULT_SERIALIZER='json', + CELERY_TASK_SERIALIZER='json', + CELERY_IMPORTS=[], ) for entry_point in iter_entry_points(group='ckan.celery_task'): diff --git a/ckan/lib/helpers.py b/ckan/lib/helpers.py index 9ab19ac1e72..cc921203ea0 100644 --- a/ckan/lib/helpers.py +++ b/ckan/lib/helpers.py @@ -506,7 +506,7 @@ def format_icon(_format): def 
linked_gravatar(email_hash, size=100, default=None): return literal( - '' % _('Update your avatar at gravatar.com') + '%s' % gravatar(email_hash,size,default) ) @@ -929,6 +929,19 @@ def remove_url_param(key, value=None, replace=None, controller=None, params.append((key, replace)) return _create_url_with_params(params=params, controller=controller, action=action, extras=extras) +def dashboard_activity_stream(user_id): + '''Return the dashboard activity stream of the given user. + + :param user_id: the id of the user + :type user_id: string + + :returns: an activity stream as an HTML snippet + :rtype: string + + ''' + import ckan.logic as logic + context = {'model' : model, 'session':model.Session, 'user':c.user} + return logic.get_action('dashboard_activity_list_html')(context, {'id': user_id}) # these are the functions that will end up in `h` template helpers # if config option restrict_template_vars is true @@ -987,6 +1000,7 @@ def remove_url_param(key, value=None, replace=None, controller=None, 'follow_count', 'remove_url_param', 'add_url_param', + 'dashboard_activity_stream', # imported into ckan.lib.helpers 'literal', 'link_to', diff --git a/ckan/lib/i18n.py b/ckan/lib/i18n.py index 87e7312e6a0..ca6c1682418 100644 --- a/ckan/lib/i18n.py +++ b/ckan/lib/i18n.py @@ -12,14 +12,26 @@ # we don't have a Portuguese territory # translation currently. -def _get_locales(): +def get_locales_from_config(): + ''' despite the name of this function it gets the locales defined by + the config AND also the locals available subject to the config. ''' + locales_offered = config.get('ckan.locales_offered', '').split() + filtered_out = config.get('ckan.locales_filtered_out', '').split() + locale_default = config.get('ckan.locale_default', 'en') + locale_order = config.get('ckan.locale_order', '').split() + known_locales = get_locales() + all_locales = set(known_locales) | set(locales_offered) | set(locale_order) | set(locale_default) + all_locales -= set(filtered_out) + return all_locales +def _get_locales(): + # FIXME this wants cleaning up and merging with get_locales_from_config() assert not config.get('lang'), \ '"lang" config option not supported - please use ckan.locale_default instead.' locales_offered = config.get('ckan.locales_offered', '').split() filtered_out = config.get('ckan.locales_filtered_out', '').split() - locale_order = config.get('ckan.locale_order', '').split() locale_default = config.get('ckan.locale_default', 'en') + locale_order = config.get('ckan.locale_order', '').split() locales = ['en'] i18n_path = os.path.dirname(ckan.i18n.__file__) @@ -58,6 +70,7 @@ def _get_locales(): available_locales = None locales = None locales_dict = None +_non_translated_locals = None def get_locales(): ''' Get list of available locales @@ -68,6 +81,15 @@ def get_locales(): locales = _get_locales() return locales +def non_translated_locals(): + ''' These are the locales that are available but for which there are + no translations. returns a list like ['en', 'de', ...] ''' + global _non_translated_locals + if not _non_translated_locals: + locales = config.get('ckan.locale_order', '').split() + _non_translated_locals = [x for x in locales if x not in get_locales()] + return _non_translated_locals + def get_locales_dict(): ''' Get a dict of the available locales e.g. { 'en' : Locale('en'), 'de' : Locale('de'), ... 
} ''' @@ -87,12 +109,25 @@ def get_available_locales(): available_locales = map(Locale.parse, get_locales()) return available_locales +def _set_lang(lang): + ''' Allows a custom i18n directory to be specified. + Creates a fake config file to pass to pylons.i18n.set_lang, which + sets the Pylons root path to desired i18n_directory. + This is needed as Pylons will only look for an i18n directory in + the application root.''' + if config.get('ckan.i18n_directory'): + fake_config = {'pylons.paths': {'root': config['ckan.i18n_directory']}, + 'pylons.package': config['pylons.package']} + i18n.set_lang(lang, pylons_config=fake_config) + else: + i18n.set_lang(lang) + def handle_request(request, tmpl_context): ''' Set the language for the request ''' lang = request.environ.get('CKAN_LANG') or \ - config.get('ckan.locale_default', 'en') + config.get('ckan.locale_default', 'en') if lang != 'en': - i18n.set_lang(lang) + set_lang(lang) tmpl_context.language = lang return lang @@ -104,3 +139,10 @@ def get_lang(): return langs[0] else: return 'en' + +def set_lang(language_code): + ''' Wrapper to pylons call ''' + if language_code in non_translated_locals(): + language_code = config.get('ckan.locale_default', 'en') + if language_code != 'en': + _set_lang(language_code) diff --git a/ckan/lib/plugins.py b/ckan/lib/plugins.py index 258e1449681..bb5b795aa18 100644 --- a/ckan/lib/plugins.py +++ b/ckan/lib/plugins.py @@ -221,11 +221,21 @@ def form_to_db_schema_options(self, options): if options.get('api'): if options.get('type') == 'create': - return logic.schema.default_create_package_schema() + return self.form_to_db_schema_api_create() else: - return logic.schema.default_update_package_schema() + assert options.get('type') == 'update' + return self.form_to_db_schema_api_update() else: - return logic.schema.package_form_schema() + return self.form_to_db_schema() + + def form_to_db_schema(self): + return logic.schema.form_to_db_package_schema() + + def form_to_db_schema_api_create(self): + return logic.schema.default_create_package_schema() + + def form_to_db_schema_api_update(self): + return logic.schema.default_update_package_schema() def db_to_form_schema(self): '''This is an interface to manipulate data from the database @@ -387,7 +397,7 @@ def check_data_dict(self, data_dict): 'extras_validation', 'save', 'return_to', 'resources'] - schema_keys = package_form_schema().keys() + schema_keys = form_to_db_package_schema().keys() keys_in_schema = set(schema_keys) - set(surplus_keys_schema) missing_keys = keys_in_schema - set(data_dict.keys()) diff --git a/ckan/lib/search/__init__.py b/ckan/lib/search/__init__.py index b68e7850c47..f003efd9350 100644 --- a/ckan/lib/search/__init__.py +++ b/ckan/lib/search/__init__.py @@ -9,13 +9,16 @@ from common import (SearchIndexError, SearchError, SearchQueryError, make_connection, is_available, SolrSettings) from index import PackageSearchIndex, NoopSearchIndex -from query import TagSearchQuery, ResourceSearchQuery, PackageSearchQuery, QueryOptions, convert_legacy_parameters_to_solr +from query import (TagSearchQuery, ResourceSearchQuery, PackageSearchQuery, + QueryOptions, convert_legacy_parameters_to_solr) log = logging.getLogger(__name__) import sys import cgitb import warnings + + def text_traceback(): with warnings.catch_warnings(): warnings.simplefilter("ignore") @@ -37,7 +40,7 @@ def text_traceback(): 'ref_entity_with_attr': 'name', 'all_fields': False, 'search_tags': True, - 'callback': None, # simply passed through + 'callback': None, # simply passed through } 
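# Illustration only, not part of the patch: the i18n changes above route all
# language switching through set_lang(), which quietly falls back to
# ckan.locale_default for locales that are offered but have no translations.
# A minimal standalone sketch of that fallback rule, using stand-in values
# for the pylons config and for get_locales():

config = {'ckan.locale_order': 'en de fr pt_BR', 'ckan.locale_default': 'en'}
translated_locales = ['en', 'de', 'fr']     # stand-in for locales with .mo files

def non_translated_locals():
    locales = config.get('ckan.locale_order', '').split()
    return [x for x in locales if x not in translated_locales]

def set_lang(language_code):
    if language_code in non_translated_locals():
        # offered but untranslated (e.g. 'pt_BR' here), so use the default
        language_code = config.get('ckan.locale_default', 'en')
    if language_code != 'en':
        print 'would call pylons i18n.set_lang(%r)' % language_code

set_lang('pt_BR')   # falls back to 'en', so no pylons call would be made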
_INDICES = { @@ -57,6 +60,7 @@ def text_traceback(): _INDICES['package'] = NoopSearchIndex _QUERIES['package'] = sql.PackageSearchQuery + def _normalize_type(_type): if isinstance(_type, domain_object.DomainObject): _type = _type.__class__ @@ -64,8 +68,10 @@ def _normalize_type(_type): _type = _type.__name__ return _type.strip().lower() + def index_for(_type): - """ Get a SearchIndex instance sub-class suitable for the specified type. """ + """ Get a SearchIndex instance sub-class suitable for + the specified type. """ try: _type_n = _normalize_type(_type) return _INDICES[_type_n]() @@ -73,14 +79,17 @@ def index_for(_type): log.warn("Unknown search type: %s" % _type) return NoopSearchIndex() -def query_for( _type): - """ Get a SearchQuery instance sub-class suitable for the specified type. """ + +def query_for(_type): + """ Get a SearchQuery instance sub-class suitable for the specified + type. """ try: _type_n = _normalize_type(_type) return _QUERIES[_type_n]() except KeyError, ke: raise SearchError("Unknown search type: %s" % _type) + def dispatch_by_operation(entity_type, entity, operation): """Call the appropriate index method for a given notification.""" try: @@ -121,7 +130,8 @@ def notify(self, entity, operation): else: log.warn("Discarded Sync. indexing for: %s" % entity) -def rebuild(package_id=None,only_missing=False,force=False,refresh=False): + +def rebuild(package_id=None, only_missing=False, force=False, refresh=False): ''' Rebuilds the search index. @@ -138,18 +148,21 @@ def rebuild(package_id=None,only_missing=False,force=False,refresh=False): if package_id: pkg_dict = get_action('package_show')( - {'model': model, 'ignore_auth': True, 'validate': False}, - {'id': package_id}) + {'model': model, 'ignore_auth': True, 'validate': False}, + {'id': package_id}) log.info('Indexing just package %r...', pkg_dict['name']) package_index.remove_dict(pkg_dict) package_index.insert_dict(pkg_dict) else: - package_ids = [r[0] for r in model.Session.query(model.Package.id).filter(model.Package.state == 'active').all()] + package_ids = [r[0] for r in model.Session.query(model.Package.id). 
+ filter(model.Package.state == 'active').all()] if only_missing: log.info('Indexing only missing packages...') package_query = query_for(model.Package) - indexed_pkg_ids = set(package_query.get_all_entity_ids(max_results=len(package_ids))) - package_ids = set(package_ids) - indexed_pkg_ids # Packages not indexed + indexed_pkg_ids = set(package_query.get_all_entity_ids( + max_results=len(package_ids))) + # Packages not indexed + package_ids = set(package_ids) - indexed_pkg_ids if len(package_ids) == 0: log.info('All datasets are already indexed') @@ -164,12 +177,14 @@ def rebuild(package_id=None,only_missing=False,force=False,refresh=False): try: package_index.insert_dict( get_action('package_show')( - {'model': model, 'ignore_auth': True, 'validate': False}, + {'model': model, 'ignore_auth': True, + 'validate': False}, {'id': pkg_id} ) ) - except Exception,e: - log.error('Error while indexing dataset %s: %s' % (pkg_id,str(e))) + except Exception, e: + log.error('Error while indexing dataset %s: %s' % + (pkg_id, str(e))) if force: log.error(text_traceback()) continue @@ -179,32 +194,38 @@ def rebuild(package_id=None,only_missing=False,force=False,refresh=False): model.Session.commit() log.info('Finished rebuilding search index.') + def check(): from ckan import model package_query = query_for(model.Package) log.debug("Checking packages search index...") - pkgs_q = model.Session.query(model.Package).filter_by(state=model.State.ACTIVE) + pkgs_q = model.Session.query(model.Package).filter_by( + state=model.State.ACTIVE) pkgs = set([pkg.id for pkg in pkgs_q]) indexed_pkgs = set(package_query.get_all_entity_ids(max_results=len(pkgs))) pkgs_not_indexed = pkgs - indexed_pkgs - print 'Packages not indexed = %i out of %i' % (len(pkgs_not_indexed), len(pkgs)) + print 'Packages not indexed = %i out of %i' % (len(pkgs_not_indexed), + len(pkgs)) for pkg_id in pkgs_not_indexed: pkg = model.Session.query(model.Package).get(pkg_id) print pkg.revision.timestamp.strftime('%Y-%m-%d'), pkg.name + def show(package_reference): from ckan import model package_query = query_for(model.Package) return package_query.get_index(package_reference) + def clear(package_reference=None): from ckan import model package_index = index_for(model.Package) if package_reference: - log.debug("Clearing search index for dataset %s..." % package_reference) - package_index.delete_package({'id':package_reference}) + log.debug("Clearing search index for dataset %s..." % + package_reference) + package_index.delete_package({'id': package_reference}) else: log.debug("Clearing search index...") package_index.clear() @@ -253,9 +274,9 @@ def check_solr_schema_version(schema_file=None): url = solr_url.strip('/') + SOLR_SCHEMA_FILE_OFFSET - req = urllib2.Request(url = url) + req = urllib2.Request(url=url) if http_auth: - req.add_header('Authorization',http_auth) + req.add_header('Authorization', http_auth) res = urllib2.urlopen(req) else: @@ -267,9 +288,11 @@ def check_solr_schema_version(schema_file=None): version = tree.documentElement.getAttribute('version') if not len(version): - raise SearchError('Could not extract version info from the SOLR schema, using file: \n%s' % url) + raise SearchError('Could not extract version info from the SOLR' + ' schema, using file: \n%s' % url) if not version in SUPPORTED_SCHEMA_VERSIONS: - raise SearchError('SOLR schema version not supported: %s. Supported versions are [%s]' - % (version,', '.join(SUPPORTED_SCHEMA_VERSIONS))) + raise SearchError('SOLR schema version not supported: %s. 
Supported' + ' versions are [%s]' + % (version, ', '.join(SUPPORTED_SCHEMA_VERSIONS))) return True diff --git a/ckan/lib/search/common.py b/ckan/lib/search/common.py index 8780f99ea1f..06ab4afc961 100644 --- a/ckan/lib/search/common.py +++ b/ckan/lib/search/common.py @@ -2,12 +2,21 @@ import logging log = logging.getLogger(__name__) -class SearchIndexError(Exception): pass -class SearchError(Exception): pass -class SearchQueryError(SearchError): pass + +class SearchIndexError(Exception): + pass + + +class SearchError(Exception): + pass + + +class SearchQueryError(SearchError): + pass DEFAULT_SOLR_URL = 'http://127.0.0.1:8983/solr' + class SolrSettings(object): _is_initialised = False _url = None @@ -32,6 +41,7 @@ def get(cls): raise SearchIndexError('SOLR URL is blank') return (cls._url, cls._user, cls._password) + def is_available(): """ Return true if we can successfully connect to Solr. @@ -48,11 +58,13 @@ def is_available(): return True + def make_connection(): from solr import SolrConnection solr_url, solr_user, solr_password = SolrSettings.get() assert solr_url is not None if solr_user is not None and solr_password is not None: - return SolrConnection(solr_url, http_user=solr_user, http_pass=solr_password) + return SolrConnection(solr_url, http_user=solr_user, + http_pass=solr_password) else: return SolrConnection(solr_url) diff --git a/ckan/lib/search/index.py b/ckan/lib/search/index.py index e9129009947..09ec2eec913 100644 --- a/ckan/lib/search/index.py +++ b/ckan/lib/search/index.py @@ -1,7 +1,6 @@ import socket import string import logging -import itertools import collections import json @@ -14,6 +13,7 @@ import ckan.model as model from ckan.plugins import (PluginImplementations, IPackageController) +import ckan.logic as logic log = logging.getLogger(__name__) @@ -122,10 +122,27 @@ def index_package(self, pkg_dict): pkg_dict[key] = value pkg_dict.pop('extras', None) - #Add tags and groups + # add tags, removing vocab tags from 'tags' list and adding them as + # vocab_ so that they can be used in facets + non_vocab_tag_names = [] tags = pkg_dict.pop('tags', []) - pkg_dict['tags'] = [tag['name'] for tag in tags] - + context = {'model': model} + + for tag in tags: + if tag.get('vocabulary_id'): + data = {'id': tag['vocabulary_id']} + vocab = logic.get_action('vocabulary_show')(context, data) + key = u'vocab_%s' % vocab['name'] + if key in pkg_dict: + pkg_dict[key].append(tag['name']) + else: + pkg_dict[key] = [tag['name']] + else: + non_vocab_tag_names.append(tag['name']) + + pkg_dict['tags'] = non_vocab_tag_names + + # add groups groups = pkg_dict.pop('groups', []) # Capacity is different to the default only if using organizations @@ -197,7 +214,6 @@ def index_package(self, pkg_dict): import hashlib pkg_dict['index_id'] = hashlib.md5('%s%s' % (pkg_dict['id'],config.get('ckan.site_id'))).hexdigest() - for item in PluginImplementations(IPackageController): pkg_dict = item.before_index(pkg_dict) diff --git a/ckan/lib/search/query.py b/ckan/lib/search/query.py index a3b7adf35fe..25b9ba5bba7 100644 --- a/ckan/lib/search/query.py +++ b/ckan/lib/search/query.py @@ -150,16 +150,26 @@ def run(self, query=None, terms=[], fields={}, facet_by=[], options=None, **kwar class TagSearchQuery(SearchQuery): """Search for tags.""" - def run(self, query=[], fields={}, options=None, **kwargs): + def run(self, query=None, fields=None, options=None, **kwargs): + query = [] if query is None else query + fields = {} if fields is None else fields + if options is None: options = QueryOptions(**kwargs) else: 
options.update(kwargs) + if isinstance(query, basestring): + query = [query] + + query = query[:] # don't alter caller's query list. + for field, value in fields.items(): + if field in ('tag', 'tags'): + query.append(value) + context = {'model': model, 'session': model.Session} data_dict = { 'query': query, - 'fields': fields, 'offset': options.get('offset'), 'limit': options.get('limit') } @@ -186,9 +196,23 @@ def run(self, fields={}, options=None, **kwargs): else: options.update(kwargs) - context = {'model':model, 'session': model.Session} + context = { + 'model':model, + 'session': model.Session, + 'search_query': True, + } + + # Transform fields into structure required by the resource_search + # action. + query = [] + for field, terms in fields.items(): + if isinstance(terms, basestring): + terms = terms.split() + for term in terms: + query.append(':'.join([field, term])) + data_dict = { - 'fields': fields, + 'query': query, 'offset': options.get('offset'), 'limit': options.get('limit'), 'order_by': options.get('order_by') diff --git a/ckan/logic/__init__.py b/ckan/logic/__init__.py index 098b7edae04..bb7d94b4b7c 100644 --- a/ckan/logic/__init__.py +++ b/ckan/logic/__init__.py @@ -11,12 +11,13 @@ log = logging.getLogger(__name__) + class AttributeDict(dict): def __getattr__(self, name): try: return self[name] except KeyError: - raise AttributeError('No such attribute %r'%name) + raise AttributeError('No such attribute %r' % name) def __setattr__(self, name, value): raise AttributeError( @@ -33,12 +34,15 @@ def __str__(self): self.extra_msg) return ' - '.join([str(err_msg) for err_msg in err_msgs if err_msg]) + class NotFound(ActionError): pass + class NotAuthorized(ActionError): pass + class ParameterError(ActionError): pass @@ -56,6 +60,7 @@ def __str__(self): log = logging.getLogger(__name__) + def parse_params(params, ignore_keys=None): '''Takes a dict and returns it with some values standardised. This is done on a dict before calling tuplize_dict on it. @@ -109,6 +114,7 @@ def clean_dict(data_dict): clean_dict(inner_dict) return data_dict + def tuplize_dict(data_dict): '''Takes a dict with keys of the form 'table__0__key' and converts them to a tuple like ('table', 0, 'key'). @@ -130,6 +136,7 @@ def tuplize_dict(data_dict): tuplized_dict[tuple(key_list)] = value return tuplized_dict + def untuplize_dict(tuplized_dict): data_dict = {} @@ -138,30 +145,33 @@ def untuplize_dict(tuplized_dict): data_dict[new_key] = value return data_dict + def flatten_to_string_key(dict): flattented = flatten_dict(dict) return untuplize_dict(flattented) + def check_access(action, context, data_dict=None): model = context['model'] user = context.get('user') - log.debug('check access - user %r, action %s' % (user,action)) - + log.debug('check access - user %r, action %s' % (user, action)) + if action: - #if action != model.Action.READ and user in (model.PSEUDO_USER__VISITOR, ''): + #if action != model.Action.READ and user in + # (model.PSEUDO_USER__VISITOR, ''): # # TODO Check the API key is valid at some point too! 
# log.debug('Valid API key needed to make changes') # raise NotAuthorized logic_authorization = is_authorized(action, context, data_dict) if not logic_authorization['success']: - msg = logic_authorization.get('msg','') + msg = logic_authorization.get('msg', '') raise NotAuthorized(msg) elif not user: msg = _('No valid API key provided.') log.debug(msg) - raise NotAuthorized(msg) + raise NotAuthorized(msg) log.debug('Access OK.') return True @@ -172,7 +182,7 @@ def check_access_old(entity, action, context): user = context.get('user') if context.get('ignore_auth'): return True - log.debug('check access - user %r, action %s' % (user,action)) + log.debug('check access - user %r, action %s' % (user, action)) if action and entity and not isinstance(entity, model.PackageRelationship): if action != model.Action.READ and user == '': log.debug('Valid API key needed to make changes') @@ -189,20 +199,23 @@ def check_access_old(entity, action, context): #raise NotAuthorized log.debug('Access OK.') - return True + return True _actions = {} + def get_action(action): if _actions: + if not action in _actions: + raise KeyError("Action '%s' not found" % action) return _actions.get(action) # Otherwise look in all the plugins to resolve all possible # First get the default ones in the ckan/logic/action directory # Rather than writing them out in full will use __import__ # to load anything from ckan.logic.action that looks like it might # be an action - for action_module_name in ['get', 'create', 'update','delete']: - module_path = 'ckan.logic.action.'+action_module_name + for action_module_name in ['get', 'create', 'update', 'delete']: + module_path = 'ckan.logic.action.' + action_module_name module = __import__(module_path) for part in module_path.split('.')[1:]: module = getattr(module, part) @@ -214,8 +227,8 @@ def get_action(action): # Whitelist all actions defined in logic/action/get.py as # being side-effect free. - v.side_effect_free = getattr(v, 'side_effect_free', True) and \ - action_module_name == 'get' + v.side_effect_free = getattr(v, 'side_effect_free', True)\ + and action_module_name == 'get' # Then overwrite them with any specific ones in the plugins: resolved_action_plugins = {} @@ -236,6 +249,7 @@ def get_action(action): _actions.update(fetched_actions) return _actions.get(action) + def get_or_bust(data_dict, keys): '''Try and get values from dictionary and if they are not there raise a validation error. @@ -264,6 +278,7 @@ def get_or_bust(data_dict, keys): return values[0] return tuple(values) + def side_effect_free(action): '''A decorator that marks the given action as side-effect-free. 
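# For orientation, a usage sketch that is not part of the patch: callers
# consume the action/authorization layer touched above through
# get_action() and check_access(). Assumes a configured CKAN environment;
# the user name and dataset id below are made-up examples.

import ckan.model as model
from ckan.logic import get_action, check_access, NotAuthorized

context = {'model': model, 'session': model.Session, 'user': 'joe'}
try:
    check_access('package_show', context, {'id': 'annakarenina'})
except NotAuthorized:
    print 'user is not authorized to read this dataset'
else:
    pkg = get_action('package_show')(context, {'id': 'annakarenina'})
    print pkg['name']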
diff --git a/ckan/logic/action/create.py b/ckan/logic/action/create.py index 874abc88685..ce44abfa249 100644 --- a/ckan/logic/action/create.py +++ b/ckan/logic/action/create.py @@ -534,7 +534,8 @@ def group_create(context, data_dict): 'defer_commit':True, 'session': session } - activity_create(activity_create_context, activity_dict, ignore_auth=True) + logic.get_action('activity_create')(activity_create_context, + activity_dict, ignore_auth=True) if not context.get('defer_commit'): model.repo.commit() @@ -648,7 +649,8 @@ def user_create(context, data_dict): 'object_id': user.id, 'activity_type': 'new user', } - activity_create(activity_create_context, activity_dict, ignore_auth=True) + logic.get_action('activity_create')(activity_create_context, + activity_dict, ignore_auth=True) if not context.get('defer_commit'): model.repo.commit() @@ -842,6 +844,7 @@ def follow_user(context, data_dict): raise logic.NotAuthorized model = context['model'] + session = context['session'] userobj = model.User.get(context['user']) if not userobj: @@ -869,6 +872,24 @@ def follow_user(context, data_dict): follower = model_save.user_following_user_dict_save(data_dict, context) + activity_dict = { + 'user_id': userobj.id, + 'object_id': data_dict['id'], + 'activity_type': 'follow user', + } + activity_dict['data'] = { + 'user': ckan.lib.dictization.table_dictize( + model.User.get(data_dict['id']), context), + } + activity_create_context = { + 'model': model, + 'user': userobj, + 'defer_commit':True, + 'session': session + } + logic.get_action('activity_create')(activity_create_context, + activity_dict, ignore_auth=True) + if not context.get('defer_commit'): model.repo.commit() @@ -895,6 +916,7 @@ def follow_dataset(context, data_dict): raise logic.NotAuthorized model = context['model'] + session = context['session'] userobj = model.User.get(context['user']) if not userobj: @@ -918,6 +940,24 @@ def follow_dataset(context, data_dict): follower = model_save.user_following_dataset_dict_save(data_dict, context) + activity_dict = { + 'user_id': userobj.id, + 'object_id': data_dict['id'], + 'activity_type': 'follow dataset', + } + activity_dict['data'] = { + 'dataset': ckan.lib.dictization.table_dictize( + model.Package.get(data_dict['id']), context), + } + activity_create_context = { + 'model': model, + 'user': userobj, + 'defer_commit':True, + 'session': session + } + logic.get_action('activity_create')(activity_create_context, + activity_dict, ignore_auth=True) + if not context.get('defer_commit'): model.repo.commit() diff --git a/ckan/logic/action/get.py b/ckan/logic/action/get.py index 191a72f7fea..acdab602e75 100644 --- a/ckan/logic/action/get.py +++ b/ckan/logic/action/get.py @@ -1178,24 +1178,114 @@ def package_search(context, data_dict): def resource_search(context, data_dict): ''' + Searches for resources satisfying a given search criteria. - :param fields: - :type fields: - :param order_by: - :type order_by: - :param offset: - :type offset: - :param limit: - :type limit: + It returns a dictionary with 2 fields: ``count`` and ``results``. The + ``count`` field contains the total number of Resources found without the + limit or query parameters having an effect. The ``results`` field is a + list of dictized Resource objects. - :returns: - :rtype: + The 'q' parameter is a required field. It is a string of the form + ``{field}:{term}`` or a list of strings, each of the same form. Within + each string, ``{field}`` is a field or extra field on the Resource domain + object. 
+ + If ``{field}`` is ``"hash"``, then an attempt is made to match the + `{term}` as a *prefix* of the ``Resource.hash`` field. + + If ``{field}`` is an extra field, then an attempt is made to match against + the extra fields stored against the Resource. + + Note: The search is limited to search against extra fields declared in + the config setting ``ckan.extra_resource_fields``. + + Note: Due to a Resource's extra fields being stored as a json blob, the + match is made against the json string representation. As such, false + positives may occur: + + If the search criteria is: :: + + query = "field1:term1" + + Then a json blob with the string representation of: :: + + {"field1": "foo", "field2": "term1"} + + will match the search criteria! This is a known short-coming of this + approach. + + All matches are made ignoring case; and apart from the ``"hash"`` field, + a term matches if it is a substring of the field's value. + + Finally, when specifying more than one search criteria, the criteria are + AND-ed together. + + The ``order`` parameter is used to control the ordering of the results. + Currently only ordering one field is available, and in ascending order + only. + + The ``fields`` parameter is deprecated as it is not compatible with calling + this action with a GET request to the action API. + + The context may contain a flag, `search_query`, which if True will make + this action behave as if being used by the internal search api. ie - the + results will not be dictized, and SearchErrors are thrown for bad search + queries (rather than ValidationErrors). + + :param query: The search criteria. See above for description. + :type query: string or list of strings of the form "{field}:{term1}" + :param fields: Deprecated + :type fields: dict of fields to search terms. + :param order_by: A field on the Resource model that orders the results. + :type order_by: string + :param offset: Apply an offset to the query. + :type offset: int + :param limit: Apply a limit to the query. + :type limit: int + + :returns: A dictionary with a ``count`` field, and a ``results`` field. + :rtype: dict ''' model = context['model'] - session = context['session'] - fields = _get_or_bust(data_dict, 'fields') + # Allow either the `query` or `fields` parameter to be given, but not both. + # Once `fields` parameter is dropped, this can be made simpler. + # The result of all this gumpf is to populate the local `fields` variable + # with mappings from field names to list of search terms, or a single + # search-term string. + query = data_dict.get('query') + fields = data_dict.get('fields') + + if query is None and fields is None: + raise ValidationError({'query': _('Missing value')}) + + elif query is not None and fields is not None: + raise ValidationError( + {'fields': _('Do not specify if using "query" parameter')}) + + elif query is not None: + if isinstance(query, basestring): + query = [query] + try: + fields = dict(pair.split(":", 1) for pair in query) + except ValueError: + raise ValidationError( + {'query': _('Must be : pair(s)')}) + + else: + log.warning('Use of the "fields" parameter in resource_search is ' + 'deprecated. Use the "query" parameter instead') + + # The legacy fields paramter splits string terms. 
+ # So maintain that behaviour + split_terms = {} + for field, terms in fields.items(): + if isinstance(terms, basestring): + terms = terms.split() + split_terms[field] = terms + fields = split_terms + order_by = data_dict.get('order_by') offset = data_dict.get('offset') limit = data_dict.get('limit') @@ -1203,16 +1293,36 @@ def resource_search(context, data_dict): # TODO: should we check for user authentication first? q = model.Session.query(model.Resource) resource_fields = model.Resource.get_columns() - for field, terms in fields.items(): + if isinstance(terms, basestring): - terms = terms.split() + terms = [terms] + if field not in resource_fields: - raise search.SearchError('Field "%s" not recognised in Resource search.' % field) + msg = _('Field "{field}" not recognised in resource_search.')\ + .format(field=field) + + # Running in the context of the internal search api. + if context.get('search_query', False): + raise search.SearchError(msg) + + # Otherwise, assume we're in the context of an external api + # and need to provide meaningful external error messages. + raise ValidationError({'query': msg}) + for term in terms: + + # prevent pattern injection + term = misc.escape_sql_like_special_characters(term) + model_attr = getattr(model.Resource, field) + + # Treat the has field separately, see docstring. if field == 'hash': q = q.filter(model_attr.ilike(unicode(term) + '%')) + + # Resource extras are stored in a json blob. So searching for + # matching fields is a bit trickier. See the docstring. elif field in model.Resource.get_extra_columns(): model_attr = getattr(model.Resource, 'extras') @@ -1221,6 +1331,8 @@ def resource_search(context, data_dict): model_attr.ilike(u'''%%"%s": "%%%s%%"}''' % (field, term)) ) q = q.filter(like) + + # Just a regular field else: q = q.filter(model_attr.ilike('%' + unicode(term) + '%')) @@ -1240,15 +1352,24 @@ def resource_search(context, data_dict): else: results.append(result) - return {'count': count, 'results': results} + # If run in the context of a search query, then don't dictize the results. + if not context.get('search_query', False): + results = model_dictize.resource_list_dictize(results, context) + + return {'count': count, + 'results': results} def _tag_search(context, data_dict): model = context['model'] - query = data_dict.get('query') or data_dict.get('q') - if query: - query = query.strip() - terms = [query] if query else [] + terms = data_dict.get('query') or data_dict.get('q') or [] + if isinstance(terms, basestring): + terms = [terms] + terms = [ t.strip() for t in terms if t.strip() ] + + if 'fields' in data_dict: + log.warning('"fields" parameter is deprecated. ' + 'Use the "query" parameter instead') fields = data_dict.get('fields', {}) offset = data_dict.get('offset') @@ -1293,12 +1414,12 @@ def tag_search(context, data_dict): searched. If the ``vocabulary_id`` argument is given then only tags belonging to that vocabulary will be searched instead. 
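# Usage sketch, not part of the patch: the reworked resource_search and
# tag_search actions accept the new "query" parameter in place of the
# deprecated "fields" dict. Field names and search terms below are
# examples only; assumes a configured CKAN environment.

import ckan.model as model
from ckan.logic import get_action

context = {'model': model, 'session': model.Session, 'user': ''}

# resource_search: each criterion is a "{field}:{term}" string and
# multiple criteria are AND-ed together.
result = get_action('resource_search')(context,
        {'query': ['format:csv', 'description:population'], 'limit': 10})
print result['count'], len(result['results'])

# tag_search: "query" may be a single string or a list of strings.
tags = get_action('tag_search')(context, {'query': 'economy', 'limit': 5})
print [t['name'] for t in tags.get('results', [])]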
- :param query: the string to search for - :type query: string + :param query: the string(s) to search for + :type query: string or list of strings :param vocabulary_id: the id or name of the tag vocabulary to search in (optional) :type vocabulary_id: string - :param fields: + :param fields: deprecated :type fields: dictionary :param limit: the maximum number of tags to return :type limit: int @@ -1334,7 +1455,7 @@ def tag_autocomplete(context, data_dict): :param vocabulary_id: the id or name of the tag vocabulary to search in (optional) :type vocabulary_id: string - :param fields: + :param fields: deprecated :type fields: dictionary :param limit: the maximum number of tags to return :type limit: int @@ -1746,6 +1867,14 @@ def _render_deleted_group_activity(context, activity): return _render('activity_streams/deleted_group.html', extra_vars = {'activity': activity}) +def _render_follow_dataset_activity(context, activity): + return _render('activity_streams/follow_dataset.html', + extra_vars = {'activity': activity}) + +def _render_follow_user_activity(context, activity): + return _render('activity_streams/follow_user.html', + extra_vars = {'activity': activity}) + # Global dictionary mapping activity types to functions that render activity # dicts to HTML snippets for including in HTML pages. activity_renderers = { @@ -1757,6 +1886,8 @@ def _render_deleted_group_activity(context, activity): 'new group' : _render_new_group_activity, 'changed group' : _render_changed_group_activity, 'deleted group' : _render_deleted_group_activity, + 'follow dataset': _render_follow_dataset_activity, + 'follow user': _render_follow_user_activity, } def _activity_list_to_html(context, activity_stream): @@ -1834,6 +1965,7 @@ def user_follower_count(context, data_dict): :param id: the id or name of the user :type id: string + :rtype: int ''' @@ -1849,6 +1981,7 @@ def dataset_follower_count(context, data_dict): :param id: the id or name of the dataset :type id: string + :rtype: int ''' @@ -1869,7 +2002,7 @@ def _follower_list(context, data_dict, FollowerClass): users = [model.User.get(follower.follower_id) for follower in followers] users = [user for user in users if user is not None] - # Dictize the list of user objects. + # Dictize the list of User objects. return [model_dictize.user_dictize(user,context) for user in users] def user_follower_list(context, data_dict): @@ -1877,6 +2010,7 @@ def user_follower_list(context, data_dict): :param id: the id or name of the user :type id: string + :rtype: list of dictionaries ''' @@ -1893,6 +2027,7 @@ def dataset_follower_list(context, data_dict): :param id: the id or name of the dataset :type id: string + :rtype: list of dictionaries ''' @@ -1923,6 +2058,7 @@ def am_following_user(context, data_dict): :param id: the id or name of the user :type id: string + :rtype: boolean ''' @@ -1940,6 +2076,7 @@ def am_following_dataset(context, data_dict): :param id: the id or name of the dataset :type id: string + :rtype: boolean ''' @@ -1951,3 +2088,133 @@ def am_following_dataset(context, data_dict): return _am_following(context, data_dict, context['model'].UserFollowingDataset) + +def user_followee_count(context, data_dict): + '''Return the number of users that are followed by the given user. 
+ + :param id: the id of the user + :type id: string + + :rtype: int + + ''' + schema = context.get('schema') or ( + ckan.logic.schema.default_follow_user_schema()) + data_dict, errors = _validate(data_dict, schema, context) + if errors: + raise ValidationError(errors, ckan.logic.action.error_summary(errors)) + return ckan.model.UserFollowingUser.followee_count(data_dict['id']) + +def dataset_followee_count(context, data_dict): + '''Return the number of datasets that are followed by the given user. + + :param id: the id of the user + :type id: string + + :rtype: int + + ''' + schema = context.get('schema') or ( + ckan.logic.schema.default_follow_user_schema()) + data_dict, errors = _validate(data_dict, schema, context) + if errors: + raise ValidationError(errors, ckan.logic.action.error_summary(errors)) + return ckan.model.UserFollowingDataset.followee_count(data_dict['id']) + +def user_followee_list(context, data_dict): + '''Return the list of users that are followed by the given user. + + :param id: the id of the user + :type id: string + + :rtype: list of dictionaries + + ''' + schema = context.get('schema') or ( + ckan.logic.schema.default_follow_user_schema()) + data_dict, errors = _validate(data_dict, schema, context) + if errors: + raise ValidationError(errors, ckan.logic.action.error_summary(errors)) + + # Get the list of Follower objects. + model = context['model'] + user_id = data_dict.get('id') + followees = model.UserFollowingUser.followee_list(user_id) + + # Convert the list of Follower objects to a list of User objects. + users = [model.User.get(followee.object_id) for followee in followees] + users = [user for user in users if user is not None] + + # Dictize the list of User objects. + return [model_dictize.user_dictize(user, context) for user in users] + +def dataset_followee_list(context, data_dict): + '''Return the list of datasets that are followed by the given user. + + :param id: the id or name of the user + :type id: string + + :rtype: list of dictionaries + + ''' + schema = context.get('schema') or ( + ckan.logic.schema.default_follow_user_schema()) + data_dict, errors = _validate(data_dict, schema, context) + if errors: + raise ValidationError(errors, ckan.logic.action.error_summary(errors)) + + # Get the list of Follower objects. + model = context['model'] + user_id = data_dict.get('id') + followees = model.UserFollowingDataset.followee_list(user_id) + + # Convert the list of Follower objects to a list of Package objects. + datasets = [model.Package.get(followee.object_id) for followee in followees] + datasets = [dataset for dataset in datasets if dataset is not None] + + # Dictize the list of Package objects. + return [model_dictize.package_dictize(dataset, context) for dataset in datasets] + +def dashboard_activity_list(context, data_dict): + '''Return the dashboard activity stream of the given user. 
+ + :param id: the id or name of the user + :type id: string + + :rtype: list of dictionaries + + ''' + model = context['model'] + user_id = _get_or_bust(data_dict, 'id') + + activity_query = model.Session.query(model.Activity) + user_followees_query = activity_query.join(model.UserFollowingUser, model.UserFollowingUser.object_id == model.Activity.user_id) + dataset_followees_query = activity_query.join(model.UserFollowingDataset, model.UserFollowingDataset.object_id == model.Activity.object_id) + + from_user_query = activity_query.filter(model.Activity.user_id==user_id) + about_user_query = activity_query.filter(model.Activity.object_id==user_id) + user_followees_query = user_followees_query.filter(model.UserFollowingUser.follower_id==user_id) + dataset_followees_query = dataset_followees_query.filter(model.UserFollowingDataset.follower_id==user_id) + + query = from_user_query.union(about_user_query).union( + user_followees_query).union(dataset_followees_query) + query = query.order_by(_desc(model.Activity.timestamp)) + query = query.limit(15) + activity_objects = query.all() + + return model_dictize.activity_list_dictize(activity_objects, context) + +def dashboard_activity_list_html(context, data_dict): + '''Return the dashboard activity stream of the given user as HTML. + + The activity stream is rendered as a snippet of HTML meant to be included + in an HTML page, i.e. it doesn't have any HTML header or footer. + + :param id: The id or name of the user. + :type id: string + + :rtype: string + + ''' + activity_stream = dashboard_activity_list(context, data_dict) + return _activity_list_to_html(context, activity_stream) diff --git a/ckan/logic/auth/publisher/create.py b/ckan/logic/auth/publisher/create.py index ea371571b0a..6bdb48d2cc2 100644 --- a/ckan/logic/auth/publisher/create.py +++ b/ckan/logic/auth/publisher/create.py @@ -12,18 +12,20 @@ def package_create(context, data_dict=None): model = context['model'] user = context['user'] - userobj = model.User.get( user ) + userobj = model.User.get(user) - if userobj: + if userobj and len(userobj.get_groups()): return {'success': True} - return {'success': False, 'msg': 'You must be logged in to create a package'} + return {'success': False, + 'msg': _('You must be logged in and be within a group to create ' + 'a package')} def related_create(context, data_dict=None): model = context['model'] user = context['user'] - userobj = model.User.get( user ) + userobj = model.User.get(user) if userobj: return {'success': True} @@ -79,7 +81,7 @@ def group_create(context, data_dict=None): model = context['model'] user = context['user'] - if not user: + if not model.User.get(user): return {'success': False, 'msg': _('User is not authorized to create groups') } if Authorizer.is_sysadmin(user): diff --git a/ckan/logic/schema.py b/ckan/logic/schema.py index 61a4b4abe2c..f3b27654b74 100644 --- a/ckan/logic/schema.py +++ b/ckan/logic/schema.py @@ -49,7 +49,7 @@ def default_resource_schema(): 'revision_id': [ignore_missing, unicode], 'resource_group_id': [ignore], 'package_id': [ignore], - 'url': [ignore_empty, unicode],#, URL(add_http=False)], + 'url': [not_empty, unicode],#, URL(add_http=False)], 'description': [ignore_missing, unicode], 'format': [ignore_missing, unicode], 'hash': [ignore_missing, unicode], @@ -153,6 +153,11 @@ def default_update_package_schema(): return schema def package_form_schema(): + # This function is deprecated and was replaced by + # form_to_db_package_schema(), it remains here for backwards compatibility. 
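# Usage sketch, not part of the patch: the follow/followee and dashboard
# actions added in ckan/logic/action/get.py above are ordinary action
# functions, so they are called like any other. The user name below is a
# made-up example; assumes a running CKAN instance with existing data.

import ckan.model as model
from ckan.logic import get_action

context = {'model': model, 'session': model.Session, 'user': 'joe'}
user = get_action('user_show')(context, {'id': 'joe'})

print get_action('user_follower_count')(context, {'id': user['id']})
print get_action('dataset_followee_count')(context, {'id': user['id']})

# HTML snippet for the user's dashboard activity stream, as consumed by the
# dashboard_activity_stream() template helper in ckan/lib/helpers.py.
html = get_action('dashboard_activity_list_html')(context, {'id': user['id']})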
+ return form_to_db_package_schema() + +def form_to_db_package_schema(): schema = default_package_schema() ##new @@ -175,6 +180,26 @@ def package_form_schema(): schema.pop('relationships_as_subject') return schema +def db_to_form_package_schema(): + schema = default_package_schema() + # Workaround a bug in CKAN's convert_from_tags() function. + # TODO: Fix this issue in convert_from_tags(). + schema.update({ + 'tags': { + '__extras': [ckan.lib.navl.validators.keep_extras, + ckan.logic.converters.free_tags_only] + }, + }) + # Workaround a bug in CKAN. + # TODO: Fix this elsewhere so we don't need to workaround it here. + schema['resources'].update({ + 'created': [ckan.lib.navl.validators.ignore_missing], + 'last_modified': [ckan.lib.navl.validators.ignore_missing], + 'cache_last_updated': [ckan.lib.navl.validators.ignore_missing], + 'webstore_last_updated': [ckan.lib.navl.validators.ignore_missing], + }) + return schema + def default_group_schema(): schema = { diff --git a/ckan/logic/validators.py b/ckan/logic/validators.py index 30f4fea47f3..d7bc95c15d6 100644 --- a/ckan/logic/validators.py +++ b/ckan/logic/validators.py @@ -152,8 +152,10 @@ def activity_type_exists(activity_type): 'new package' : package_id_exists, 'changed package' : package_id_exists, 'deleted package' : package_id_exists, + 'follow dataset' : package_id_exists, 'new user' : user_id_exists, 'changed user' : user_id_exists, + 'follow user' : user_id_exists, 'new group' : group_id_exists, 'changed group' : group_id_exists, 'deleted group' : group_id_exists, diff --git a/ckan/model/follower.py b/ckan/model/follower.py index 0698867682c..0b3240ac9b7 100644 --- a/ckan/model/follower.py +++ b/ckan/model/follower.py @@ -27,24 +27,39 @@ def get(self, follower_id, object_id): return query.first() @classmethod - def follower_count(cls, object_id): - '''Return the number of users following a user.''' + def is_following(cls, follower_id, object_id): + '''Return True if follower_id is currently following object_id, False + otherwise. + + ''' + return UserFollowingUser.get(follower_id, object_id) is not None + + + @classmethod + def followee_count(cls, follower_id): + '''Return the number of users followed by a user.''' return meta.Session.query(UserFollowingUser).filter( - UserFollowingUser.object_id == object_id).count() + UserFollowingUser.follower_id == follower_id).count() @classmethod - def follower_list(cls, object_id): - '''Return a list of all of the followers of a user.''' + def followee_list(cls, follower_id): + '''Return a list of users followed by a user.''' return meta.Session.query(UserFollowingUser).filter( - UserFollowingUser.object_id == object_id).all() + UserFollowingUser.follower_id == follower_id).all() + @classmethod - def is_following(cls, follower_id, object_id): - '''Return True if follower_id is currently following object_id, False - otherwise. 
+ def follower_count(cls, user_id): + '''Return the number of followers of a user.''' + return meta.Session.query(UserFollowingUser).filter( + UserFollowingUser.object_id == user_id).count() + + @classmethod + def follower_list(cls, user_id): + '''Return a list of followers of a user.''' + return meta.Session.query(UserFollowingUser).filter( + UserFollowingUser.object_id == user_id).all() - ''' - return UserFollowingUser.get(follower_id, object_id) is not None user_following_user_table = sqlalchemy.Table('user_following_user', meta.metadata, @@ -85,24 +100,39 @@ def get(self, follower_id, object_id): return query.first() @classmethod - def follower_count(cls, object_id): - '''Return the number of users following a dataset.''' + def is_following(cls, follower_id, object_id): + '''Return True if follower_id is currently following object_id, False + otherwise. + + ''' + return UserFollowingDataset.get(follower_id, object_id) is not None + + + @classmethod + def followee_count(cls, follower_id): + '''Return the number of datasets followed by a user.''' return meta.Session.query(UserFollowingDataset).filter( - UserFollowingDataset.object_id == object_id).count() + UserFollowingDataset.follower_id == follower_id).count() @classmethod - def follower_list(cls, object_id): - '''Return a list of all of the followers of a dataset.''' + def followee_list(cls, follower_id): + '''Return a list of datasets followed by a user.''' return meta.Session.query(UserFollowingDataset).filter( - UserFollowingDataset.object_id == object_id).all() + UserFollowingDataset.follower_id == follower_id).all() + @classmethod - def is_following(cls, follower_id, object_id): - '''Return True if follower_id is currently following object_id, False - otherwise. + def follower_count(cls, dataset_id): + '''Return the number of followers of a dataset.''' + return meta.Session.query(UserFollowingDataset).filter( + UserFollowingDataset.object_id == dataset_id).count() + + @classmethod + def follower_list(cls, dataset_id): + '''Return a list of followers of a dataset.''' + return meta.Session.query(UserFollowingDataset).filter( + UserFollowingDataset.object_id == dataset_id).all() - ''' - return UserFollowingDataset.get(follower_id, object_id) is not None user_following_dataset_table = sqlalchemy.Table('user_following_dataset', meta.metadata, diff --git a/ckan/public/css/style.css b/ckan/public/css/style.css index 3d9758013ce..6df5b558884 100644 --- a/ckan/public/css/style.css +++ b/ckan/public/css/style.css @@ -615,9 +615,9 @@ ul.userlist .badge { margin-top: 5px; } -/* ================== */ -/* = User Read page = */ -/* ================== */ +/* ================================= */ +/* = User Read and Dashboard pages = */ +/* ================================= */ body.user.read #sidebar { display: none; } body.user.read #content { @@ -625,11 +625,11 @@ body.user.read #content { width: 950px; } -.user.read .page_heading { +.user.read .page_heading, .user.dashboard .page_heading { font-weight: bold; } -.user.read .page_heading img.gravatar { +.user.read .page_heading img.gravatar, .user.dashboard .page_heading img.gravatar { padding: 2px; border: solid 1px #ddd; vertical-align: middle; @@ -637,7 +637,7 @@ body.user.read #content { margin-top: -3px; } -.user.read .page_heading .fullname { +.user.read .page_heading .fullname, .user.dashboard .page_heading .fullname { font-weight: normal; color: #999; } diff --git a/ckan/public/scripts/application.js b/ckan/public/scripts/application.js index 2eecaa34bd8..131d7b87d8a 100644 --- 
a/ckan/public/scripts/application.js +++ b/ckan/public/scripts/application.js @@ -491,7 +491,7 @@ CKAN.View.ResourceEditor = Backbone.View.extend({ CKAN.View.Resource = Backbone.View.extend({ initialize: function() { this.el = $(this.el); - _.bindAll(this,'updateName','updateIcon','name','askToDelete','openMyPanel','setErrors','setupDynamicExtras','addDynamicExtra', 'onDatastoreEnabledChange'); + _.bindAll(this,'updateName','updateIcon','name','askToDelete','openMyPanel','setErrors','setupDynamicExtras','addDynamicExtra' ); this.render(); }, render: function() { @@ -526,12 +526,8 @@ CKAN.View.Resource = Backbone.View.extend({ // Hook to open panel link this.li.find('.resource-open-my-panel').click(this.openMyPanel); this.table.find('.js-resource-edit-delete').click(this.askToDelete); - this.table.find('.js-datastore-enabled-checkbox').change(this.onDatastoreEnabledChange); // Hook to markdown editor CKAN.Utils.setupMarkdownEditor(this.table.find('.markdown-editor')); - if (resource_object.resource.webstore_url) { - this.table.find('.js-datastore-enabled-checkbox').prop('checked', true); - } // Set initial state this.updateName(); @@ -729,12 +725,6 @@ CKAN.View.Resource = Backbone.View.extend({ removeFromDom: function() { this.li.remove(); this.table.remove(); - }, - onDatastoreEnabledChange: function(e) { - var isChecked = this.table.find('.js-datastore-enabled-checkbox').prop('checked'); - var webstore_url = isChecked ? 'enabled' : null; - this.model.set({webstore_url: webstore_url}); - this.table.find('.js-datastore-enabled-text').val(webstore_url); } }); @@ -867,7 +857,6 @@ CKAN.View.ResourceAddUpload = Backbone.View.extend({ , hash: data._checksum , cache_url: data._location , cache_url_updated: lastmod - , webstore_url: data._location } , { error: function(model, error) { @@ -934,7 +923,6 @@ CKAN.View.ResourceAddUrl = Backbone.View.extend({ size: data.size, mimetype: data.mimetype, last_modified: data.last_modified, - webstore_url: 'enabled', url_error: (data.url_errors || [""])[0] }); self.collection.add(newResource); @@ -944,9 +932,6 @@ CKAN.View.ResourceAddUrl = Backbone.View.extend({ } else { newResource.set({url: urlVal, resource_type: this.options.mode}); - if (newResource.get('resource_type')=='file') { - newResource.set({webstore_url: 'enabled'}); - } this.collection.add(newResource); this.resetForm(); } @@ -1034,7 +1019,7 @@ CKAN.Utils = function($, my) { input_box.attr('name', new_name); input_box.attr('id', new_name); - + var $new = $('

'); $new.append($('').attr('name', old_name).val(ui.item.value)); $new.append(' '); @@ -1443,7 +1428,7 @@ CKAN.Utils = function($, my) { return; } var data = JSON.stringify({ - id: object_id, + id: object_id }); var nextState = 'unfollow'; var nextString = CKAN.Strings.unfollow; @@ -1457,7 +1442,7 @@ CKAN.Utils = function($, my) { return; } var data = JSON.stringify({ - id: object_id, + id: object_id }); var nextState = 'follow'; var nextString = CKAN.Strings.follow; @@ -1476,10 +1461,10 @@ CKAN.Utils = function($, my) { success: function(data) { button.setAttribute('data-state', nextState); button.innerHTML = nextString; - }, + } }); }; - + // This only needs to happen on dataset pages, but it doesn't seem to do // any harm to call it anyway. $('#user_follow_button').on('click', followButtonClicked); @@ -1585,6 +1570,14 @@ CKAN.DataPreview = function ($, my) { my.loadPreviewDialog = function(resourceData) { my.$dialog.html('

Loading ...

'); + function showError(msg){ + msg = msg || CKAN.Strings.errorLoadingPreview; + return $('#ckanext-datapreview') + .append('
') + .addClass('alert alert-error fade in') + .html(msg); + } + function initializeDataExplorer(dataset) { var views = [ { @@ -1618,6 +1611,7 @@ CKAN.DataPreview = function ($, my) { } }); + // ----------------------------- // Setup the Embed modal dialog. // ----------------------------- @@ -1674,7 +1668,7 @@ CKAN.DataPreview = function ($, my) { } // 4 situations - // a) have a webstore_url + // a) webstore_url is active (something was posted to the datastore) // b) csv or xls (but not webstore) // c) can be treated as plain text // d) none of the above but worth iframing (assumption is @@ -1697,14 +1691,38 @@ CKAN.DataPreview = function ($, my) { if (resourceData.webstore_url) { resourceData.elasticsearch_url = '/api/data/' + resourceData.id; var dataset = new recline.Model.Dataset(resourceData, 'elasticsearch'); - initializeDataExplorer(dataset); + var errorMsg = CKAN.Strings.errorLoadingPreview + ': ' + CKAN.Strings.errorDataStore; + dataset.fetch() + .done(function(dataset){ + initializeDataExplorer(dataset); + }) + .fail(function(error){ + if (error.message) errorMsg += ' (' + error.message + ')'; + showError(errorMsg); + }); + } else if (resourceData.formatNormalized in {'csv': '', 'xls': ''}) { // set format as this is used by Recline in setting format for DataProxy resourceData.format = resourceData.formatNormalized; var dataset = new recline.Model.Dataset(resourceData, 'dataproxy'); - initializeDataExplorer(dataset); - $('.recline-query-editor .text-query').hide(); + var errorMsg = CKAN.Strings.errorLoadingPreview + ': ' +CKAN.Strings.errorDataProxy; + dataset.fetch() + .done(function(dataset){ + + dataset.bind('query:fail', function(error) { + $('#ckanext-datapreview .data-view-container').hide(); + $('#ckanext-datapreview .header').hide(); + $('.preview-header .btn').hide(); + }); + + initializeDataExplorer(dataset); + $('.recline-query-editor .text-query').hide(); + }) + .fail(function(error){ + if (error.message) errorMsg += ' (' + error.message + ')'; + showError(errorMsg); + }); } else if (resourceData.formatNormalized in { 'rdf+xml': '', diff --git a/ckan/public/scripts/templates.js b/ckan/public/scripts/templates.js index 6ecad116a6b..596c94f45fc 100644 --- a/ckan/public/scripts/templates.js +++ b/ckan/public/scripts/templates.js @@ -27,7 +27,6 @@ CKAN.Templates.resourceEntry = ' \ '; var youCanUseMarkdownString = CKAN.Strings.youCanUseMarkdown.replace('%a', '').replace('%b', ''); -var shouldADataStoreBeEnabledString = CKAN.Strings.shouldADataStoreBeEnabled.replace('%a', '').replace('%b', ''); var datesAreInISOString = CKAN.Strings.datesAreInISO.replace('%a', '').replace('%b', '').replace('%c', '').replace('%d', ''); // TODO it would be nice to unify this with the markdown editor specified in helpers.py @@ -93,16 +92,6 @@ CKAN.Templates.resourceDetails = ' \ {{/if}} \ \ \ -
\ - \ -
\ - \ -
\ -
\
\ \
\ diff --git a/ckan/public/scripts/vendor/flot/0.7/excanvas.js b/ckan/public/scripts/vendor/flot/0.7/excanvas.js new file mode 100644 index 00000000000..c40d6f7014d --- /dev/null +++ b/ckan/public/scripts/vendor/flot/0.7/excanvas.js @@ -0,0 +1,1427 @@ +// Copyright 2006 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + + +// Known Issues: +// +// * Patterns only support repeat. +// * Radial gradient are not implemented. The VML version of these look very +// different from the canvas one. +// * Clipping paths are not implemented. +// * Coordsize. The width and height attribute have higher priority than the +// width and height style values which isn't correct. +// * Painting mode isn't implemented. +// * Canvas width/height should is using content-box by default. IE in +// Quirks mode will draw the canvas using border-box. Either change your +// doctype to HTML5 +// (http://www.whatwg.org/specs/web-apps/current-work/#the-doctype) +// or use Box Sizing Behavior from WebFX +// (http://webfx.eae.net/dhtml/boxsizing/boxsizing.html) +// * Non uniform scaling does not correctly scale strokes. +// * Filling very large shapes (above 5000 points) is buggy. +// * Optimize. There is always room for speed improvements. + +// Only add this code if we do not already have a canvas implementation +if (!document.createElement('canvas').getContext) { + +(function() { + + // alias some functions to make (compiled) code shorter + var m = Math; + var mr = m.round; + var ms = m.sin; + var mc = m.cos; + var abs = m.abs; + var sqrt = m.sqrt; + + // this is used for sub pixel precision + var Z = 10; + var Z2 = Z / 2; + + /** + * This funtion is assigned to the elements as element.getContext(). + * @this {HTMLElement} + * @return {CanvasRenderingContext2D_} + */ + function getContext() { + return this.context_ || + (this.context_ = new CanvasRenderingContext2D_(this)); + } + + var slice = Array.prototype.slice; + + /** + * Binds a function to an object. The returned function will always use the + * passed in {@code obj} as {@code this}. 
+ * + * Example: + * + * g = bind(f, obj, a, b) + * g(c, d) // will do f.call(obj, a, b, c, d) + * + * @param {Function} f The function to bind the object to + * @param {Object} obj The object that should act as this when the function + * is called + * @param {*} var_args Rest arguments that will be used as the initial + * arguments when the function is called + * @return {Function} A new function that has bound this + */ + function bind(f, obj, var_args) { + var a = slice.call(arguments, 2); + return function() { + return f.apply(obj, a.concat(slice.call(arguments))); + }; + } + + function encodeHtmlAttribute(s) { + return String(s).replace(/&/g, '&').replace(/"/g, '"'); + } + + function addNamespacesAndStylesheet(doc) { + // create xmlns + if (!doc.namespaces['g_vml_']) { + doc.namespaces.add('g_vml_', 'urn:schemas-microsoft-com:vml', + '#default#VML'); + + } + if (!doc.namespaces['g_o_']) { + doc.namespaces.add('g_o_', 'urn:schemas-microsoft-com:office:office', + '#default#VML'); + } + + // Setup default CSS. Only add one style sheet per document + if (!doc.styleSheets['ex_canvas_']) { + var ss = doc.createStyleSheet(); + ss.owningElement.id = 'ex_canvas_'; + ss.cssText = 'canvas{display:inline-block;overflow:hidden;' + + // default size is 300x150 in Gecko and Opera + 'text-align:left;width:300px;height:150px}'; + } + } + + // Add namespaces and stylesheet at startup. + addNamespacesAndStylesheet(document); + + var G_vmlCanvasManager_ = { + init: function(opt_doc) { + if (/MSIE/.test(navigator.userAgent) && !window.opera) { + var doc = opt_doc || document; + // Create a dummy element so that IE will allow canvas elements to be + // recognized. + doc.createElement('canvas'); + doc.attachEvent('onreadystatechange', bind(this.init_, this, doc)); + } + }, + + init_: function(doc) { + // find all canvas elements + var els = doc.getElementsByTagName('canvas'); + for (var i = 0; i < els.length; i++) { + this.initElement(els[i]); + } + }, + + /** + * Public initializes a canvas element so that it can be used as canvas + * element from now on. This is called automatically before the page is + * loaded but if you are creating elements using createElement you need to + * make sure this is called on the element. + * @param {HTMLElement} el The canvas element to initialize. + * @return {HTMLElement} the element that was created. + */ + initElement: function(el) { + if (!el.getContext) { + el.getContext = getContext; + + // Add namespaces and stylesheet to document of the element. + addNamespacesAndStylesheet(el.ownerDocument); + + // Remove fallback content. There is no way to hide text nodes so we + // just remove all childNodes. We could hide all elements and remove + // text nodes but who really cares about the fallback content. 
+ el.innerHTML = ''; + + // do not use inline function because that will leak memory + el.attachEvent('onpropertychange', onPropertyChange); + el.attachEvent('onresize', onResize); + + var attrs = el.attributes; + if (attrs.width && attrs.width.specified) { + // TODO: use runtimeStyle and coordsize + // el.getContext().setWidth_(attrs.width.nodeValue); + el.style.width = attrs.width.nodeValue + 'px'; + } else { + el.width = el.clientWidth; + } + if (attrs.height && attrs.height.specified) { + // TODO: use runtimeStyle and coordsize + // el.getContext().setHeight_(attrs.height.nodeValue); + el.style.height = attrs.height.nodeValue + 'px'; + } else { + el.height = el.clientHeight; + } + //el.getContext().setCoordsize_() + } + return el; + } + }; + + function onPropertyChange(e) { + var el = e.srcElement; + + switch (e.propertyName) { + case 'width': + el.getContext().clearRect(); + el.style.width = el.attributes.width.nodeValue + 'px'; + // In IE8 this does not trigger onresize. + el.firstChild.style.width = el.clientWidth + 'px'; + break; + case 'height': + el.getContext().clearRect(); + el.style.height = el.attributes.height.nodeValue + 'px'; + el.firstChild.style.height = el.clientHeight + 'px'; + break; + } + } + + function onResize(e) { + var el = e.srcElement; + if (el.firstChild) { + el.firstChild.style.width = el.clientWidth + 'px'; + el.firstChild.style.height = el.clientHeight + 'px'; + } + } + + G_vmlCanvasManager_.init(); + + // precompute "00" to "FF" + var decToHex = []; + for (var i = 0; i < 16; i++) { + for (var j = 0; j < 16; j++) { + decToHex[i * 16 + j] = i.toString(16) + j.toString(16); + } + } + + function createMatrixIdentity() { + return [ + [1, 0, 0], + [0, 1, 0], + [0, 0, 1] + ]; + } + + function matrixMultiply(m1, m2) { + var result = createMatrixIdentity(); + + for (var x = 0; x < 3; x++) { + for (var y = 0; y < 3; y++) { + var sum = 0; + + for (var z = 0; z < 3; z++) { + sum += m1[x][z] * m2[z][y]; + } + + result[x][y] = sum; + } + } + return result; + } + + function copyState(o1, o2) { + o2.fillStyle = o1.fillStyle; + o2.lineCap = o1.lineCap; + o2.lineJoin = o1.lineJoin; + o2.lineWidth = o1.lineWidth; + o2.miterLimit = o1.miterLimit; + o2.shadowBlur = o1.shadowBlur; + o2.shadowColor = o1.shadowColor; + o2.shadowOffsetX = o1.shadowOffsetX; + o2.shadowOffsetY = o1.shadowOffsetY; + o2.strokeStyle = o1.strokeStyle; + o2.globalAlpha = o1.globalAlpha; + o2.font = o1.font; + o2.textAlign = o1.textAlign; + o2.textBaseline = o1.textBaseline; + o2.arcScaleX_ = o1.arcScaleX_; + o2.arcScaleY_ = o1.arcScaleY_; + o2.lineScale_ = o1.lineScale_; + } + + var colorData = { + aliceblue: '#F0F8FF', + antiquewhite: '#FAEBD7', + aquamarine: '#7FFFD4', + azure: '#F0FFFF', + beige: '#F5F5DC', + bisque: '#FFE4C4', + black: '#000000', + blanchedalmond: '#FFEBCD', + blueviolet: '#8A2BE2', + brown: '#A52A2A', + burlywood: '#DEB887', + cadetblue: '#5F9EA0', + chartreuse: '#7FFF00', + chocolate: '#D2691E', + coral: '#FF7F50', + cornflowerblue: '#6495ED', + cornsilk: '#FFF8DC', + crimson: '#DC143C', + cyan: '#00FFFF', + darkblue: '#00008B', + darkcyan: '#008B8B', + darkgoldenrod: '#B8860B', + darkgray: '#A9A9A9', + darkgreen: '#006400', + darkgrey: '#A9A9A9', + darkkhaki: '#BDB76B', + darkmagenta: '#8B008B', + darkolivegreen: '#556B2F', + darkorange: '#FF8C00', + darkorchid: '#9932CC', + darkred: '#8B0000', + darksalmon: '#E9967A', + darkseagreen: '#8FBC8F', + darkslateblue: '#483D8B', + darkslategray: '#2F4F4F', + darkslategrey: '#2F4F4F', + darkturquoise: '#00CED1', + darkviolet: '#9400D3', 
+ deeppink: '#FF1493', + deepskyblue: '#00BFFF', + dimgray: '#696969', + dimgrey: '#696969', + dodgerblue: '#1E90FF', + firebrick: '#B22222', + floralwhite: '#FFFAF0', + forestgreen: '#228B22', + gainsboro: '#DCDCDC', + ghostwhite: '#F8F8FF', + gold: '#FFD700', + goldenrod: '#DAA520', + grey: '#808080', + greenyellow: '#ADFF2F', + honeydew: '#F0FFF0', + hotpink: '#FF69B4', + indianred: '#CD5C5C', + indigo: '#4B0082', + ivory: '#FFFFF0', + khaki: '#F0E68C', + lavender: '#E6E6FA', + lavenderblush: '#FFF0F5', + lawngreen: '#7CFC00', + lemonchiffon: '#FFFACD', + lightblue: '#ADD8E6', + lightcoral: '#F08080', + lightcyan: '#E0FFFF', + lightgoldenrodyellow: '#FAFAD2', + lightgreen: '#90EE90', + lightgrey: '#D3D3D3', + lightpink: '#FFB6C1', + lightsalmon: '#FFA07A', + lightseagreen: '#20B2AA', + lightskyblue: '#87CEFA', + lightslategray: '#778899', + lightslategrey: '#778899', + lightsteelblue: '#B0C4DE', + lightyellow: '#FFFFE0', + limegreen: '#32CD32', + linen: '#FAF0E6', + magenta: '#FF00FF', + mediumaquamarine: '#66CDAA', + mediumblue: '#0000CD', + mediumorchid: '#BA55D3', + mediumpurple: '#9370DB', + mediumseagreen: '#3CB371', + mediumslateblue: '#7B68EE', + mediumspringgreen: '#00FA9A', + mediumturquoise: '#48D1CC', + mediumvioletred: '#C71585', + midnightblue: '#191970', + mintcream: '#F5FFFA', + mistyrose: '#FFE4E1', + moccasin: '#FFE4B5', + navajowhite: '#FFDEAD', + oldlace: '#FDF5E6', + olivedrab: '#6B8E23', + orange: '#FFA500', + orangered: '#FF4500', + orchid: '#DA70D6', + palegoldenrod: '#EEE8AA', + palegreen: '#98FB98', + paleturquoise: '#AFEEEE', + palevioletred: '#DB7093', + papayawhip: '#FFEFD5', + peachpuff: '#FFDAB9', + peru: '#CD853F', + pink: '#FFC0CB', + plum: '#DDA0DD', + powderblue: '#B0E0E6', + rosybrown: '#BC8F8F', + royalblue: '#4169E1', + saddlebrown: '#8B4513', + salmon: '#FA8072', + sandybrown: '#F4A460', + seagreen: '#2E8B57', + seashell: '#FFF5EE', + sienna: '#A0522D', + skyblue: '#87CEEB', + slateblue: '#6A5ACD', + slategray: '#708090', + slategrey: '#708090', + snow: '#FFFAFA', + springgreen: '#00FF7F', + steelblue: '#4682B4', + tan: '#D2B48C', + thistle: '#D8BFD8', + tomato: '#FF6347', + turquoise: '#40E0D0', + violet: '#EE82EE', + wheat: '#F5DEB3', + whitesmoke: '#F5F5F5', + yellowgreen: '#9ACD32' + }; + + + function getRgbHslContent(styleString) { + var start = styleString.indexOf('(', 3); + var end = styleString.indexOf(')', start + 1); + var parts = styleString.substring(start + 1, end).split(','); + // add alpha if needed + if (parts.length == 4 && styleString.substr(3, 1) == 'a') { + alpha = Number(parts[3]); + } else { + parts[3] = 1; + } + return parts; + } + + function percent(s) { + return parseFloat(s) / 100; + } + + function clamp(v, min, max) { + return Math.min(max, Math.max(min, v)); + } + + function hslToRgb(parts){ + var r, g, b; + h = parseFloat(parts[0]) / 360 % 360; + if (h < 0) + h++; + s = clamp(percent(parts[1]), 0, 1); + l = clamp(percent(parts[2]), 0, 1); + if (s == 0) { + r = g = b = l; // achromatic + } else { + var q = l < 0.5 ? 
l * (1 + s) : l + s - l * s; + var p = 2 * l - q; + r = hueToRgb(p, q, h + 1 / 3); + g = hueToRgb(p, q, h); + b = hueToRgb(p, q, h - 1 / 3); + } + + return '#' + decToHex[Math.floor(r * 255)] + + decToHex[Math.floor(g * 255)] + + decToHex[Math.floor(b * 255)]; + } + + function hueToRgb(m1, m2, h) { + if (h < 0) + h++; + if (h > 1) + h--; + + if (6 * h < 1) + return m1 + (m2 - m1) * 6 * h; + else if (2 * h < 1) + return m2; + else if (3 * h < 2) + return m1 + (m2 - m1) * (2 / 3 - h) * 6; + else + return m1; + } + + function processStyle(styleString) { + var str, alpha = 1; + + styleString = String(styleString); + if (styleString.charAt(0) == '#') { + str = styleString; + } else if (/^rgb/.test(styleString)) { + var parts = getRgbHslContent(styleString); + var str = '#', n; + for (var i = 0; i < 3; i++) { + if (parts[i].indexOf('%') != -1) { + n = Math.floor(percent(parts[i]) * 255); + } else { + n = Number(parts[i]); + } + str += decToHex[clamp(n, 0, 255)]; + } + alpha = parts[3]; + } else if (/^hsl/.test(styleString)) { + var parts = getRgbHslContent(styleString); + str = hslToRgb(parts); + alpha = parts[3]; + } else { + str = colorData[styleString] || styleString; + } + return {color: str, alpha: alpha}; + } + + var DEFAULT_STYLE = { + style: 'normal', + variant: 'normal', + weight: 'normal', + size: 10, + family: 'sans-serif' + }; + + // Internal text style cache + var fontStyleCache = {}; + + function processFontStyle(styleString) { + if (fontStyleCache[styleString]) { + return fontStyleCache[styleString]; + } + + var el = document.createElement('div'); + var style = el.style; + try { + style.font = styleString; + } catch (ex) { + // Ignore failures to set to invalid font. + } + + return fontStyleCache[styleString] = { + style: style.fontStyle || DEFAULT_STYLE.style, + variant: style.fontVariant || DEFAULT_STYLE.variant, + weight: style.fontWeight || DEFAULT_STYLE.weight, + size: style.fontSize || DEFAULT_STYLE.size, + family: style.fontFamily || DEFAULT_STYLE.family + }; + } + + function getComputedStyle(style, element) { + var computedStyle = {}; + + for (var p in style) { + computedStyle[p] = style[p]; + } + + // Compute the size + var canvasFontSize = parseFloat(element.currentStyle.fontSize), + fontSize = parseFloat(style.size); + + if (typeof style.size == 'number') { + computedStyle.size = style.size; + } else if (style.size.indexOf('px') != -1) { + computedStyle.size = fontSize; + } else if (style.size.indexOf('em') != -1) { + computedStyle.size = canvasFontSize * fontSize; + } else if(style.size.indexOf('%') != -1) { + computedStyle.size = (canvasFontSize / 100) * fontSize; + } else if (style.size.indexOf('pt') != -1) { + computedStyle.size = fontSize / .75; + } else { + computedStyle.size = canvasFontSize; + } + + // Different scaling between normal text and VML text. This was found using + // trial and error to get the same size as non VML text. + computedStyle.size *= 0.981; + + return computedStyle; + } + + function buildStyle(style) { + return style.style + ' ' + style.variant + ' ' + style.weight + ' ' + + style.size + 'px ' + style.family; + } + + function processLineCap(lineCap) { + switch (lineCap) { + case 'butt': + return 'flat'; + case 'round': + return 'round'; + case 'square': + default: + return 'square'; + } + } + + /** + * This class implements CanvasRenderingContext2D interface as described by + * the WHATWG. 
+ * @param {HTMLElement} surfaceElement The element that the 2D context should + * be associated with + */ + function CanvasRenderingContext2D_(surfaceElement) { + this.m_ = createMatrixIdentity(); + + this.mStack_ = []; + this.aStack_ = []; + this.currentPath_ = []; + + // Canvas context properties + this.strokeStyle = '#000'; + this.fillStyle = '#000'; + + this.lineWidth = 1; + this.lineJoin = 'miter'; + this.lineCap = 'butt'; + this.miterLimit = Z * 1; + this.globalAlpha = 1; + this.font = '10px sans-serif'; + this.textAlign = 'left'; + this.textBaseline = 'alphabetic'; + this.canvas = surfaceElement; + + var el = surfaceElement.ownerDocument.createElement('div'); + el.style.width = surfaceElement.clientWidth + 'px'; + el.style.height = surfaceElement.clientHeight + 'px'; + el.style.overflow = 'hidden'; + el.style.position = 'absolute'; + surfaceElement.appendChild(el); + + this.element_ = el; + this.arcScaleX_ = 1; + this.arcScaleY_ = 1; + this.lineScale_ = 1; + } + + var contextPrototype = CanvasRenderingContext2D_.prototype; + contextPrototype.clearRect = function() { + if (this.textMeasureEl_) { + this.textMeasureEl_.removeNode(true); + this.textMeasureEl_ = null; + } + this.element_.innerHTML = ''; + }; + + contextPrototype.beginPath = function() { + // TODO: Branch current matrix so that save/restore has no effect + // as per safari docs. + this.currentPath_ = []; + }; + + contextPrototype.moveTo = function(aX, aY) { + var p = this.getCoords_(aX, aY); + this.currentPath_.push({type: 'moveTo', x: p.x, y: p.y}); + this.currentX_ = p.x; + this.currentY_ = p.y; + }; + + contextPrototype.lineTo = function(aX, aY) { + var p = this.getCoords_(aX, aY); + this.currentPath_.push({type: 'lineTo', x: p.x, y: p.y}); + + this.currentX_ = p.x; + this.currentY_ = p.y; + }; + + contextPrototype.bezierCurveTo = function(aCP1x, aCP1y, + aCP2x, aCP2y, + aX, aY) { + var p = this.getCoords_(aX, aY); + var cp1 = this.getCoords_(aCP1x, aCP1y); + var cp2 = this.getCoords_(aCP2x, aCP2y); + bezierCurveTo(this, cp1, cp2, p); + }; + + // Helper function that takes the already fixed cordinates. + function bezierCurveTo(self, cp1, cp2, p) { + self.currentPath_.push({ + type: 'bezierCurveTo', + cp1x: cp1.x, + cp1y: cp1.y, + cp2x: cp2.x, + cp2y: cp2.y, + x: p.x, + y: p.y + }); + self.currentX_ = p.x; + self.currentY_ = p.y; + } + + contextPrototype.quadraticCurveTo = function(aCPx, aCPy, aX, aY) { + // the following is lifted almost directly from + // http://developer.mozilla.org/en/docs/Canvas_tutorial:Drawing_shapes + + var cp = this.getCoords_(aCPx, aCPy); + var p = this.getCoords_(aX, aY); + + var cp1 = { + x: this.currentX_ + 2.0 / 3.0 * (cp.x - this.currentX_), + y: this.currentY_ + 2.0 / 3.0 * (cp.y - this.currentY_) + }; + var cp2 = { + x: cp1.x + (p.x - this.currentX_) / 3.0, + y: cp1.y + (p.y - this.currentY_) / 3.0 + }; + + bezierCurveTo(this, cp1, cp2, p); + }; + + contextPrototype.arc = function(aX, aY, aRadius, + aStartAngle, aEndAngle, aClockwise) { + aRadius *= Z; + var arcType = aClockwise ? 'at' : 'wa'; + + var xStart = aX + mc(aStartAngle) * aRadius - Z2; + var yStart = aY + ms(aStartAngle) * aRadius - Z2; + + var xEnd = aX + mc(aEndAngle) * aRadius - Z2; + var yEnd = aY + ms(aEndAngle) * aRadius - Z2; + + // IE won't render arches drawn counter clockwise if xStart == xEnd. + if (xStart == xEnd && !aClockwise) { + xStart += 0.125; // Offset xStart by 1/80 of a pixel. 
Use something + // that can be represented in binary + } + + var p = this.getCoords_(aX, aY); + var pStart = this.getCoords_(xStart, yStart); + var pEnd = this.getCoords_(xEnd, yEnd); + + this.currentPath_.push({type: arcType, + x: p.x, + y: p.y, + radius: aRadius, + xStart: pStart.x, + yStart: pStart.y, + xEnd: pEnd.x, + yEnd: pEnd.y}); + + }; + + contextPrototype.rect = function(aX, aY, aWidth, aHeight) { + this.moveTo(aX, aY); + this.lineTo(aX + aWidth, aY); + this.lineTo(aX + aWidth, aY + aHeight); + this.lineTo(aX, aY + aHeight); + this.closePath(); + }; + + contextPrototype.strokeRect = function(aX, aY, aWidth, aHeight) { + var oldPath = this.currentPath_; + this.beginPath(); + + this.moveTo(aX, aY); + this.lineTo(aX + aWidth, aY); + this.lineTo(aX + aWidth, aY + aHeight); + this.lineTo(aX, aY + aHeight); + this.closePath(); + this.stroke(); + + this.currentPath_ = oldPath; + }; + + contextPrototype.fillRect = function(aX, aY, aWidth, aHeight) { + var oldPath = this.currentPath_; + this.beginPath(); + + this.moveTo(aX, aY); + this.lineTo(aX + aWidth, aY); + this.lineTo(aX + aWidth, aY + aHeight); + this.lineTo(aX, aY + aHeight); + this.closePath(); + this.fill(); + + this.currentPath_ = oldPath; + }; + + contextPrototype.createLinearGradient = function(aX0, aY0, aX1, aY1) { + var gradient = new CanvasGradient_('gradient'); + gradient.x0_ = aX0; + gradient.y0_ = aY0; + gradient.x1_ = aX1; + gradient.y1_ = aY1; + return gradient; + }; + + contextPrototype.createRadialGradient = function(aX0, aY0, aR0, + aX1, aY1, aR1) { + var gradient = new CanvasGradient_('gradientradial'); + gradient.x0_ = aX0; + gradient.y0_ = aY0; + gradient.r0_ = aR0; + gradient.x1_ = aX1; + gradient.y1_ = aY1; + gradient.r1_ = aR1; + return gradient; + }; + + contextPrototype.drawImage = function(image, var_args) { + var dx, dy, dw, dh, sx, sy, sw, sh; + + // to find the original width we overide the width and height + var oldRuntimeWidth = image.runtimeStyle.width; + var oldRuntimeHeight = image.runtimeStyle.height; + image.runtimeStyle.width = 'auto'; + image.runtimeStyle.height = 'auto'; + + // get the original size + var w = image.width; + var h = image.height; + + // and remove overides + image.runtimeStyle.width = oldRuntimeWidth; + image.runtimeStyle.height = oldRuntimeHeight; + + if (arguments.length == 3) { + dx = arguments[1]; + dy = arguments[2]; + sx = sy = 0; + sw = dw = w; + sh = dh = h; + } else if (arguments.length == 5) { + dx = arguments[1]; + dy = arguments[2]; + dw = arguments[3]; + dh = arguments[4]; + sx = sy = 0; + sw = w; + sh = h; + } else if (arguments.length == 9) { + sx = arguments[1]; + sy = arguments[2]; + sw = arguments[3]; + sh = arguments[4]; + dx = arguments[5]; + dy = arguments[6]; + dw = arguments[7]; + dh = arguments[8]; + } else { + throw Error('Invalid number of arguments'); + } + + var d = this.getCoords_(dx, dy); + + var w2 = sw / 2; + var h2 = sh / 2; + + var vmlStr = []; + + var W = 10; + var H = 10; + + // For some reason that I've now forgotten, using divs didn't work + vmlStr.push(' ' , + '', + ''); + + this.element_.insertAdjacentHTML('BeforeEnd', vmlStr.join('')); + }; + + contextPrototype.stroke = function(aFill) { + var W = 10; + var H = 10; + // Divide the shape into chunks if it's too long because IE has a limit + // somewhere for how long a VML shape can be. This simple division does + // not work with fills, only strokes, unfortunately. 
+ var chunkSize = 5000; + + var min = {x: null, y: null}; + var max = {x: null, y: null}; + + for (var j = 0; j < this.currentPath_.length; j += chunkSize) { + var lineStr = []; + var lineOpen = false; + + lineStr.push(''); + + if (!aFill) { + appendStroke(this, lineStr); + } else { + appendFill(this, lineStr, min, max); + } + + lineStr.push(''); + + this.element_.insertAdjacentHTML('beforeEnd', lineStr.join('')); + } + }; + + function appendStroke(ctx, lineStr) { + var a = processStyle(ctx.strokeStyle); + var color = a.color; + var opacity = a.alpha * ctx.globalAlpha; + var lineWidth = ctx.lineScale_ * ctx.lineWidth; + + // VML cannot correctly render a line if the width is less than 1px. + // In that case, we dilute the color to make the line look thinner. + if (lineWidth < 1) { + opacity *= lineWidth; + } + + lineStr.push( + '' + ); + } + + function appendFill(ctx, lineStr, min, max) { + var fillStyle = ctx.fillStyle; + var arcScaleX = ctx.arcScaleX_; + var arcScaleY = ctx.arcScaleY_; + var width = max.x - min.x; + var height = max.y - min.y; + if (fillStyle instanceof CanvasGradient_) { + // TODO: Gradients transformed with the transformation matrix. + var angle = 0; + var focus = {x: 0, y: 0}; + + // additional offset + var shift = 0; + // scale factor for offset + var expansion = 1; + + if (fillStyle.type_ == 'gradient') { + var x0 = fillStyle.x0_ / arcScaleX; + var y0 = fillStyle.y0_ / arcScaleY; + var x1 = fillStyle.x1_ / arcScaleX; + var y1 = fillStyle.y1_ / arcScaleY; + var p0 = ctx.getCoords_(x0, y0); + var p1 = ctx.getCoords_(x1, y1); + var dx = p1.x - p0.x; + var dy = p1.y - p0.y; + angle = Math.atan2(dx, dy) * 180 / Math.PI; + + // The angle should be a non-negative number. + if (angle < 0) { + angle += 360; + } + + // Very small angles produce an unexpected result because they are + // converted to a scientific notation string. + if (angle < 1e-6) { + angle = 0; + } + } else { + var p0 = ctx.getCoords_(fillStyle.x0_, fillStyle.y0_); + focus = { + x: (p0.x - min.x) / width, + y: (p0.y - min.y) / height + }; + + width /= arcScaleX * Z; + height /= arcScaleY * Z; + var dimension = m.max(width, height); + shift = 2 * fillStyle.r0_ / dimension; + expansion = 2 * fillStyle.r1_ / dimension - shift; + } + + // We need to sort the color stops in ascending order by offset, + // otherwise IE won't interpret it correctly. + var stops = fillStyle.colors_; + stops.sort(function(cs1, cs2) { + return cs1.offset - cs2.offset; + }); + + var length = stops.length; + var color1 = stops[0].color; + var color2 = stops[length - 1].color; + var opacity1 = stops[0].alpha * ctx.globalAlpha; + var opacity2 = stops[length - 1].alpha * ctx.globalAlpha; + + var colors = []; + for (var i = 0; i < length; i++) { + var stop = stops[i]; + colors.push(stop.offset * expansion + shift + ' ' + stop.color); + } + + // When colors attribute is used, the meanings of opacity and o:opacity2 + // are reversed. 
+ lineStr.push(''); + } else if (fillStyle instanceof CanvasPattern_) { + if (width && height) { + var deltaLeft = -min.x; + var deltaTop = -min.y; + lineStr.push(''); + } + } else { + var a = processStyle(ctx.fillStyle); + var color = a.color; + var opacity = a.alpha * ctx.globalAlpha; + lineStr.push(''); + } + } + + contextPrototype.fill = function() { + this.stroke(true); + }; + + contextPrototype.closePath = function() { + this.currentPath_.push({type: 'close'}); + }; + + /** + * @private + */ + contextPrototype.getCoords_ = function(aX, aY) { + var m = this.m_; + return { + x: Z * (aX * m[0][0] + aY * m[1][0] + m[2][0]) - Z2, + y: Z * (aX * m[0][1] + aY * m[1][1] + m[2][1]) - Z2 + }; + }; + + contextPrototype.save = function() { + var o = {}; + copyState(this, o); + this.aStack_.push(o); + this.mStack_.push(this.m_); + this.m_ = matrixMultiply(createMatrixIdentity(), this.m_); + }; + + contextPrototype.restore = function() { + if (this.aStack_.length) { + copyState(this.aStack_.pop(), this); + this.m_ = this.mStack_.pop(); + } + }; + + function matrixIsFinite(m) { + return isFinite(m[0][0]) && isFinite(m[0][1]) && + isFinite(m[1][0]) && isFinite(m[1][1]) && + isFinite(m[2][0]) && isFinite(m[2][1]); + } + + function setM(ctx, m, updateLineScale) { + if (!matrixIsFinite(m)) { + return; + } + ctx.m_ = m; + + if (updateLineScale) { + // Get the line scale. + // Determinant of this.m_ means how much the area is enlarged by the + // transformation. So its square root can be used as a scale factor + // for width. + var det = m[0][0] * m[1][1] - m[0][1] * m[1][0]; + ctx.lineScale_ = sqrt(abs(det)); + } + } + + contextPrototype.translate = function(aX, aY) { + var m1 = [ + [1, 0, 0], + [0, 1, 0], + [aX, aY, 1] + ]; + + setM(this, matrixMultiply(m1, this.m_), false); + }; + + contextPrototype.rotate = function(aRot) { + var c = mc(aRot); + var s = ms(aRot); + + var m1 = [ + [c, s, 0], + [-s, c, 0], + [0, 0, 1] + ]; + + setM(this, matrixMultiply(m1, this.m_), false); + }; + + contextPrototype.scale = function(aX, aY) { + this.arcScaleX_ *= aX; + this.arcScaleY_ *= aY; + var m1 = [ + [aX, 0, 0], + [0, aY, 0], + [0, 0, 1] + ]; + + setM(this, matrixMultiply(m1, this.m_), true); + }; + + contextPrototype.transform = function(m11, m12, m21, m22, dx, dy) { + var m1 = [ + [m11, m12, 0], + [m21, m22, 0], + [dx, dy, 1] + ]; + + setM(this, matrixMultiply(m1, this.m_), true); + }; + + contextPrototype.setTransform = function(m11, m12, m21, m22, dx, dy) { + var m = [ + [m11, m12, 0], + [m21, m22, 0], + [dx, dy, 1] + ]; + + setM(this, m, true); + }; + + /** + * The text drawing function. + * The maxWidth argument isn't taken in account, since no browser supports + * it yet. + */ + contextPrototype.drawText_ = function(text, x, y, maxWidth, stroke) { + var m = this.m_, + delta = 1000, + left = 0, + right = delta, + offset = {x: 0, y: 0}, + lineStr = []; + + var fontStyle = getComputedStyle(processFontStyle(this.font), + this.element_); + + var fontStyleString = buildStyle(fontStyle); + + var elementStyle = this.element_.currentStyle; + var textAlign = this.textAlign.toLowerCase(); + switch (textAlign) { + case 'left': + case 'center': + case 'right': + break; + case 'end': + textAlign = elementStyle.direction == 'ltr' ? 'right' : 'left'; + break; + case 'start': + textAlign = elementStyle.direction == 'rtl' ? 
'right' : 'left'; + break; + default: + textAlign = 'left'; + } + + // 1.75 is an arbitrary number, as there is no info about the text baseline + switch (this.textBaseline) { + case 'hanging': + case 'top': + offset.y = fontStyle.size / 1.75; + break; + case 'middle': + break; + default: + case null: + case 'alphabetic': + case 'ideographic': + case 'bottom': + offset.y = -fontStyle.size / 2.25; + break; + } + + switch(textAlign) { + case 'right': + left = delta; + right = 0.05; + break; + case 'center': + left = right = delta / 2; + break; + } + + var d = this.getCoords_(x + offset.x, y + offset.y); + + lineStr.push(''); + + if (stroke) { + appendStroke(this, lineStr); + } else { + // TODO: Fix the min and max params. + appendFill(this, lineStr, {x: -left, y: 0}, + {x: right, y: fontStyle.size}); + } + + var skewM = m[0][0].toFixed(3) + ',' + m[1][0].toFixed(3) + ',' + + m[0][1].toFixed(3) + ',' + m[1][1].toFixed(3) + ',0,0'; + + var skewOffset = mr(d.x / Z) + ',' + mr(d.y / Z); + + lineStr.push('', + '', + ''); + + this.element_.insertAdjacentHTML('beforeEnd', lineStr.join('')); + }; + + contextPrototype.fillText = function(text, x, y, maxWidth) { + this.drawText_(text, x, y, maxWidth, false); + }; + + contextPrototype.strokeText = function(text, x, y, maxWidth) { + this.drawText_(text, x, y, maxWidth, true); + }; + + contextPrototype.measureText = function(text) { + if (!this.textMeasureEl_) { + var s = ''; + this.element_.insertAdjacentHTML('beforeEnd', s); + this.textMeasureEl_ = this.element_.lastChild; + } + var doc = this.element_.ownerDocument; + this.textMeasureEl_.innerHTML = ''; + this.textMeasureEl_.style.font = this.font; + // Don't use innerHTML or innerText because they allow markup/whitespace. + this.textMeasureEl_.appendChild(doc.createTextNode(text)); + return {width: this.textMeasureEl_.offsetWidth}; + }; + + /******** STUBS ********/ + contextPrototype.clip = function() { + // TODO: Implement + }; + + contextPrototype.arcTo = function() { + // TODO: Implement + }; + + contextPrototype.createPattern = function(image, repetition) { + return new CanvasPattern_(image, repetition); + }; + + // Gradient / Pattern Stubs + function CanvasGradient_(aType) { + this.type_ = aType; + this.x0_ = 0; + this.y0_ = 0; + this.r0_ = 0; + this.x1_ = 0; + this.y1_ = 0; + this.r1_ = 0; + this.colors_ = []; + } + + CanvasGradient_.prototype.addColorStop = function(aOffset, aColor) { + aColor = processStyle(aColor); + this.colors_.push({offset: aOffset, + color: aColor.color, + alpha: aColor.alpha}); + }; + + function CanvasPattern_(image, repetition) { + assertImageIsValid(image); + switch (repetition) { + case 'repeat': + case null: + case '': + this.repetition_ = 'repeat'; + break + case 'repeat-x': + case 'repeat-y': + case 'no-repeat': + this.repetition_ = repetition; + break; + default: + throwException('SYNTAX_ERR'); + } + + this.src_ = image.src; + this.width_ = image.width; + this.height_ = image.height; + } + + function throwException(s) { + throw new DOMException_(s); + } + + function assertImageIsValid(img) { + if (!img || img.nodeType != 1 || img.tagName != 'IMG') { + throwException('TYPE_MISMATCH_ERR'); + } + if (img.readyState != 'complete') { + throwException('INVALID_STATE_ERR'); + } + } + + function DOMException_(s) { + this.code = this[s]; + this.message = s +': DOM Exception ' + this.code; + } + var p = DOMException_.prototype = new Error; + p.INDEX_SIZE_ERR = 1; + p.DOMSTRING_SIZE_ERR = 2; + p.HIERARCHY_REQUEST_ERR = 3; + p.WRONG_DOCUMENT_ERR = 4; + 
p.INVALID_CHARACTER_ERR = 5; + p.NO_DATA_ALLOWED_ERR = 6; + p.NO_MODIFICATION_ALLOWED_ERR = 7; + p.NOT_FOUND_ERR = 8; + p.NOT_SUPPORTED_ERR = 9; + p.INUSE_ATTRIBUTE_ERR = 10; + p.INVALID_STATE_ERR = 11; + p.SYNTAX_ERR = 12; + p.INVALID_MODIFICATION_ERR = 13; + p.NAMESPACE_ERR = 14; + p.INVALID_ACCESS_ERR = 15; + p.VALIDATION_ERR = 16; + p.TYPE_MISMATCH_ERR = 17; + + // set up externs + G_vmlCanvasManager = G_vmlCanvasManager_; + CanvasRenderingContext2D = CanvasRenderingContext2D_; + CanvasGradient = CanvasGradient_; + CanvasPattern = CanvasPattern_; + DOMException = DOMException_; +})(); + +} // if diff --git a/ckan/public/scripts/vendor/html5shiv/html5.js b/ckan/public/scripts/vendor/html5shiv/html5.js index 74c9564f9ac..7656f7a019c 100644 --- a/ckan/public/scripts/vendor/html5shiv/html5.js +++ b/ckan/public/scripts/vendor/html5shiv/html5.js @@ -1,3 +1,7 @@ -/*! HTML5 Shiv pre3.5 | @afarkas @jdalton @jon_neal @rem | MIT/GPL2 Licensed - Uncompressed source: https://github.com/aFarkas/html5shiv */ -(function(a,b){function h(a,b){var c=a.createElement("p"),d=a.getElementsByTagName("head")[0]||a.documentElement;return c.innerHTML="x",d.insertBefore(c.lastChild,d.firstChild)}function i(){var a=l.elements;return typeof a=="string"?a.split(" "):a}function j(a){var b={},c=a.createElement,f=a.createDocumentFragment,g=f();a.createElement=function(a){l.shivMethods||c(a);var f;return b[a]?f=b[a].cloneNode():e.test(a)?f=(b[a]=c(a)).cloneNode():f=c(a),f.canHaveChildren&&!d.test(a)?g.appendChild(f):f},a.createDocumentFragment=Function("h,f","return function(){var n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+i().join().replace(/\w+/g,function(a){return b[a]=c(a),g.createElement(a),'c("'+a+'")'})+");return n}")(l,g)}function k(a){var b;return a.documentShived?a:(l.shivCSS&&!f&&(b=!!h(a,"article,aside,details,figcaption,figure,footer,header,hgroup,nav,section{display:block}audio{display:none}canvas,video{display:inline-block;*display:inline;*zoom:1}[hidden]{display:none}audio[controls]{display:inline-block;*display:inline;*zoom:1}mark{background:#FF0;color:#000}")),g||(b=!j(a)),b&&(a.documentShived=b),a)}function p(a){var b,c=a.getElementsByTagName("*"),d=c.length,e=RegExp("^(?:"+i().join("|")+")$","i"),f=[];while(d--)b=c[d],e.test(b.nodeName)&&f.push(b.applyElement(q(b)));return f}function q(a){var b,c=a.attributes,d=c.length,e=a.ownerDocument.createElement(n+":"+a.nodeName);while(d--)b=c[d],b.specified&&e.setAttribute(b.nodeName,b.nodeValue);return e.style.cssText=a.style.cssText,e}function r(a){var b,c=a.split("{"),d=c.length,e=RegExp("(^|[\\s,>+~])("+i().join("|")+")(?=[[\\s,>+~#.:]|$)","gi"),f="$1"+n+"\\:$2";while(d--)b=c[d]=c[d].split("}"),b[b.length-1]=b[b.length-1].replace(e,f),c[d]=b.join("}");return c.join("{")}function s(a){var b=a.length;while(b--)a[b].removeNode()}function t(a){var b,c,d=a.namespaces,e=a.parentWindow;return!o||a.printShived?a:(typeof d[n]=="undefined"&&d.add(n),e.attachEvent("onbeforeprint",function(){var d,e,f,g=a.styleSheets,i=[],j=g.length,k=Array(j);while(j--)k[j]=g[j];while(f=k.pop())if(!f.disabled&&m.test(f.media)){for(d=f.imports,j=0,e=d.length;j",f="hidden"in c,f&&typeof injectElementWithStyles=="function"&&injectElementWithStyles("#modernizr{}",function(b){b.hidden=!0,f=(a.getComputedStyle?getComputedStyle(b,null):b.currentStyle).display=="none"}),g=c.childNodes.length==1||function(){try{b.createElement("a")}catch(a){return!0}var c=b.createDocumentFragment();return typeof c.cloneNode=="undefined"||typeof c.createDocumentFragment=="undefined"||typeof 
c.createElement=="undefined"}()})();var l={elements:c.elements||"abbr article aside audio bdi canvas data datalist details figcaption figure footer header hgroup mark meter nav output progress section summary time video",shivCSS:c.shivCSS!==!1,shivMethods:c.shivMethods!==!1,type:"default",shivDocument:k};a.html5=l,k(b);var m=/^$|\b(?:all|print)\b/,n="html5shiv",o=!g&&function(){var c=b.documentElement;return typeof b.namespaces!="undefined"&&typeof b.parentWindow!="undefined"&&typeof c.applyElement!="undefined"&&typeof c.removeNode!="undefined"&&typeof a.attachEvent!="undefined"}();l.type+=" print",l.shivPrint=t,t(b)})(this,document) \ No newline at end of file +/*! HTML5 Shiv v3.6RC1 | @afarkas @jdalton @jon_neal @rem | MIT/GPL2 Licensed + Uncompressed source: https://github.com/aFarkas/html5shiv */ +(function(l,f){function m(){var a=e.elements;return"string"==typeof a?a.split(" "):a}function i(a){var b=n[a[o]];b||(b={},h++,a[o]=h,n[h]=b);return b}function p(a,b,c){b||(b=f);if(g)return b.createElement(a);c||(c=i(b));b=c.cache[a]?c.cache[a].cloneNode():r.test(a)?(c.cache[a]=c.createElem(a)).cloneNode():c.createElem(a);return b.canHaveChildren&&!s.test(a)?c.frag.appendChild(b):b}function t(a,b){if(!b.cache)b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag(); +a.createElement=function(c){return!e.shivMethods?b.createElem(c):p(c,a,b)};a.createDocumentFragment=Function("h,f","return function(){var n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+m().join().replace(/\w+/g,function(a){b.createElem(a);b.frag.createElement(a);return'c("'+a+'")'})+");return n}")(e,b.frag)}function q(a){a||(a=f);var b=i(a);if(e.shivCSS&&!j&&!b.hasCSS){var c,d=a;c=d.createElement("p");d=d.getElementsByTagName("head")[0]||d.documentElement;c.innerHTML="x"; +c=d.insertBefore(c.lastChild,d.firstChild);b.hasCSS=!!c}g||t(a,b);return a}var k=l.html5||{},s=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,r=/^<|^(?:a|b|button|code|div|fieldset|form|h1|h2|h3|h4|h5|h6|i|iframe|img|input|label|li|link|ol|option|p|param|q|script|select|span|strong|style|table|tbody|td|textarea|tfoot|th|thead|tr|ul)$/i,j,o="_html5shiv",h=0,n={},g;(function(){try{var a=f.createElement("a");a.innerHTML="";j="hidden"in a;var b;if(!(b=1==a.childNodes.length)){f.createElement("a"); +var c=f.createDocumentFragment();b="undefined"==typeof c.cloneNode||"undefined"==typeof c.createDocumentFragment||"undefined"==typeof c.createElement}g=b}catch(d){g=j=!0}})();var e={elements:k.elements||"abbr article aside audio bdi canvas data datalist details figcaption figure footer header hgroup mark meter nav output progress section summary time video",shivCSS:!1!==k.shivCSS,supportsUnknownElements:g,shivMethods:!1!==k.shivMethods,type:"default",shivDocument:q,createElement:p,createDocumentFragment:function(a, +b){a||(a=f);if(g)return a.createDocumentFragment();for(var b=b||i(a),c=b.frag.cloneNode(),d=0,e=m(),h=e.length;d +${h.activity_div( + template=_("{actor} started following {object}"), + activity=activity, + actor=h.linked_user(activity.user_id), + object=h.dataset_link(activity.data.dataset), + )} + diff --git a/ckan/templates/activity_streams/follow_user.html b/ckan/templates/activity_streams/follow_user.html new file mode 100644 index 00000000000..f0e22d1f64b --- /dev/null +++ b/ckan/templates/activity_streams/follow_user.html @@ -0,0 +1,14 @@ + +${h.activity_div( + template=_("{actor} started following {object}"), + activity=activity, + actor=h.linked_user(activity.user_id), + 
object=h.linked_user(activity.data.user.name), + )} + diff --git a/ckan/templates/group/read.html b/ckan/templates/group/read.html index c4744020b54..c6c5839cc61 100644 --- a/ckan/templates/group/read.html +++ b/ckan/templates/group/read.html @@ -25,8 +25,8 @@

Administrators

- ${facet_div('tags', 'Tags')} - ${facet_div('res_format', 'Resource Formats')} + ${facet_div('tags', _('Tags'))} + ${facet_div('res_format', _('Resource Formats'))} diff --git a/ckan/templates/js_strings.html b/ckan/templates/js_strings.html index 7dd09bd2282..bcce855880f 100644 --- a/ckan/templates/js_strings.html +++ b/ckan/templates/js_strings.html @@ -65,11 +65,13 @@ addExtraField = _('Add Extra Field'), deleteResource = _('Delete Resource'), youCanUseMarkdown = _('You can use %aMarkdown formatting%b here.'), - shouldADataStoreBeEnabled = _('Should a %aDataStore table and Data API%b be enabled for this resource?'), datesAreInISO = _('Dates are in %aISO Format%b — eg. %c2012-12-25%d or %c2010-05-31T14:30%d.'), dataFileUploaded = _('Data File (Uploaded)'), follow = _('Follow'), unfollow = _('Unfollow'), + errorLoadingPreview = _('Could not load preview'), + errorDataProxy = _('DataProxy returned an error'), + errorDataStore = _('DataStore returned an error') ), indent=4)} diff --git a/ckan/templates/package/read.html b/ckan/templates/package/read.html index 29a03720bbd..7c76ef39192 100644 --- a/ckan/templates/package/read.html +++ b/ckan/templates/package/read.html @@ -45,7 +45,7 @@

Tags

${tag_list(c.pkg_dict.get('tags', ''))} -
diff --git a/ckanext/publisher_form/forms.py b/ckanext/publisher_form/forms.py index 672209f6b4a..0d23d7aed10 100644 --- a/ckanext/publisher_form/forms.py +++ b/ckanext/publisher_form/forms.py @@ -9,7 +9,6 @@ from ckan.logic import tuplize_dict, clean_dict, parse_params import ckan.logic.schema as default_schema from ckan.logic.schema import group_form_schema -from ckan.logic.schema import package_form_schema import ckan.logic.validators as val from ckan.lib.base import BaseController, render, c, model, abort, request from ckan.lib.base import redirect, _, config, h diff --git a/ckanext/stats/controller.py b/ckanext/stats/controller.py index 4e86ab1d98d..37114037b0a 100644 --- a/ckanext/stats/controller.py +++ b/ckanext/stats/controller.py @@ -17,6 +17,21 @@ def index(self): c.deleted_packages_by_week = rev_stats.get_by_week('deleted_packages') c.num_packages_by_week = rev_stats.get_num_packages_by_week() c.package_revisions_by_week = rev_stats.get_by_week('package_revisions') + + c.packages_by_week = []; + for week_date, num_packages, cumulative_num_packages in c.num_packages_by_week: + c.packages_by_week.append('[new Date(%s), %s]' % (week_date.replace('-', ','), cumulative_num_packages)); + + + c.all_package_revisions = []; + for week_date, revs, num_revisions, cumulative_num_revisions in c.package_revisions_by_week: + c.all_package_revisions.append('[new Date(%s), %s]' % (week_date.replace('-', ','), num_revisions)); + + c.new_datasets = [] + for week_date, pkgs, num_packages, cumulative_num_packages in c.new_packages_by_week: + c.new_datasets.append('[new Date(%s), %s]' % (week_date.replace('-', ','), num_packages)); + + return p.toolkit.render('ckanext/stats/index.html') def leaderboard(self, id=None): diff --git a/ckanext/stats/templates/ckanext/stats/index.html b/ckanext/stats/templates/ckanext/stats/index.html index 681527d330d..927ca1add18 100644 --- a/ckanext/stats/templates/ckanext/stats/index.html +++ b/ckanext/stats/templates/ckanext/stats/index.html @@ -9,6 +9,15 @@ Statistics + + + + + @@ -47,8 +61,8 @@

Revisions to Datasets per week

Top Rated Datasets

- - +
DatasetAverage ratingNumber of ratings
+ @@ -56,32 +70,32 @@

Top Rated Datasets

No ratings

Most Edited Datasets

-
DatasetAverage ratingNumber of ratings
${h.link_to(package.title or package.name, h.url_for(controller='package', action='read', id=package.name))}${rating}${num_ratings}
- +
DatasetNumber of edits
+
DatasetNumber of edits
${h.link_to(package.title or package.name, h.url_for(controller='package', action='read', id=package.name))}${edits}

Largest Groups

- - +
GroupNumber of datasets
+
GroupNumber of datasets
${h.link_to(group.title or group.name, h.url_for(controller='group', action='read', id=group.name))}${num_packages}

Top Tags

- +
- +
${h.link_to(tag.name, h.url_for(controller='tag', action='read', id=tag.name))}${num_packages}${h.link_to(tag.name, h.url_for(controller='tag', action='read', id=tag.name))}${num_packages}

Users owning most datasets

- +
- +
${h.linked_user(user)}${num_packages}${h.linked_user(user)}${num_packages}
@@ -100,8 +114,8 @@

Users owning most datasets

$('body').addClass('no-sidebar'); - - + ${jsConditionalForIe(8, '<script language="javascript" type="text/javascript" src="' + h.url_for_static('/scripts/vendor/flot/0.7/excanvas.js') + '"></script>', 'lte')} + diff --git a/ckanext/test_tag_vocab_plugin.py b/ckanext/test_tag_vocab_plugin.py index 043234890b2..2fe95f4ed9d 100644 --- a/ckanext/test_tag_vocab_plugin.py +++ b/ckanext/test_tag_vocab_plugin.py @@ -7,7 +7,7 @@ from genshi.filters import Transformer from ckan.logic import get_action from ckan.logic.converters import convert_to_tags, convert_from_tags, free_tags_only -from ckan.logic.schema import package_form_schema, default_package_schema +from ckan.logic.schema import default_package_schema from ckan.lib.navl.validators import ignore_missing, keep_extras from ckan import plugins diff --git a/doc/apiv3.rst b/doc/apiv3.rst index a76c6be6e00..f9b5a580952 100644 --- a/doc/apiv3.rst +++ b/doc/apiv3.rst @@ -81,17 +81,13 @@ will result in the following parameters being sent to the This interface is *slightly* more limited than the POST interface because it doesn't allow passing nested dicts into the action being accessed. As a -consequence of this, currently the *resource_search*, *tag_search* and -*tag_autocomplete* actions are **limited** in their functionality. +consequence of this, currently the *resource_search* action is **limited** in +its functionality when accessed with a GET request. `resource_search`: This action is not currently usable via a GET request as it relies upon a nested dict of fields. -`tag_search` and `tag_autocomplete`: - The `fields` argument is not available when accessing this action with a - GET request. - Also, it is worth bearing this limitation in mind when creating your own actions via the `IActions` interface. diff --git a/doc/coding-standards.rst b/doc/coding-standards.rst index 770c6a8a631..32987bc6150 100644 --- a/doc/coding-standards.rst +++ b/doc/coding-standards.rst @@ -62,6 +62,33 @@ Longer example CKAN commit message: Also fix templates/user/layout.html so that the Followers tab appears on both your own user page (when logged in) and on other user's pages. +Feature Branches +---------------- + +All ticketed work should be developed on a corresponding feature branch forked +from master. The name of the branch should include the ticket's number, the +ticket type, and a brief one-line synopsis of the purpose of the ticket. eg: +``2298-feature-add-sort-by-controls-to-search-page``. This allows the ticket +number to be easily searchable through GitHub's web interface. + +Once work on the branch has been completed and it is ready to be merged into +master, make a pull request on GitHub. Another member of the CKAN team will +review the changes and provide feedback through the GitHub pull request page. +If the piece of work touches on an area of code `owned` by another team member, +then notify them of the changes by email. + +Submitting Code Patches +----------------------- + +See the wiki for instructions on `how to submit a patch`_ via GitHub or email. + +.. _how to submit a patch: http://wiki.ckan.org/Submitting_a_code_patch + +Releases +-------- + +See :doc:`release-cycle` for details on the release process. + Merging ------- @@ -70,19 +97,102 @@ When merging a feature or bug branch into master: - Use the ``--no-ff`` option in the ``git merge`` command - Add an entry to the ``CHANGELOG`` file -Frontend Coding Standards -========================= +The full postgresql test suite must pass before merging into master. 
:: + + nosetests --ckan --with-pylons=test-core.ini ckan -TODO +See :doc:`test` for more information on running tests, including running the +core extension tests. -http://aron.github.com/ckan-style/styleguide/ +Python Coding Standards +======================= -Backend Coding Standards -======================== +For python code, we follow `PEP 8`_, plus a few of our own rules. The +important bits are laid out below, but if in doubt, refer to `PEP 8`_ and +common sense. -TODO +Layout and formatting +--------------------- -http://wiki.okfn.org/Coding_Standards + +- Don't use tabs. Use 4 spaces. + +- Maximum line length is 79 characters. + +- Continuation lines should align vertically within the parentheses, or with + a hanging indent. See `PEP 8's Indent Section`_ for more details. + +- Avoid extraneous whitespace. See `PEP 8's Whitespace Section`_ for more details. + +- Clean up formatting issues in master, not on a feature branch. Unless of + course you're changing that piece of code anyway. This will help avoid + spurious merge conflicts, and aid in reading pull requests. + +- Use the single-quote character, ``'``, rather than the double-quote + character, ``"``, for string literals. + +.. _PEP 8: http://www.python.org/dev/peps/pep-0008/ +.. _PEP 8's Indent Section: http://www.python.org/dev/peps/pep-0008/#indentation +.. _PEP 8's Whitespace Section: http://www.python.org/dev/peps/pep-0008/#whitespace-in-expressions-and-statements + +Imports +------- + +- Import whole modules, rather than using ``from foo import bar``. It's ok + to alias imported modules to make things more concise, ie this *is* + acceptable: :: + + import foo.bar.baz as f + +- Make all imports at the start of the file, after the module docstring. + Imports should be grouped in the following order: + + 1. Standard library imports + 2. Third-party imports + 3. CKAN imports + +Logging +------- + +- Keep messages short. + +- Don't include object representations in the log message. It **is** useful + to include a domain model identifier where appropriate. + +- Choose an appropriate log-level: + + +----------+--------------------------------------------------------------+ + | Level | Description | + +==========+==============================================================+ + | DEBUG | Detailed information, of no interest when everything is | + | | working well but invaluable when diagnosing problems. | + +----------+--------------------------------------------------------------+ + | INFO | Affirmations that things are working as expected, e.g. | + | | "service has started" or "indexing run complete". Often | + | | ignored. | + +----------+--------------------------------------------------------------+ + | WARNING | There may be a problem in the near future, and this gives | + | | advance warning of it. But the application is able to proceed| + | | normally. | + +----------+--------------------------------------------------------------+ + | ERROR | The application has been unable to proceed as expected, due | + | | to the problem being logged. | + +----------+--------------------------------------------------------------+ + | CRITICAL | This is a serious error, and some kind of application | + | | meltdown might be imminent. | + +----------+--------------------------------------------------------------+ + + (`Source + `_)
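As a rough illustration of the logging guidance above, a short message at an appropriate level, carrying a domain model identifier rather than a full object representation, might look like the following sketch (the function name, identifier and queue threshold are hypothetical, not taken from the CKAN codebase): ::

    import logging

    log = logging.getLogger(__name__)

    def index_dataset(dataset_id, queue_length):
        # DEBUG: detail that only matters when diagnosing a problem.
        log.debug('Indexing dataset %s', dataset_id)
        if queue_length > 1000:
            # WARNING: advance notice of a possible problem; processing continues.
            log.warning('Indexing queue length is %s', queue_length)
        # INFO: affirmation that things are working as expected.
        log.info('Indexing run complete')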
+ +i18n +---- + +To construct an internationalised string, use `str.format`_, giving +meaningful names to each replacement field. For example: :: + + _(' ... {foo} ... {bar} ...').format(foo='foo-value', bar='bar-value') + +.. _str.format: http://docs.python.org/library/stdtypes.html#str.format Docstring Standards ------------------- @@ -171,7 +281,7 @@ brackets. Where relevant also indicate the default value: (optional, default: .. _Sphinx directives: http://sphinx.pocoo.org/markup/desc.html#info-field-lists You can also use a little inline `reStructuredText markup`_ in docstrings, e.g. -``*stars for emphasis*`` or ````double-backticks for literal text````. +``*stars for emphasis*`` or ````double-backticks for literal text```` .. _reStructuredText markup: http://docutils.sourceforge.net/docs/user/rst/quickref.html#inline-markup @@ -222,3 +332,987 @@ Example of a ckan.logic.action API docstring: ''' .. _Autodoc: http://sphinx.pocoo.org/ext/autodoc.html + +Tools +----- + +Running the `PEP 8 style guide checker`_ is good for checking adherence to `PEP +8`_ formatting. As mentioned above, only perform style clean-ups on master to +help avoid spurious merge conflicts. + +`PyLint`_ is a useful tool for analysing python source code for errors and signs of poor quality. + +`pyflakes`_ is another useful tool for passive analysis of python source code. +There's also a `pyflakes vim plugin`_ which will highlight unused variables, +undeclared variables, syntax errors and unused imports. + +.. _PEP 8 style guide checker: http://pypi.python.org/pypi/pep8 +.. _PyLint: http://www.logilab.org/857 +.. _pyflakes: http://pypi.python.org/pypi/pyflakes +.. _pyflakes vim plugin: http://www.vim.org/scripts/script.php?script_id=2441 + +CKAN Code Areas +=============== + +This section describes some guidelines for making changes in particular areas +of the codebase, as well as general concepts particular to CKAN. + +General +------- + +Some rules to adhere to when making changes to the codebase in general. + +.. todo:: Is there anything to include in this 'General' section? + +Domain Models +------------- + +This section describes things to bear in mind when making changes to the domain +models. For more information about CKAN's domain models, see +:doc:`domain-model`. + +The structure of the CKAN data is described in the 'model'. This is in the code +at `ckan/model`. + +Many of the domain objects are Revisioned and some are Stateful. These are +concepts introduced by `vdm`_. + +.. _vdm: http://okfn.org/projects/vdm/ +.. _sqlalchemy migrate: http://code.google.com/p/sqlalchemy-migrate + +Migration +````````` +When edits are made to the model code, then before the code can be used on a +CKAN instance with existing data, the existing data has to be migrated. This is +achieved with a migration script. + +CKAN currently uses `sqlalchemy migrate`_ to manage these scripts. When you deploy new code to a +CKAN instance, as part of the process you run any required migration scripts +with: :: + + paster --plugin=ckan db upgrade --config={.ini file} + +The scripts give their model version numbers in their filenames and are stored +in ``ckan/migration/versions/``. + +The current version the database is migrated to is also stored in the database. +When you run the upgrade, as each migration script is run it prints to the +console something like ``11->12``. If no upgrade is required because it is up +to date, then nothing is printed. + +Creating a new migration script +``````````````````````````````` +A migration script should be checked into CKAN at the same time as the model +changes it is related to. 
Before pushing the changes, ensure the tests pass +when running against the migrated model, which requires the +``--ckan-migration`` setting. + +To create a new migration script, create a python file in +``ckan/migration/versions/`` and name it with a prefix numbered one higher than +the previous one and some words describing the change. + +You need to use the special engine provided by SqlAlchemy Migrate. Here is +the standard header for your migrate script: :: + + from sqlalchemy import * + from migrate import * + +The migration operations go in the upgrade function: :: + + def upgrade(migrate_engine): + metadata = MetaData() + metadata.bind = migrate_engine + +The following process should be followed when doing a migration. This process +is here to make the process easier and to validate if any mistakes have been +made: + +1. Get a dump of the database schema before you add your new migrate scripts. :: + + paster --plugin=ckan db clean --config={.ini file} + paster --plugin=ckan db upgrade --config={.ini file} + pg_dump -h host -s -f old.sql dbname + +2. Get a dump of the database as you have specified it in the model. :: + + paster --plugin=ckan db clean --config={.ini file} + + #this makes the database as defined in the model + paster --plugin=ckan db create-from-model --config={.ini file} + pg_dump -h host -s -f new.sql dbname + +3. Get apgdiff (apt-get it). It produces the sql it thinks you need to run on + the database in order to get it to the updated schema. :: + + apgdiff old.sql new.sql > upgrade.diff + +(or if you don't want to install java use http://apgdiff.startnet.biz/diff_online.php) + +4. The upgrade.diff file created will have all the changes needed in sql. + Delete the drop index lines as they are not created in the model. + +5. Put the resulting sql in your migrate script, e.g. :: + + migrate_engine.execute('''update table .........; update table ....''') + +6. Do a dump again, then a diff again to see if the only things left are drop index statements. + +7. Run nosetests with the ``--ckan-migration`` flag. + +It's that simple. Well almost. + +* If you are doing any table/field renaming, add that to your new migrate + script first and use this as a base for your diff (i.e. add a migrate script + with these renamings before step 1). This way the resulting sql won't try to drop and + recreate the field/table! + +* It sometimes drops the foreign key constraints in the wrong order causing an + error so you may need to rearrange the order in the resulting upgrade.diff. + +* If you need to do any data transfer in the migrations then do it between the + dropping of the constraints and adding of new ones. + +* You may need to add some tests if you are doing data migrations. + +An example of a script doing it this way is ``034_resource_group_table.py``. +This script copies the definitions of the original tables in order to do the +renaming of the tables/fields. + +In order to do some basic data migration testing, extra assertions should be +added to the migration script. Examples of this can also be found in +``034_resource_group_table.py``. + +This statement is run at the top of the migration script to get the count of +rows: :: + + package_count = migrate_engine.execute('''select count(*) from package''').first()[0] + +And the following is run afterwards to make sure that the row count is the same: :: + + resource_group_after = migrate_engine.execute('''select count(*) from resource_group''').first()[0] + assert resource_group_after == package_count
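Putting the pieces of this section together, a complete migration script might look something like the following sketch (the file name, the added column and the SQL are hypothetical; in practice the SQL comes from the apgdiff output described above): ::

    # ckan/migration/versions/0xx_add_example_column.py
    from sqlalchemy import *
    from migrate import *

    def upgrade(migrate_engine):
        metadata = MetaData()
        metadata.bind = migrate_engine
        # Count rows before the change so the migration can be sanity-checked.
        package_count = migrate_engine.execute(
            '''select count(*) from package''').first()[0]
        # The sql from upgrade.diff (minus the drop index lines) goes here.
        migrate_engine.execute('''alter table package
                                  add column example_notes text''')
        # Basic data migration test: no rows should have been lost.
        package_count_after = migrate_engine.execute(
            '''select count(*) from package''').first()[0]
        assert package_count_after == package_count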
+ +In order to do some basic data migration testing, extra assertions should be +added to the migration script. Examples of this can be found in +``034_resource_group_table.py``. A statement like the following is run at the +top of the migration script to get the count of rows: :: + + package_count = migrate_engine.execute('''select count(*) from package''').first()[0] + +And the following is run afterwards to make sure that the row count is the same: :: + + resource_group_after = migrate_engine.execute('''select count(*) from resource_group''').first()[0] + assert resource_group_after == package_count + +The Action Layer +---------------- + +When making changes to the action layer, found in the four modules +``ckan/logic/action/{create,delete,get,update}``, there are a few things to +bear in mind. + +Server Errors +````````````` + +When writing action layer code, bear in mind that the input provided in the +``data_dict`` may be user-provided. This means that required fields should be +checked for existence and validity prior to use. For example, code such as :: + + id = data_dict['id'] + +will raise a ``KeyError`` if the user hasn't provided an ``id`` field in their +data dict. This results in a 500 error, and no message to explain what went +wrong. The correct response by the action function would be to raise a +``ValidationError`` instead, as this will be caught and will provide the user +with a `bad request` response, alongside an error message explaining the issue. + +To this end, there's a helper function, ``logic.get_or_bust()``, which can be +used to safely retrieve a value from a dict: :: + + id = _get_or_bust(data_dict, "id") + +Function visibility +``````````````````` + +**All** publicly visible functions in the +``ckan.logic.action.{create,delete,get,update}`` namespaces will be exposed +through the :doc:`apiv3`. **This includes functions imported** by those +modules, **as well as any helper functions** defined within those modules. To +prevent inadvertent exposure of non-action functions through the action API, +care should be taken to: + +1. Import modules correctly (see `Imports`_). For example: :: + + import ckan.lib.search as search + + search.query_for(...) + +2. Hide any locally defined helper functions: :: + + def _a_useful_helper_function(x, y, z): + '''This function is not exposed because it is marked as private + with a leading underscore.''' + return x + y + z + +3. Bring imported convenience functions into the module namespace as private + members: :: + + _get_or_bust = logic.get_or_bust + +Documentation +````````````` + +Please refer to `CKAN Action API Docstrings`_ for information about writing +docstrings for the action functions. It is **very** important that action +functions are documented, as they are consumed not only by CKAN developers but +also by CKAN users. + +Controllers +----------- + +Guidelines when writing controller actions: + +- Use ``get_action``, rather than calling the action function directly, as this + allows extensions to override the action's behaviour. i.e. use :: + + ckan.logic.get_action('group_activity_list_html')(...) + + Instead of :: + + ckan.logic.action.get.group_activity_list_html(...) + + (A fuller sketch of this is given at the end of this section.) + +- Controllers have access to helper functions in ``ckan.lib.helpers``. When + developing for ckan core, only use the helper functions found in + ``ckan.lib.helpers.__allowed_functions__`` because any instance may set the + ``ckan.restrict_template_vars`` configuration value to ``True``. + +.. todo:: Anything else for controllers?
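For illustration, a controller action that uses ``get_action`` might look
roughly like the sketch below. The imports, controller boilerplate and template
path are assumptions made for the example rather than prescribed CKAN API; the
point is that the action is looked up by name, so that any extension overriding
``group_activity_list_html`` is respected::

    import ckan.lib.base as base
    import ckan.logic as logic
    import ckan.model as model


    class GroupController(base.BaseController):

        def activity(self, id):
            context = {'model': model, 'session': model.Session,
                       'user': base.c.user}
            # Look the action up by name rather than calling
            # ckan.logic.action.get.group_activity_list_html() directly.
            base.c.group_activity_html = logic.get_action(
                'group_activity_list_html')(context, {'id': id})
            return base.render('group/activity_stream.html')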
+Templating +---------- + +Helper Functions +```````````````` + +Templates have access to a set of helper functions in ``ckan.lib.helpers``. +When developing for ckan core, only use the helper functions found in +``ckan.lib.helpers.__allowed_functions__`` because any instance may set the +``ckan.restrict_template_vars`` configuration value to ``True``. + +.. todo:: Jinja2 templates + +Testing +------- + +- Functional tests, which test the behaviour of the web user interface and the + APIs, should be placed within ``ckan/tests/functional``. These tests can be a + lot slower to run than unit tests which don't access the database or Solr. So + try to bear that in mind, and attempt to cover just what is necessary, leaving + what can be covered by unit tests to unit tests. + +- ``nose.tools.assert_in`` and ``nose.tools.assert_not_in`` are only available + in Python>=2.7. So import them from ``ckan.tests``, which will provide + alternatives if they're not available. + +- The `mock`_ library can be used to create and interrogate mock objects. + +See :doc:`test` for further information on testing in CKAN. + +.. _mock: http://pypi.python.org/pypi/mock + +Writing Extensions +------------------ + +Please see :doc:`writing-extensions` for information about writing ckan +extensions, including details on the API available to extensions. + +Deprecation +----------- + +- Anything that may be used by extensions (see :doc:`writing-extensions`) needs + to maintain backward compatibility at the call site, i.e. template helper + functions and functions defined in the plugins toolkit. + +- The length of time of deprecation is evaluated on a function-by-function + basis. At minimum, a function should be marked as deprecated during a point + release. + +- To mark a helper function, use the ``deprecated`` decorator found in + ``ckan.lib.maintain``, e.g. :: + + @deprecated() + def facet_items(*args, **kwargs): + """ + DEPRECATED: Use the new facet data structure, and `unselected_facet_items()` + """ + # rest of function definition. + +JavaScript Coding Standards +=========================== + +Formatting +---------- + +.. _OKFN Coding Standards: http://wiki.okfn.org/Coding_Standards#Javascript +.. _idiomatic.js: https://github.com/rwldrn/idiomatic.js/ +.. _Douglas Crockford's: http://javascript.crockford.com/code.html + +All JavaScript documents must use **two spaces** for indentation and files +should have no trailing whitespace. This is contrary to the `OKFN Coding +Standards`_ but matches what's in use in the current code base. + +Coding style must follow the `idiomatic.js`_ style but with the following +exceptions. + +.. note:: Idiomatic is heavily based upon `Douglas Crockford's`_ style + guide which is recommended by the `OKFN Coding Standards`_. + +White Space +``````````` + +Two spaces must be used for indentation at all times. Unlike in idiomatic.js, +whitespace must not be used *inside* parentheses, between the parentheses and +their contents. :: + + // BAD: Too much whitespace. + function getUrl( full ) { + var url = '/styleguide/javascript/'; + if ( full ) { + url = 'http://okfn.github.com/ckan' + url; + } + return url; + } + + // GOOD: + function getUrl(full) { + var url = '/styleguide/javascript/'; + if (full) { + url = 'http://okfn.github.com/ckan' + url; + } + return url; + } + +.. note:: See section 2.D.1.1 of idiomatic for more examples of this syntax. + +Quotes +`````` + +Single quotes should be used everywhere unless writing JSON or the string +contains them. This makes it easier to create strings containing HTML. :: + + jQuery('<div id="my-div" />').appendTo('body');
+ +Object properties need not be quoted unless required by the interpreter. :: + + var object = { + name: 'bill', + 'class': 'user-name' + }; + +Variable declarations +````````````````````` + +One ``var`` statement must be used per variable assignment. These must be +declared at the top of the function in which they are being used. :: + + // GOOD: + var good = 'string'; + var alsoGood = 'another'; + + // GOOD: + var good = 'string'; + var okay = [ + 'hmm', 'a bit', 'better' + ]; + + // BAD: + var good = 'string', + iffy = [ + 'hmm', 'not', 'great' + ]; + +Declare variables at the top of the function in which they are first used. This +avoids issues with variable hoisting. If a variable is not assigned a value +until later in the function then it is okay to define more than one per +statement. :: + + // BAD: contrived example. + function lowercaseNames(names) { + var lowercased = []; + + for (var index = 0, length = names.length; index < length; index += 1) { + var name = names[index]; + lowercased.push(name.toLowerCase()); + } + + var sorted = lowercased.sort(); + return sorted; + } + + // GOOD: + function lowercaseNames(names) { + var lowercased = []; + var index, length, name, sorted; + + for (index = 0, length = names.length; index < length; index += 1) { + name = names[index]; + lowercased.push(name.toLowerCase()); + } + + sorted = lowercased.sort(); + return sorted; + } + +Naming +------ + +All properties, functions and methods must use lowercase camelCase: :: + + var myUsername = 'bill'; + var methods = { + getSomething: function () {} + }; + +Constructor functions must use uppercase CamelCase: :: + + function DatasetSearchView() { + } + +Constants must be uppercase with words delimited by underscores: :: + + var env = { + PRODUCTION: 'production', + DEVELOPMENT: 'development', + TESTING: 'testing' + }; + +Event handlers and callback functions should be prefixed with "on": :: + + function onDownloadClick(event) {} + + jQuery('.download').click(onDownloadClick); + +Boolean variables or methods returning boolean values should be prefixed with +"is": :: + + function isAdmin() {} + + var canEdit = isUser() && isAdmin(); + +.. note:: Alternatives are "has", "can" and "should" if they make more sense. + +Private methods should be prefixed with an underscore: :: + + View.extend({ + "click": "_onClick", + _onClick: function (event) { + } + }); + +Functions should be declared as named functions rather than assigning an +anonymous function to a variable. :: + + // GOOD: + function getName() { + } + + // BAD: + var getName = function () { + }; + +Named functions are generally easier to debug as they appear named in the +debugger. + +Comments +-------- + +Comments should be used to explain anything that may be unclear when you return +to it in six months' time. Single line comments should be used for all inline +comments that do not form part of the documentation. :: + + // Export the function to either the exports or global object depending + // on the current environment. This can be either an AMD module, CommonJS + // module or a browser. + if (typeof module.define === 'function' && module.define.amd) { + module.define('broadcast', function () { + return Broadcast; + }); + } else if (module.exports) { + module.exports = Broadcast; + } else { + module.Broadcast = Broadcast; + } + +File Structure +-------------- + +All public JavaScript files should be contained within a *javascript* directory +within the *public* directory, and files should be structured accordingly.
:: + + lib/ + main.js + utils.js + components/ + vendor/ + jquery.js + jquery.plugin.js + underscore.js + templates/ + test/ + index.html + spec/ + main-spec.js + utils-spec.js + vendor/ + mocha.js + mocha.css + chai.js + +All files and directories should be lowercase with hyphens used to separate words. + +lib + Should contain all application files. These can be structured appropriately. + It is recommended that *main.js* be used as the bootstrap filename that sets + up the page. + +vendor + Should contain all external dependencies. These should not contain + version numbers in the filename. This information should be available in + the header comment of the file. Library plugins should be prefixed with the + library name, e.g. the hover intent jQuery plugin would have the filename + *jquery.hover-intent.js*. + +templates + Should be stored in a separate directory and have the .html + extension. + +test + Contains the test runner *index.html*. *vendor* contains all test + dependencies and libraries. *spec* contains the actual test files. Each + test file should be the filename with *-spec* appended. + +JSHint +------ + +All JavaScript should pass `JSHint`_ before being committed. This can +be installed using ``npm`` (which is bundled with `node`_) by running: :: + + $ npm -g install jshint + +Each project should include a jshint.json file with appropriate configuration +options for the tool. Most text editors can also be configured to read from +this file. + +.. _node: http://nodejs.org +.. _jshint: http://www.jshint.com + +Documentation +------------- + +*TODO* + +Testing +------- + +*TODO* + +Best Practices +-------------- + +Forms +````` + +All forms should work without JavaScript enabled. This means that they must +submit ``application/x-www-form-urlencoded`` data to the server and receive an +appropriate response. The server should check for the +``X-Requested-With: XMLHttpRequest`` header to determine if the request is an +Ajax one. If so it can return an appropriate format, otherwise it should issue +a 303 redirect. + +The one exception to this rule is if a form or button is injected with +JavaScript after the page has loaded. It's then not part of the HTML document +and can submit any data format it pleases. + +Ajax +```` + +Ajax requests can be used to improve the experience of submitting forms and +other actions that require server interactions. Nearly all requests will +go through the following states. + +1. User clicks button. +2. JavaScript intercepts the click and disables the button (add ``disabled`` + attr). +3. A loading indicator is displayed (add class ``.loading`` to button). +4. The request is made to the server. +5. a) On success, the interface is updated. + b) On error, a message is displayed to the user if there is no other way to + resolve the issue. +6. The loading indicator is removed. +7. The button is re-enabled. + +Here's a possible example for submitting a search form using jQuery. :: + + jQuery('#search-form').submit(function (event) { + var form = $(this); + var button = form.find('[type=submit]'); + + // Prevent the browser submitting the form.
+ event.preventDefault(); + + button.prop('disabled', true).addClass('loading'); + + jQuery.ajax({ + type: this.method, + data: form.serialize(), + success: function (results) { + updatePageWithResults(results); + }, + error: function () { + showSearchError('Sorry we were unable to complete this search'); + }, + complete: function () { + button.prop('disabled', false).removeClass('loading'); + } + }); + }); + +This covers possible issues that might arise from submitting the form as well +as providing the user with adequate feedback that the page is doing something. +Disabling the button prevents the form being submitted twice and the error +feedback should hopefully offer a solution for the error that occurred. + +Event Handlers +`````````````` + +When using event handlers to listen for browser events it's a common +requirement to want to cancel the default browser action. This should be +done by calling the ``event.preventDefault()`` method: :: + + jQuery('button').click(function (event) { + event.preventDefault(); + }); + +It is also possible to return ``false`` from the callback function. Avoid doing +this as it also calls the ``event.stopPropagation()`` method which prevents the +event from bubbling up the DOM tree. This prevents other handlers listening +for the same event. For example an analytics click handler attached to the +```` element. + +Also jQuery (1.7+) now provides the `.on()`_ and `.off()`_ methods as +alternatives to ``.bind()``, ``.unbind()``, ``.delegate()`` and +``.undelegate()`` and they should be preferred for all tasks. + +.. _.on(): http://api.jquery.com/on/ +.. _.off(): http://api.jquery.com/off/ + +Closures +```````` + +*TODO* + +Templating +`````````` + +*TODO* + +Resources +--------- + +*TODO* + +HTML Coding Standards +===================== + +Formatting +---------- + +All HTML documents must use **two spaces** for indentation and there should be +no trailing whitespace. XHTML syntax must be used (this is more a Genshi +requirement) and all attributes must use double quotes around attributes. :: + + + + +HTML5 elements should be used where appropriate reserving ``
+ +Closures +```````` + +*TODO* + +Templating +`````````` + +*TODO* + +Resources +--------- + +*TODO* + +HTML Coding Standards +===================== + +Formatting +---------- + +All HTML documents must use **two spaces** for indentation and there should be +no trailing whitespace. XHTML syntax must be used (this is more a Genshi +requirement) and all attributes must use double quotes. For example: :: + + <form method="get" action="/dataset"> + <label for="q">Search</label> + <input type="text" id="q" name="q" value="" /> + </form> + +HTML5 elements should be used where appropriate, reserving ``<div>`` and +``<span>`` elements for situations where there is no semantic value (such as +wrapping elements to provide styling hooks). + +Doctype and layout +------------------ + +All documents must use the HTML5 doctype, and the ``<html>`` element should +have a ``lang`` attribute. The ``<head>`` should also, at a minimum, include +``viewport`` and ``charset`` meta tags. :: + + <!DOCTYPE html> + <html lang="en"> + <head> + <meta charset="utf-8" /> + <meta name="viewport" content="width=device-width, initial-scale=1.0" /> + <title>Example Site</title> + </head> + <body></body> + </html> + +Forms +----- + +Form fields must always include a ``