
Merge pull request #121 from khchen428/master

#53 Make it easier to disallow robots crawling.
commit fbad81395a251e3659b141d0249afd06f5cfde3f (2 parents: 5c43d5a, 979643f)
Authored by @kumar303
project/settings/base.py (5 changes)
@@ -45,6 +45,11 @@
    'django_browserid.context_processors.browserid_form',
]
+# Should robots.txt deny everything or disallow a calculated list of URLs we
+# don't want to be crawled? Default is false, disallow everything.
+# Also see http://www.google.com/support/webmasters/bin/answer.py?answer=93710
+ENGAGE_ROBOTS = False
+
# Always generate a CSRF token for anonymous users.
ANON_ALWAYS = True
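
For a quick sense of what this flag selects between (the response itself comes from the new robots.txt handler added to project/urls.py below), the two possible bodies served at /robots.txt are roughly:

    # ENGAGE_ROBOTS = True
    User-agent: *
    Allow: /

    # ENGAGE_ROBOTS = False
    User-agent: *
    Disallow: /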
project/settings/local.py-dist (5 changes)
@@ -60,6 +60,9 @@ PASSWORD_HASHERS = get_password_hashers(base.BASE_PASSWORD_HASHERS, HMAC_KEYS)
# Make this unique, and don't share it with anybody. It cannot be blank.
SECRET_KEY = ''
+# Should robots.txt allow web crawlers? Set this to True for production
+ENGAGE_ROBOTS = True
+
# Uncomment these to activate and customize Celery:
# CELERY_ALWAYS_EAGER = False # required to activate celeryd
# BROKER_HOST = 'localhost'
@@ -80,4 +83,4 @@ SECRET_KEY = ''
# Uncomment this line if you are running a local development install without
# HTTPS to disable HTTPS-only cookies.
-#SESSION_COOKIE_SECURE = False
+#SESSION_COOKIE_SECURE = False
project/urls.py (8 changes)
@@ -14,6 +14,14 @@
urlpatterns = patterns('',
    # Example:
    (r'', include(urls)),
+
+    # Generate a robots.txt
+    (r'^robots\.txt$',
+        lambda r: HttpResponse(
+            "User-agent: *\n%s: /" % ('Allow' if settings.ENGAGE_ROBOTS else 'Disallow'),
+            mimetype="text/plain"
+        )
+    )
    # Uncomment the admin/doc line below to enable admin documentation:
    # (r'^admin/doc/', include('django.contrib.admindocs.urls')),
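
For readers on newer Django releases, here is a standalone sketch of the same robots.txt view; the function name robots_txt is illustrative, and re_path/content_type stand in for the older patterns()/mimetype API the commit targets. Note the parentheses around the conditional: the % operator binds tighter than if/else, so without them the False branch would return the bare string 'Disallow'.

    # project/urls.py (sketch only, not the committed code)
    from django.conf import settings
    from django.http import HttpResponse
    from django.urls import re_path

    def robots_txt(request):
        # Allow crawling only when ENGAGE_ROBOTS is True; default to Disallow.
        directive = 'Allow' if getattr(settings, 'ENGAGE_ROBOTS', False) else 'Disallow'
        return HttpResponse("User-agent: *\n%s: /" % directive,
                            content_type="text/plain")

    urlpatterns = [
        re_path(r'^robots\.txt$', robots_txt),
    ]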