From c9fe6ed11180845cd4f0ebcf5fec3b4a4604472f Mon Sep 17 00:00:00 2001 From: hpincket Date: Wed, 31 Aug 2016 21:31:26 -0700 Subject: [PATCH] FLAKE8 FIXES, hope I didn't break anything --- .flake8 | 3 + api/__init__.py | 21 +-- api/courses.py | 36 ++--- api/forms.py | 15 +- api/meta.py | 48 +++--- api/scripts/add_client.py | 15 +- api/scripts/add_documentation.py | 11 +- api/scripts/add_member.py | 9 +- api/scripts/disable_client.py | 10 +- api/scripts/eateries.py | 159 ++++++++++--------- api/scripts/email_handler.py | 214 +++++++++++++------------- api/scripts/enable_client.py | 10 +- api/scripts/selfservice_scraper.py | 6 +- api/scripts/stats.py | 11 +- api/scripts/util/logger.py | 2 +- api/scripts/wifi_counter.py | 16 +- api/student.py | 78 +++++----- docs/example-script.py | 15 +- tests/test_courses.py | 13 +- wrappers/brown-python/brown/client.py | 15 +- 20 files changed, 362 insertions(+), 345 deletions(-) create mode 100644 .flake8 diff --git a/.flake8 b/.flake8 new file mode 100644 index 0000000..86f45e6 --- /dev/null +++ b/.flake8 @@ -0,0 +1,3 @@ +[flake8] +ignore=E501 +exclude=venv,env diff --git a/api/__init__.py b/api/__init__.py index 0d162cc..078612b 100644 --- a/api/__init__.py +++ b/api/__init__.py @@ -4,6 +4,11 @@ from functools import wraps import pymongo import os +import api.meta +import api.dining +import api.wifi +import api.laundry +import api.courses def make_json_error(ex): @@ -28,7 +33,6 @@ def support_jsonp(f): def decorated_function(*args, **kwargs): callback = request.args.get('callback', False) if callback: - from json import dumps content = callback.encode("utf-8") + b'(' + f(*args, **kwargs).data + b')' return current_app.response_class( content, mimetype='application/javascript') @@ -79,6 +83,7 @@ def __init__(self, environ, populate_request=True, shallow=False): # BASIC AUTH + def check_auth(username, password): """This function is called to check if a username / password combination is valid. @@ -91,12 +96,14 @@ def check_auth(username, password): print("The dashboard password's environment variable was not found.") return username == 'admin' and password == correct_password + def authenticate(): """Sends a 401 response that enables basic auth""" return Response( - 'Could not verify your access level for that URL.\n' - 'You have to login with proper credentials', 401, - {'WWW-Authenticate': 'Basic realm="Login Required"'}) + 'Could not verify your access level for that URL.\n' + 'You have to login with proper credentials', 401, + {'WWW-Authenticate': 'Basic realm="Login Required"'}) + def requires_auth(f): @wraps(f) @@ -108,10 +115,4 @@ def decorated(*args, **kwargs): return decorated -import api.meta -import api.dining -import api.wifi -import api.laundry -import api.courses - __all__ = ['api', ] diff --git a/api/courses.py b/api/courses.py index 3e81c24..fb24054 100644 --- a/api/courses.py +++ b/api/courses.py @@ -1,12 +1,10 @@ from flask import request, jsonify from api import app, make_json_error, support_jsonp -from api.meta import is_valid_client, require_client_id, log_client, INVALID_CLIENT_MSG +from api.meta import require_client_id from mongoengine import connect import json import urllib -from collections import defaultdict -import random -from api.scripts.coursemodels import * +from api.scripts.coursemodels import BannerCourse, NonconflictEntry import bson from datetime import date import os @@ -16,7 +14,7 @@ PAGINATION_LIMIT = 10 PAGINATION_MAX = 42 -#TODO: Maybe there's some way to use the same connection as given by 'db'. 
+# TODO: Maybe there's some way to use the same connection as given by 'db'. if 'MONGO_URI' in app.config: connect('brown', host=app.config['MONGO_URI']) @@ -35,7 +33,7 @@ def courses_index(): or Returns the sections ids specified ''' - arg_numbers = request.args.get('numbers',None) + arg_numbers = request.args.get('numbers', None) if arg_numbers is not None: numbers = [] full_numbers = [] @@ -119,9 +117,9 @@ def schedule_time(): time = int(request.args.get('time', '-1')) if time < 0: return make_json_error("Invalid or Missing parameter: time") - query_args = {'meeting.day_of_week':day, - "meeting.start_time": {'$lte': time}, - "meeting.end_time": {'$gte': time}} + query_args = {'meeting.day_of_week': day, + 'meeting.start_time': {'$lte': time}, + 'meeting.end_time': {'$gte': time}} return jsonify(paginate(filter_semester(query_args), params={'day': day, 'time': time}, raw=True)) @@ -136,21 +134,16 @@ def non_conflicting(): for course_number in courses: res = BannerCourse.objects(full_number=course_number) if len(res) <= 0: - return make_json_error(\ - "Invalid course section/lab/conference:"\ - +course_number) + return make_json_error("Invalid course section/lab/conference:" + course_number) course = res.first().id non_conflicting_list = NonconflictEntry.objects(course_id=course) if len(non_conflicting_list) <= 0: - return make_json_error(\ - "Error with course section/lab/conference:"\ - +course_number) + return make_json_error("Error with course section/lab/conference:" + course_number) non_conflicting_list = non_conflicting_list.first().non_conflicting if available_set is None: available_set = set(non_conflicting_list) else: - available_set = set.intersection(available_set, - non_conflicting_list) + available_set = set.intersection(available_set, non_conflicting_list) query_args = {"id__in": list(available_set)} else: # Slower method, a couple of seconds @@ -192,19 +185,20 @@ def paginate(query, params=None, raw=False): if '_id' not in query: query['_id'] = {} query['_id']['$gt'] = bson.objectid.ObjectId(offset) - res = list(BannerCourse.objects(__raw__=query).order_by('_id')[:limit+1]) + res = list(BannerCourse.objects(__raw__=query).order_by('_id')[:limit + 1]) next_url = "null" - if len(res) == limit+1: + if len(res) == limit + 1: next_url = request.base_url + "?" 
+\ urllib.parse.urlencode({"limit": limit, "offset": res[limit - 1].id}) client_id = request.args.get('client_id') next_url = next_url + "&" + urllib.parse.urlencode({"client_id": client_id}) if params is not None: - next_url = next_url+"&"+urllib.parse.urlencode(params) + next_url = next_url + "&" + urllib.parse.urlencode(params) res.pop() - if offset is None: offset = "null" + if offset is None: + offset = "null" ans = {"href": request.url, "items": [json.loads(elm.to_json()) for elm in res], "limit": limit, diff --git a/api/forms.py b/api/forms.py index d20c9d1..87a08d2 100644 --- a/api/forms.py +++ b/api/forms.py @@ -7,12 +7,13 @@ from api.scripts.add_documentation import add_documentation from api.scripts.add_member import add_member + class SignupForm(Form): name = StringField('Name', validators=[DataRequired()]) email = EmailField('Email', validators=[DataRequired(), Email()]) - + def validate(self): - if Form.validate(self): + if Form.validate(self): client_id = add_client_id(self.email.data, self.name.data) if client_id: send_id_email(self.email.data, self.name.data, client_id) @@ -23,6 +24,7 @@ def validate(self): else: return False + class DocumentationForm(Form): name = StringField('Name', validators=[DataRequired()]) urlname = StringField('URL Name', validators=[DataRequired()]) @@ -30,9 +32,9 @@ class DocumentationForm(Form): contents = TextAreaField('Contents', validators=[DataRequired()]) def validate(self): - if Form.validate(self): + if Form.validate(self): documentation = add_documentation(self.contents.data, - self.name.data, self.urlname.data, self.imageurl.data) + self.name.data, self.urlname.data, self.imageurl.data) if documentation: return True else: @@ -41,15 +43,16 @@ def validate(self): else: return False + class MemberForm(Form): name = StringField('Name', validators=[DataRequired()]) imageurl = StringField('Image URL', validators=[DataRequired()]) about = TextAreaField('Contents', validators=[DataRequired()]) def validate(self): - if Form.validate(self): + if Form.validate(self): member = add_member(self.about.data, - self.name.data, self.imageurl.data) + self.name.data, self.imageurl.data) if member: return True else: diff --git a/api/meta.py b/api/meta.py index d88dfb1..37780be 100644 --- a/api/meta.py +++ b/api/meta.py @@ -1,8 +1,7 @@ from functools import wraps -from flask import jsonify, render_template, url_for, request, redirect +from flask import render_template, url_for, request, redirect from flask import send_from_directory from api import app, db, requires_auth, make_json_error -from api.scripts.stats import get_total_requests from api.forms import SignupForm, DocumentationForm, MemberForm from flask import Markup import markdown @@ -18,6 +17,7 @@ api_documentations = db['api_documentations'] members = db['members'] + @app.route('/favicon.ico') def favicon(): return send_from_directory(os.path.join(app.root_path, 'static'), @@ -27,8 +27,9 @@ def favicon(): @app.route('/') def root(): # num_requests = get_total_requests() - return render_template('home.html', - api_documentations=list(api_documentations.find().sort("_id",1))) + return render_template('home.html', + api_documentations=list(api_documentations.find().sort("_id", 1))) + @app.route('/signup', methods=['GET', 'POST']) def signup(): @@ -36,40 +37,46 @@ def signup(): if form.validate_on_submit(): return redirect(url_for('root', signedup='true')) return render_template('signup.html', form=form, active="signup", - api_documentations=list(api_documentations.find().sort("_id",1))) + 
api_documentations=list(api_documentations.find().sort("_id", 1))) + @app.route('/docs', methods=['GET']) def docs(): - return redirect('https://api.students.brown.edu/docs/getting-started') #TODO: Fix this part to use url_for + return redirect('https://api.students.brown.edu/docs/getting-started') # TODO: Fix this part to use url_for + @app.route('/docs/', methods=['GET']) def docs_for(docName="getting-started"): - api_documentation=api_documentations.find_one({'urlname': docName}) - name=api_documentation['name'] - contents=api_documentation['contents'] - contents=Markup(markdown.markdown(contents)) + api_documentation = api_documentations.find_one({'urlname': docName}) + name = api_documentation['name'] + contents = api_documentation['contents'] + contents = Markup(markdown.markdown(contents)) return render_template('documentation_template.html', - api_documentations=list(api_documentations.find().sort("_id",1)), - name=name, contents=contents, active="docs") + api_documentations=list(api_documentations.find().sort("_id", 1)), + name=name, contents=contents, active="docs") + @app.route('/about-us', methods=['GET']) def about_us(): return render_template('about-us.html', - api_documentations=list(api_documentations.find().sort("_id",1)), - active="about", members=members.find().sort("name",1)) + api_documentations=list(api_documentations.find().sort("_id", 1)), + active="about", members=members.find().sort("name", 1)) + @app.route('/github', methods=['GET']) def github(): return redirect('https://github.com/hackatbrown/apis') + @app.route('/contribute', methods=['GET']) def contribute(): # TODO: Get rid of inline documentation, this is very bad... - contents='The future of Brown APIs depends on you! All of our code is open source, and we rely heavily on contributions from the Brown community. You can view our code (along with open issues and future plans) [on Github](https://github.com/hackatbrown/apis).\r\n\r\n## How to Help\r\n\r\nThere are many ways to help further the development of Brown APIs. You can add new APIs, maintain and enhance current APIs, fix bugs, improve this website, or build better tools to help others contribute. Check the [issues](https://github.com/hackatbrown/apis/issues) on our Github for suggestions of what to do first. You don\'t need to be able to code to help either. Reach out to CIS and other university organizations to get easier and wider access to campus data.\r\n\r\n## General Development Information\r\n\r\nThe APIs are written in Python and run on a [Flask](http://flask.pocoo.org) server. This website is also served by the same server and uses [Jinja](http://jinja.pocoo.org) templates with the [Bootstrap](http://getbootstrap.com) framework.\r\n\r\nData is stored in a single [MongoDB](https://docs.mongodb.com/getting-started/python/introduction/) database hosted on [mLab.com](https://mlab.com/) (_Note: This was probably a bad decision that could really use some contributions to fix!_). Because there is only one copy of the database, developers must take care to avoid corrupting the data while testing fixes or new features.\r\n\r\n## Getting Started\r\n\r\nYou\'ll need the latest version of Python 3, along with `virtualenv` and `pip`. Go ahead and look up these programs if you aren\'t familiar with them. They\'re crucial to our development process.\r\n\r\n1. Clone this repository to your own machine:\r\n - `git clone https://github.com/hackatbrown/brown-apis.git`\r\n2. Open a terminal and navigate to the top level of the repository (_brown-apis/_).\r\n3. 
Create and activate a virtual environment (again, look up `virtualenv` online to understand what this does):\r\n - ``virtualenv -p `which python3` venv``\r\n - `source venv/bin/activate`\r\n4. Install all the required libraries in your virtual environment:\r\n - `pip install -r requirements.txt`\r\n5. Create a new branch for your changes. For example (while on the master branch):\r\n - `git checkout -b `\r\n6. Make any changes you want to make.\r\n7. Commit your changes, push them to `origin/`, and open a new pull request.\r\n8. To test your code, you may merge them into the `stage` branch. These changes will be automatically reflected on our [staging server](http://brown-apis-staging.herokuapp.com/). You can merge changes from the develop branch into the staging branch with:\r\n - `git checkout stage`\r\n - `git fetch origin`\r\n - `git reset --hard origin/master`\r\n - `git rebase `\r\n - `git push --force`\r\n - Note: This won\'t work if multiple developers are doing this at the same time.\r\n9. You\'re code will be merged into `master` once your pull request is accepted.\r\n\r\n#### How to Run Scripts\r\n\r\n1. Navigate to the top-level directory (_brown-apis/_).\r\n2. Run the script from a package environment, allowing it to import the database from the _api_ package:\r\n - `python3 -m api.scripts.` where \'scriptname\' does NOT include the \'.py\' extension.\r\n3. You can include any script arguments after the command (just like you normally would).\r\n\r\n## Data Structures\r\n\r\nWe use MongoDB to store various menus and schedules, as well as client information. In MongoDB, all objects are stored as JSON, and there is no schema that forces all objects in a collection to share the same fields. Thus, we keep documentation of the different collections here (and in the API overviews below) to encourage an implicit schema. Objects added to the database should follow these templates. If you add a new collection to the database, remember to add a template here, too.\r\n\r\n#### db.clients ####\r\n\r\n- *username*: <STRING>,\r\n- *client_email*: <STRING>,\r\n- *client_id*: <STRING>,\r\n- *valid*: <BOOLEAN>, **<-- can this client make requests?**\r\n- *joined*: <DATETIME>, **<-- when did this client register?**\r\n- *requests*: <INTEGER> **<-- total number of requests made by this client (not included until this client makes their first request)**\r\n- *activity*: **list of activity objects which take the form:**\r\n * _timestamp_: <DATETIME>, **<-- time of request**\r\n * _endpoint_: <STRING> **<-- endpoint of request**\r\n- **DEPRECATED:** *client_name*: <STRING> **<-- replaced with _username_**\r\n\r\n#### db.api_documentations ####\r\n- *urlname*: <STRING>\r\n- *name*: <STRING>\r\n- *contents*: <STRING>\r\n- *imageurl*: <IMAGE>\r\n\r\n\r\n## High Level API Overviews\r\n\r\n### Dining\r\n\r\nThe Dining API is updated every day by a scraper that parses the menus from Brown Dining Services\' website. The hours for each eatery are entered manually inside of the scraper script before each semester. When the scraper is run, all this data is stored in the database. 
Calls to the API trigger various queries to the database and fetch the scraped data.\r\n\r\n#### db.dining\_menus\r\n\r\n- *eatery*: <STRING>,\r\n- *year*: <INTEGER>,\r\n- *month*: <INTEGER>,\r\n- *day*: <INTEGER>,\r\n- *start_hour*: <INTEGER>, **<-- these four lines describe a menu\'s start/end times**\r\n- *start_minute*: <INTEGER>, \r\n- *end_hour*: <INTEGER>, \r\n- *end_minute*: <INTEGER>,\r\n- *meal*: <STRING>,\r\n- *food*: [ <STRING>, <STRING>, ... ] **<-- list of all food items on menu**\r\n- *<section>*: [ <STRING>, <STRING>, ... ], **<-- category (e.g. "Bistro") mapped to list of food items**\r\n- ... (there can be multiple sections per menu)\r\n\r\n#### db.dining\_hours\r\n\r\n- *eatery*: <STRING>,\r\n- *year*: <INTEGER>,\r\n- *month*: <INTEGER>,\r\n- *day*: <INTEGER>,\r\n- *open_hour*: <INTEGER>,\r\n- *open_minute*: <INTEGER>, \r\n- *close_hour*: <INTEGER>, \r\n- *close_minute*: <INTEGER>\r\n\r\n#### db.dining\_all\_foods\r\n\r\n- *eatery*: <STRING>,\r\n- *food*: [ <STRING>, <STRING>, ... ]\r\n\r\n### WiFi\r\n\r\nThe WiFi API just forwards requests to another API run by Brown CIS. Their API is protected by a password (HTTP Basic Auth) and is nearly identical to the WiFi API that we expose. The response from the CIS API is returned back to the client.\r\n\r\n### Laundry\r\n\r\nThe Laundry API is updated manually with a scraper that pulls all the laundry rooms and stores them in the database. When a request is received, the API checks the request against the list of rooms in the database and optionally retrieves status information by scraping the laundry website in realtime.\r\n\r\n#### db.laundry\r\n- *room*\r\n - *name*: <STRING>\r\n - *id*: <INT>\r\n - *machines*: list of objects with:\r\n - *id*: <INT>\r\n - *type*: <STRING> (one of `washFL`, `washNdry`, `dry`)\r\n\r\n### Academic\r\n\r\nThe Academic API used to scrape course information from Banner and store it in the database. Since Banner has been deprecated for course selection, the Academic API scraper has stopped working, and we are no longer able to collect course data. Thus, the Academic API is unavailable for the foreseeable future. Contributions are especially welcome here.' - contents=Markup(markdown.markdown(contents)) + contents = 'The future of Brown APIs depends on you! All of our code is open source, and we rely heavily on contributions from the Brown community. You can view our code (along with open issues and future plans) [on Github](https://github.com/hackatbrown/apis).\r\n\r\n## How to Help\r\n\r\nThere are many ways to help further the development of Brown APIs. You can add new APIs, maintain and enhance current APIs, fix bugs, improve this website, or build better tools to help others contribute. Check the [issues](https://github.com/hackatbrown/apis/issues) on our Github for suggestions of what to do first. You don\'t need to be able to code to help either. Reach out to CIS and other university organizations to get easier and wider access to campus data.\r\n\r\n## General Development Information\r\n\r\nThe APIs are written in Python and run on a [Flask](http://flask.pocoo.org) server. This website is also served by the same server and uses [Jinja](http://jinja.pocoo.org) templates with the [Bootstrap](http://getbootstrap.com) framework.\r\n\r\nData is stored in a single [MongoDB](https://docs.mongodb.com/getting-started/python/introduction/) database hosted on [mLab.com](https://mlab.com/) (_Note: This was probably a bad decision that could really use some contributions to fix!_). 
Because there is only one copy of the database, developers must take care to avoid corrupting the data while testing fixes or new features.\r\n\r\n## Getting Started\r\n\r\nYou\'ll need the latest version of Python 3, along with `virtualenv` and `pip`. Go ahead and look up these programs if you aren\'t familiar with them. They\'re crucial to our development process.\r\n\r\n1. Clone this repository to your own machine:\r\n - `git clone https://github.com/hackatbrown/brown-apis.git`\r\n2. Open a terminal and navigate to the top level of the repository (_brown-apis/_).\r\n3. Create and activate a virtual environment (again, look up `virtualenv` online to understand what this does):\r\n - ``virtualenv -p `which python3` venv``\r\n - `source venv/bin/activate`\r\n4. Install all the required libraries in your virtual environment:\r\n - `pip install -r requirements.txt`\r\n5. Create a new branch for your changes. For example (while on the master branch):\r\n - `git checkout -b `\r\n6. Make any changes you want to make.\r\n7. Commit your changes, push them to `origin/`, and open a new pull request.\r\n8. To test your code, you may merge them into the `stage` branch. These changes will be automatically reflected on our [staging server](http://brown-apis-staging.herokuapp.com/). You can merge changes from the develop branch into the staging branch with:\r\n - `git checkout stage`\r\n - `git fetch origin`\r\n - `git reset --hard origin/master`\r\n - `git rebase `\r\n - `git push --force`\r\n - Note: This won\'t work if multiple developers are doing this at the same time.\r\n9. You\'re code will be merged into `master` once your pull request is accepted.\r\n\r\n#### How to Run Scripts\r\n\r\n1. Navigate to the top-level directory (_brown-apis/_).\r\n2. Run the script from a package environment, allowing it to import the database from the _api_ package:\r\n - `python3 -m api.scripts.` where \'scriptname\' does NOT include the \'.py\' extension.\r\n3. You can include any script arguments after the command (just like you normally would).\r\n\r\n## Data Structures\r\n\r\nWe use MongoDB to store various menus and schedules, as well as client information. In MongoDB, all objects are stored as JSON, and there is no schema that forces all objects in a collection to share the same fields. Thus, we keep documentation of the different collections here (and in the API overviews below) to encourage an implicit schema. Objects added to the database should follow these templates. If you add a new collection to the database, remember to add a template here, too.\r\n\r\n#### db.clients ####\r\n\r\n- *username*: <STRING>,\r\n- *client_email*: <STRING>,\r\n- *client_id*: <STRING>,\r\n- *valid*: <BOOLEAN>, **<-- can this client make requests?**\r\n- *joined*: <DATETIME>, **<-- when did this client register?**\r\n- *requests*: <INTEGER> **<-- total number of requests made by this client (not included until this client makes their first request)**\r\n- *activity*: **list of activity objects which take the form:**\r\n * _timestamp_: <DATETIME>, **<-- time of request**\r\n * _endpoint_: <STRING> **<-- endpoint of request**\r\n- **DEPRECATED:** *client_name*: <STRING> **<-- replaced with _username_**\r\n\r\n#### db.api_documentations ####\r\n- *urlname*: <STRING>\r\n- *name*: <STRING>\r\n- *contents*: <STRING>\r\n- *imageurl*: <IMAGE>\r\n\r\n\r\n## High Level API Overviews\r\n\r\n### Dining\r\n\r\nThe Dining API is updated every day by a scraper that parses the menus from Brown Dining Services\' website. 
The hours for each eatery are entered manually inside of the scraper script before each semester. When the scraper is run, all this data is stored in the database. Calls to the API trigger various queries to the database and fetch the scraped data.\r\n\r\n#### db.dining\_menus\r\n\r\n- *eatery*: <STRING>,\r\n- *year*: <INTEGER>,\r\n- *month*: <INTEGER>,\r\n- *day*: <INTEGER>,\r\n- *start_hour*: <INTEGER>, **<-- these four lines describe a menu\'s start/end times**\r\n- *start_minute*: <INTEGER>, \r\n- *end_hour*: <INTEGER>, \r\n- *end_minute*: <INTEGER>,\r\n- *meal*: <STRING>,\r\n- *food*: [ <STRING>, <STRING>, ... ] **<-- list of all food items on menu**\r\n- *<section>*: [ <STRING>, <STRING>, ... ], **<-- category (e.g. "Bistro") mapped to list of food items**\r\n- ... (there can be multiple sections per menu)\r\n\r\n#### db.dining\_hours\r\n\r\n- *eatery*: <STRING>,\r\n- *year*: <INTEGER>,\r\n- *month*: <INTEGER>,\r\n- *day*: <INTEGER>,\r\n- *open_hour*: <INTEGER>,\r\n- *open_minute*: <INTEGER>, \r\n- *close_hour*: <INTEGER>, \r\n- *close_minute*: <INTEGER>\r\n\r\n#### db.dining\_all\_foods\r\n\r\n- *eatery*: <STRING>,\r\n- *food*: [ <STRING>, <STRING>, ... ]\r\n\r\n### WiFi\r\n\r\nThe WiFi API just forwards requests to another API run by Brown CIS. Their API is protected by a password (HTTP Basic Auth) and is nearly identical to the WiFi API that we expose. The response from the CIS API is returned back to the client.\r\n\r\n### Laundry\r\n\r\nThe Laundry API is updated manually with a scraper that pulls all the laundry rooms and stores them in the database. When a request is received, the API checks the request against the list of rooms in the database and optionally retrieves status information by scraping the laundry website in realtime.\r\n\r\n#### db.laundry\r\n- *room*\r\n - *name*: <STRING>\r\n - *id*: <INT>\r\n - *machines*: list of objects with:\r\n - *id*: <INT>\r\n - *type*: <STRING> (one of `washFL`, `washNdry`, `dry`)\r\n\r\n### Academic\r\n\r\nThe Academic API used to scrape course information from Banner and store it in the database. Since Banner has been deprecated for course selection, the Academic API scraper has stopped working, and we are no longer able to collect course data. Thus, the Academic API is unavailable for the foreseeable future. Contributions are especially welcome here.' 
+ contents = Markup(markdown.markdown(contents)) return render_template('documentation_template.html', - api_documentations=list(api_documentations.find().sort("_id",1)), - name='How to Contribute', contents=contents) + api_documentations=list(api_documentations.find().sort("_id", 1)), + name='How to Contribute', contents=contents) + @app.route('/admin/add-documentation', methods=['GET', 'POST']) @requires_auth @@ -78,7 +85,8 @@ def add_documentation(): if form.validate_on_submit(): return redirect(url_for('root')) return render_template('add_documentation.html', form=form, - api_documentations=list(api_documentations.find().sort("_id",1))) + api_documentations=list(api_documentations.find().sort("_id", 1))) + @app.route('/admin/add-member', methods=['GET', 'POST']) @requires_auth @@ -87,7 +95,7 @@ def add_member(): if form.validate_on_submit(): return redirect(url_for('root')) return render_template('add_member.html', form=form, - api_documentations=list(api_documentations.find().sort("_id",1))) + api_documentations=list(api_documentations.find().sort("_id", 1))) # Static responses diff --git a/api/scripts/add_client.py b/api/scripts/add_client.py index 9a789da..40ab011 100644 --- a/api/scripts/add_client.py +++ b/api/scripts/add_client.py @@ -10,6 +10,7 @@ # simplify collection name clients = db.clients + def add_client_id(email, username, client_id=None): if email[-10:] != '@brown.edu': print("Invalid student email") @@ -22,12 +23,12 @@ def add_client_id(email, username, client_id=None): while clients.find_one({'client_id': client_id}): client_id = str(uuid4()) new_client = { - 'client_id': client_id, - 'username': username, - 'client_email': email, - 'joined': str(datetime.now()), - 'valid': True - } + 'client_id': client_id, + 'username': username, + 'client_email': email, + 'joined': str(datetime.now()), + 'valid': True + } clients.insert(new_client) return client_id @@ -38,7 +39,7 @@ def add_client_id(email, username, client_id=None): print("\tusername - Required. A user who owns this client (typically a first and last name, like 'Josiah Carberry').") print("\tclient_id - Optional. 
Provide a string representation of a UUID4 client ID.") exit() - + if len(argv) == 3: client_id = add_client_id(argv[1], argv[2]) if len(argv) == 4: diff --git a/api/scripts/add_documentation.py b/api/scripts/add_documentation.py index 126c4e7..691b458 100644 --- a/api/scripts/add_documentation.py +++ b/api/scripts/add_documentation.py @@ -2,12 +2,13 @@ api_documentations = db.api_documentations + def add_documentation(contents, name, urlname, imageurl): new_documentation = { - 'name': name, - 'urlname': urlname, - 'contents': contents, - 'imageurl': imageurl - } + 'name': name, + 'urlname': urlname, + 'contents': contents, + 'imageurl': imageurl + } api_documentations.insert(new_documentation) return True diff --git a/api/scripts/add_member.py b/api/scripts/add_member.py index f25e973..df60550 100644 --- a/api/scripts/add_member.py +++ b/api/scripts/add_member.py @@ -2,11 +2,12 @@ api_members = db.members + def add_member(about, name, imageurl): new_member = { - 'name': name, - 'about': about, - 'image_url': imageurl - } + 'name': name, + 'about': about, + 'image_url': imageurl + } api_members.insert(new_member) return True diff --git a/api/scripts/disable_client.py b/api/scripts/disable_client.py index 24afd14..0893137 100644 --- a/api/scripts/disable_client.py +++ b/api/scripts/disable_client.py @@ -1,9 +1,9 @@ from sys import argv -from api import db, meta +from api import meta if __name__ == '__main__': - if len(argv) != 2: - print("Usage: python -m api.scripts.disable_client ") - else: - print(meta.invalidate_client(argv[1])) \ No newline at end of file + if len(argv) != 2: + print("Usage: python -m api.scripts.disable_client ") + else: + print(meta.invalidate_client(argv[1])) diff --git a/api/scripts/eateries.py b/api/scripts/eateries.py index 3c8a594..afe4693 100644 --- a/api/scripts/eateries.py +++ b/api/scripts/eateries.py @@ -2,7 +2,6 @@ from urllib.parse import unquote from bs4 import BeautifulSoup as soup from datetime import date, timedelta -import time from api import db # simplify database names @@ -10,8 +9,8 @@ hours = db.dining_hours all_foods = db.dining_all_foods -class Eatery: +class Eatery: # a set of meal names to ignore, because dining services will often put items that aren't meals in the spreadsheets food_ignore_list = set(["closed for breakfast", "closed for breakfat", @@ -82,16 +81,17 @@ def add_menu_to_db(self, year, month, day, meal, food, section_dict={}): Return True if successful, otherwise False ''' # create separate menu docs to query and update the DB - menu_query = {'eatery': self.name, - 'year': year, - 'month': month, - 'day': day, - 'start_hour': self.mealtimes[meal]['start']['hour'], - 'start_minute': self.mealtimes[meal]['start']['minute'], - 'end_hour': self.mealtimes[meal]['end']['hour'], - 'end_minute': self.mealtimes[meal]['end']['minute'], - 'meal': meal - } + menu_query = { + 'eatery': self.name, + 'year': year, + 'month': month, + 'day': day, + 'start_hour': self.mealtimes[meal]['start']['hour'], + 'start_minute': self.mealtimes[meal]['start']['minute'], + 'end_hour': self.mealtimes[meal]['end']['hour'], + 'end_minute': self.mealtimes[meal]['end']['minute'], + 'meal': meal + } menu_full = {'food': food} menu_full.update(section_dict) menu_full.update(menu_query) @@ -101,7 +101,7 @@ def update_all_foods_in_db(self, food): ''' Update the eatery's food list in the all_foods collection Return True if successful, otherwise False ''' - return True if all_foods.update({'eatery':self.name}, {'$addToSet' : {'food': {'$each': food}}}, upsert=True) 
else False + return True if all_foods.update({'eatery': self.name}, {'$addToSet': {'food': {'$each': food}}}, upsert=True) else False def scrape_hours(self): ''' Scrape hours for this eatery @@ -114,39 +114,39 @@ def add_hours_to_db(self, year, month, day, open_time, close_time): 'open' and 'close' are tuples (, ) Return the ObjectID of the hours document in the database ''' - hours_doc = {'eatery': self.name, - 'year': year, - 'month': month, - 'day': day, - 'open_hour': open_time[0], - 'open_minute': open_time[1], - 'close_hour': close_time[0], - 'close_minute': close_time[1] - } + hours_doc = { + 'eatery': self.name, + 'year': year, + 'month': month, + 'day': day, + 'open_hour': open_time[0], + 'open_minute': open_time[1], + 'close_hour': close_time[0], + 'close_minute': close_time[1] + } return True if hours.update(hours_doc, hours_doc, upsert=True) else False - class Ratty(Eatery): - def __init__(self): self.name = 'ratty' self.eatery_page = "refectory.php" - self.mealtimes = {'breakfast': {'start': {'hour':7, 'minute':30}, - 'end': {'hour':11, 'minute':00}}, - 'brunch': {'start': {'hour':10, 'minute':30}, - 'end': {'hour':16, 'minute':00}}, - 'lunch': {'start': {'hour':11, 'minute':00}, - 'end': {'hour':16, 'minute':00}}, - 'dinner': {'start': {'hour':16, 'minute':00}, - 'end': {'hour':19, 'minute':30}} - } + self.mealtimes = { + 'breakfast': {'start': {'hour': 7, 'minute': 30}, + 'end': {'hour': 11, 'minute': 00}}, + 'brunch': {'start': {'hour': 10, 'minute': 30}, + 'end': {'hour': 16, 'minute': 00}}, + 'lunch': {'start': {'hour': 11, 'minute': 00}, + 'end': {'hour': 16, 'minute': 00}}, + 'dinner': {'start': {'hour': 16, 'minute': 00}, + 'end': {'hour': 19, 'minute': 30}} + } def find_available_days_and_meals(self): ''' see description in superclass (Eatery) ''' html = get_html(self.base_url + self.eatery_page) parsed = soup(html, 'html5lib') - table = parsed.find('table', {'class':'lines'}) + table = parsed.find('table', {'class': 'lines'}) rows = table.find_all('tr')[1:] days = [str(unquote(r.find_all('td')[1].text)).lower() for r in rows] days_meals = {} @@ -170,7 +170,7 @@ def scrape_menus(self): for day in ordered_days: for meal in days_meals[day]: import time - time.sleep(1) # delays for 1 second (BDS rate limits their site) + time.sleep(1) # delays for 1 second (BDS rate limits their site) try: print(meal, "for", day, menu_date, "->", self.scrape_menu(menu_date, day, meal)) num_menus += 1 @@ -190,39 +190,39 @@ def scrape_menu(self, menu_date, day, meal): # start at the main page for the Ratty main_html = get_html(self.base_url + self.eatery_page) main_parsed = soup(main_html, 'html5lib') # the Ratty website has errors, so the default parser fails -> use html5lib instead - + # Navigate to the specified day - menus_url = main_parsed.find('table', {'class':'lines'}).find('a', text=day.title())['href'] + menus_url = main_parsed.find('table', {'class': 'lines'}).find('a', text=day.title())['href'] menus_html = get_html(self.base_url + menus_url) menus_parsed = soup(menus_html, 'html5lib') # Convert 'brunch' to 'lunch' in order to navigate the HTML correctly meal_query = 'lunch' if meal == 'brunch' else meal - + # Get the table for the specified meal - meal_url = menus_parsed.find('iframe', {'id':meal_query.title()})['src'] + meal_url = menus_parsed.find('iframe', {'id': meal_query.title()})['src'] meal_html = get_html(meal_url) meal_parsed = soup(meal_html, 'html5lib') - # Scrape the table into a dict of sections (Chef's Corner, Bistro, etc) - table = meal_parsed.find('table', 
{'id':'tblMain'}) - if table == None: - table = meal_parsed.find('table', {'class':'waffle'}) + # Scrape the table into a dict of sections (Chef's Corner, Bistro, etc) + table = meal_parsed.find('table', {'id': 'tblMain'}) + if table is None: + table = meal_parsed.find('table', {'class': 'waffle'}) rows = table.find_all('tr')[1:] cols = [unquote(col.text).lower() for col in rows[0].find_all('td')] - data = {col:[] for col in cols} + data = {col: [] for col in cols} for row in rows[1:-1]: row_cols = row.find_all('td') for ix, c in enumerate(row_cols): if c.text and not c.text.lower().strip() in self.food_ignore_list: data[cols[ix]].append(c.text.lower().strip()) - data['daily sidebars'] = [col.text.lower().strip() for col in rows[-1].findAll('td') \ - if col.text and col.text.lower().strip() not in self.food_ignore_list] + data['daily sidebars'] = [col.text.lower().strip() for col in rows[-1].findAll('td') + if col.text and col.text.lower().strip() not in self.food_ignore_list] # For now, convert the dict into a single list of food items before adding to the DB - flat_data = [d for d in flatten(data) if not d in self.food_ignore_list] + flat_data = [d for d in flatten(data) if d not in self.food_ignore_list] return self.add_menu_to_db(menu_date.year, menu_date.month, menu_date.day, meal, flat_data, data) and \ - self.update_all_foods_in_db(flat_data) + self.update_all_foods_in_db(flat_data) def scrape_hours(self): ''' see description in superclass (Eatery) ''' @@ -233,7 +233,7 @@ def scrape_hours(self): if today > date(2016, 5, 31): print("ERROR: hours scraper is out of date") return num_hours - while today < date(2016, 5,31): + while today < date(2016, 5, 31): if today.month == 3 and today.day >= 26: # spring break pass @@ -264,24 +264,25 @@ class VDub(Eatery): def __init__(self): self.name = 'vdub' self.eatery_page = "verneywoolley_menu.php" - self.mealtimes = {'breakfast': {'start': {'hour':7, 'minute':30}, - 'end': {'hour':9, 'minute':30}}, - 'continental breakfast': {'start': {'hour':9, 'minute':30}, - 'end': {'hour':11, 'minute':00}}, - 'lunch': {'start': {'hour':11, 'minute':00}, - 'end': {'hour':14, 'minute':00}}, - 'dinner': {'start': {'hour':16, 'minute':30}, - 'end': {'hour':19, 'minute':30}} - } + self.mealtimes = { + 'breakfast': {'start': {'hour': 7, 'minute': 30}, + 'end': {'hour': 9, 'minute': 30}}, + 'continental breakfast': {'start': {'hour': 9, 'minute': 30}, + 'end': {'hour': 11, 'minute': 00}}, + 'lunch': {'start': {'hour': 11, 'minute': 00}, + 'end': {'hour': 14, 'minute': 00}}, + 'dinner': {'start': {'hour': 16, 'minute': 30}, + 'end': {'hour': 19, 'minute': 30}} + } def find_available_days_and_meals(self): ''' see description in superclass (Eatery) ''' html = get_html(self.base_url + self.eatery_page) parsed_html = soup(html, 'html5lib') - + # search for all 'h4' tags on the page -- the VDub staff uses these as headers for the days available available_days = [h4.text.split()[0].lower() for h4 in parsed_html.find_all("h4")] - + # the VDub serves breakfast, continental breakfast, lunch, and dinner every weekday days_meals = {} for day in available_days: @@ -305,7 +306,7 @@ def scrape_menus(self): for n, day in enumerate(ordered_days): # menu_ids is a list [('breakfast', _id), ('continental breakfast', _id), ...] 
import time - time.sleep(1) # delays for 1 second (BDS rate limits their site) + time.sleep(1) # delays for 1 second (BDS rate limits their site) try: menu_ids = self.scrape_menu(menu_date, day, days_meals[day], n) for menu_id in menu_ids: @@ -323,10 +324,10 @@ def scrape_menus(self): return num_menus def scrape_menu(self, menu_date, day, meals, nth_day): - ''' see description in superclass (Eatery) + ''' see description in superclass (Eatery) nth_day - allows scraper to determine which table to use on VDub website ''' - # start at the main page for the VDub + # start at the main page for the VDub main_html = get_html(self.base_url + self.eatery_page) main_parsed = soup(main_html, 'html5lib') # the VDub website has errors, so the default parser fails -> use html5lib instead @@ -339,18 +340,18 @@ def scrape_menu(self, menu_date, day, meals, nth_day): meal_parsed = soup(menu_html, 'html5lib') # scrape the table into a dict of sections (Chef's Corner, Bistro, etc) - table = meal_parsed.find('table', {'id':'tblMain'}) - if table == None: - table = meal_parsed.find('table', {'class':'waffle'}) + table = meal_parsed.find('table', {'id': 'tblMain'}) + if table is None: + table = meal_parsed.find('table', {'class': 'waffle'}) rows = table.find_all('tr')[1:] cols = [unquote(col.text).lower() for col in rows[0].find_all('td')] - data = {col:[] for col in cols} + data = {col: [] for col in cols} for row in rows[1:-1]: row_cols = row.find_all('td') for ix, c in enumerate(row_cols): if c.text: data[cols[ix]].append(c.text.lower().strip()) - + # continental breakfast doesn't have its own menu -- copy breakfast data data['continental breakfast'] = data['breakfast'] @@ -359,17 +360,16 @@ def scrape_menu(self, menu_date, day, meals, nth_day): # add each meal's menu to DB for meal in meals: section_dict = {} - section_dict['main menu'] = [d for d in data[meal] if not d in self.food_ignore_list] - section_dict['daily sidebars'] = [d for d in data['daily sidebars'] if not d in self.food_ignore_list] - flat_data = list({d.lower().strip() for d in data[meal] if not d in self.food_ignore_list for ds in data['daily sidebars'] \ - if not ds in self.food_ignore_list}) + section_dict['main menu'] = [d for d in data[meal] if d not in self.food_ignore_list] + section_dict['daily sidebars'] = [d for d in data['daily sidebars'] if d not in self.food_ignore_list] + flat_data = list({d.lower().strip() for d in data[meal] if d not in self.food_ignore_list for ds in data['daily sidebars'] + if ds not in self.food_ignore_list}) res_add = self.add_menu_to_db(menu_date.year, menu_date.month, menu_date.day, meal, flat_data, section_dict) res_update = self.update_all_foods_in_db(flat_data) results.append((meal, res_add and res_update)) return results - def scrape_hours(self): ''' see description in superclass (Eatery) ''' # this scraper is only valid through Spring 2015 @@ -402,14 +402,13 @@ def scrape_hours(self): return num_hours - class Jos(Eatery): def __init__(self): self.name = "jos" self.eatery_page = "josiahs.php" - self.mealtimes = {'dinner': {'start': {'hour':18, 'minute':00}, - 'end': {'hour':26, 'minute':00}}} + self.mealtimes = {'dinner': {'start': {'hour': 18, 'minute': 00}, + 'end': {'hour': 26, 'minute': 00}}} def find_available_days_and_meals(self): return None @@ -418,22 +417,20 @@ def scrape_menu(self, menu_date, day, meal): return None def scrape_hours(self): - #TODO: Implement Jos scraping + # TODO: Implement Jos scraping return None - - # Helper methods + def get_html(url): ''' The HTML data for a given URL 
''' return requests.get(url).text + def flatten(dct): ''' Flatten a dictionary's values into a list ''' result = [] for val in dct.values(): result += val return list(set(result)) - - diff --git a/api/scripts/email_handler.py b/api/scripts/email_handler.py index b1281a0..9631f31 100644 --- a/api/scripts/email_handler.py +++ b/api/scripts/email_handler.py @@ -7,118 +7,120 @@ URGENT_RECEPIENT = "7172159174@vtext.com" ALERT_RECEPIENT = "joseph_engelman@brown.edu" + def send_id_email(address, name, client_id): - # me == my email address - # you == recipient's email address - me = os.environ['GMAIL_USER'] - - # Create message container - the correct MIME type is multipart/alternative. - msg = MIMEMultipart('alternative') - msg['Subject'] = "Your Brown APIs Client ID" - msg['From'] = me - msg['To'] = address - - # Create the body of the message (a plain-text and an HTML version). - text = "Hi, " + name + "! Welcome to the Brown APIs developer community.\nYour Client ID is: " + client_id + ". Be sure to include it in every request!) You can use this Client ID for multiple projects. There is currently a maximum of one Client ID per student, but exceptions can be made on a case-by-case basis.\n\nBrown APIs are currently in beta. This means functionality may be added, removed, or modified at any time (however, this is very rare). To keep up-to-date on any changes, be sure to join the developer community on Facebook: https://www.facebook.com/groups/brown.apis/\n\nHappy developing!\nThe Brown APIs Team\n" - html = """\ - - - - - -

Hi, """ + name + """! Welcome to the Brown APIs developer community.

-

Your Client ID is: """ + client_id + """.

-

Be sure to include your Client ID with every request you make! You can use this Client ID for multiple projects. Currently, there is a maximum of one Client ID per student, but exceptions can be made on a case-by-case basis.

-

Brown APIs are currently in beta. This means functionality may be added, removed, or modified at any time (however, this is very rare). To keep up-to-date on any changes, be sure to join our community of developers on Facebook.

-
- Happy developing!
- The Brown APIs Team - - - """ - - # Record the MIME types of both parts - text/plain and text/html. - part1 = MIMEText(text, 'plain') - part2 = MIMEText(html, 'html') - - # Attach parts into message container. - # According to RFC 2046, the last part of a multipart message, in this case - # the HTML message, is best and preferred. - msg.attach(part1) - msg.attach(part2) - - s = smtplib.SMTP("smtp.gmail.com", 587) - s.starttls() - s.login(os.environ['GMAIL_USER'], os.environ['GMAIL_PASS']) - - # sendmail function takes 3 arguments: sender's address, recipient's address - # and message to send - here it is sent as one string. - print(s.sendmail(me, address, msg.as_string())) - s.quit() + # me == my email address + # you == recipient's email address + me = os.environ['GMAIL_USER'] + + # Create message container - the correct MIME type is multipart/alternative. + msg = MIMEMultipart('alternative') + msg['Subject'] = "Your Brown APIs Client ID" + msg['From'] = me + msg['To'] = address + + # Create the body of the message (a plain-text and an HTML version). + text = "Hi, " + name + "! Welcome to the Brown APIs developer community.\nYour Client ID is: " + client_id + ". Be sure to include it in every request!) You can use this Client ID for multiple projects. There is currently a maximum of one Client ID per student, but exceptions can be made on a case-by-case basis.\n\nBrown APIs are currently in beta. This means functionality may be added, removed, or modified at any time (however, this is very rare). To keep up-to-date on any changes, be sure to join the developer community on Facebook: https://www.facebook.com/groups/brown.apis/\n\nHappy developing!\nThe Brown APIs Team\n" + html = """\ + + + + + +

Hi, """ + name + """! Welcome to the Brown APIs developer community.

+

Your Client ID is: """ + client_id + """.

+

Be sure to include your Client ID with every request you make! You can use this Client ID for multiple projects. Currently, there is a maximum of one Client ID per student, but exceptions can be made on a case-by-case basis.

+

Brown APIs are currently in beta. This means functionality may be added, removed, or modified at any time (however, this is very rare). To keep up-to-date on any changes, be sure to join our community of developers on Facebook.

+
+ Happy developing!
+ The Brown APIs Team + + + """ + + # Record the MIME types of both parts - text/plain and text/html. + part1 = MIMEText(text, 'plain') + part2 = MIMEText(html, 'html') + + # Attach parts into message container. + # According to RFC 2046, the last part of a multipart message, in this case + # the HTML message, is best and preferred. + msg.attach(part1) + msg.attach(part2) + + s = smtplib.SMTP("smtp.gmail.com", 587) + s.starttls() + s.login(os.environ['GMAIL_USER'], os.environ['GMAIL_PASS']) + + # sendmail function takes 3 arguments: sender's address, recipient's address + # and message to send - here it is sent as one string. + print(s.sendmail(me, address, msg.as_string())) + s.quit() # Example usage of the above method: # send_id_email('7172159174@vtext.com', 'Joe', 'your-client-id-here') + def send_alert_email(message, urgent=False): - # me == my email address - me = os.environ['GMAIL_USER'] - # recepient's email address depends on how urgent the alert is - recepient = URGENT_RECEPIENT if urgent else ALERT_RECEPIENT - - # Create message container - the correct MIME type is multipart/alternative. - msg = MIMEMultipart('alternative') - msg['Subject'] = "Brown APIs - Urgent Alert!" if urgent else "Brown APIs - System Error" - msg['From'] = me - msg['To'] = recepient - - # Create the body of the message (a plain-text and an HTML version). - text = message - html = """\ - - - - - -

The following message was generated moments ago on the Brown APIs server:

-

""" + message + """

- - - """ - - # Record the MIME types of both parts - text/plain and text/html. - part1 = MIMEText(text, 'plain') - part2 = MIMEText(html, 'html') - - # Attach parts into message container. - # According to RFC 2046, the last part of a multipart message, in this case - # the HTML message, is best and preferred. - msg.attach(part1) - msg.attach(part2) - - s = smtplib.SMTP("smtp.gmail.com", 587) - s.starttls() - s.login(os.environ['GMAIL_USER'], os.environ['GMAIL_PASS']) - - # sendmail function takes 3 arguments: sender's address, recipient's address - # and message to send - here it is sent as one string. - print(s.sendmail(me, recepient, msg.as_string())) - s.quit() + # me == my email address + me = os.environ['GMAIL_USER'] + # recepient's email address depends on how urgent the alert is + recepient = URGENT_RECEPIENT if urgent else ALERT_RECEPIENT + + # Create message container - the correct MIME type is multipart/alternative. + msg = MIMEMultipart('alternative') + msg['Subject'] = "Brown APIs - Urgent Alert!" if urgent else "Brown APIs - System Error" + msg['From'] = me + msg['To'] = recepient + + # Create the body of the message (a plain-text and an HTML version). + text = message + html = """\ + + + + + +

The following message was generated moments ago on the Brown APIs server:

+

""" + message + """

+ + + """ + + # Record the MIME types of both parts - text/plain and text/html. + part1 = MIMEText(text, 'plain') + part2 = MIMEText(html, 'html') + + # Attach parts into message container. + # According to RFC 2046, the last part of a multipart message, in this case + # the HTML message, is best and preferred. + msg.attach(part1) + msg.attach(part2) + + s = smtplib.SMTP("smtp.gmail.com", 587) + s.starttls() + s.login(os.environ['GMAIL_USER'], os.environ['GMAIL_PASS']) + + # sendmail function takes 3 arguments: sender's address, recipient's address + # and message to send - here it is sent as one string. + print(s.sendmail(me, recepient, msg.as_string())) + s.quit() # Example usage of the above method: # send_alert_email("Nah, jk. This is just an example alert.") diff --git a/api/scripts/enable_client.py b/api/scripts/enable_client.py index fa39a97..373f70a 100644 --- a/api/scripts/enable_client.py +++ b/api/scripts/enable_client.py @@ -1,9 +1,9 @@ from sys import argv -from api import db, meta +from api import meta if __name__ == '__main__': - if len(argv) != 2: - print("Usage: python -m api.scripts.enable_client ") - else: - print(meta.validate_client(argv[1])) \ No newline at end of file + if len(argv) != 2: + print("Usage: python -m api.scripts.enable_client ") + else: + print(meta.validate_client(argv[1])) diff --git a/api/scripts/selfservice_scraper.py b/api/scripts/selfservice_scraper.py index 36c0a38..4f7a06d 100644 --- a/api/scripts/selfservice_scraper.py +++ b/api/scripts/selfservice_scraper.py @@ -106,7 +106,7 @@ def generate_semesters(n): semesters = [] semesters.append(gen_current_semester()) for i in range(1, n): - semesters.append(gen_next_semester(semesters[i-1])) + semesters.append(gen_next_semester(semesters[i - 1])) return semesters Semesters = generate_semesters(3) @@ -698,10 +698,10 @@ def main(): worker.start() for semester in Semesters: - print("Scraping: "+semester + "...", end="", flush=True, + print("Scraping: " + semester + "...", end="", flush=True, file=sys.stderr) if args.to_files is not None: - os.makedirs(path+semester, exist_ok=True) + os.makedirs(path + semester, exist_ok=True) for department in Departments: for course in gen_courses(s, semester, department): queue.put((path, semester, department, course)) diff --git a/api/scripts/stats.py b/api/scripts/stats.py index f23b653..97dc0d2 100644 --- a/api/scripts/stats.py +++ b/api/scripts/stats.py @@ -7,6 +7,7 @@ endpoints = set() + def get_request_stats(): all_clients = clients.find() request_stats = {} @@ -23,23 +24,25 @@ def get_request_stats(): request_stats[c['username']] = client return request_stats + def get_total_requests(): stats = get_request_stats() - client_count = {c:0 for c in stats.keys()} + client_count = {c: 0 for c in stats.keys()} for client in stats.keys(): req = stats[client].get('count', 0) client_count[client] += req return sum(client_count.values()) + if __name__ == '__main__': stats = get_request_stats() - client_count = {c:0 for c in stats.keys()} + client_count = {c: 0 for c in stats.keys()} for client in stats.keys(): req = stats[client].get('count', 0) client_count[client] += req total_requests = sum(client_count.values()) - endpoint_count = {e:0 for e in endpoints} + endpoint_count = {e: 0 for e in endpoints} for endpoint in endpoints: for client in stats.keys(): req = stats[client].get(endpoint, 0) @@ -57,4 +60,4 @@ def get_total_requests(): print('{0:<25} {1:>6}'.format(endpoint, endpoint_count[endpoint])) print() - print("TOTAL REQUESTS:", total_requests) \ No newline at end 
of file + print("TOTAL REQUESTS:", total_requests) diff --git a/api/scripts/util/logger.py b/api/scripts/util/logger.py index d81bf03..aea2fac 100644 --- a/api/scripts/util/logger.py +++ b/api/scripts/util/logger.py @@ -2,7 +2,7 @@ import sys _colors = { - 'INFO': '\033[94m', + 'INFO': '\033[94m', 'OK': '\033[92m', 'WARN': '\033[1m\033[93m', 'ERR': '\033[1m\033[91m', diff --git a/api/scripts/wifi_counter.py b/api/scripts/wifi_counter.py index 97b6044..a031330 100644 --- a/api/scripts/wifi_counter.py +++ b/api/scripts/wifi_counter.py @@ -2,14 +2,16 @@ import json location_names = {'Andrews': 'andrews', - 'Jo\'s': 'littlejo', - 'the Ratty': 'ratty', - 'the VDub': 'vdubs', - 'the Blue Room': 'blueroom'} + 'Jo\'s': 'littlejo', + 'the Ratty': 'ratty', + 'the VDub': 'vdubs', + 'the Blue Room': 'blueroom'} + def get_count(location): - response = json.loads(requests.get("https://i2s.brown.edu/wap/apis/localities/" + location + "/devices").content) - return response['count'] + response = json.loads(requests.get("https://i2s.brown.edu/wap/apis/localities/" + location + "/devices").content) + return response['count'] + for location in location_names: - print("There are", get_count(location_names[location]), "people at", location + ".") \ No newline at end of file + print("There are", get_count(location_names[location]), "people at", location + ".") diff --git a/api/student.py b/api/student.py index b437c95..1a9baa0 100644 --- a/api/student.py +++ b/api/student.py @@ -1,60 +1,56 @@ -from flask import request, jsonify -from api import app, db -from datetime import datetime -from difflib import get_close_matches +from api import app ''' TODO: Look into using Active Directory / LDAP for authentication. - Mongo Applications: http://docs.mongodb.org/manual/tutorial/configure-ldap-sasl-activedirectory/ - Brown's Active Dir: http://www.brown.edu/information-technology/services/active-directory + Mongo Applications: http://docs.mongodb.org/manual/tutorial/configure-ldap-sasl-activedirectory/ + Brown's Active Dir: http://www.brown.edu/information-technology/services/active-directory TODO: Establish this document model in the database / debate this model db.student { - 'username' : str, || the username of the user - 'password' : str, || the password of the user - 'email' : str || the email - 'last_login' : date, || the date of the last login - 'login_attempts': int, || the number of login attempts since the last succesfull login - *If login attempts goes above 5, lock the account and automatically - issue an email to the supplied email address - - *Possible Implementations: - - Run a service every n- minutes to subtract one from the login_attempts - - Consider an implementation where the client can only rerequest after a certain amount of time. - - This would involve keeping the current time, the last failed login time, and the login penalty. - - 'token' : str || the current valid login token - *This is the user's key to the world - 'expiration' : date || the expiration date of the token. Help keep things fresh. 
+ 'username' : str, || the username of the user + 'password' : str, || the password of the user + 'email' : str || the email + 'last_login' : date, || the date of the last login + 'login_attempts': int, || the number of login attempts since the last successful login + *If login attempts goes above 5, lock the account and automatically + issue an email to the supplied email address + + *Possible Implementations: + - Run a service every n minutes to subtract one from the login_attempts + - Consider an implementation where the client can only rerequest after a certain amount of time. + - This would involve keeping the current time, the last failed login time, and the login penalty. + + 'token' : str || the current valid login token + *This is the user's key to the world + 'expiration' : date || the expiration date of the token. Help keep things fresh. } - ''' - @app.route('/student/login') def login(L): - '''Allows a user to receive a token in exchange for a valid password / username combination.''' - #TODO: Implement login - Dict = {} - count = 0 - idnum = 1000000 - while len(L) != 0: - Dict.append(L[count], idnum) - count++ - idnum++ - return Dict + '''Allows a user to receive a token in exchange for a valid password / username combination.''' + # TODO: Implement login + # placeholder: map each supplied username to a dummy numeric id + Dict = {} + count = 0 + idnum = 1000000 + while count < len(L): + Dict[L[count]] = idnum + count += 1 + idnum += 1 + return Dict + @app.route('/student/balance') def balance(): - '''Allows a user to view their various balances.''' - #TODO: Implement balance retrieval - return - -@app.route('/student/transactions') - '''Allows a user to view their transaction history''' - #TODO: Consider a time + '''Allows a user to view their various balances.''' + # TODO: Implement balance retrieval + return +@app.route('/student/transactions') +def transactions(): + '''Allows a user to view their transaction history''' + # TODO: Consider a time diff --git a/docs/example-script.py b/docs/example-script.py index 56d3411..5e073c2 100644 --- a/docs/example-script.py +++ b/docs/example-script.py @@ -2,6 +2,7 @@ import json import collections + class Client: ''' A client for interacting with Brown resources ''' @@ -39,7 +40,12 @@ def get(self, endpoint, **kwargs): return convert(json.loads(requests.get(url, params=options).text)) + def convert(data): + try: + basestring + except NameError: + basestring = str if isinstance(data, basestring): return str(data) elif isinstance(data, collections.Mapping): @@ -51,10 +57,7 @@ def convert(data): if __name__ == '__main__': c = Client(client_id='test_client') - print "Welcome to the demo! This uses the /dining/menu endpoint to find current menus." + print("Welcome to the demo! 
This uses the /dining/menu endpoint to find current menus.") while True: - eatery = raw_input("Eatery (or 'exit'): ") - print c.get('/dining/menu', eatery=eatery) - - - + eatery = input("Eatery (or 'exit'): ") + if eatery == 'exit': + break + print(c.get('/dining/menu', eatery=eatery)) diff --git a/tests/test_courses.py b/tests/test_courses.py index 9d3ae29..ce88804 100644 --- a/tests/test_courses.py +++ b/tests/test_courses.py @@ -1,6 +1,5 @@ import unittest import requests -import sys base = "http://localhost:5000/academic" @@ -10,7 +9,7 @@ class TestNonConflictingEndpoint(unittest.TestCase): def test_non_conflicting(self): # Test for time collisions mycourse = 'CSCI1670-S01' - r = requests.get(base+"/courses/"+mycourse) + r = requests.get(base + "/courses/" + mycourse) data = r.json() mymeetings = data['meeting'] @@ -30,7 +29,7 @@ def check_course(course): return True check_pages(self, check_course, requests.get( - base+"/non-conflicting", params={'numbers': mycourse})) + base + "/non-conflicting", params={'numbers': mycourse})) class TestPagination(unittest.TestCase): @@ -38,7 +37,7 @@ class TestPagination(unittest.TestCase): def test_limit_constraint(self): ''' Makes sure we don't go over limit ''' value = 4 - r = requests.get(base+"/departments/CSCI", params={"limit": value}) + r = requests.get(base + "/departments/CSCI", params={"limit": value}) data = r.json() while True: self.assertTrue(data['limit'] == value) @@ -53,7 +52,7 @@ def test_item_uniqueness(self): ''' count = 0 seen = set() - r = requests.get(base+"/courses") + r = requests.get(base + "/courses") data = r.json() while True: count += len(data['items']) @@ -80,7 +79,7 @@ def check_course(item): return True return False - check_pages(self, check_course, requests.get(base+"/courses")) + check_pages(self, check_course, requests.get(base + "/courses")) def test_alternative_semester(self): @@ -91,7 +90,7 @@ def check_course(item): return False check_pages(self, check_course, requests.get( - base+'/courses', + base + '/courses', params={"semester": "Summer 2016"})) diff --git a/wrappers/brown-python/brown/client.py b/wrappers/brown-python/brown/client.py index be8b0af..2732a26 100644 --- a/wrappers/brown-python/brown/client.py +++ b/wrappers/brown-python/brown/client.py @@ -2,6 +2,7 @@ import json import collections + class Client: ''' A client for interacting with Brown resources ''' @@ -39,7 +40,12 @@ def get(self, endpoint, **kwargs): return convert(json.loads(requests.get(url, params=options).text)) + def convert(data): + try: + basestring + except NameError: + basestring = str if isinstance(data, basestring): return str(data) elif isinstance(data, collections.Mapping): @@ -51,10 +57,7 @@ def convert(data): if __name__ == '__main__': c = Client(client_id='test_client') - print "Welcome to the demo! This uses the /dining/menu endpoint to find current menus." + print("Welcome to the demo! This uses the /dining/menu endpoint to find current menus.") while True: - eatery = raw_input("Eatery (or 'exit'): ") - print c.get('/dining/menu', eatery=eatery) - - - + eatery = input("Eatery (or 'exit'): ") + if eatery == 'exit': + break + print(c.get('/dining/menu', eatery=eatery))
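
To reproduce the lint pass this patch targets, here is a minimal sketch; it assumes flake8 is installed in the project virtualenv, and the check_lint.py filename and the subprocess-based invocation are illustrative only, not part of the repository. The .flake8 file added above already supplies ignore=E501 and the venv/env excludes, so no extra flags are needed.

    # check_lint.py -- illustrative helper (name and approach are assumptions,
    # not part of the repository). Runs flake8 from the repository root so the
    # .flake8 added by this patch (ignore=E501, exclude=venv,env) is picked up.
    import subprocess
    import sys


    def main():
        # "python -m flake8 ." honours the .flake8 config in the working directory.
        result = subprocess.run([sys.executable, "-m", "flake8", "."])
        return result.returncode


    if __name__ == "__main__":
        sys.exit(main())

Running `python3 -m flake8 .` directly from the top of the repository should have the same effect and is expected to exit 0 once these changes are applied.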