diff --git a/ada-project-docs/testing.md b/ada-project-docs/testing.md index 2222d97a0..cf89322a6 100644 --- a/ada-project-docs/testing.md +++ b/ada-project-docs/testing.md @@ -4,7 +4,7 @@ We will need to complete part – or all – of some of the tests for this proje You may wish to review details about how to run tests [here](https://github.com/AdaGold/viewing-party#details-about-how-to-run-tests). -Recall that it is always a good idea to search the file for any `@pytest.mark.skip` decorators you may have missed before moving to the next wave. +Recall that it is always a good idea to search the file for any `# @pytest.mark.skip` decorators you may have missed before moving to the next wave. ### Code Coverage diff --git a/ada-project-docs/wave_05.md b/ada-project-docs/wave_05.md index f8893cea3..6d4f84b91 100644 --- a/ada-project-docs/wave_05.md +++ b/ada-project-docs/wave_05.md @@ -12,7 +12,7 @@ Our plan for this wave is to be able to create, read, update, and delete differe This wave requires more test writing. The tests you need to write are scaffolded in the `test_wave_05.py` file. - As with incomplete tests in other waves, you should comment out the `Exception` when implementing a test. -- These tests are currently skipped with `@pytest.mark.skip(reason="test to be completed by student")` and the function body has `pass` in it. +- These tests are currently skipped with `# @pytest.mark.skip(reason="test to be completed by student")` and the function body has `pass` in it. - Once you implement these tests you should remove the `skip` decorator and the `pass`. For the tests you write, use the requirements in this document to guide your test writing. 
diff --git a/app/__init__.py b/app/__init__.py index 3c581ceeb..3be2dc440 100644 --- a/app/__init__.py +++ b/app/__init__.py @@ -1,6 +1,8 @@ from flask import Flask from .db import db, migrate -from .models import task, goal +from .routes.task_routes import bp as task_bp +from .routes.goal_routes import bp as goal_bp + import os def create_app(config=None): @@ -10,13 +12,11 @@ def create_app(config=None): app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('SQLALCHEMY_DATABASE_URI') if config: - # Merge `config` into the app's configuration - # to override the app's default settings for testing app.config.update(config) db.init_app(app) migrate.init_app(app, db) - # Register Blueprints here - + app.register_blueprint(task_bp) + app.register_blueprint(goal_bp) return app diff --git a/app/models/goal.py b/app/models/goal.py index 44282656b..f2297d490 100644 --- a/app/models/goal.py +++ b/app/models/goal.py @@ -1,5 +1,19 @@ -from sqlalchemy.orm import Mapped, mapped_column from ..db import db +from sqlalchemy.orm import Mapped, mapped_column, relationship class Goal(db.Model): id: Mapped[int] = mapped_column(primary_key=True, autoincrement=True) + title: Mapped[str] + tasks: Mapped[list["Task"]] = relationship(back_populates="goal") + + def to_dict(self): + return { + "id": self.id, + "title": self.title + } + + @classmethod + def from_dict(cls, goal_data): + new_goal = cls(title=goal_data["title"]) + return new_goal + diff --git a/app/models/task.py b/app/models/task.py index 5d99666a4..21fdd1d84 100644 --- a/app/models/task.py +++ b/app/models/task.py @@ -1,5 +1,34 @@ -from sqlalchemy.orm import Mapped, mapped_column from ..db import db +from datetime import datetime +from typing import Optional +from sqlalchemy import ForeignKey +from sqlalchemy.orm import Mapped, mapped_column, relationship class Task(db.Model): id: Mapped[int] = mapped_column(primary_key=True, autoincrement=True) + title: Mapped[str] + description: Mapped[str] + completed_at: 
Mapped[Optional[datetime]] = mapped_column(nullable=True) + goal_id: Mapped[Optional[int]] = mapped_column(ForeignKey("goal.id")) + goal: Mapped[Optional["Goal"]] = relationship(back_populates="tasks") + + def to_dict(self): + return { + "id": self.id, + "title": self.title, + "description": self.description, + "is_complete": bool(self.completed_at) + } + + @classmethod + def from_dict(cls, task_data): + # If `is_complete` is True, set completed_at to now; otherwise keep it None. + is_complete = task_data.get("is_complete", False) + + completed_at = datetime.now() if is_complete else None + + return cls(title=task_data["title"], + description=task_data["description"], + completed_at=completed_at, + goal_id= task_data.get("goal_id", None) + ) \ No newline at end of file diff --git a/app/routes/goal_routes.py b/app/routes/goal_routes.py index 3aae38d49..25e4886fd 100644 --- a/app/routes/goal_routes.py +++ b/app/routes/goal_routes.py @@ -1 +1,58 @@ -from flask import Blueprint \ No newline at end of file +from flask import Blueprint, request +from ..routes.routes_utilities import ( + validate_model, + create_model, + get_models_with_filters, + update_model_fields, + delete_model, + assign_related_by_ids, +) +from ..models.goal import Goal +from ..models.task import Task +from ..db import db + +bp = Blueprint("goals_bp", __name__, url_prefix="/goals") + +@bp.get("") +def get_all_goals(): + return get_models_with_filters(Goal, request.args) + +@bp.get("/<id>") +def get_single_goal(id): + goal = validate_model(Goal, id) + + return goal.to_dict() + +@bp.get("/<id>/tasks") +def get_all_goal_tasks(id): + goal = validate_model(Goal, id) + tasks = [] + for task in goal.tasks: + t = task.to_dict() + # Tests expect tasks returned for a goal to include the goal_id + t["goal_id"] = goal.id + tasks.append(t) + + return {"id": goal.id, "title": goal.title, "tasks": tasks} + +@bp.post("") +def create_goal(): + model_dict, status_code = create_model(Goal, request.get_json()) + return
model_dict, status_code + +@bp.post("/<id>/tasks") +def post_task_ids_to_goal(id): + goal = validate_model(Goal, id) + data = request.get_json() or {} + return assign_related_by_ids(goal, "tasks", Task, data.get("task_ids")) + +@bp.put("/<id>") +def update_goal(id): + goal = validate_model(Goal, id) + request_data = request.get_json() + return update_model_fields(goal, request_data, ["title"]) + +@bp.delete("/<id>") +def delete_goal(id): + goal = validate_model(Goal, id) + return delete_model(goal) \ No newline at end of file diff --git a/app/routes/routes_utilities.py b/app/routes/routes_utilities.py new file mode 100644 index 000000000..dac13e293 --- /dev/null +++ b/app/routes/routes_utilities.py @@ -0,0 +1,71 @@ +from flask import abort, make_response +from ..db import db + +def validate_model(cls, id): + try: + id = int(id) + except (ValueError, TypeError): + abort(make_response({"details": "Invalid id"}, 400)) + + model = db.session.get(cls, id) + + if not model: + abort(make_response({"details": "Not found"}, 404)) + + return model + +def create_model(cls, model_data): + try: + new_model = cls.from_dict(model_data) + except Exception: + abort(make_response({"details": "Invalid data"}, 400)) + + db.session.add(new_model) + db.session.commit() + + return new_model.to_dict(), 201 + +def get_models_with_filters(cls, args=None): + query = db.select(cls) + + # Handle sorting + sort = args.get("sort") if args else None + if sort == "asc": + query = query.order_by(cls.title.asc()) + elif sort == "desc": + query = query.order_by(cls.title.desc()) + else: + query = query.order_by(cls.id) + + models = db.session.scalars(query) + models_response = [model.to_dict() for model in models] + return models_response + +def update_model_fields(model, data, allowed_fields): + if not isinstance(data, dict): + abort(make_response({"details": "Invalid data"}, 400)) + + for field in allowed_fields: + if field in data: + setattr(model, field, data[field]) + + db.session.commit() + return
make_response("", 204) + +def delete_model(model): + db.session.delete(model) + db.session.commit() + return make_response("", 204) + +def assign_related_by_ids(parent, relation_name, child_cls, ids, response_key="task_ids"): + if not isinstance(ids, list): + abort(make_response({"details": "Invalid data"}, 400)) + + related = [validate_model(child_cls, cid) for cid in ids] + setattr(parent, relation_name, related) + db.session.commit() + + return { + "id": parent.id, + response_key: [getattr(obj, "id") for obj in related] + }, 200 \ No newline at end of file diff --git a/app/routes/task_routes.py b/app/routes/task_routes.py index 3aae38d49..a2bf9c9bd 100644 --- a/app/routes/task_routes.py +++ b/app/routes/task_routes.py @@ -1 +1,93 @@ -from flask import Blueprint \ No newline at end of file +from flask import Blueprint, request, Response +from ..models.task import Task +from ..db import db +from ..routes.routes_utilities import ( + validate_model, + create_model, + get_models_with_filters, + update_model_fields, + delete_model, +) +from datetime import datetime +from dotenv import load_dotenv +import os + +load_dotenv() +SLACK_TOKEN = os.getenv("SLACK_TOKEN") +SLACK_CHANNEL = os.getenv("SLACK_CHANNEL") + +bp = Blueprint("task_bp", __name__, url_prefix='/tasks') + +@bp.get("") +def get_all_tasks(): + return get_models_with_filters(Task, request.args) + +@bp.get("/<id>") +def get_single_tasks(id): + task = validate_model(Task, id) + task_dict = task.to_dict() + # Include goal_id in the single-task response when applicable (Wave 6) + if task.goal_id is not None: + task_dict["goal_id"] = task.goal_id + return task_dict + +@bp.patch("/<id>/mark_complete") +def mark_task_complete(id): + task = validate_model(Task, id) + # No request body is expected for marking a task complete; simply set + # the completed timestamp.
+ task.completed_at = datetime.now() + + db.session.commit() + + send_completed_task_to_slack(task) + return Response(status=204, mimetype="application/json") + +def send_completed_task_to_slack(task): + import requests + + slack_message_url = "https://slack.com/api/chat.postMessage" + # channel is required by Slack API; allow configuration via SLACK_CHANNEL env var + channel = SLACK_CHANNEL or os.getenv("SLACK_CHANNEL") + + message = { + "channel": channel, + "text": f"Someone just completed the task '{task.title}'!" + } + headers = { + "Content-Type": "application/json", + "Authorization": f"Bearer {SLACK_TOKEN}" + } + + response = requests.post(slack_message_url, json=message, headers=headers) + # print(response.status_code, response.text) # debug output + # print("SLACK_TOKEN:", SLACK_TOKEN) # debug output + + response.raise_for_status() + +@bp.patch("/<id>/mark_incomplete") +def mark_task_incomplete(id): + task = validate_model(Task, id) + + task.completed_at = None + + db.session.commit() + + return Response(status=204, mimetype="application/json") + +@bp.post("") +def create_task(): + model_dict, status_code = create_model(Task, request.get_json()) + return model_dict, status_code + +@bp.put("/<id>") +def replace_task(id): + task = validate_model(Task, id) + + request_body = request.get_json() + return update_model_fields(task, request_body, ["title", "description", "completed_at"]) + +@bp.delete("/<id>") +def delete_task(id): + task = validate_model(Task, id) + return delete_model(task) \ No newline at end of file diff --git a/migrations/README b/migrations/README new file mode 100644 index 000000000..0e0484415 --- /dev/null +++ b/migrations/README @@ -0,0 +1 @@ +Single-database configuration for Flask. diff --git a/migrations/alembic.ini b/migrations/alembic.ini new file mode 100644 index 000000000..ec9d45c26 --- /dev/null +++ b/migrations/alembic.ini @@ -0,0 +1,50 @@ +# A generic, single database configuration.
+ +[alembic] +# template used to generate migration files +# file_template = %%(rev)s_%%(slug)s + +# set to 'true' to run the environment during +# the 'revision' command, regardless of autogenerate +# revision_environment = false + + +# Logging configuration +[loggers] +keys = root,sqlalchemy,alembic,flask_migrate + +[handlers] +keys = console + +[formatters] +keys = generic + +[logger_root] +level = WARN +handlers = console +qualname = + +[logger_sqlalchemy] +level = WARN +handlers = +qualname = sqlalchemy.engine + +[logger_alembic] +level = INFO +handlers = +qualname = alembic + +[logger_flask_migrate] +level = INFO +handlers = +qualname = flask_migrate + +[handler_console] +class = StreamHandler +args = (sys.stderr,) +level = NOTSET +formatter = generic + +[formatter_generic] +format = %(levelname)-5.5s [%(name)s] %(message)s +datefmt = %H:%M:%S diff --git a/migrations/env.py b/migrations/env.py new file mode 100644 index 000000000..4c9709271 --- /dev/null +++ b/migrations/env.py @@ -0,0 +1,113 @@ +import logging +from logging.config import fileConfig + +from flask import current_app + +from alembic import context + +# this is the Alembic Config object, which provides +# access to the values within the .ini file in use. +config = context.config + +# Interpret the config file for Python logging. +# This line sets up loggers basically. 
+fileConfig(config.config_file_name) +logger = logging.getLogger('alembic.env') + + +def get_engine(): + try: + # this works with Flask-SQLAlchemy<3 and Alchemical + return current_app.extensions['migrate'].db.get_engine() + except (TypeError, AttributeError): + # this works with Flask-SQLAlchemy>=3 + return current_app.extensions['migrate'].db.engine + + +def get_engine_url(): + try: + return get_engine().url.render_as_string(hide_password=False).replace( + '%', '%%') + except AttributeError: + return str(get_engine().url).replace('%', '%%') + + +# add your model's MetaData object here +# for 'autogenerate' support +# from myapp import mymodel +# target_metadata = mymodel.Base.metadata +config.set_main_option('sqlalchemy.url', get_engine_url()) +target_db = current_app.extensions['migrate'].db + +# other values from the config, defined by the needs of env.py, +# can be acquired: +# my_important_option = config.get_main_option("my_important_option") +# ... etc. + + +def get_metadata(): + if hasattr(target_db, 'metadatas'): + return target_db.metadatas[None] + return target_db.metadata + + +def run_migrations_offline(): + """Run migrations in 'offline' mode. + + This configures the context with just a URL + and not an Engine, though an Engine is acceptable + here as well. By skipping the Engine creation + we don't even need a DBAPI to be available. + + Calls to context.execute() here emit the given string to the + script output. + + """ + url = config.get_main_option("sqlalchemy.url") + context.configure( + url=url, target_metadata=get_metadata(), literal_binds=True + ) + + with context.begin_transaction(): + context.run_migrations() + + +def run_migrations_online(): + """Run migrations in 'online' mode. + + In this scenario we need to create an Engine + and associate a connection with the context. 
+ + """ + + # this callback is used to prevent an auto-migration from being generated + # when there are no changes to the schema + # reference: http://alembic.zzzcomputing.com/en/latest/cookbook.html + def process_revision_directives(context, revision, directives): + if getattr(config.cmd_opts, 'autogenerate', False): + script = directives[0] + if script.upgrade_ops.is_empty(): + directives[:] = [] + logger.info('No changes in schema detected.') + + conf_args = current_app.extensions['migrate'].configure_args + if conf_args.get("process_revision_directives") is None: + conf_args["process_revision_directives"] = process_revision_directives + + connectable = get_engine() + + with connectable.connect() as connection: + context.configure( + connection=connection, + target_metadata=get_metadata(), + **conf_args + ) + + with context.begin_transaction(): + context.run_migrations() + + +if context.is_offline_mode(): + run_migrations_offline() +else: + run_migrations_online() diff --git a/migrations/script.py.mako b/migrations/script.py.mako new file mode 100644 index 000000000..2c0156303 --- /dev/null +++ b/migrations/script.py.mako @@ -0,0 +1,24 @@ +"""${message} + +Revision ID: ${up_revision} +Revises: ${down_revision | comma,n} +Create Date: ${create_date} + +""" +from alembic import op +import sqlalchemy as sa +${imports if imports else ""} + +# revision identifiers, used by Alembic. 
+revision = ${repr(up_revision)} +down_revision = ${repr(down_revision)} +branch_labels = ${repr(branch_labels)} +depends_on = ${repr(depends_on)} + + +def upgrade(): + ${upgrades if upgrades else "pass"} + + +def downgrade(): + ${downgrades if downgrades else "pass"} diff --git a/migrations/versions/126262bf95fa_recreate_initial.py b/migrations/versions/126262bf95fa_recreate_initial.py new file mode 100644 index 000000000..989dc9515 --- /dev/null +++ b/migrations/versions/126262bf95fa_recreate_initial.py @@ -0,0 +1,75 @@ +"""recreate initial + +Revision ID: 126262bf95fa +Revises: +Create Date: 2025-11-06 11:18:46.135106 + +""" +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision = '126262bf95fa' +down_revision = None +branch_labels = None +depends_on = None + + +def upgrade(): + """ + Make the initial schema idempotent: + - On a fresh DB, create goal and task tables. + - If task exists from a previous state, apply the completed_at type cast. 
+ """ + bind = op.get_bind() + insp = sa.inspect(bind) + + if not insp.has_table("goal"): + op.create_table( + "goal", + sa.Column("id", sa.Integer(), primary_key=True, autoincrement=True), + sa.Column("title", sa.String(), nullable=True), + ) + + if not insp.has_table("task"): + op.create_table( + "task", + sa.Column("id", sa.Integer(), primary_key=True, autoincrement=True), + sa.Column("title", sa.String(), nullable=True), + sa.Column("description", sa.String(), nullable=True), + sa.Column("completed_at", sa.DateTime(), nullable=True), + sa.Column("goal_id", sa.Integer(), sa.ForeignKey("goal.id"), nullable=True), + ) + else: + # If the table already exists, apply the type cast for completed_at + with op.batch_alter_table("task", schema=None) as batch_op: + batch_op.alter_column( + "completed_at", + existing_type=sa.VARCHAR(), + type_=sa.DateTime(), + existing_nullable=True, + postgresql_using="completed_at::timestamp without time zone", + ) + + +def downgrade(): + bind = op.get_bind() + insp = sa.inspect(bind) + + # Safely reverse the column cast if table exists + if insp.has_table("task"): + with op.batch_alter_table("task", schema=None) as batch_op: + batch_op.alter_column( + "completed_at", + existing_type=sa.DateTime(), + type_=sa.VARCHAR(), + existing_nullable=True, + postgresql_using="completed_at::varchar", + ) + + # Drop tables (reverse of upgrade create) + if insp.has_table("task"): + op.drop_table("task") + if insp.has_table("goal"): + op.drop_table("goal") diff --git a/seed.py b/seed.py new file mode 100644 index 000000000..a86b38e96 --- /dev/null +++ b/seed.py @@ -0,0 +1,110 @@ +"""Seed the database with sample Goals and Tasks for local development. 
+ +Run with: `python seed.py` (ensure SQLALCHEMY_DATABASE_URI is set) +""" + +from dotenv import load_dotenv +from app import create_app +from app.db import db +from app.models.goal import Goal +from app.models.task import Task + + +goals_data = [ + {"title": "Build a habit of going outside daily"}, + {"title": "Career growth"}, + {"title": "Fitness"}, + {"title": "Learn Flask"}, +] + +tasks_data = [ + { + "title": "Go on my daily walk 🏞", + "description": "Notice something new every day", + "is_complete": False, + "goal_title": "Build a habit of going outside daily", + }, + {"title": "Answer forgotten email 📧", "description": "", "is_complete": False}, + {"title": "Water the garden 🌷", "description": "", "is_complete": False}, + {"title": "Pay my outstanding tickets 😭", "description": "", "is_complete": False}, + { + "title": "Do 20 push-ups", + "description": "Morning routine", + "is_complete": False, + "goal_title": "Fitness", + }, + { + "title": "Run 3km", + "description": "Easy pace", + "is_complete": False, + "goal_title": "Fitness", + }, + { + "title": "Read Flask docs", + "description": "Application factory, blueprints, testing", + "is_complete": False, + "goal_title": "Learn Flask", + }, + { + "title": "Build a demo endpoint", + "description": "Return JSON response and status codes", + "is_complete": False, + "goal_title": "Learn Flask", + }, +] + + +def get_by_field(cls, field_name, value): + stmt = db.select(cls).where(getattr(cls, field_name) == value) + return db.session.scalar(stmt) + + +def main(): + load_dotenv() + app = create_app() + with app.app_context(): + # Seed Goals + title_to_goal = {} + for g in goals_data: + goal = get_by_field(Goal, "title", g["title"]) or Goal(title=g["title"]) + if goal.id is None: + db.session.add(goal) + db.session.flush() # assign id + title_to_goal[goal.title] = goal + + # Seed Tasks (associate to goals if goal_title provided) + for t in tasks_data: + existing = get_by_field(Task, "title", t["title"]) # idempotent 
by title + if existing: + continue + task_payload = { + "title": t["title"], + "description": t["description"], + "is_complete": bool(t.get("is_complete", False)), + } + goal_title = t.get("goal_title") + if goal_title and goal_title in title_to_goal: + task_payload["goal_id"] = title_to_goal[goal_title].id + + task = Task.from_dict(task_payload) + db.session.add(task) + + db.session.commit() + print("Seed complete.\n") + + # Print a quick summary so you can see relationships at a glance + print("Goals and their tasks:") + for goal in db.session.scalars(db.select(Goal).order_by(Goal.id)): + task_titles = [t.title for t in goal.tasks] + print(f"- [{goal.id}] {goal.title}: {len(task_titles)} tasks") + for t in task_titles: + print(f" • {t}") + + print("\nUnassigned tasks:") + unassigned = db.session.scalars(db.select(Task).where(Task.goal_id.is_(None)).order_by(Task.id)) + for task in unassigned: + print(f"- [{task.id}] {task.title}") + + +if __name__ == "__main__": + main() diff --git a/tests/test_wave_01.py b/tests/test_wave_01.py index fac95a0a3..7afefb173 100644 --- a/tests/test_wave_01.py +++ b/tests/test_wave_01.py @@ -2,7 +2,7 @@ from app.db import db import pytest -@pytest.mark.skip(reason="No way to test this feature yet") +# # # @pytest.mark.skip(reason="No way to test this feature yet") def test_task_to_dict(): #Arrange new_task = Task(id = 1, title="Make My Bed", @@ -19,7 +19,7 @@ def test_task_to_dict(): assert task_dict["description"] == "Start the day off right!" assert task_dict["is_complete"] == False -@pytest.mark.skip(reason="No way to test this feature yet") +# # @pytest.mark.skip(reason="No way to test this feature yet") def test_task_to_dict_missing_id(): #Arrange new_task = Task(title="Make My Bed", @@ -36,7 +36,7 @@ def test_task_to_dict_missing_id(): assert task_dict["description"] == "Start the day off right!" 
assert task_dict["is_complete"] == False -@pytest.mark.skip(reason="No way to test this feature yet") +# # @pytest.mark.skip(reason="No way to test this feature yet") def test_task_to_dict_missing_title(): #Arrange new_task = Task(id = 1, @@ -53,7 +53,7 @@ def test_task_to_dict_missing_title(): assert task_dict["description"] == "Start the day off right!" assert task_dict["is_complete"] == False -@pytest.mark.skip(reason="No way to test this feature yet") +# # @pytest.mark.skip(reason="No way to test this feature yet") def test_task_from_dict(): #Arrange task_dict = { @@ -70,7 +70,7 @@ def test_task_from_dict(): assert task_obj.description == "Start the day off right!" assert task_obj.completed_at is None -@pytest.mark.skip(reason="No way to test this feature yet") +# # @pytest.mark.skip(reason="No way to test this feature yet") def test_task_from_dict_no_title(): #Arrange task_dict = { @@ -82,7 +82,7 @@ def test_task_from_dict_no_title(): with pytest.raises(KeyError, match = 'title'): Task.from_dict(task_dict) -@pytest.mark.skip(reason="No way to test this feature yet") +# # @pytest.mark.skip(reason="No way to test this feature yet") def test_task_from_dict_no_description(): #Arrange task_dict = { @@ -94,7 +94,7 @@ def test_task_from_dict_no_description(): with pytest.raises(KeyError, match = 'description'): Task.from_dict(task_dict) -@pytest.mark.skip(reason="No way to test this feature yet") +# # @pytest.mark.skip(reason="No way to test this feature yet") def test_get_tasks_no_saved_tasks(client): # Act response = client.get("/tasks") @@ -105,7 +105,7 @@ def test_get_tasks_no_saved_tasks(client): assert response_body == [] -@pytest.mark.skip(reason="No way to test this feature yet") +# # @pytest.mark.skip(reason="No way to test this feature yet") def test_get_tasks_one_saved_tasks(client, one_task): # Act response = client.get("/tasks") @@ -124,7 +124,7 @@ def test_get_tasks_one_saved_tasks(client, one_task): ] -@pytest.mark.skip(reason="No way to test this 
feature yet") +# # @pytest.mark.skip(reason="No way to test this feature yet") def test_get_task(client, one_task): # Act response = client.get("/tasks/1") @@ -140,7 +140,7 @@ def test_get_task(client, one_task): } -@pytest.mark.skip(reason="No way to test this feature yet") +# # @pytest.mark.skip(reason="No way to test this feature yet") def test_get_task_not_found(client): # Act response = client.get("/tasks/1") @@ -149,13 +149,13 @@ def test_get_task_not_found(client): # Assert assert response.status_code == 404 - raise Exception("Complete test with assertion about response body") + assert response_body == {"details": "Not found"} # ***************************************************************** # **Complete test with assertion about response body*************** # ***************************************************************** -@pytest.mark.skip(reason="No way to test this feature yet") +# # @pytest.mark.skip(reason="No way to test this feature yet") def test_create_task(client): # Act response = client.post("/tasks", json={ @@ -181,7 +181,7 @@ def test_create_task(client): assert new_task.description == "Test Description" assert new_task.completed_at == None -@pytest.mark.skip(reason="No way to test this feature yet") +# # @pytest.mark.skip(reason="No way to test this feature yet") def test_update_task(client, one_task): # Act response = client.put("/tasks/1", json={ @@ -201,7 +201,7 @@ def test_update_task(client, one_task): -@pytest.mark.skip(reason="No way to test this feature yet") +# # @pytest.mark.skip(reason="No way to test this feature yet") def test_update_task_not_found(client): # Act response = client.put("/tasks/1", json={ @@ -213,13 +213,13 @@ def test_update_task_not_found(client): # Assert assert response.status_code == 404 - raise Exception("Complete test with assertion about response body") + assert response_body == {"details": "Not found"} # ***************************************************************** # **Complete test with assertion 
about response body*************** # ***************************************************************** -@pytest.mark.skip(reason="No way to test this feature yet") +# # @pytest.mark.skip(reason="No way to test this feature yet") def test_delete_task(client, one_task): # Act response = client.delete("/tasks/1") @@ -230,7 +230,7 @@ def test_delete_task(client, one_task): query = db.select(Task).where(Task.id == 1) assert db.session.scalar(query) == None -@pytest.mark.skip(reason="No way to test this feature yet") +# # @pytest.mark.skip(reason="No way to test this feature yet") def test_delete_task_not_found(client): # Act response = client.delete("/tasks/1") @@ -239,7 +239,7 @@ def test_delete_task_not_found(client): # Assert assert response.status_code == 404 - raise Exception("Complete test with assertion about response body") + assert response_body == {"details": "Not found"} # ***************************************************************** # **Complete test with assertion about response body*************** # ***************************************************************** @@ -247,7 +247,7 @@ def test_delete_task_not_found(client): assert db.session.scalars(db.select(Task)).all() == [] -@pytest.mark.skip(reason="No way to test this feature yet") +# # @pytest.mark.skip(reason="No way to test this feature yet") def test_create_task_must_contain_title(client): # Act response = client.post("/tasks", json={ @@ -264,7 +264,7 @@ def test_create_task_must_contain_title(client): assert db.session.scalars(db.select(Task)).all() == [] -@pytest.mark.skip(reason="No way to test this feature yet") +# # @pytest.mark.skip(reason="No way to test this feature yet") def test_create_task_must_contain_description(client): # Act response = client.post("/tasks", json={ diff --git a/tests/test_wave_02.py b/tests/test_wave_02.py index a087e0909..651e3aebd 100644 --- a/tests/test_wave_02.py +++ b/tests/test_wave_02.py @@ -1,7 +1,7 @@ import pytest -@pytest.mark.skip(reason="No way to 
test this feature yet") +# @pytest.mark.skip(reason="No way to test this feature yet") def test_get_tasks_sorted_asc(client, three_tasks): # Act response = client.get("/tasks?sort=asc") @@ -29,7 +29,7 @@ def test_get_tasks_sorted_asc(client, three_tasks): ] -@pytest.mark.skip(reason="No way to test this feature yet") +# @pytest.mark.skip(reason="No way to test this feature yet") def test_get_tasks_sorted_desc(client, three_tasks): # Act response = client.get("/tasks?sort=desc") diff --git a/tests/test_wave_03.py b/tests/test_wave_03.py index d7d441695..9cc5f3568 100644 --- a/tests/test_wave_03.py +++ b/tests/test_wave_03.py @@ -6,7 +6,7 @@ import pytest -@pytest.mark.skip(reason="No way to test this feature yet") +# @pytest.mark.skip(reason="No way to test this feature yet") def test_mark_complete_on_incomplete_task(client, one_task): # Arrange """ @@ -34,7 +34,7 @@ def test_mark_complete_on_incomplete_task(client, one_task): assert db.session.scalar(query).completed_at -@pytest.mark.skip(reason="No way to test this feature yet") +# @pytest.mark.skip(reason="No way to test this feature yet") def test_mark_incomplete_on_complete_task(client, completed_task): # Act response = client.patch("/tasks/1/mark_incomplete") @@ -46,7 +46,7 @@ def test_mark_incomplete_on_complete_task(client, completed_task): assert db.session.scalar(query).completed_at == None -@pytest.mark.skip(reason="No way to test this feature yet") +# @pytest.mark.skip(reason="No way to test this feature yet") def test_mark_complete_on_completed_task(client, completed_task): # Arrange """ @@ -74,7 +74,7 @@ def test_mark_complete_on_completed_task(client, completed_task): query = db.select(Task).where(Task.id == 1) assert db.session.scalar(query).completed_at -@pytest.mark.skip(reason="No way to test this feature yet") +# @pytest.mark.skip(reason="No way to test this feature yet") def test_mark_incomplete_on_incomplete_task(client, one_task): # Act response = client.patch("/tasks/1/mark_incomplete") @@ 
-86,7 +86,7 @@ def test_mark_incomplete_on_incomplete_task(client, one_task): assert db.session.scalar(query).completed_at == None -@pytest.mark.skip(reason="No way to test this feature yet") +# @pytest.mark.skip(reason="No way to test this feature yet") def test_mark_complete_missing_task(client): # Act response = client.patch("/tasks/1/mark_complete") @@ -95,13 +95,13 @@ def test_mark_complete_missing_task(client): # Assert assert response.status_code == 404 - raise Exception("Complete test with assertion about response body") + assert response_body == {"details": "Not found"} # ***************************************************************** # **Complete test with assertion about response body*************** # ***************************************************************** -@pytest.mark.skip(reason="No way to test this feature yet") +# @pytest.mark.skip(reason="No way to test this feature yet") def test_mark_incomplete_missing_task(client): # Act response = client.patch("/tasks/1/mark_incomplete") @@ -110,7 +110,7 @@ def test_mark_incomplete_missing_task(client): # Assert assert response.status_code == 404 - raise Exception("Complete test with assertion about response body") + assert response_body == {"details": "Not found"} # ***************************************************************** # **Complete test with assertion about response body*************** # ***************************************************************** diff --git a/tests/test_wave_05.py b/tests/test_wave_05.py index b7cc330ae..d11fb92fc 100644 --- a/tests/test_wave_05.py +++ b/tests/test_wave_05.py @@ -1,7 +1,7 @@ from app.models.goal import Goal import pytest -@pytest.mark.skip(reason="No way to test this feature yet") +# @pytest.mark.skip(reason="No way to test this feature yet") def test_goal_to_dict(): #Arrange new_goal = Goal(id=1, title="Seize the Day!") @@ -13,7 +13,7 @@ def test_goal_to_dict(): assert goal_dict["id"] == 1 assert goal_dict["title"] == "Seize the Day!" 
-@pytest.mark.skip(reason="No way to test this feature yet") +# @pytest.mark.skip(reason="No way to test this feature yet") def test_goal_to_dict_no_id(): #Arrange new_goal = Goal(title="Seize the Day!") @@ -25,7 +25,7 @@ def test_goal_to_dict_no_id(): assert goal_dict["id"] is None assert goal_dict["title"] == "Seize the Day!" -@pytest.mark.skip(reason="No way to test this feature yet") +# @pytest.mark.skip(reason="No way to test this feature yet") def test_goal_to_dict_no_title(): #Arrange new_goal = Goal(id=1) @@ -39,7 +39,7 @@ def test_goal_to_dict_no_title(): -@pytest.mark.skip(reason="No way to test this feature yet") +# @pytest.mark.skip(reason="No way to test this feature yet") def test_goal_from_dict(): #Arrange goal_dict = { @@ -52,7 +52,7 @@ def test_goal_from_dict(): #Assert assert goal_obj.title == "Seize the Day!" -@pytest.mark.skip(reason="No way to test this feature yet") +# @pytest.mark.skip(reason="No way to test this feature yet") def test_goal_from_dict_no_title(): #Arrange goal_dict = { @@ -63,7 +63,7 @@ def test_goal_from_dict_no_title(): Goal.from_dict(goal_dict) -@pytest.mark.skip(reason="No way to test this feature yet") +# @pytest.mark.skip(reason="No way to test this feature yet") def test_get_goals_no_saved_goals(client): # Act response = client.get("/goals") @@ -74,7 +74,7 @@ def test_get_goals_no_saved_goals(client): assert response_body == [] -@pytest.mark.skip(reason="No way to test this feature yet") +# @pytest.mark.skip(reason="No way to test this feature yet") def test_get_goals_one_saved_goal(client, one_goal): # Act response = client.get("/goals") @@ -91,7 +91,7 @@ def test_get_goals_one_saved_goal(client, one_goal): ] -@pytest.mark.skip(reason="No way to test this feature yet") +# @pytest.mark.skip(reason="No way to test this feature yet") def test_get_goal(client, one_goal): # Act response = client.get("/goals/1") @@ -105,22 +105,18 @@ def test_get_goal(client, one_goal): } -@pytest.mark.skip(reason="test to be completed by 
student") +# @pytest.mark.skip(reason="test to be completed by student") def test_get_goal_not_found(client): - pass # Act response = client.get("/goals/1") response_body = response.get_json() - raise Exception("Complete test") # Assert - # ---- Complete Test ---- - # assertion 1 goes here - # assertion 2 goes here - # ---- Complete Test ---- + assert response.status_code == 404 + assert response_body == {"details": "Not found"} -@pytest.mark.skip(reason="No way to test this feature yet") +# @pytest.mark.skip(reason="No way to test this feature yet") def test_create_goal(client): # Act response = client.post("/goals", json={ @@ -136,34 +132,36 @@ def test_create_goal(client): } -@pytest.mark.skip(reason="test to be completed by student") +# @pytest.mark.skip(reason="test to be completed by student") def test_update_goal(client, one_goal): - raise Exception("Complete test") # Act - # ---- Complete Act Here ---- + response = client.put("/goals/1", json={ + "title": "Updated Goal Title" + }) # Assert - # ---- Complete Assertions Here ---- - # assertion 1 goes here - # assertion 2 goes here - # assertion 3 goes here - # ---- Complete Assertions Here ---- + assert response.status_code == 204 + + # Verify the update + response = client.get("/goals/1") + response_body = response.get_json() + assert response_body["title"] == "Updated Goal Title" -@pytest.mark.skip(reason="test to be completed by student") +# @pytest.mark.skip(reason="test to be completed by student") def test_update_goal_not_found(client): - raise Exception("Complete test") # Act - # ---- Complete Act Here ---- + response = client.put("/goals/1", json={ + "title": "Updated Goal Title" + }) + response_body = response.get_json() # Assert - # ---- Complete Assertions Here ---- - # assertion 1 goes here - # assertion 2 goes here - # ---- Complete Assertions Here ---- + assert response.status_code == 404 + assert response_body == {"details": "Not found"} -@pytest.mark.skip(reason="No way to test this feature 
yet") +# @pytest.mark.skip(reason="No way to test this feature yet") def test_delete_goal(client, one_goal): # Act response = client.delete("/goals/1") @@ -176,29 +174,25 @@ assert response.status_code == 404 response_body = response.get_json() - assert "message" in response_body + assert response_body == {"details": "Not found"} - raise Exception("Complete test with assertion about response body") # ***************************************************************** # **Complete test with assertion about response body*************** # ***************************************************************** -@pytest.mark.skip(reason="test to be completed by student") +# @pytest.mark.skip(reason="test to be completed by student") def test_delete_goal_not_found(client): - raise Exception("Complete test") - # Act - # ---- Complete Act Here ---- + response = client.delete("/goals/1") + response_body = response.get_json() # Assert - # ---- Complete Assertions Here ---- - # assertion 1 goes here - # assertion 2 goes here - # ---- Complete Assertions Here ---- + assert response.status_code == 404 + assert response_body == {"details": "Not found"} -@pytest.mark.skip(reason="No way to test this feature yet") +# @pytest.mark.skip(reason="No way to test this feature yet") def test_create_goal_missing_title(client): # Act response = client.post("/goals", json={}) diff --git a/tests/test_wave_06.py b/tests/test_wave_06.py index 727fce93a..ea82d27ed 100644 --- a/tests/test_wave_06.py +++ b/tests/test_wave_06.py @@ -3,7 +3,7 @@ import pytest -@pytest.mark.skip(reason="No way to test this feature yet") +# @pytest.mark.skip(reason="No way to test this feature yet") def test_post_task_ids_to_goal(client, one_goal, three_tasks): # Act response = client.post("/goals/1/tasks", json={ @@ -25,7 +25,7 @@ def test_post_task_ids_to_goal(client, one_goal, three_tasks): assert len(db.session.scalar(query).tasks) == 3 -@pytest.mark.skip(reason="No way to test
this feature yet") +# @pytest.mark.skip(reason="No way to test this feature yet") def test_post_task_ids_to_goal_overwrites_existing_tasks(client, one_task_belongs_to_one_goal, three_tasks): # Act response = client.post("/goals/1/tasks", json={ @@ -45,7 +45,7 @@ def test_post_task_ids_to_goal_overwrites_existing_tasks(client, one_task_belong assert len(db.session.scalar(query).tasks) == 2 -@pytest.mark.skip(reason="No way to test this feature yet") +# @pytest.mark.skip(reason="No way to test this feature yet") def test_get_tasks_for_specific_goal_no_goal(client): # Act response = client.get("/goals/1/tasks") @@ -53,14 +53,10 @@ # Assert assert response.status_code == 404 + assert response_body == {"details": "Not found"} - raise Exception("Complete test with assertion about response body") - # ***************************************************************** - # **Complete test with assertion about response body*************** - # ***************************************************************** - -@pytest.mark.skip(reason="No way to test this feature yet") +# @pytest.mark.skip(reason="No way to test this feature yet") def test_get_tasks_for_specific_goal_no_tasks(client, one_goal): # Act response = client.get("/goals/1/tasks") @@ -77,7 +73,7 @@ } -@pytest.mark.skip(reason="No way to test this feature yet") +# @pytest.mark.skip(reason="No way to test this feature yet") def test_get_tasks_for_specific_goal(client, one_task_belongs_to_one_goal): # Act response = client.get("/goals/1/tasks") @@ -102,7 +98,7 @@ } -@pytest.mark.skip(reason="No way to test this feature yet") +# @pytest.mark.skip(reason="No way to test this feature yet") def test_get_task_includes_goal_id(client, one_task_belongs_to_one_goal): response = client.get("/tasks/1") response_body =
response.get_json() diff --git a/tests/test_wave_07.py b/tests/test_wave_07.py index 7e7cef55a..55f3eec05 100644 --- a/tests/test_wave_07.py +++ b/tests/test_wave_07.py @@ -2,9 +2,9 @@ from werkzeug.exceptions import HTTPException from app.models.goal import Goal from app.models.task import Task -from app.routes.route_utilities import create_model, validate_model +from app.routes.routes_utilities import create_model, validate_model -@pytest.mark.skip(reason="No way to test this feature yet") +# @pytest.mark.skip(reason="No way to test this feature yet") def test_route_utilities_validate_model_with_task(client, three_tasks): #Act task_1 = validate_model(Task, 1) @@ -24,7 +24,7 @@ def test_route_utilities_validate_model_with_task(client, three_tasks): assert task_3.title == "Pay my outstanding tickets 😭" -@pytest.mark.skip(reason="No way to test this feature yet") +# @pytest.mark.skip(reason="No way to test this feature yet") def test_route_utilities_validate_model_with_task_invalid_id(client, three_tasks): #Act & Assert # Calling `validate_model` without being invoked by a route will @@ -35,25 +35,20 @@ def test_route_utilities_validate_model_with_task_invalid_id(client, three_tasks # Test that the correct status code and response message are returned response = e.value.get_response() assert response.status_code == 400 + assert response.get_json() == {"details": "Invalid id"} - raise Exception("Complete test with an assertion about the response body") - # ***************************************************************************** - # ** Complete test with an assertion about the response body **************** - # ***************************************************************************** - -@pytest.mark.skip(reason="No way to test this feature yet") +# @pytest.mark.skip(reason="No way to test this feature yet") def test_route_utilities_validate_model_with_task_missing_id(client, three_tasks): #Act & Assert with pytest.raises(HTTPException) as e: result_task = 
validate_model(Task, 4) - raise Exception("Complete test with assertion status code and response body") - # ***************************************************************************** - # **Complete test with assertion about status code response body*************** - # ***************************************************************************** + response = e.value.get_response() + assert response.status_code == 404 + assert response.get_json() == {"details": "Not found"} -@pytest.mark.skip(reason="No way to test this feature yet") +# @pytest.mark.skip(reason="No way to test this feature yet") def test_route_utilities_validate_model_with_goal(client, one_goal): #Act goal_1 = validate_model(Goal, 1) @@ -62,29 +57,27 @@ def test_route_utilities_validate_model_with_goal(client, one_goal): assert goal_1.id == 1 assert goal_1.title == "Build a habit of going outside daily" -@pytest.mark.skip(reason="No way to test this feature yet") +# @pytest.mark.skip(reason="No way to test this feature yet") def test_route_utilities_validate_model_with_goal_invalid_id(client, one_goal): #Act & Assert with pytest.raises(HTTPException) as e: result_task = validate_model(Goal, "One") - raise Exception("Complete test with assertion status code and response body") - # ***************************************************************************** - # **Complete test with assertion about status code response body*************** - # ***************************************************************************** + response = e.value.get_response() + assert response.status_code == 400 + assert response.get_json() == {"details": "Invalid id"} -@pytest.mark.skip(reason="No way to test this feature yet") +# @pytest.mark.skip(reason="No way to test this feature yet") def test_route_utilities_validate_model_with_goal_missing_id(client, one_goal): #Act & Assert with pytest.raises(HTTPException) as e: result_task = validate_model(Goal, 4) - raise Exception("Complete test with assertion status code 
and response body") - # ***************************************************************************** - # **Complete test with assertion about status code response body*************** - # ***************************************************************************** + response = e.value.get_response() + assert response.status_code == 404 + assert response.get_json() == {"details": "Not found"} -@pytest.mark.skip(reason="No way to test this feature yet") +# @pytest.mark.skip(reason="No way to test this feature yet") def test_route_utilities_create_model_with_task(client): #Arrange request_body = { @@ -103,7 +96,7 @@ def test_route_utilities_create_model_with_task(client): assert response[0]["is_complete"] == False assert response[1] == 201 -@pytest.mark.skip(reason="No way to test this feature yet") +# @pytest.mark.skip(reason="No way to test this feature yet") def test_route_utilities_create_model_with_task_missing_title(client): #Arrange request_body = { @@ -120,7 +113,7 @@ def test_route_utilities_create_model_with_task_missing_title(client): assert response.get_json() == {"details": "Invalid data"} -@pytest.mark.skip(reason="No way to test this feature yet") +# @pytest.mark.skip(reason="No way to test this feature yet") def test_route_utilities_create_model_with_goal(client): #Arrange request_body = { @@ -135,7 +128,7 @@ def test_route_utilities_create_model_with_goal(client): assert response[0]["title"] == "Seize the Day!" 
assert response[1] == 201 -@pytest.mark.skip(reason="No way to test this feature yet") +# @pytest.mark.skip(reason="No way to test this feature yet") def test_route_utilities_create_model_with_goal_missing_title(client): #Arrange request_body = { @@ -145,7 +138,6 @@ def test_route_utilities_create_model_with_goal_missing_title(client): with pytest.raises(HTTPException) as e: create_model(Goal, request_body) - raise Exception("Complete test with assertion status code and response body") - # ***************************************************************************** - # **Complete test with assertion about status code response body*************** - # ***************************************************************************** + response = e.value.get_response() + assert response.status_code == 400 + assert response.get_json() == {"details": "Invalid data"}