diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 42da1318..29576396 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,4 +1,6 @@ -name: Python CI +# This workflow can be troubleshooted with act https://github.com/nektos/act +# For example: act --job run_tests --matrix toxenv:e2e +name: Run tests on: push: @@ -20,7 +22,7 @@ jobs: matrix: os: [ubuntu-latest] python-version: ['3.11', '3.12'] - toxenv: [py, quality, django42] + toxenv: [py, quality, django42, e2e] steps: - uses: actions/checkout@v4 diff --git a/Makefile b/Makefile index aba82c42..ed97a608 100644 --- a/Makefile +++ b/Makefile @@ -67,7 +67,7 @@ piptools: ## install pinned version of pip-compile and pip-sync requirements: clean_tox piptools ## install development environment requirements pip-sync -q requirements/dev.txt requirements/private.* -test-all: clean test test-quality test-pii selfcheck ## run all tests +test-all: selfcheck clean test test-quality test-pii test-e2e ## run all tests test: ## run unit tests pytest @@ -94,6 +94,15 @@ test-pii: export DJANGO_SETTINGS_MODULE=forum.settings.test test-pii: ## # check for PII annotations on all Django models code_annotations django_find_annotations --config_file .pii_annotations.yml --lint --report --coverage +test-e2e: e2e-stop-services e2e-start-services # run end-to-end tests + pytest tests/e2e + +e2e-start-services: # Start dependency containers necessary for e2e tests + docker compose -f tests/e2e/docker-compose.yml --project-name forum_e2e up -d + +e2e-stop-services: # Stop dependency containers necessary for e2e tests + docker compose -f tests/e2e/docker-compose.yml --project-name forum_e2e down + selfcheck: ## check that the Makefile is well-formed @echo "The Makefile is well-formed." diff --git a/forum/models/model_utils.py b/forum/models/model_utils.py index f58a0716..7bc116f1 100644 --- a/forum/models/model_utils.py +++ b/forum/models/model_utils.py @@ -1237,9 +1237,23 @@ def build_course_stats(author_id: str, course_id: str) -> None: "$group": { "_id": {"type": "$_type", "is_reply": "$is_reply"}, "count": {"$sum": 1}, - "active_flags": {"$sum": {"$gt": [{"$size": "$abuse_flaggers"}, 0]}}, + "active_flags": { + "$sum": { + "$cond": { + "if": {"$gt": [{"$size": "$abuse_flaggers"}, 0]}, + "then": 1, + "else": 0, + } + } + }, "inactive_flags": { - "$sum": {"$gt": [{"$size": "$historical_abuse_flaggers"}, 0]} + "$sum": { + "$cond": { + "if": {"$gt": [{"$size": "$historical_abuse_flaggers"}, 0]}, + "then": 1, + "else": 0, + } + } }, "latest_update_at": {"$max": "$updated_at"}, } diff --git a/forum/search/comment_search.py b/forum/search/comment_search.py index 2793ffa1..e162d8c8 100644 --- a/forum/search/comment_search.py +++ b/forum/search/comment_search.py @@ -5,7 +5,6 @@ from typing import Any, Optional from forum.constants import FORUM_MAX_DEEP_SEARCH_COMMENT_COUNT - from forum.models import CommentThread from forum.search.es import ElasticsearchModelMixin diff --git a/forum/search/es.py b/forum/search/es.py index 55646943..189dae91 100644 --- a/forum/search/es.py +++ b/forum/search/es.py @@ -7,7 +7,7 @@ from django.conf import settings from elasticsearch import Elasticsearch -from forum.models import BaseContents, MODEL_INDICES +from forum.models import MODEL_INDICES, BaseContents __all__ = ["Elasticsearch", "ElasticsearchModelMixin"] diff --git a/forum/settings/test.py b/forum/settings/test.py index ecb7b130..a9a8b752 100644 --- a/forum/settings/test.py +++ b/forum/settings/test.py @@ -66,7 +66,7 @@ def root(*args: str) 
-> str: FORUM_MONGODB_CLIENT_PARAMETERS: dict[str, str] = {} FORUM_MONGODB_AUTH_PARAMETERS: dict[str, str] = {} -FORUM_ENABLE_ELASTIC_SEARCH = False +FORUM_ENABLE_ELASTIC_SEARCH = True if FORUM_ENABLE_ELASTIC_SEARCH: FORUM_ELASTIC_SEARCH_CONFIG = [ { diff --git a/forum/signals.py b/forum/signals.py index 66a2aed5..ca192952 100644 --- a/forum/signals.py +++ b/forum/signals.py @@ -21,6 +21,7 @@ comment_updated = Signal() comment_thread_updated = Signal() +# TODO this setting should not exist. We need elasticsearch in production, and there is no way around it. if settings.FORUM_ENABLE_ELASTIC_SEARCH: # Connect the handlers when FORUM_ENABLE_ELASTIC_SEARCH is enabled. comment_deleted.connect(handle_comment_deletion) diff --git a/forum/views/search.py b/forum/views/search.py index 053b4906..9edfc93e 100644 --- a/forum/views/search.py +++ b/forum/views/search.py @@ -31,52 +31,49 @@ def _validate_and_extract_params(self, request: Request) -> dict[str, Any]: """ Validate and extract query parameters from the request. """ + data = request.query_params params: dict[str, Any] = {} # Required parameters - text = request.GET.get("text") + text = data.get("text") if not text: raise ValueError("text is required") params["text"] = text # Sort key validation VALID_SORT_KEYS = ("activity", "comments", "date", "votes") - sort_key = request.GET.get("sort_key", "date") + sort_key = data.get("sort_key", "date") if sort_key not in VALID_SORT_KEYS: raise ValueError("invalid sort_key") params["sort_key"] = sort_key # Pagination handling - page = request.GET.get("page", FORUM_DEFAULT_PAGE) + page = data.get("page", FORUM_DEFAULT_PAGE) try: params["page"] = int(page) except ValueError as exc: raise ValueError("Invalid page value.") from exc - per_page = request.GET.get("per_page", FORUM_DEFAULT_PER_PAGE) + per_page = data.get("per_page", FORUM_DEFAULT_PER_PAGE) try: params["per_page"] = int(per_page) except ValueError as exc: raise ValueError("Invalid per_page value.") from exc # Optional parameters with default values and type conversion - params["context"] = request.GET.get("context", "course") - params["user_id"] = request.GET.get("user_id", "") - params["course_id"] = request.GET.get("course_id", "") - params["author_id"] = request.GET.get("author_id") - params["thread_type"] = request.GET.get("thread_type") - params["flagged"] = request.GET.get("flagged", "false").lower() == "true" - params["unread"] = request.GET.get("unread", "false").lower() == "true" - params["unanswered"] = request.GET.get("unanswered", "false").lower() == "true" - params["unresponded"] = ( - request.GET.get("unresponded", "false").lower() == "true" - ) - params["count_flagged"] = ( - request.GET.get("count_flagged", "false").lower() == "true" - ) + params["context"] = data.get("context", "course") + params["user_id"] = data.get("user_id", "") + params["course_id"] = data.get("course_id", "") + params["author_id"] = data.get("author_id") + params["thread_type"] = data.get("thread_type") + params["flagged"] = data.get("flagged", "false").lower() == "true" + params["unread"] = data.get("unread", "false").lower() == "true" + params["unanswered"] = data.get("unanswered", "false").lower() == "true" + params["unresponded"] = data.get("unresponded", "false").lower() == "true" + params["count_flagged"] = data.get("count_flagged", "false").lower() == "true" # Group IDs extraction - params["group_ids"] = get_group_ids_from_params(request.GET) + params["group_ids"] = get_group_ids_from_params(data) return params @@ -122,13 +119,14 @@ def get(self, 
request: Request) -> Response: Returns: Response: A JSON response containing the search results, corrected text (if any), and total results. """ + try: params = self._validate_and_extract_params(request) except ValueError as error: return Response({"error": str(error)}, status=status.HTTP_400_BAD_REQUEST) thread_ids, corrected_text = self._get_thread_ids_from_indexes( - params["context"], params["group_ids"], request.GET, params["text"] + params["context"], params["group_ids"], request.query_params, params["text"] ) data: dict[str, Any] = handle_threads_query( diff --git a/forum/views/threads.py b/forum/views/threads.py index 2ed5115b..da39fdf5 100644 --- a/forum/views/threads.py +++ b/forum/views/threads.py @@ -11,7 +11,6 @@ from rest_framework.serializers import ValidationError from rest_framework.views import APIView -from forum.models.comments import Comment from forum.models.model_utils import ( delete_comments_of_a_thread, delete_subscriptions_of_a_thread, @@ -166,9 +165,10 @@ def delete(self, request: Request, thread_id: str) -> Response: result = CommentThread().delete(thread_id) delete_subscriptions_of_a_thread(thread_id) if result: - update_stats_for_course( - thread["author_id"], thread["course_id"], threads=-1 - ) + if not (thread["anonymous"] or thread["anonymous_to_peers"]): + update_stats_for_course( + thread["author_id"], thread["course_id"], threads=-1 + ) return Response(serialized_data, status=status.HTTP_200_OK) @@ -185,7 +185,7 @@ def put(self, request: Request, thread_id: str) -> Response: The details of the thread that is updated. """ try: - thread = validate_object(Comment, thread_id) + thread = validate_object(CommentThread, thread_id) except ObjectDoesNotExist: return Response( {"error": "thread does not exist"}, @@ -280,7 +280,8 @@ def post(self, request: Request) -> Response: ) thread = self.create_thread(data) - update_stats_for_course(thread["author_id"], thread["course_id"], threads=1) + if not (thread["anonymous"] or thread["anonymous_to_peers"]): + update_stats_for_course(thread["author_id"], thread["course_id"], threads=1) try: serialized_data = prepare_thread_api_response(thread, True, data) except ValidationError as error: diff --git a/requirements/base.txt b/requirements/base.txt index 06bd2257..00ff4c3c 100644 --- a/requirements/base.txt +++ b/requirements/base.txt @@ -6,13 +6,13 @@ # asgiref==3.8.1 # via django -certifi==2024.7.4 +certifi==2024.8.30 # via # elasticsearch # requests charset-normalizer==3.3.2 # via requests -django==4.2.15 +django==4.2.16 # via # -c https://raw.githubusercontent.com/edx/edx-lint/master/edx_lint/files/common_constraints.txt # -r requirements/base.in @@ -25,17 +25,17 @@ elasticsearch==7.13.4 # via # -c https://raw.githubusercontent.com/edx/edx-lint/master/edx_lint/files/common_constraints.txt # -r requirements/base.in -idna==3.7 +idna==3.10 # via requests -openedx-atlas==0.6.1 +openedx-atlas==0.6.2 # via -r requirements/base.in -pymongo==4.8.0 +pymongo==4.9.1 # via -r requirements/base.in requests==2.32.3 # via -r requirements/base.in sqlparse==0.5.1 # via django -urllib3==1.26.19 +urllib3==1.26.20 # via # elasticsearch # requests diff --git a/requirements/ci.txt b/requirements/ci.txt index cec2e76d..fe80970c 100644 --- a/requirements/ci.txt +++ b/requirements/ci.txt @@ -20,7 +20,7 @@ cachetools==5.5.0 # via # -r requirements/quality.txt # tox -certifi==2024.7.4 +certifi==2024.8.30 # via # -r requirements/quality.txt # elasticsearch @@ -64,7 +64,7 @@ distlib==0.3.8 # via # -r requirements/quality.txt # virtualenv 
-django==4.2.15 +django==4.2.16 # via # -c https://raw.githubusercontent.com/edx/edx-lint/master/edx_lint/files/common_constraints.txt # -r requirements/quality.txt @@ -81,24 +81,26 @@ django-stubs-ext==5.0.4 # django-stubs djangorestframework==3.15.2 # via -r requirements/quality.txt -djangorestframework-stubs==3.15.0 +djangorestframework-stubs==3.15.1 # via -r requirements/quality.txt dnspython==2.6.1 # via # -r requirements/quality.txt # pymongo -edx-lint==5.3.7 +edx-lint==5.4.0 # via -r requirements/quality.txt elasticsearch==7.13.4 # via # -c https://raw.githubusercontent.com/edx/edx-lint/master/edx_lint/files/common_constraints.txt # -r requirements/quality.txt -filelock==3.15.4 +faker==28.4.1 + # via -r requirements/quality.txt +filelock==3.16.1 # via # -r requirements/quality.txt # tox # virtualenv -idna==3.7 +idna==3.10 # via # -r requirements/quality.txt # requests @@ -122,16 +124,16 @@ mccabe==0.7.0 # via # -r requirements/quality.txt # pylint -mongomock==4.1.2 +mongomock==4.2.0.post1 # via -r requirements/quality.txt -mypy==1.11.1 +mypy==1.11.2 # via -r requirements/quality.txt mypy-extensions==1.0.0 # via # -r requirements/quality.txt # black # mypy -openedx-atlas==0.6.1 +openedx-atlas==0.6.2 # via -r requirements/quality.txt packaging==24.1 # via @@ -143,11 +145,11 @@ packaging==24.1 # tox pathspec==0.12.1 # via black -pbr==6.0.0 +pbr==6.1.0 # via # -r requirements/quality.txt # stevedore -platformdirs==4.2.2 +platformdirs==4.3.6 # via # -r requirements/quality.txt # black @@ -163,7 +165,7 @@ pycodestyle==2.12.1 # via -r requirements/quality.txt pydocstyle==6.3.0 # via -r requirements/quality.txt -pylint==3.2.6 +pylint==3.2.7 # via # -r requirements/quality.txt # edx-lint @@ -183,25 +185,33 @@ pylint-plugin-utils==0.8.2 # -r requirements/quality.txt # pylint-celery # pylint-django -pymongo==4.8.0 +pymongo==4.9.1 # via -r requirements/quality.txt -pyproject-api==1.7.1 +pyproject-api==1.8.0 # via # -r requirements/quality.txt # tox -pytest==8.3.2 +pytest==8.3.3 # via # -r requirements/quality.txt # pytest-cov # pytest-django pytest-cov==5.0.0 # via -r requirements/quality.txt -pytest-django==4.8.0 +pytest-django==4.9.0 # via -r requirements/quality.txt +python-dateutil==2.9.0.post0 + # via + # -r requirements/quality.txt + # faker python-slugify==8.0.4 # via # -r requirements/quality.txt # code-annotations +pytz==2024.2 + # via + # -r requirements/quality.txt + # mongomock pyyaml==6.0.2 # via # -r requirements/quality.txt @@ -218,6 +228,7 @@ six==1.16.0 # via # -r requirements/quality.txt # edx-lint + # python-dateutil snowballstemmer==2.2.0 # via # -r requirements/quality.txt @@ -226,7 +237,7 @@ sqlparse==0.5.1 # via # -r requirements/quality.txt # django -stevedore==5.2.0 +stevedore==5.3.0 # via # -r requirements/quality.txt # code-annotations @@ -238,9 +249,9 @@ tomlkit==0.13.2 # via # -r requirements/quality.txt # pylint -tox==4.18.0 +tox==4.20.0 # via -r requirements/quality.txt -types-pyyaml==6.0.12.20240808 +types-pyyaml==6.0.12.20240917 # via # -r requirements/quality.txt # django-stubs @@ -260,12 +271,12 @@ typing-extensions==4.12.2 # django-stubs-ext # djangorestframework-stubs # mypy -urllib3==1.26.19 +urllib3==1.26.20 # via # -r requirements/quality.txt # elasticsearch # requests -virtualenv==20.26.3 +virtualenv==20.26.5 # via # -r requirements/quality.txt # tox diff --git a/requirements/dev.txt b/requirements/dev.txt index 1df5c3cc..4f27be5e 100644 --- a/requirements/dev.txt +++ b/requirements/dev.txt @@ -18,7 +18,7 @@ astroid==3.2.4 # pylint-celery black==24.8.0 # 
via -r requirements/ci.txt -build==1.2.1 +build==1.2.2 # via # -r requirements/pip-tools.txt # pip-tools @@ -27,7 +27,7 @@ cachetools==5.5.0 # -r requirements/ci.txt # -r requirements/quality.txt # tox -certifi==2024.7.4 +certifi==2024.8.30 # via # -r requirements/ci.txt # -r requirements/quality.txt @@ -74,7 +74,7 @@ coverage[toml]==7.6.1 # -r requirements/ci.txt # -r requirements/quality.txt # pytest-cov -diff-cover==9.1.1 +diff-cover==9.2.0 # via -r requirements/dev.in dill==0.3.8 # via @@ -86,7 +86,7 @@ distlib==0.3.8 # -r requirements/ci.txt # -r requirements/quality.txt # virtualenv -django==4.2.15 +django==4.2.16 # via # -c https://raw.githubusercontent.com/edx/edx-lint/master/edx_lint/files/common_constraints.txt # -r requirements/ci.txt @@ -109,7 +109,7 @@ djangorestframework==3.15.2 # via # -r requirements/ci.txt # -r requirements/quality.txt -djangorestframework-stubs==3.15.0 +djangorestframework-stubs==3.15.1 # via # -r requirements/ci.txt # -r requirements/quality.txt @@ -118,9 +118,9 @@ dnspython==2.6.1 # -r requirements/ci.txt # -r requirements/quality.txt # pymongo -edx-i18n-tools==1.6.2 +edx-i18n-tools==1.6.3 # via -r requirements/dev.in -edx-lint==5.3.7 +edx-lint==5.4.0 # via # -r requirements/ci.txt # -r requirements/quality.txt @@ -129,13 +129,17 @@ elasticsearch==7.13.4 # -c https://raw.githubusercontent.com/edx/edx-lint/master/edx_lint/files/common_constraints.txt # -r requirements/ci.txt # -r requirements/quality.txt -filelock==3.15.4 +faker==28.4.1 + # via + # -r requirements/ci.txt + # -r requirements/quality.txt +filelock==3.16.1 # via # -r requirements/ci.txt # -r requirements/quality.txt # tox # virtualenv -idna==3.7 +idna==3.10 # via # -r requirements/ci.txt # -r requirements/quality.txt @@ -156,8 +160,12 @@ jinja2==3.1.4 # -r requirements/quality.txt # code-annotations # diff-cover -lxml==5.3.0 - # via edx-i18n-tools +lxml[html-clean,html_clean]==5.3.0 + # via + # edx-i18n-tools + # lxml-html-clean +lxml-html-clean==0.2.2 + # via lxml markupsafe==2.1.5 # via # -r requirements/ci.txt @@ -168,11 +176,11 @@ mccabe==0.7.0 # -r requirements/ci.txt # -r requirements/quality.txt # pylint -mongomock==4.1.2 +mongomock==4.2.0.post1 # via # -r requirements/ci.txt # -r requirements/quality.txt -mypy==1.11.1 +mypy==1.11.2 # via # -r requirements/ci.txt # -r requirements/quality.txt @@ -182,7 +190,7 @@ mypy-extensions==1.0.0 # -r requirements/quality.txt # black # mypy -openedx-atlas==0.6.1 +openedx-atlas==0.6.2 # via # -r requirements/ci.txt # -r requirements/quality.txt @@ -203,14 +211,14 @@ pathspec==0.12.1 # via # -r requirements/ci.txt # black -pbr==6.0.0 +pbr==6.1.0 # via # -r requirements/ci.txt # -r requirements/quality.txt # stevedore pip-tools==7.4.1 # via -r requirements/pip-tools.txt -platformdirs==4.2.2 +platformdirs==4.3.6 # via # -r requirements/ci.txt # -r requirements/quality.txt @@ -237,7 +245,7 @@ pydocstyle==6.3.0 # -r requirements/quality.txt pygments==2.18.0 # via diff-cover -pylint==3.2.6 +pylint==3.2.7 # via # -r requirements/ci.txt # -r requirements/quality.txt @@ -261,11 +269,11 @@ pylint-plugin-utils==0.8.2 # -r requirements/quality.txt # pylint-celery # pylint-django -pymongo==4.8.0 +pymongo==4.9.1 # via # -r requirements/ci.txt # -r requirements/quality.txt -pyproject-api==1.7.1 +pyproject-api==1.8.0 # via # -r requirements/ci.txt # -r requirements/quality.txt @@ -275,7 +283,7 @@ pyproject-hooks==1.1.0 # -r requirements/pip-tools.txt # build # pip-tools -pytest==8.3.2 +pytest==8.3.3 # via # -r requirements/ci.txt # -r requirements/quality.txt @@ 
-285,15 +293,25 @@ pytest-cov==5.0.0 # via # -r requirements/ci.txt # -r requirements/quality.txt -pytest-django==4.8.0 +pytest-django==4.9.0 # via # -r requirements/ci.txt # -r requirements/quality.txt +python-dateutil==2.9.0.post0 + # via + # -r requirements/ci.txt + # -r requirements/quality.txt + # faker python-slugify==8.0.4 # via # -r requirements/ci.txt # -r requirements/quality.txt # code-annotations +pytz==2024.2 + # via + # -r requirements/ci.txt + # -r requirements/quality.txt + # mongomock pyyaml==6.0.2 # via # -r requirements/ci.txt @@ -315,6 +333,7 @@ six==1.16.0 # -r requirements/ci.txt # -r requirements/quality.txt # edx-lint + # python-dateutil snowballstemmer==2.2.0 # via # -r requirements/ci.txt @@ -325,7 +344,7 @@ sqlparse==0.5.1 # -r requirements/ci.txt # -r requirements/quality.txt # django -stevedore==5.2.0 +stevedore==5.3.0 # via # -r requirements/ci.txt # -r requirements/quality.txt @@ -340,11 +359,11 @@ tomlkit==0.13.2 # -r requirements/ci.txt # -r requirements/quality.txt # pylint -tox==4.18.0 +tox==4.20.0 # via # -r requirements/ci.txt # -r requirements/quality.txt -types-pyyaml==6.0.12.20240808 +types-pyyaml==6.0.12.20240917 # via # -r requirements/ci.txt # -r requirements/quality.txt @@ -368,13 +387,13 @@ typing-extensions==4.12.2 # django-stubs-ext # djangorestframework-stubs # mypy -urllib3==1.26.19 +urllib3==1.26.20 # via # -r requirements/ci.txt # -r requirements/quality.txt # elasticsearch # requests -virtualenv==20.26.3 +virtualenv==20.26.5 # via # -r requirements/ci.txt # -r requirements/quality.txt diff --git a/requirements/doc.txt b/requirements/doc.txt index 5f1b86cd..a7158c2f 100644 --- a/requirements/doc.txt +++ b/requirements/doc.txt @@ -20,13 +20,13 @@ backports-tarfile==1.2.0 # via jaraco-context beautifulsoup4==4.12.3 # via pydata-sphinx-theme -build==1.2.1 +build==1.2.2 # via -r requirements/doc.in cachetools==5.5.0 # via # -r requirements/test.txt # tox -certifi==2024.7.4 +certifi==2024.8.30 # via # -r requirements/test.txt # elasticsearch @@ -57,7 +57,7 @@ distlib==0.3.8 # via # -r requirements/test.txt # virtualenv -django==4.2.15 +django==4.2.16 # via # -c https://raw.githubusercontent.com/edx/edx-lint/master/edx_lint/files/common_constraints.txt # -r requirements/test.txt @@ -68,9 +68,9 @@ dnspython==2.6.1 # via # -r requirements/test.txt # pymongo -doc8==1.1.1 +doc8==1.1.2 # via -r requirements/doc.in -docutils==0.20.1 +docutils==0.21.2 # via # doc8 # pydata-sphinx-theme @@ -81,18 +81,20 @@ elasticsearch==7.13.4 # via # -c https://raw.githubusercontent.com/edx/edx-lint/master/edx_lint/files/common_constraints.txt # -r requirements/test.txt -filelock==3.15.4 +faker==28.4.1 + # via -r requirements/test.txt +filelock==3.16.1 # via # -r requirements/test.txt # tox # virtualenv -idna==3.7 +idna==3.10 # via # -r requirements/test.txt # requests imagesize==1.4.1 # via sphinx -importlib-metadata==8.2.0 +importlib-metadata==8.5.0 # via # keyring # twine @@ -102,7 +104,7 @@ iniconfig==2.0.0 # pytest jaraco-classes==3.4.0 # via keyring -jaraco-context==5.3.0 +jaraco-context==6.0.1 # via keyring jaraco-functools==4.0.2 # via keyring @@ -111,7 +113,7 @@ jinja2==3.1.4 # -r requirements/test.txt # code-annotations # sphinx -keyring==25.3.0 +keyring==25.4.0 # via twine markdown-it-py==3.0.0 # via rich @@ -121,15 +123,15 @@ markupsafe==2.1.5 # jinja2 mdurl==0.1.2 # via markdown-it-py -mongomock==4.1.2 +mongomock==4.2.0.post1 # via -r requirements/test.txt -more-itertools==10.4.0 +more-itertools==10.5.0 # via # jaraco-classes # jaraco-functools 
nh3==0.2.18 # via readme-renderer -openedx-atlas==0.6.1 +openedx-atlas==0.6.2 # via -r requirements/test.txt packaging==24.1 # via @@ -141,13 +143,13 @@ packaging==24.1 # pytest # sphinx # tox -pbr==6.0.0 +pbr==6.1.0 # via # -r requirements/test.txt # stevedore pkginfo==1.10.0 # via twine -platformdirs==4.2.2 +platformdirs==4.3.6 # via # -r requirements/test.txt # tox @@ -167,32 +169,40 @@ pygments==2.18.0 # readme-renderer # rich # sphinx -pymongo==4.8.0 +pymongo==4.9.1 # via -r requirements/test.txt -pyproject-api==1.7.1 +pyproject-api==1.8.0 # via # -r requirements/test.txt # tox pyproject-hooks==1.1.0 # via build -pytest==8.3.2 +pytest==8.3.3 # via # -r requirements/test.txt # pytest-cov # pytest-django pytest-cov==5.0.0 # via -r requirements/test.txt -pytest-django==4.8.0 +pytest-django==4.9.0 # via -r requirements/test.txt +python-dateutil==2.9.0.post0 + # via + # -r requirements/test.txt + # faker python-slugify==8.0.4 # via # -r requirements/test.txt # code-annotations +pytz==2024.2 + # via + # -r requirements/test.txt + # mongomock pyyaml==6.0.2 # via # -r requirements/test.txt # code-annotations -readme-renderer==43.0 +readme-renderer==44.0 # via twine requests==2.32.3 # via @@ -206,12 +216,16 @@ restructuredtext-lint==1.4.0 # via doc8 rfc3986==2.0.0 # via twine -rich==13.7.1 +rich==13.8.1 # via twine sentinels==1.0.0 # via # -r requirements/test.txt # mongomock +six==1.16.0 + # via + # -r requirements/test.txt + # python-dateutil snowballstemmer==2.2.0 # via sphinx soupsieve==2.6 @@ -239,7 +253,7 @@ sqlparse==0.5.1 # via # -r requirements/test.txt # django -stevedore==5.2.0 +stevedore==5.3.0 # via # -r requirements/test.txt # code-annotations @@ -248,21 +262,21 @@ text-unidecode==1.3 # via # -r requirements/test.txt # python-slugify -tox==4.18.0 +tox==4.20.0 # via -r requirements/test.txt twine==5.1.1 # via -r requirements/doc.in typing-extensions==4.12.2 # via pydata-sphinx-theme -urllib3==1.26.19 +urllib3==1.26.20 # via # -r requirements/test.txt # elasticsearch # requests # twine -virtualenv==20.26.3 +virtualenv==20.26.5 # via # -r requirements/test.txt # tox -zipp==3.20.0 +zipp==3.20.2 # via importlib-metadata diff --git a/requirements/pip-tools.txt b/requirements/pip-tools.txt index fedf88df..84acdf9b 100644 --- a/requirements/pip-tools.txt +++ b/requirements/pip-tools.txt @@ -4,7 +4,7 @@ # # make upgrade # -build==1.2.1 +build==1.2.2 # via pip-tools click==8.1.7 # via pip-tools diff --git a/requirements/pip.txt b/requirements/pip.txt index 5f8b9c04..36c777e2 100644 --- a/requirements/pip.txt +++ b/requirements/pip.txt @@ -10,5 +10,5 @@ wheel==0.44.0 # The following packages are considered to be unsafe in a requirements file: pip==24.2 # via -r requirements/pip.in -setuptools==72.2.0 +setuptools==75.1.0 # via -r requirements/pip.in diff --git a/requirements/quality.txt b/requirements/quality.txt index ac624fa3..3d848d48 100644 --- a/requirements/quality.txt +++ b/requirements/quality.txt @@ -17,7 +17,7 @@ cachetools==5.5.0 # via # -r requirements/test.txt # tox -certifi==2024.7.4 +certifi==2024.8.30 # via # -r requirements/test.txt # elasticsearch @@ -56,7 +56,7 @@ distlib==0.3.8 # via # -r requirements/test.txt # virtualenv -django==4.2.15 +django==4.2.16 # via # -c https://raw.githubusercontent.com/edx/edx-lint/master/edx_lint/files/common_constraints.txt # -r requirements/test.txt @@ -69,24 +69,26 @@ django-stubs-ext==5.0.4 # via django-stubs djangorestframework==3.15.2 # via -r requirements/test.txt -djangorestframework-stubs==3.15.0 +djangorestframework-stubs==3.15.1 # 
via -r requirements/quality.in dnspython==2.6.1 # via # -r requirements/test.txt # pymongo -edx-lint==5.3.7 +edx-lint==5.4.0 # via -r requirements/quality.in elasticsearch==7.13.4 # via # -c https://raw.githubusercontent.com/edx/edx-lint/master/edx_lint/files/common_constraints.txt # -r requirements/test.txt -filelock==3.15.4 +faker==28.4.1 + # via -r requirements/test.txt +filelock==3.16.1 # via # -r requirements/test.txt # tox # virtualenv -idna==3.7 +idna==3.10 # via # -r requirements/test.txt # requests @@ -108,13 +110,13 @@ markupsafe==2.1.5 # jinja2 mccabe==0.7.0 # via pylint -mongomock==4.1.2 +mongomock==4.2.0.post1 # via -r requirements/test.txt -mypy==1.11.1 +mypy==1.11.2 # via -r requirements/quality.in mypy-extensions==1.0.0 # via mypy -openedx-atlas==0.6.1 +openedx-atlas==0.6.2 # via -r requirements/test.txt packaging==24.1 # via @@ -123,11 +125,11 @@ packaging==24.1 # pyproject-api # pytest # tox -pbr==6.0.0 +pbr==6.1.0 # via # -r requirements/test.txt # stevedore -platformdirs==4.2.2 +platformdirs==4.3.6 # via # -r requirements/test.txt # pylint @@ -142,7 +144,7 @@ pycodestyle==2.12.1 # via -r requirements/quality.in pydocstyle==6.3.0 # via -r requirements/quality.in -pylint==3.2.6 +pylint==3.2.7 # via # edx-lint # pylint-celery @@ -156,25 +158,33 @@ pylint-plugin-utils==0.8.2 # via # pylint-celery # pylint-django -pymongo==4.8.0 +pymongo==4.9.1 # via -r requirements/test.txt -pyproject-api==1.7.1 +pyproject-api==1.8.0 # via # -r requirements/test.txt # tox -pytest==8.3.2 +pytest==8.3.3 # via # -r requirements/test.txt # pytest-cov # pytest-django pytest-cov==5.0.0 # via -r requirements/test.txt -pytest-django==4.8.0 +pytest-django==4.9.0 # via -r requirements/test.txt +python-dateutil==2.9.0.post0 + # via + # -r requirements/test.txt + # faker python-slugify==8.0.4 # via # -r requirements/test.txt # code-annotations +pytz==2024.2 + # via + # -r requirements/test.txt + # mongomock pyyaml==6.0.2 # via # -r requirements/test.txt @@ -188,14 +198,17 @@ sentinels==1.0.0 # -r requirements/test.txt # mongomock six==1.16.0 - # via edx-lint + # via + # -r requirements/test.txt + # edx-lint + # python-dateutil snowballstemmer==2.2.0 # via pydocstyle sqlparse==0.5.1 # via # -r requirements/test.txt # django -stevedore==5.2.0 +stevedore==5.3.0 # via # -r requirements/test.txt # code-annotations @@ -205,9 +218,9 @@ text-unidecode==1.3 # python-slugify tomlkit==0.13.2 # via pylint -tox==4.18.0 +tox==4.20.0 # via -r requirements/test.txt -types-pyyaml==6.0.12.20240808 +types-pyyaml==6.0.12.20240917 # via # django-stubs # djangorestframework-stubs @@ -223,12 +236,12 @@ typing-extensions==4.12.2 # django-stubs-ext # djangorestframework-stubs # mypy -urllib3==1.26.19 +urllib3==1.26.20 # via # -r requirements/test.txt # elasticsearch # requests -virtualenv==20.26.3 +virtualenv==20.26.5 # via # -r requirements/test.txt # tox diff --git a/requirements/test.in b/requirements/test.in index 9d0daaee..1fa5e073 100644 --- a/requirements/test.in +++ b/requirements/test.in @@ -8,3 +8,4 @@ pytest-django # pytest extension for better Django support code-annotations # provides commands used by the pii_check tox target. 
tox mongomock +Faker diff --git a/requirements/test.txt b/requirements/test.txt index 730b3203..dc369048 100644 --- a/requirements/test.txt +++ b/requirements/test.txt @@ -10,7 +10,7 @@ asgiref==3.8.1 # django cachetools==5.5.0 # via tox -certifi==2024.7.4 +certifi==2024.8.30 # via # -r requirements/base.txt # elasticsearch @@ -45,11 +45,13 @@ elasticsearch==7.13.4 # via # -c https://raw.githubusercontent.com/edx/edx-lint/master/edx_lint/files/common_constraints.txt # -r requirements/base.txt -filelock==3.15.4 +faker==28.4.1 + # via -r requirements/test.in +filelock==3.16.1 # via # tox # virtualenv -idna==3.7 +idna==3.10 # via # -r requirements/base.txt # requests @@ -59,9 +61,9 @@ jinja2==3.1.4 # via code-annotations markupsafe==2.1.5 # via jinja2 -mongomock==4.1.2 +mongomock==4.2.0.post1 # via -r requirements/test.in -openedx-atlas==0.6.1 +openedx-atlas==0.6.2 # via -r requirements/base.txt packaging==24.1 # via @@ -69,9 +71,9 @@ packaging==24.1 # pyproject-api # pytest # tox -pbr==6.0.0 +pbr==6.1.0 # via stevedore -platformdirs==4.2.2 +platformdirs==4.3.6 # via # tox # virtualenv @@ -79,40 +81,46 @@ pluggy==1.5.0 # via # pytest # tox -pymongo==4.8.0 +pymongo==4.9.1 # via -r requirements/base.txt -pyproject-api==1.7.1 +pyproject-api==1.8.0 # via tox -pytest==8.3.2 +pytest==8.3.3 # via # pytest-cov # pytest-django pytest-cov==5.0.0 # via -r requirements/test.in -pytest-django==4.8.0 +pytest-django==4.9.0 # via -r requirements/test.in +python-dateutil==2.9.0.post0 + # via faker python-slugify==8.0.4 # via code-annotations +pytz==2024.2 + # via mongomock pyyaml==6.0.2 # via code-annotations requests==2.32.3 # via -r requirements/base.txt sentinels==1.0.0 # via mongomock +six==1.16.0 + # via python-dateutil sqlparse==0.5.1 # via # -r requirements/base.txt # django -stevedore==5.2.0 +stevedore==5.3.0 # via code-annotations text-unidecode==1.3 # via python-slugify -tox==4.18.0 +tox==4.20.0 # via -r requirements/test.in -urllib3==1.26.19 +urllib3==1.26.20 # via # -r requirements/base.txt # elasticsearch # requests -virtualenv==20.26.3 +virtualenv==20.26.5 # via tox diff --git a/test_utils/mock_es_backend.py b/test_utils/mock_es_backend.py new file mode 100644 index 00000000..82ce33e0 --- /dev/null +++ b/test_utils/mock_es_backend.py @@ -0,0 +1,58 @@ +""" +Mock Elasticsearch Backend. +""" + +from typing import Any +from forum.search.backend import ElasticsearchBackend + + +class MockElasticsearchBackend(ElasticsearchBackend): + """ + Mocked class for ElasticsearchBackend to return dummy values. + + Since we are using fixtures for the search API in tests, these methods + are overridden to provide mocked behavior without performing actual operations. 
+ """ + + def rebuild_indices( + self, batch_size: int = 500, extra_catchup_minutes: int = 5 + ) -> None: + """Mock method for rebuilding Elasticsearch indices.""" + + def create_indices(self) -> list[str]: + """Mock method for creating Elasticsearch indices.""" + return [] + + def delete_index(self, name: str) -> None: + """Mock method for deleting an Elasticsearch index.""" + + def delete_unused_indices(self) -> int: + """Mock method for deleting unused Elasticsearch indices.""" + return 0 + + def move_alias( + self, alias_name: str, index_name: str, force_delete: bool = False + ) -> None: + """Mock method for moving Elasticsearch aliases.""" + + def refresh_indices(self) -> None: + """Mock method for refreshing Elasticsearch indices.""" + + def initialize_indices(self, force_new_index: bool = False) -> None: + """Mock method for initializing Elasticsearch indices.""" + + def validate_indices(self) -> None: + """Mock method for validating Elasticsearch indices.""" + + def update_document( + self, index_name: str, doc_id: str, update_data: dict[str, Any] + ) -> None: + """Mock method for updating a document in Elasticsearch.""" + + def delete_document(self, index_name: str, doc_id: str) -> None: + """Mock method for deleting a document from Elasticsearch.""" + + def index_document( + self, index_name: str, doc_id: str, document: dict[str, Any] + ) -> None: + """Mock method for indexing a document in Elasticsearch.""" diff --git a/tests/conftest.py b/tests/conftest.py index 969a4679..b5868614 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,15 +1,16 @@ -# conftest.py """ Init file for tests. """ -from typing import Any +from typing import Any, Generator +from unittest.mock import patch import mongomock import pytest from pymongo import MongoClient from test_utils.client import APIClient +from test_utils.mock_es_backend import MockElasticsearchBackend @pytest.fixture(autouse=True) @@ -26,3 +27,10 @@ def patch_default_mongo_database(monkeypatch: pytest.MonkeyPatch) -> None: def fixture_api_client() -> APIClient: """Create an API client for testing.""" return APIClient() + + +@pytest.fixture(autouse=True) +def mock_elasticsearch_backend() -> Generator[Any, Any, Any]: + """Mock the dummy elastic search.""" + with patch("forum.search.backend.ElasticsearchBackend", MockElasticsearchBackend): + yield diff --git a/tests/e2e/__init__.py b/tests/e2e/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/e2e/conftest.py b/tests/e2e/conftest.py new file mode 100644 index 00000000..63127219 --- /dev/null +++ b/tests/e2e/conftest.py @@ -0,0 +1,85 @@ +""" +Init file for tests. 
+""" + +import logging +import time + +import pytest +from pymongo.errors import ServerSelectionTimeoutError + +from forum.mongo import get_database +from forum.search.backend import ElasticsearchBackend +from test_utils.client import APIClient + +log = logging.getLogger(__name__) + +ES_TIMEOUT = 60 +MONGO_TIMEOUT = 60 +SLEEP_INTERVAL = 5 + + +@pytest.fixture(name="api_client") +def fixture_api_client() -> APIClient: + """Create an API client for testing.""" + return APIClient() + + +def wait_for_mongodb() -> None: + """Wait for MongoDB to start.""" + db = get_database() + timeout = ES_TIMEOUT + while timeout > 0: + try: + db.command("ping") + log.info("Connected to the MongoDB") + return + except ServerSelectionTimeoutError: + log.info("Waiting for mongodb to connect") + time.sleep(SLEEP_INTERVAL) + timeout -= SLEEP_INTERVAL + raise Exception("Elasticsearch did not start in time") + + +def wait_for_elasticsearch() -> None: + """Wait for ElasticSearch to start.""" + es = ElasticsearchBackend() + timeout = ES_TIMEOUT + while timeout > 0: + if es.client.ping(): + log.info("Connected to the Elastic Search") + return + log.info("Waiting for elasticsearch to connect") + time.sleep(SLEEP_INTERVAL) + timeout -= SLEEP_INTERVAL + raise Exception("Elasticsearch did not start in time") + + +@pytest.fixture(autouse=True) +def initialize_indices() -> None: + """Initialize Elasticsearch indices.""" + wait_for_elasticsearch() + es = ElasticsearchBackend() + es.client.indices.delete(index="*") + es.initialize_indices() + + +@pytest.fixture(autouse=True) +def mongo_cleanup() -> None: + """Cleanup MongoDB collections after each test.""" + wait_for_mongodb() + db = get_database() + + # Clean up collections after each test case + for collection_name in db.list_collection_names(): + db.drop_collection(collection_name) + + +@pytest.fixture(autouse=True) +def patch_default_mongo_database() -> None: + """Override the patch statement.""" + + +@pytest.fixture(autouse=True) +def mock_elasticsearch_backend() -> None: + """Overide teh mocked backend to use actual backend.""" diff --git a/tests/e2e/docker-compose.yml b/tests/e2e/docker-compose.yml new file mode 100644 index 00000000..feef7b55 --- /dev/null +++ b/tests/e2e/docker-compose.yml @@ -0,0 +1,17 @@ +# Docker Compose service declaration for end-to-end testing. This runs services without +# data persistence. Be careful about losing your data! 
+services: + mongodb: + # https://hub.docker.com/_/mongo/tags + image: docker.io/mongo:7.0.14 + ports: + - 127.0.0.1:27017:27017 + + elasticsearch: + # https://hub.docker.com/_/elasticsearch/tags + image: docker.io/elasticsearch:7.17.23 + ports: + - 127.0.0.1:5200:9200 + environment: + - discovery.type=single-node + - xpack.security.enabled=false diff --git a/tests/e2e/test_search.py b/tests/e2e/test_search.py new file mode 100644 index 00000000..e3e0a7c5 --- /dev/null +++ b/tests/e2e/test_search.py @@ -0,0 +1,657 @@ +""" +Test Search Thread API Endpoints +""" + +import time +from typing import Any, Optional +from urllib.parse import urlencode + +from requests import Response + +from forum.models import Comment, CommentThread, Users +from forum.models.model_utils import mark_as_read +from forum.search.backend import get_search_backend +from test_utils.client import APIClient + + +def perform_search_query(api_client: APIClient, params: dict[str, Any]) -> Response: + """Perform the search query""" + encoded_params = urlencode(params) + return api_client.get_json(f"/api/v2/search/threads?{encoded_params}", {}) + + +def assert_result_total(response: Response, expected_total: int) -> None: + """Assert that the total number of results matches the expected total.""" + assert response.status_code == 200 + result = response.json() + assert result["total_results"] == expected_total + + +def refresh_elastic_search_indices() -> None: + """Refresh Elasticsearch indices.""" + get_search_backend().refresh_indices() + + +def test_invalid_request(api_client: APIClient) -> None: + """ + Test that invalid requests to the search API return a 400 status. + + This test checks that invalid parameters in the search query string + result in a 400 Bad Request response. + """ + + user_id = "1" + course_id = "course-v1:Arbisoft+SE002+2024_S2" + + Users().insert(user_id, username="user1", email="email1") + comment_thread_id = CommentThread().insert( + title="title", + body="Hello World!", + pinned=False, + author_id=user_id, + course_id=course_id, + commentable_id="66b4e0440dead7001deb948b", + author_username="Faraz", + ) + Comment().insert( + body="Hello World!", + course_id=course_id, + comment_thread_id=comment_thread_id, + author_id="1", + author_username="Faraz", + ) + + refresh_elastic_search_indices() + + params = {"course_id": course_id} + response = perform_search_query(api_client, params) + assert response.status_code == 400 + + params = {"text": "foobar", "sort_key": "invalid"} + response = perform_search_query(api_client, params) + assert response.status_code == 400 + + +def test_search_returns_empty_for_deleted_thread(api_client: APIClient) -> None: + """ + Test that searching for a deleted thread returns no results. + + This test checks that after a thread is deleted, it no longer appears + in search results. + """ + + course_id = "course-v1:Arbisoft+SE002+2024_S2" + thread_id = CommentThread().insert( + title="title-1", + course_id=course_id, + body="body-1", + author_id="1", + author_username="test_user", + commentable_id="course", + ) + + CommentThread().delete(thread_id) + + refresh_elastic_search_indices() + + params = {"course_id": course_id, "text": "title-1", "sort_key": "date"} + response = perform_search_query(api_client, params) + + assert_result_total(response, 0) + + +def test_search_returns_only_updated_thread(api_client: APIClient) -> None: + """ + Test that searching for a thread returns only the updated version. 
+ + This test checks that after a thread is updated, the search results reflect + the updated title and not the original one. + """ + + original_title = "title-original" + updated_title = "updated-title" + course_id = "course-v1:Arbisoft+SE002+2024_S2" + + thread_id = CommentThread().insert( + title=original_title, + course_id=course_id, + body="body-1", + author_id="1", + author_username="test_user", + commentable_id="course", + ) + CommentThread().update(thread_id=thread_id, title=updated_title) + + refresh_elastic_search_indices() + + params = {"course_id": course_id, "text": original_title} + + response = perform_search_query(api_client, params) + assert_result_total(response, 0) + + params = {"course_id": course_id, "text": updated_title} + response = perform_search_query(api_client, params) + assert_result_total(response, 1) + + +def test_search_returns_empty_for_deleted_comment(api_client: APIClient) -> None: + """ + Test that searching for a deleted comment returns no results. + + This test checks that after a comment is deleted, it no longer appears + in search results. + """ + + course_id = "course-v1:Arbisoft+SE002+2024_S2" + thread_id = CommentThread().insert( + title="thread-1", + course_id=course_id, + body="thread-body", + author_id="1", + author_username="test_user", + commentable_id="course", + ) + comment_id = Comment().insert( + body="comment-body", + course_id=course_id, + comment_thread_id=thread_id, + author_id="1", + ) + Comment().delete(comment_id) + + refresh_elastic_search_indices() + + params = {"course_id": course_id, "text": "comment-body", "sort_key": "date"} + response = perform_search_query(api_client, params) + + assert_result_total(response, 0) + + +def test_search_returns_only_updated_comment(api_client: APIClient) -> None: + """ + Test that searching for a comment returns only the updated version. + + This test checks that after a comment is updated, the search results reflect + the updated text and not the original one. + """ + + original_comment = "comment-original" + updated_comment = "comment-updated" + course_id = "course-v1:Arbisoft+SE002+2024_S2" + + thread_id = CommentThread().insert( + title="thread-1", + course_id=course_id, + body="thread-body", + author_id="1", + author_username="test_user", + commentable_id="course", + ) + comment_id = Comment().insert( + body=original_comment, + course_id=course_id, + comment_thread_id=thread_id, + author_id="1", + ) + + Comment().update(comment_id=comment_id, body=updated_comment) + refresh_elastic_search_indices() + + params = {"course_id": course_id, "text": original_comment} + response = perform_search_query(api_client, params) + assert_result_total(response, 0) + + params = {"course_id": course_id, "text": updated_comment} + response = perform_search_query(api_client, params) + assert_result_total(response, 1) + + +def create_threads_and_comments_for_filter_tests( + course_id_0: str, course_id_1: str +) -> tuple[list[str], dict[str, Any]]: + """ + Create a set of threads and comments for testing various filter conditions. + Returns a list of thread IDs and a dictionary mapping thread IDs to their associated comment IDs. 
+ """ + threads_ids = [] + threads_comments: dict[str, Any] = {} + for i in range(35): + context = "standalone" if i > 29 else "course" + group_id = i % 5 + thread_id = CommentThread().insert( + title=f"title-{i}", + body="text", + author_id="1", + course_id=course_id_0 if i % 2 == 0 else course_id_1, + commentable_id=f"commentable{i % 3}", + context=context, + group_id=group_id, + ) + threads_ids.append(thread_id) + + if i < 2: + comment_id = Comment().insert( + body="objectionable", + course_id=course_id_0 if i % 2 == 0 else course_id_1, + comment_thread_id=thread_id, + author_id="1", + ) + Comment().update(comment_id=comment_id, abuse_flaggers=["1"]) + comment_ids = threads_comments.get(thread_id, []) + comment_ids.append(comment_id) + threads_comments[thread_id] = comment_ids + + if i in [0, 2, 4]: + CommentThread().update(thread_id=thread_id, thread_type="question") + comment_id = Comment().insert( + body="response", + course_id=course_id_0 if i % 2 == 0 else course_id_1, + comment_thread_id=thread_id, + author_id="1", + ) + comment_ids = threads_comments.get(thread_id, []) + comment_ids.append(comment_id) + threads_comments[thread_id] = comment_ids + + return threads_ids, threads_comments + + +# The test covers all the filters and making this modular leads to more complex structure. +# pylint: disable=too-many-statements +def test_filter_threads(api_client: APIClient) -> None: + """ + Test various filtering options for threads, including course_id, context, flagged, unanswered, group_id, + commentable_id, and combinations of these filters. Asserts that the correct threads are returned for each filter. + """ + course_id_0 = "course-v1:Arbisoft+SE002+2024_S2" + course_id_1 = "course-v1:Arbisoft+SE003+2024_S2" + + user_id = Users().insert("1", username="user1", email="example@test.com") + threads_ids, threads_comments = create_threads_and_comments_for_filter_tests( + course_id_0, course_id_1 + ) + + refresh_elastic_search_indices() + + # Test filtering by course_id + def assert_response_contains( + response: Response, expected_indexes: list[int] + ) -> None: + assert response.status_code == 200 + threads = response.json()["collection"] + expected_ids = {threads_ids[i] for i in expected_indexes} + actual_ids = {thread["id"] for thread in threads} + assert ( + actual_ids == expected_ids + ), f"Expected {expected_ids}, but got {actual_ids}" + + # Test filtering by course_id + params = {"text": "text", "course_id": course_id_0} + response = perform_search_query(api_client, params) + assert_response_contains(response, [i for i in range(30) if i % 2 == 0]) + + # # Test filtering by context + params = {"text": "text", "context": "standalone"} + response = perform_search_query(api_client, params) + assert_response_contains(response, list(range(30, 35))) + + # Test filtering with unread filter + user = Users().get(_id=user_id) or {} + thread = CommentThread().get(_id=threads_ids[0]) or {} + mark_as_read(user, thread) + params = { + "text": "text", + "course_id": course_id_0, + "user_id": user_id, + "unread": "True", + } + response = perform_search_query(api_client, params) + assert_response_contains(response, [i for i in range(1, 30) if i % 2 == 0]) + + # Test filtering with flagged filter + params = {"text": "text", "course_id": course_id_0, "flagged": "True"} + response = perform_search_query(api_client, params) + assert_response_contains(response, [0]) + + # Test filtering with unanswered filter + params = {"text": "text", "course_id": course_id_0, "unanswered": "True"} + response = 
perform_search_query(api_client, params) + assert_response_contains(response, [0, 2, 4]) + + # Test filtering with unanswered filter and group_id + params = { + "text": "text", + "course_id": course_id_0, + "unanswered": "True", + "group_id": "2", + } + response = perform_search_query(api_client, params) + assert_response_contains(response, [0, 2]) + + params = { + "text": "text", + "course_id": course_id_0, + "unanswered": "True", + "group_id": "4", + } + response = perform_search_query(api_client, params) + assert_response_contains(response, [0, 4]) + + comment = threads_comments[threads_ids[4]][0] + Comment().update(comment_id=comment, endorsed=True) + refresh_elastic_search_indices() + + response = perform_search_query(api_client, params) + assert_response_contains(response, [0]) + + # Test filtering by commentable_id + params = {"text": "text", "commentable_id": "commentable0"} + response = perform_search_query(api_client, params) + assert_response_contains(response, [i for i in range(30) if i % 3 == 0]) + + # Test filtering by commentable_ids + params = {"text": "text", "commentable_ids": "commentable0,commentable1"} + response = perform_search_query(api_client, params) + assert_response_contains(response, [i for i in range(30) if i % 3 in [0, 1]]) + + # Test filtering by group_id + params = {"text": "text", "group_id": "1"} + response = perform_search_query(api_client, params) + assert_response_contains(response, [i for i in range(30) if i % 5 in [0, 1]]) + + # Test filtering by group_ids + params = {"text": "text", "group_ids": "1,2"} + response = perform_search_query(api_client, params) + assert_response_contains(response, [i for i in range(30) if i % 5 in [0, 1, 2]]) + + # Test filtering by all filters combined + params = { + "text": "text", + "course_id": course_id_0, + "commentable_id": "commentable0", + "group_id": "1", + } + response = perform_search_query(api_client, params) + assert_response_contains(response, [0, 6]) + + +def test_pagination(api_client: APIClient) -> None: + """ + Test pagination of search results. Ensures that results are correctly paginated and that the order of + threads is as expected across different pages. + """ + course_id = "course-v1:Arbisoft+SE002+2024_S2" + + threads_ids = [] + for i in range(50): + thread_id = CommentThread().insert( + title=f"title-{i}", + body="text", + author_id="1", + course_id=course_id, + commentable_id="dummy", + ) + threads_ids.append(thread_id) + # Add a slight delay to ensure created_date is different + time.sleep(0.001) + + refresh_elastic_search_indices() + + def check_pagination(per_page: Optional[int], num_pages: int) -> None: + result_ids = [] + params = {"text": "text"} + if per_page: + params["per_page"] = str(per_page) + + for i in range(1, num_pages + 2): + params["page"] = str(i) + response = perform_search_query(api_client, params) + assert response.status_code == 200 + result = response.json() + result_ids.extend([r["id"] for r in result["collection"]]) + + expected_ids = threads_ids[::-1] + assert result_ids == expected_ids + + check_pagination(1, 50) + check_pagination(30, 2) + check_pagination(None, 3) + + +def test_sorting(api_client: APIClient) -> None: + """ + Test the sorting functionality for threads based on various criteria, such as date, activity, votes, and comments. + Asserts that the threads are sorted correctly according to the specified sorting key. 
+    """
+    course_id = "course-v1:Arbisoft+SE002+2024_S2"
+
+    # Create and save threads
+    threads_ids = []
+    for i in range(6):
+        thread = CommentThread().insert(
+            title=f"title-{i}",
+            body="text",
+            author_id="1",
+            course_id=course_id,
+            commentable_id="dummy",
+        )
+        threads_ids.append(thread)
+        # Add a slight delay to ensure created_date is different
+        time.sleep(0.001)
+
+    # Update specific threads to simulate activity, votes, and comments
+    votes = CommentThread().get_votes_dict(up=["1"], down=[])
+    CommentThread().update(thread_id=threads_ids[1], votes=votes)
+    CommentThread().update(thread_id=threads_ids[2], votes=votes)
+    CommentThread().update(thread_id=threads_ids[1], comments_count=5)
+    CommentThread().update(thread_id=threads_ids[3], comments_count=5)
+
+    refresh_elastic_search_indices()
+
+    def fetch_and_check(sort_key: Optional[str], expected_indexes: list[int]) -> None:
+        params = {"text": "text"}
+        if sort_key:
+            params["sort_key"] = str(sort_key)
+
+        response = perform_search_query(api_client, params)
+        assert_result_total(response, 6)
+        result = response.json()
+        threads = result["collection"]
+        expected_ids = [threads_ids[i] for i in expected_indexes]
+        actual_ids = [thread["id"] for thread in threads]
+        assert (
+            actual_ids == expected_ids
+        ), f"Expected {expected_ids}, but got {actual_ids}"
+
+    # Test various sorting scenarios
+    fetch_and_check("date", [5, 4, 3, 2, 1, 0])
+    fetch_and_check("activity", [5, 4, 3, 2, 1, 0])
+    fetch_and_check("votes", [2, 1, 5, 4, 3, 0])
+    fetch_and_check("comments", [3, 1, 5, 4, 2, 0])
+    fetch_and_check(None, [5, 4, 3, 2, 1, 0])  # Default sorting by date
+
+
+def test_spelling_correction(api_client: APIClient) -> None:
+    """
+    Test the spelling correction feature in search.
+    Verifies that misspelled words in both thread titles and comment bodies are corrected.
+    """
+    commentable_id = "test_commentable"
+    thread_title = "a thread about green artichokes"
+    comment_body = "a comment about greed pineapples"
+
+    thread_id = CommentThread().insert(
+        title=thread_title,
+        body="",
+        author_id="1",
+        course_id="course_id",
+        commentable_id=commentable_id,
+    )
+
+    Comment().insert(
+        body=comment_body,
+        course_id="course_id",
+        comment_thread_id=thread_id,
+        author_id="1",
+    )
+    refresh_elastic_search_indices()
+
+    def check_correction(original_text: str, corrected_text: Optional[str]) -> None:
+        params = {"text": original_text}
+        response = perform_search_query(api_client, params)
+        assert response.status_code == 200
+        result = response.json()
+        assert (
+            result.get("corrected_text") == corrected_text
+        ), f"Expected '{corrected_text}', but got '{result.get('corrected_text')}'"
+        assert result[
+            "collection"
+        ], f"Expected non-empty collection for '{original_text}', but got empty."
+
+    # Test: can correct a word appearing only in a comment
+    check_correction("pinapples", "pineapples")
+
+    # Test: can correct a word appearing only in a thread
+    check_correction("arichokes", "artichokes")
+
+    # Test: can correct a word appearing in both a comment and a thread
+    check_correction("abot", "about")
+
+    # Test: can correct a word with multiple errors
+    check_correction("artcokes", "artichokes")
+
+    # Test: can correct misspellings in different terms in the same search
+    check_correction("comment abot pinapples", "comment about pineapples")
+
+    # Test: does not correct a word that appears in a thread but has a correction and no matches in comments
+    check_correction("green", None)
+
+    # Test: does not correct a word that appears in a comment but has a correction and no matches in threads
+    check_correction("greed", None)
+
+
+def test_spelling_correction_with_must_clause(api_client: APIClient) -> None:
+    """
+    Test the spelling correction feature together with the must clause in the search.
+    Verifies that even if the text matches existing threads, the search still applies
+    the other params in the query, i.e. course_id.
+    """
+    course_id = "course_id"
+
+    # Add documents containing a word that is close to our search term
+    # but that do not match our filter criteria; because we currently only
+    # consider the top suggestion returned by Elasticsearch without regard
+    # to the filter, and that suggestion in this case does not match any
+    # results, we should get back no results and no correction.
+    for _ in range(10):
+        CommentThread().insert(
+            title="abbot",
+            body="text",
+            author_id="1",
+            course_id="other_course_id",
+            commentable_id="other_commentable_id",
+        )
+    refresh_elastic_search_indices()
+
+    params = {"text": "abot", "course_id": course_id}
+    response = perform_search_query(api_client, params)
+    assert response.status_code == 200
+    result = response.json()
+    corrected_text = result.get("corrected_text")
+    assert (
+        corrected_text is None
+    ), f"Expected 'corrected_text' to be None, but got a value '{corrected_text}'."
+    assert not result["collection"], "Expected an empty collection, but got results."
+
+
+def test_total_results_and_num_pages(api_client: APIClient) -> None:
+    """
+    Test the total number of results and pagination of search results.
+    Ensures that the total count of search results and the number of pages are calculated
+    correctly based on varying text patterns in threads.
+ """ + course_id = "test/course/id" + + threads_ids = [] + + # Creating 100 comments with varying text patterns + for i in range(1, 101): + text = "all" + if i % 2 == 0: + text += " half" + if i % 4 == 0: + text += " quarter" + if i % 10 == 0: + text += " tenth" + if i == 100: + text += " one" + + # Create the comment + thread_id = CommentThread().insert( + title=f"title-{i}", + body=text, + course_id=course_id, + author_id="1", + commentable_id="course", + ) + threads_ids.append(thread_id) + + # Refresh Elasticsearch indices to ensure all comments are searchable + refresh_elastic_search_indices() + + def test_text( + text: str, expected_total_results: int, expected_num_pages: int + ) -> None: + params = {"course_id": course_id, "text": text, "per_page": "10"} + response = perform_search_query(api_client, params) + assert response.status_code == 200 + result = response.json() + assert ( + result["total_results"] == expected_total_results + ), f"Expected total_results {expected_total_results}, but got {result['total_results']}" + assert ( + result["num_pages"] == expected_num_pages + ), f"Expected num_pages {expected_num_pages}, but got {result['num_pages']}" + + # Running the tests + test_text("all", 100, 10) + test_text("half", 50, 5) + test_text("quarter", 25, 3) + test_text("tenth", 10, 1) + test_text("one", 1, 1) + + +def test_unicode_data(api_client: APIClient) -> None: + """ + Test the handling of Unicode characters in search queries. Verifies that threads containing Unicode characters + are searchable and return correct results when queried with ASCII search terms. + """ + text = "␎ⶀⅰ⑀⍈┣♲⺝" + search_term = "artichoke" + + # Create a comment thread and a comment containing the specified text + thread_id = CommentThread().insert( + title="A thread title", + body=f"{search_term} {text}", + author_id="1", + course_id="course-v1:Arbisoft+SE002+2024_S2", + commentable_id="course", + ) + Comment().insert( + body=text, + course_id="course-v1:Arbisoft+SE002+2024_S2", + comment_thread_id=thread_id, + author_id="1", + ) + + # Refresh Elasticsearch indices to make the new data searchable + refresh_elastic_search_indices() + + # Perform the search with the ASCII term + params = {"course_id": "course-v1:Arbisoft+SE002+2024_S2", "text": search_term} + response = perform_search_query(api_client, params) + + # Check that the response is OK and that exactly one result is returned + assert response.status_code == 200 + json = response.json()["collection"] + assert len(json) == 1, f"Expected 1 result, but got {len(json)}" diff --git a/tests/e2e/test_users.py b/tests/e2e/test_users.py new file mode 100644 index 00000000..bf615f00 --- /dev/null +++ b/tests/e2e/test_users.py @@ -0,0 +1,678 @@ +""" +E2E testcases. 
+""" + +import random +import time +from abc import ABCMeta +from typing import Any, Optional + +import pytest +from faker import Faker + +from forum.models import Comment, CommentThread, Users +from forum.models.model_utils import build_course_stats +from test_utils.client import APIClient + +fake = Faker() + + +def setup_10_threads(author_id: str, author_username: str) -> list[str]: + """Create 10 threads for a user.""" + ids = [] + for thread in range(10): + thread_id = CommentThread().insert( + title=f"Test Thread {thread}", + body="This is a test thread", + course_id="course1", + commentable_id="commentable1", + author_id=author_id, + author_username=author_username, + ) + Comment().insert( + body="This is a test comment", + course_id="course1", + author_id=author_id, + comment_thread_id=str(thread_id), + author_username=author_username, + ) + ids.append(thread_id) + return ids + + +def add_flags( + model: ABCMeta, + content_data: Optional[dict[str, Any]], + expected_data: dict[str, Any], +) -> None: + """Add abuse flags to the content and update expected data.""" + if not content_data: + return + + abuse_flaggers = list(range(1, random.randint(0, 3))) + historical_abuse_flaggers = list(range(1, random.randint(0, 2))) + + model().update( + str(content_data["_id"]), + abuse_flaggers=abuse_flaggers, + historical_abuse_flaggers=historical_abuse_flaggers, + ) + + expected_data[content_data["author_id"]]["active_flags"] += ( + 1 if abuse_flaggers else 0 + ) + expected_data[content_data["author_id"]]["inactive_flags"] += ( + 1 if historical_abuse_flaggers else 0 + ) + + +def build_structure_and_response( + course_id: str, + authors: list[dict[str, Any]], + build_initial_stats: bool = True, + with_timestamps: bool = False, +) -> dict[str, dict[str, Any]]: + """Build the content structure and expected response.""" + + assert authors is not None + assert not any(not item for item in authors) + + expected_data: dict[str, dict[str, Any]] = { + author["external_id"]: { + "username": author["username"], + "active_flags": 0, + "inactive_flags": 0, + "threads": 0, + "responses": 0, + "replies": 0, + } + for author in authors + } + + for _ in range(10): + thread_author = random.choice(authors) + expected_data[thread_author["external_id"]]["threads"] += 1 + if with_timestamps: + expected_data[thread_author["external_id"]]["last_activity_at"] = ( + time.strftime("%Y-%m-%dT%H:%M:%SZ") + ) + thread_id = CommentThread().insert( + title=fake.word(), + body=fake.sentence(), + course_id=course_id, + commentable_id="course", + author_id=thread_author["external_id"], + ) + thread = CommentThread().get(thread_id) or {} + + add_flags(CommentThread, thread, expected_data) + + for _ in range(5): + comment_author = random.choice(authors) + expected_data[comment_author["external_id"]]["responses"] += 1 + if with_timestamps: + expected_data[comment_author["external_id"]]["last_activity_at"] = ( + time.strftime("%Y-%m-%dT%H:%M:%SZ") + ) + comment_id = Comment().insert( + body=fake.sentence(), + course_id=course_id, + author_id=comment_author["external_id"], + comment_thread_id=thread_id, + ) + comment = Comment().get(comment_id) or {} + + add_flags(Comment, comment, expected_data) + + for _ in range(2): + reply_author = random.choice(authors) + expected_data[reply_author["external_id"]]["replies"] += 1 + if with_timestamps: + expected_data[reply_author["external_id"]]["last_activity_at"] = ( + time.strftime("%Y-%m-%dT%H:%M:%SZ") + ) + + reply_id = Comment().insert( + body=fake.sentence(), + course_id=course_id, + 
author_id=reply_author["external_id"], + parent_id=str(comment["_id"]), + comment_thread_id=thread_id, + ) + reply = Comment().get(reply_id) or {} + + add_flags(Comment, reply, expected_data) + + if build_initial_stats: + for author in authors: + build_course_stats(author["_id"], course_id) + + return expected_data + + +@pytest.mark.parametrize("sort_key", [None, "recency", "flagged"]) +def test_get_user_stats(api_client: Any, sort_key: Optional[str]) -> None: + """Test retrieving user stats with various sorting options.""" + course_id = fake.word() + authors_ids = [ + Users().insert(external_id=f"{i}", username=f"author-{i}") for i in range(1, 7) + ] + authors = [Users().get(author_id) or {} for author_id in authors_ids] + + build_structure_and_response(course_id, authors) + + params = {"sort_key": sort_key, "with_timestamps": "true"} + response = api_client.get_json(f"/api/v2/users/{course_id}/stats", params) + assert response.status_code == 200 + + res_data = response.json()["user_stats"] + + if sort_key == "recency": + expected_order = sorted( + res_data, key=lambda x: (x["last_activity_at"], x["username"]), reverse=True + ) + elif sort_key == "flagged": + expected_order = sorted( + res_data, + key=lambda x: (x["active_flags"], x["inactive_flags"], x["username"]), + reverse=True, + ) + else: + expected_order = sorted( + res_data, + key=lambda x: (x["threads"], x["responses"], x["replies"], x["username"]), + reverse=True, + ) + + assert res_data == expected_order + + +def test_stats_for_user_with_no_activity(api_client: Any) -> None: + """Test handling stats for user with no activity.""" + invalid_course_id = "course-v1:edX+DNE+Not_EXISTS" + + response = api_client.get_json( + f"/api/v2/users/{invalid_course_id}/stats", params={} + ) + assert response.status_code == 200 + + res_data = response.json()["user_stats"] + assert res_data == [] + + +def test_user_stats_filtered_by_user(api_client: Any) -> None: + """Test returning user stats filtered by usernames with default/activity sort.""" + course_id = fake.word() + + # Create some users + authors_ids = [ + Users().insert(external_id=f"{i}", username=f"userauthor-{i}") + for i in range(1, 4) + ] + authors = [Users().get(author_id) or {} for author_id in authors_ids] + + # Build structure and response + full_data = build_structure_and_response(course_id, authors) + + # Randomly sample and shuffle usernames + usernames = random.sample([f"userauthor-{i}" for i in range(1, 4)], 2) + + usernames_str = ",".join(usernames) + + # Get user stats filtered by usernames + response = api_client.get_json( + f"/api/v2/users/{course_id}/stats?usernames={usernames_str}", params={} + ) + assert response.status_code == 200 + + res_data = response.json()["user_stats"] + + # Sort the map entries using the usernames order + expected_result = sorted( + [data for data in full_data.values() if data["username"] in usernames], + key=lambda x: usernames.index(x["username"]), + ) + + assert res_data == expected_result + + +def test_user_stats_with_recency_sort(api_client: APIClient) -> None: + """Test returning user stats with recency sort.""" + course_id = fake.word() + # Create some users + authors_ids = [ + Users().insert(external_id=f"author-{i}", username=f"userauthor-{i}") + for i in range(1, 6) + ] + authors = [Users().get(author_id) or {} for author_id in authors_ids] + + # Build structure with timestamps + build_structure_and_response(course_id, authors, with_timestamps=True) + + # Get user stats sorted by recency + response = api_client.get_json( + 
f"/api/v2/users/{course_id}/stats", + params={"sort_key": "recency", "with_timestamps": "true"}, + ) + assert response.status_code == 200 + + res_data = response.json()["user_stats"] + + # Sort by last_activity_at and username in reverse order + sorted_order = sorted( + res_data, key=lambda x: (x["last_activity_at"], x["username"]), reverse=True + ) + + assert res_data == sorted_order + + +@pytest.fixture(name="original_stats") +def get_original_stats(api_client: APIClient) -> tuple[dict[str, Any], str, str]: + """Setup the initial data structure and save stats.""" + course_id = fake.word() + authors_ids = [ + Users().insert(external_id=f"{i}", username=f"userauthor-{i}") + for i in range(1, 4) + ] + authors = [Users().get(author_id) or {} for author_id in authors_ids] + + build_structure_and_response(course_id, authors) + + response = api_client.get_json(f"/api/v2/users/{course_id}/stats", params={}) + assert response.status_code == 200 + + res_data = response.json()["user_stats"] + + # Save original stats for the first entry + org_stats = res_data[0] + org_username = org_stats["username"] + + return org_stats, org_username, course_id + + +def get_new_stats( + api_client: APIClient, + course_id: str, + original_username: str, +) -> Optional[dict[str, Any]]: + """Fetch the new stats after performing actions.""" + response = api_client.get_json(f"/api/v2/users/{course_id}/stats", params={}) + assert response.status_code == 200 + + res_data = response.json()["user_stats"] + return next( + (stat for stat in res_data if stat["username"] == original_username), None + ) + + +def test_handles_deleting_threads( + api_client: APIClient, + original_stats: tuple[dict[str, Any], str, str], +) -> None: + """Test handling deleting threads.""" + stats, username, course_id = original_stats + + thread = CommentThread().find_one( + {"author_username": username, "course_id": course_id} + ) + assert thread is not None + + response = api_client.delete_json(f"/api/v2/threads/{str(thread['_id'])}") + assert response.status_code == 200 + + new_stats = get_new_stats(api_client, course_id, username) + + assert new_stats is not None + assert new_stats["threads"] == stats["threads"] - 1 + assert new_stats["responses"] <= stats["responses"] + assert new_stats["replies"] <= stats["replies"] + + +def test_handles_updating_threads( + api_client: APIClient, + original_stats: tuple[dict[str, Any], str, str], +) -> None: + """Test handling updating threads.""" + stats, username, course_id = original_stats + + thread = CommentThread().find_one( + {"author_username": username, "course_id": course_id} + ) + assert thread is not None + + response = api_client.put_json( + f"/api/v2/threads/{thread['_id']}", + data={ + "body": "new body", + "title": "new title", + "commentable_id": "new_commentable_id", + "thread_type": "question", + "user_id": 1, + }, + ) + assert response.status_code == 200 + + new_stats = get_new_stats(api_client, course_id, username) + + assert new_stats is not None + assert new_stats["threads"] == stats["threads"] + assert new_stats["responses"] == stats["responses"] + assert new_stats["replies"] == stats["replies"] + + +def test_handles_adding_threads( + api_client: APIClient, + original_stats: tuple[dict[str, Any], str, str], +) -> None: + """Test handling adding threads.""" + stats, username, course_id = original_stats + + response = api_client.post_json( + "/api/v2/course/threads", + data={ + "title": "new thread", + "body": "new thread", + "course_id": course_id, + "user_id": 
username.replace("userauthor-", ""), + }, + ) + assert response.status_code == 200 + + new_stats = get_new_stats(api_client, course_id, username) + + assert new_stats is not None + assert new_stats["threads"] == stats["threads"] + 1 + assert new_stats["responses"] == stats["responses"] + assert new_stats["replies"] == stats["replies"] + + +def test_handles_deleting_responses( + api_client: APIClient, original_stats: tuple[dict[str, Any], str, str] +) -> None: + """Test handling deleting responses.""" + stats, username, course_id = original_stats + + comment = Comment().find_one( + { + "author_username": username, + "course_id": course_id, + "parent_id": None, + } + ) + assert comment is not None + + response = api_client.delete_json(f"/api/v2/comments/{str(comment['_id'])}") + assert response.status_code == 200 + + new_stats = get_new_stats(api_client, course_id, username) + + assert new_stats is not None + assert new_stats["threads"] == stats["threads"] + assert new_stats["responses"] == stats["responses"] - 1 + assert new_stats["replies"] <= stats["replies"] + + +def test_handles_updating_responses( + api_client: APIClient, + original_stats: tuple[dict[str, Any], str, str], +) -> None: + """Test handling updating responses.""" + stats, username, course_id = original_stats + + comment = Comment().find_one( + { + "author_username": username, + "course_id": course_id, + "parent_id": None, + } + ) + assert comment is not None + + response = api_client.put_json( + f"/api/v2/comments/{comment['_id']}", + data={"body": "new body", "user_id": "1"}, + ) + assert response.status_code == 200 + + new_stats = get_new_stats(api_client, course_id, username) + + assert new_stats is not None + assert new_stats["threads"] == stats["threads"] + assert new_stats["responses"] == stats["responses"] + assert new_stats["replies"] == stats["replies"] + + +def test_handles_deleting_replies( + api_client: APIClient, + original_stats: tuple[dict[str, Any], str, str], +) -> None: + """Test handling deleting replies.""" + stats, username, course_id = original_stats + + # Find a reply (comment with a parent_id) + reply = Comment().find_one( + { + "author_username": username, + "course_id": course_id, + "parent_id": {"$ne": None}, + } + ) + + assert reply is not None + + # Delete the reply + response = api_client.delete_json(f"/api/v2/comments/{str(reply['_id'])}") + assert response.status_code == 200 + # Fetch new stats + new_stats = get_new_stats(api_client, course_id, username) + + # Thread count should stay the same + assert new_stats is not None + assert new_stats["threads"] == stats["threads"] + assert new_stats["responses"] == stats["responses"] + assert new_stats["replies"] == stats["replies"] - 1 + + +def test_handles_removing_flags( + api_client: APIClient, + original_stats: tuple[dict[str, Any], str, str], +) -> None: + """Test handling removing abuse flags.""" + stats, username, course_id = original_stats + + # Find a comment with existing abuse flaggers + comment = Comment().find_one( + { + "author_username": username, + "course_id": course_id, + "abuse_flaggers": {"$ne": []}, + } + ) + assert comment is not None + + # Set abuse flaggers to two users + Comment().update(str(comment["_id"]), abuse_flaggers=["1", "2"]) + + # Remove the flag for the first user + response = api_client.put_json( + f"/api/v2/comments/{str(comment['_id'])}/abuse_unflag", + data={"user_id": "1"}, + ) + assert response.status_code == 200 + + # Fetch new stats, the active flags should stay the same (still one flagger left) + 
new_stats = get_new_stats(api_client, course_id, username)
+
+    assert new_stats is not None
+    assert new_stats["active_flags"] == stats["active_flags"]
+
+    # Remove the flag for the second user
+    response = api_client.put_json(
+        f"/api/v2/comments/{str(comment['_id'])}/abuse_unflag",
+        data={"user_id": "2"},
+    )
+    assert response.status_code == 200
+
+    # Fetch stats again, now the active flags should reduce by one
+    response = api_client.get_json(f"/api/v2/users/{course_id}/stats", params={})
+    assert response.status_code == 200
+    res_data = response.json()["user_stats"]
+    new_stats = next(stats for stats in res_data if stats["username"] == username)
+
+    assert new_stats is not None
+    assert new_stats["active_flags"] == stats["active_flags"] - 1
+
+
+def test_build_course_stats_with_anonymous_posts(api_client: APIClient) -> None:
+    """Test that anonymous posts are not counted in user stats; only the regular post is."""
+
+    # Create a test user
+    user_id = Users().insert(external_id="3", username="user3")
+    course_id = "course-1"
+
+    threads_ids = []
+
+    # Create threads
+    for i in range(3):
+        response = api_client.post_json(
+            "/api/v2/course/threads",
+            data={
+                "title": f"thread_{i}",
+                "body": f"thread {i} by author",
+                "course_id": course_id,
+                "user_id": user_id,
+                "anonymous_to_peers": "true" if i == 0 else "false",
+                "anonymous": "true" if i == 1 else "false",
+            },
+        )
+        assert response.status_code == 200
+        threads_ids.append(response.json()["id"])
+
+    # Fetch the user stats
+    response = api_client.get_json(f"/api/v2/users/{course_id}/stats", {})
+    assert response.status_code == 200
+
+    # Parse response data
+    stats = response.json()
+
+    # Assert that only the non-anonymous post is included in stats
+    assert stats["user_stats"][0]["replies"] == 0
+    assert stats["user_stats"][0]["responses"] == 0
+    assert stats["user_stats"][0]["threads"] == 1
+
+
+def test_update_user_stats(api_client: APIClient) -> None:
+    """Test that user stats are updated when requested."""
+    # Create a test course ID and users
+    course_id = fake.word()
+    authors_ids = [
+        Users().insert(external_id=f"author-{i}", username=f"author-{i}")
+        for i in range(1, 7)
+    ]
+    authors = [Users().get(author_id) or {} for author_id in authors_ids]
+    # Build the expected data without initial stats
+    expected_data = build_structure_and_response(
+        course_id, authors, build_initial_stats=False
+    )
+
+    # Sort the data for expected result (threads, responses, replies)
+    expected_result = sorted(
+        expected_data.values(),
+        key=lambda val: (val["threads"], val["responses"], val["replies"]),
+        reverse=True,
+    )
+
+    # Fetch user stats (before updating)
+    response = api_client.get_json(f"/api/v2/users/{course_id}/stats", {})
+    assert response.status_code == 200
+    res = response.json()
+    assert res["user_stats"] != expected_result  # User stats should not be updated yet
+
+    # Request to update user stats
+    response = api_client.post_json(f"/api/v2/users/{course_id}/update_stats", {})
+    assert response.status_code == 200
+    res = response.json()
+    assert res["user_count"] == 6  # Confirm all 6 users are counted
+
+    # Fetch user stats (after updating)
+    response = api_client.get_json(f"/api/v2/users/{course_id}/stats", {})
+    assert response.status_code == 200
+    res = response.json()
+
+    assert (
+        res["user_stats"] == expected_result
+    )  # User stats should now match the expected data
+
+
+def test_mark_thread_as_read(api_client: APIClient) -> None:
+    """Test that a thread is marked as read for the user."""
+    user_id = "1"
+
username = "user1" + Users().insert(external_id=user_id, username=username) + + # Setup 10 threads for testing + threads_ids = setup_10_threads(user_id, username) + thread = CommentThread().get(threads_ids[0]) or {} + + # Create a test user + user_id = Users().insert(external_id="42", username="user-42") + + # Mark the first thread as read + response = api_client.post_json( + f"/api/v2/users/{user_id}/read", + data={"source_type": "thread", "source_id": str(thread["_id"])}, + ) + assert response.status_code == 200 + + # Reload the user and verify read state + user = Users().get(user_id) or {} + read_states = [ + course_state + for course_state in user["read_states"] + if course_state["course_id"] == thread["course_id"] + ] + read_date = read_states[0]["last_read_times"][str(thread["_id"])] + + assert ( + read_date >= thread["updated_at"] + ) # Verify the read date is on or after the thread's updated_at + + +def test_retire_user_inactive(api_client: APIClient) -> None: + """Test retiring an inactive user.""" + + user_id = Users().insert(external_id="1", username="user1") + user = Users().get(user_id) or {} + + # Verify user is not subscribed to any threads + response = api_client.get_json( + f"/api/v2/users/{user['external_id']}/subscribed_threads", + params={"course_id": "1"}, + ) + assert response.status_code == 200 + assert response.json()["thread_count"] == 0 + + response = api_client.get_json( + f"/api/v2/users/{user['external_id']}/subscribed_threads", + params={"course_id": "2"}, + ) + assert response.status_code == 200 + assert response.json()["thread_count"] == 0 + + # Retire the user + retired_username = "retired_username_ABCD1234" + response = api_client.post_json( + f"/api/v2/users/{user['external_id']}/retire", + data={"retired_username": retired_username}, + ) + assert response.status_code == 200 + + user = Users().get(user_id) or {} + assert user["username"] == retired_username + assert user["email"] == "" + + # Check user's comments are blanked + comments = list(Comment().find({"author_username": retired_username})) + list( + CommentThread().find({"author_username": retired_username}) + ) + assert len(comments) == 0 diff --git a/tests/test_views/test_search.py b/tests/test_views/test_search.py index f22942ec..ed24b2a6 100644 --- a/tests/test_views/test_search.py +++ b/tests/test_views/test_search.py @@ -20,30 +20,19 @@ """ import time -import urllib.parse from typing import Any, Optional from unittest.mock import patch +from urllib.parse import urlencode -import pytest -from django.conf import settings from requests import Response from forum.models import Comment, CommentThread, Users from forum.models.model_utils import mark_as_read -from forum.search.backend import ElasticsearchBackend +from forum.search.backend import get_search_backend from forum.search.comment_search import ThreadSearch from test_utils.client import APIClient -@pytest.fixture(autouse=True) -def initialize_indices() -> None: - """Initialize Elasticsearch indices if Elasticsearch is enabled.""" - if settings.FORUM_ENABLE_ELASTIC_SEARCH: - es = ElasticsearchBackend() - es.client.indices.delete(index="*") - es.initialize_indices() - - def assert_result_total(response: Response, expected_total: int) -> None: """Assert that the total number of results matches the expected total.""" assert response.status_code == 200 @@ -53,7 +42,7 @@ def assert_result_total(response: Response, expected_total: int) -> None: def get_search_response( api_client: APIClient, - query_string: str, + params: dict[str, str], 
get_thread_ids_value: Optional[list[str]] = None, get_suggested_text_value: Optional[str] = "", get_therad_ids_with_corrected_text_values: Optional[list[str]] = None, @@ -62,7 +51,7 @@ def get_search_response( Helper function to patch ThreadSearch methods and get search response. :param api_client: The API client used to make the request. - :param query_string: The query string for the search. + :param params: The query dict for the search. :param get_thread_ids_value: Mocked return value for get_thread_ids. :param get_suggested_text_value: Mocked return value for get_suggested_text. :param get_therad_ids_with_corrected_text_values: Mocked return value for get_thread_ids_with_corrected_text. @@ -74,31 +63,28 @@ def get_search_response( get_therad_ids_with_corrected_text_values or [] ) - if not settings.FORUM_ENABLE_ELASTIC_SEARCH: + with patch.object( + ThreadSearch, "get_thread_ids", return_value=get_thread_ids_value + ): with patch.object( - ThreadSearch, "get_thread_ids", return_value=get_thread_ids_value + ThreadSearch, + "get_suggested_text", + return_value=get_suggested_text_value, ): with patch.object( ThreadSearch, - "get_suggested_text", - return_value=get_suggested_text_value, + "get_thread_ids_with_corrected_text", + return_value=get_therad_ids_with_corrected_text_values, ): - with patch.object( - ThreadSearch, - "get_thread_ids_with_corrected_text", - return_value=get_therad_ids_with_corrected_text_values, - ): - return api_client.get_json( - f"/api/v2/search/threads?{query_string}", {} - ) - - return api_client.get_json(f"/api/v2/search/threads?{query_string}", {}) + encoded_params = urlencode(params) + return api_client.get_json( + f"/api/v2/search/threads?{encoded_params}", {} + ) def refresh_elastic_search_indices() -> None: """Refresh Elasticsearch indices.""" - if settings.FORUM_ENABLE_ELASTIC_SEARCH: - ElasticsearchBackend().refresh_indices() + get_search_backend().refresh_indices() def test_invalid_request(api_client: APIClient) -> None: @@ -133,13 +119,11 @@ def test_invalid_request(api_client: APIClient) -> None: refresh_elastic_search_indices() params = {"course_id": course_id} - query_string = urllib.parse.urlencode(params) - response = get_search_response(api_client, query_string) + response = get_search_response(api_client, params) assert response.status_code == 400 params = {"text": "foobar", "sort_key": "invalid"} - query_string = urllib.parse.urlencode(params) - response = get_search_response(api_client, query_string) + response = get_search_response(api_client, params) assert response.status_code == 400 @@ -166,9 +150,8 @@ def test_search_returns_empty_for_deleted_thread(api_client: APIClient) -> None: refresh_elastic_search_indices() params = {"course_id": course_id, "text": "title-1", "sort_key": "date"} - query_string = urllib.parse.urlencode(params) - response = get_search_response(api_client, query_string, [], "") + response = get_search_response(api_client, params, [], "") assert_result_total(response, 0) @@ -198,14 +181,12 @@ def test_search_returns_only_updated_thread(api_client: APIClient) -> None: refresh_elastic_search_indices() params = {"course_id": course_id, "text": original_title} - query_string = urllib.parse.urlencode(params) - response = get_search_response(api_client, query_string, [], "") + response = get_search_response(api_client, params, [], "") assert_result_total(response, 0) params = {"course_id": course_id, "text": updated_title} - query_string = urllib.parse.urlencode(params) - response = get_search_response(api_client, 
query_string, [thread_id], "") + response = get_search_response(api_client, params, [thread_id], "") assert_result_total(response, 1) @@ -237,8 +218,7 @@ def test_search_returns_empty_for_deleted_comment(api_client: APIClient) -> None refresh_elastic_search_indices() params = {"course_id": course_id, "text": "comment-body", "sort_key": "date"} - query_string = urllib.parse.urlencode(params) - response = get_search_response(api_client, query_string, [], "") + response = get_search_response(api_client, params, [], "") assert_result_total(response, 0) @@ -274,13 +254,11 @@ def test_search_returns_only_updated_comment(api_client: APIClient) -> None: refresh_elastic_search_indices() params = {"course_id": course_id, "text": original_comment} - query_string = urllib.parse.urlencode(params) - response = get_search_response(api_client, query_string, [], "") + response = get_search_response(api_client, params, [], "") assert_result_total(response, 0) params = {"course_id": course_id, "text": updated_comment} - query_string = urllib.parse.urlencode(params) - response = get_search_response(api_client, query_string, [thread_id], "") + response = get_search_response(api_client, params, [thread_id], "") assert_result_total(response, 1) @@ -365,14 +343,12 @@ def assert_response_contains( # Test filtering by course_id params = {"text": "text", "course_id": course_id_0} - query_string = urllib.parse.urlencode(params) - response = get_search_response(api_client, query_string, threads_ids[:30:2]) + response = get_search_response(api_client, params, threads_ids[:30:2]) assert_response_contains(response, [i for i in range(30) if i % 2 == 0]) # Test filtering by context params = {"text": "text", "context": "standalone"} - query_string = urllib.parse.urlencode(params) - response = get_search_response(api_client, query_string, threads_ids[30:35]) + response = get_search_response(api_client, params, threads_ids[30:35]) assert_response_contains(response, list(range(30, 35))) # Test filtering with unread filter @@ -387,8 +363,7 @@ def assert_response_contains( "user_id": user_id, "unread": "true", } - query_string = urllib.parse.urlencode(params) - response = get_search_response(api_client, query_string, threads_ids[:35:2]) + response = get_search_response(api_client, params, threads_ids[:35:2]) assert_response_contains(response, [i for i in range(30) if i % 2 == 0]) mark_as_read(user, thread_course_1) @@ -398,20 +373,17 @@ def assert_response_contains( "user_id": user_id, "unread": "true", } - query_string = urllib.parse.urlencode(params) - response = get_search_response(api_client, query_string, threads_ids[:35:2]) + response = get_search_response(api_client, params, threads_ids[:35:2]) assert_response_contains(response, [i for i in range(1, 30) if i % 2 == 0]) # Test filtering with flagged filter params = {"text": "text", "course_id": course_id_0, "flagged": "True"} - query_string = urllib.parse.urlencode(params) - response = get_search_response(api_client, query_string, threads_ids[:30:2]) + response = get_search_response(api_client, params, threads_ids[:30:2]) assert_response_contains(response, [0]) # Test filtering with unanswered filter params = {"text": "text", "course_id": course_id_0, "unanswered": "True"} - query_string = urllib.parse.urlencode(params) - response = get_search_response(api_client, query_string, threads_ids[:30:2]) + response = get_search_response(api_client, params, threads_ids[:30:2]) assert_response_contains(response, [0, 2, 4]) # Test filtering with unanswered filter and group_id @@ 
-421,8 +393,7 @@ def assert_response_contains( "unanswered": "True", "group_id": "2", } - query_string = urllib.parse.urlencode(params) - response = get_search_response(api_client, query_string, threads_ids[:30:2]) + response = get_search_response(api_client, params, threads_ids[:30:2]) assert_response_contains(response, [0, 2]) params = { @@ -431,41 +402,36 @@ def assert_response_contains( "unanswered": "True", "group_id": "4", } - query_string = urllib.parse.urlencode(params) - response = get_search_response(api_client, query_string, threads_ids[:30:2]) + response = get_search_response(api_client, params, threads_ids[:30:2]) assert_response_contains(response, [0, 4]) comment = threads_comments[threads_ids[4]][0] Comment().update(comment_id=comment, endorsed=True) refresh_elastic_search_indices() - response = get_search_response(api_client, query_string, threads_ids[:30:2]) + response = get_search_response(api_client, params, threads_ids[:30:2]) assert_response_contains(response, [0]) # Test filtering by commentable_id params = {"text": "text", "commentable_id": "commentable0"} - query_string = urllib.parse.urlencode(params) - response = get_search_response(api_client, query_string, threads_ids[::3]) + response = get_search_response(api_client, params, threads_ids[::3]) assert_response_contains(response, [i for i in range(30) if i % 3 == 0]) # Test filtering by commentable_ids params = {"text": "text", "commentable_ids": "commentable0,commentable1"} - query_string = urllib.parse.urlencode(params) response = get_search_response( - api_client, query_string, [threads_ids[i] for i in range(35) if i % 3 in [0, 1]] + api_client, params, [threads_ids[i] for i in range(35) if i % 3 in [0, 1]] ) assert_response_contains(response, [i for i in range(30) if i % 3 in [0, 1]]) # Test filtering by group_id params = {"text": "text", "group_id": "1"} - query_string = urllib.parse.urlencode(params) - response = get_search_response(api_client, query_string, threads_ids) + response = get_search_response(api_client, params, threads_ids) assert_response_contains(response, [i for i in range(30) if i % 5 in [0, 1]]) # Test filtering by group_ids params = {"text": "text", "group_ids": "1,2"} - query_string = urllib.parse.urlencode(params) - response = get_search_response(api_client, query_string, threads_ids) + response = get_search_response(api_client, params, threads_ids) assert_response_contains(response, [i for i in range(30) if i % 5 in [0, 1, 2]]) # Test filtering by all filters combined @@ -475,8 +441,7 @@ def assert_response_contains( "commentable_id": "commentable0", "group_id": "1", } - query_string = urllib.parse.urlencode(params) - response = get_search_response(api_client, query_string, threads_ids[::6]) + response = get_search_response(api_client, params, threads_ids[::6]) assert_response_contains(response, [0, 6]) @@ -510,8 +475,7 @@ def check_pagination(per_page: Optional[int], num_pages: int) -> None: for i in range(1, num_pages + 2): params["page"] = str(i) - query_string = urllib.parse.urlencode(params) - response = get_search_response(api_client, query_string, threads_ids) + response = get_search_response(api_client, params, threads_ids) assert response.status_code == 200 result = response.json() result_ids.extend([r["id"] for r in result["collection"]]) @@ -559,8 +523,7 @@ def fetch_and_check(sort_key: Optional[str], expected_indexes: list[int]) -> Non if sort_key: params["sort_key"] = str(sort_key) - query_string = urllib.parse.urlencode(params) - response = get_search_response(api_client, 
query_string, threads_ids) + response = get_search_response(api_client, params, threads_ids) assert_result_total(response, 6) result = response.json() threads = result["collection"] @@ -612,11 +575,10 @@ def get_threda_ids_for_fixtures(original_text: str) -> list[str]: def check_correction(original_text: str, corrected_text: Optional[str]) -> None: params = {"text": original_text} - query_string = urllib.parse.urlencode(params) get_thread_ids_value = get_threda_ids_for_fixtures(original_text) response = get_search_response( api_client, - query_string, + params, get_thread_ids_value, corrected_text, [thread_id] if corrected_text else [], @@ -676,10 +638,9 @@ def test_spelling_correction_with_mush_clause(api_client: APIClient) -> None: refresh_elastic_search_indices() params = {"text": "abot", "course_id": course_id} - query_string = urllib.parse.urlencode(params) response = get_search_response( api_client, - query_string, + params, ) assert response.status_code == 200 result = response.json() @@ -745,9 +706,8 @@ def test_text( text: str, expected_total_results: int, expected_num_pages: int ) -> None: params = {"course_id": course_id, "text": text, "per_page": "10"} - query_string = urllib.parse.urlencode(params) get_thread_ids_value = get_thread_ids_for_fixture(text) - response = get_search_response(api_client, query_string, get_thread_ids_value) + response = get_search_response(api_client, params, get_thread_ids_value) assert response.status_code == 200 result = response.json() assert ( @@ -793,8 +753,7 @@ def test_unicode_data(api_client: APIClient) -> None: # Perform the search with the ASCII term params = {"course_id": "course-v1:Arbisoft+SE002+2024_S2", "text": search_term} - query_string = urllib.parse.urlencode(params) - response = get_search_response(api_client, query_string, [thread_id]) + response = get_search_response(api_client, params, [thread_id]) # Check that the response is OK and that exactly one result is returned assert response.status_code == 200 diff --git a/tests/test_views/test_users.py b/tests/test_views/test_users.py index f4767664..1b799686 100644 --- a/tests/test_views/test_users.py +++ b/tests/test_views/test_users.py @@ -1,139 +1,32 @@ """Tests for Users apis.""" -import random -from typing import Any - -import pytest - from forum.constants import RETIRED_BODY, RETIRED_TITLE from forum.models import Comment, CommentThread, Contents, Users from forum.models.model_utils import subscribe_user, upvote_content from test_utils.client import APIClient -def setup_10_threads() -> None: +def setup_10_threads(author_id: str, author_username: str) -> list[str]: """Create 10 threads for a user.""" + ids = [] for thread in range(10): thread_id = CommentThread().insert( title=f"Test Thread {thread}", body="This is a test thread", course_id="course1", commentable_id="commentable1", - author_id="test_id", - author_username="test-user", + author_id=author_id, + author_username=author_username, ) Comment().insert( body="This is a test comment", course_id="course1", - author_id="test_id", + author_id=author_id, comment_thread_id=str(thread_id), - author_username="test-user", + author_username=author_username, ) - - -@pytest.fixture(name="build_structure_and_response") -def fixture_build_structure_and_response() -> dict[str, Any]: - """Fixture for creating course stats.""" - course_id = "test_course" - authors = ["author-1", "author-2", "author-3", "author-4", "author-5", "author-6"] - for author in authors: - Users().insert(author, author) - - expected_data: dict[str, dict[str, 
Any]] = { - author: { - "username": author, - "active_flags": 0, - "inactive_flags": 0, - "threads": 0, - "responses": 0, - "replies": 0, - } - for author in authors - } - for _ in range(10): - thread_author = random.choice(authors) - expected_data[thread_author]["threads"] += 1 - thread_id = CommentThread().insert( - title="Test Thread", - body="This is a test thread", - course_id=course_id, - commentable_id="commentable1", - author_id=thread_author, - author_username=thread_author, - ) - abuse_flaggers = random.sample(range(1, 3), random.randint(0, 2)) - historical_abuse_flaggers = random.sample(range(1, 2), random.randint(0, 1)) - CommentThread().update( - thread_id, - abuse_flaggers=[str(x) for x in abuse_flaggers], - historical_abuse_flaggers=[str(x) for x in historical_abuse_flaggers], - ) - if abuse_flaggers: - expected_data[thread_author]["active_flags"] += 1 - if historical_abuse_flaggers: - expected_data[thread_author]["inactive_flags"] += 1 - - for _ in range(5): - comment_author = random.choice(authors) - expected_data[comment_author]["responses"] += 1 - comment_id = Comment().insert( - body="This is a test comment", - course_id=course_id, - author_id=comment_author, - comment_thread_id=str(thread_id), - author_username=comment_author, - ) - abuse_flaggers_comment = random.sample(range(1, 3), random.randint(0, 2)) - historical_abuse_flaggers_comment = random.sample( - range(1, 2), random.randint(0, 1) - ) - Comment().update( - comment_id, - abuse_flaggers=[str(x) for x in abuse_flaggers_comment], - historical_abuse_flaggers=[ - str(x) for x in historical_abuse_flaggers_comment - ], - ) - if abuse_flaggers_comment: - expected_data[comment_author]["active_flags"] += 1 - if historical_abuse_flaggers_comment: - expected_data[comment_author]["inactive_flags"] += 1 - - for _ in range(3): - comment = Comment().get(comment_id) - if not comment: - continue - Comment().update( - comment_id, - child_count=comment["child_count"], - ) - reply_author = random.choice(authors) - expected_data[reply_author]["replies"] += 1 - reply_id = Comment().insert( - body="This is a test comment", - course_id=course_id, - author_id=reply_author, - parent_id=comment_id, - comment_thread_id=str(thread_id), - author_username=reply_author, - ) - abuse_flaggers_reply = random.sample(range(1, 3), random.randint(0, 2)) - historical_abuse_flaggers_reply = random.sample( - range(1, 2), random.randint(0, 1) - ) - Comment().update( - reply_id, - abuse_flaggers=[str(x) for x in abuse_flaggers_reply], - historical_abuse_flaggers=[ - str(x) for x in historical_abuse_flaggers_reply - ], - ) - if abuse_flaggers_reply: - expected_data[reply_author]["active_flags"] += 1 - if historical_abuse_flaggers_reply: - expected_data[reply_author]["inactive_flags"] += 1 - - return expected_data + ids.append(thread_id) + return ids def test_create_user(api_client: APIClient) -> None: @@ -314,7 +207,7 @@ def test_get_active_threads_requires_course_id(api_client: APIClient) -> None: user_id, username, ) - setup_10_threads() + setup_10_threads(user_id, username) response = api_client.get(f"/api/v2/users/{user_id}/active_threads") assert response.status_code == 200 assert response.json() == {} @@ -328,7 +221,7 @@ def test_get_active_threads(api_client: APIClient) -> None: user_id, username, ) - setup_10_threads() + setup_10_threads(user_id, username) course_id = "course1" response = api_client.get( f"/api/v2/users/{user_id}/active_threads?course_id={course_id}", @@ -419,7 +312,7 @@ def 
test_attempts_to_replace_username_and_username_on_content( user_id, username, ) - setup_10_threads() + setup_10_threads(user_id, username) user = Users().get(user_id) new_username = "test_username_replacement" @@ -485,7 +378,7 @@ def test_retire_user(api_client: APIClient) -> None: user_id, username, ) - setup_10_threads() + setup_10_threads(user_id, username) retired_username = "retired_username_ABCD1234" user = Users().get(user_id) assert user @@ -517,7 +410,7 @@ def test_retire_user_with_subscribed_threads(api_client: APIClient) -> None: user_id, username, ) - setup_10_threads() + setup_10_threads(user_id, username) retired_username = "retired_username_ABCD1234" user = Users().get(user_id) assert user @@ -571,128 +464,3 @@ def test_retire_user_with_subscribed_threads(api_client: APIClient) -> None: assert content["title"] == RETIRED_TITLE assert content["body"] == RETIRED_BODY assert content["author_username"] == retired_username - - -def test_update_user_stats( - api_client: APIClient, - build_structure_and_response: dict[str, Any], -) -> None: - """Test update user stats.""" - course_id = "test_course" - expected_data = build_structure_and_response - expected_result = sorted( - expected_data.values(), - key=lambda x: (x["threads"], x["responses"], x["replies"]), - reverse=True, - ) - response = api_client.get(f"/api/v2/users/{course_id}/stats") - assert response.status_code == 200 - res = response.json() - assert res["user_stats"] != expected_result - - response = api_client.post_json(f"/api/v2/users/{course_id}/update_stats", data={}) - assert response.status_code == 200 - res = response.json() - assert res["user_count"] == 6 - - response = api_client.get(f"/api/v2/users/{course_id}/stats") - assert response.status_code == 200 - res = response.json() - assert res["user_stats"] == expected_result - - -def test_returns_users_stats_with_default_activity_sort( - api_client: APIClient, - build_structure_and_response: dict[str, Any], -) -> None: - """Test returns user's stats with default/activity sort.""" - course_id = "test_course" - expected_data = build_structure_and_response - expected_result = sorted( - expected_data.values(), - key=lambda x: (x["threads"], x["responses"], x["replies"], x["username"]), - reverse=True, - ) - response = api_client.post_json(f"/api/v2/users/{course_id}/update_stats", data={}) - assert response.status_code == 200 - res = response.json() - assert res["user_count"] == 6 - - response = api_client.get(f"/api/v2/users/{course_id}/stats") - assert response.status_code == 200 - res = response.json() - assert res["user_stats"] == expected_result - - -def test_handle_stats_for_user_with_no_activity(api_client: APIClient) -> None: - """Test handle stats for user with no activity.""" - invalid_course_id = "course-v1:edX+DNE+Not_EXISTS" - response = api_client.get(f"/api/v2/users/{invalid_course_id}/stats") - assert response.status_code == 200 - res = response.json() - assert res["user_stats"] == [] - - -def test_returns_users_stats_filtered_by_user_with_default_activity_sort( - api_client: APIClient, - build_structure_and_response: dict[str, Any], -) -> None: - """Test returns user's stats filtered by user with default/activity sort.""" - course_id = "test_course" - authors = ["author-1", "author-2", "author-3", "author-4", "author-5", "author-6"] - usernames = random.sample(authors, 2) - usernames_str = ",".join(usernames) - full_data = build_structure_and_response - response = api_client.post_json(f"/api/v2/users/{course_id}/update_stats", data={}) - assert 
response.status_code == 200 - res = response.json() - assert res["user_count"] == 6 - - expected_result = [ - val for val in full_data.values() if val["username"] in usernames - ] - expected_result.sort(key=lambda x: usernames.index(x["username"])) - - response = api_client.get( - f"/api/v2/users/{course_id}/stats?usernames={usernames_str}", - ) - assert response.status_code == 200 - res = response.json() - assert res["user_stats"] == expected_result - - -def test_returns_users_stats_with_recency_sort(api_client: APIClient) -> None: - """Test returns user's stats with recency sort.""" - course_id = "test_course" - response = api_client.post_json(f"/api/v2/users/{course_id}/update_stats", data={}) - response = api_client.get( - f"/api/v2/users/{course_id}/stats?sort_key=recency&with_timestamps=true" - ) - assert response.status_code == 200 - res = response.json() - sorted_order = sorted( - res["user_stats"], - key=lambda x: (x["last_activity_at"], x["username"]), - reverse=True, - ) - assert res["user_stats"] == sorted_order - - -def test_returns_users_stats_with_flagged_sort( - api_client: APIClient, - build_structure_and_response: dict[str, Any], -) -> None: - """Test returns user's stats with flagged sort.""" - course_id = "test_course" - expected_data = build_structure_and_response - response = api_client.post_json(f"/api/v2/users/{course_id}/update_stats", data={}) - expected_result = sorted( - expected_data.values(), - key=lambda x: (x["active_flags"], x["inactive_flags"], x["username"]), - reverse=True, - ) - - response = api_client.get(f"/api/v2/users/{course_id}/stats?sort_key=flagged") - assert response.status_code == 200 - res = response.json() - assert res["user_stats"] == expected_result diff --git a/tox.ini b/tox.ini index 18cf85ac..58683485 100644 --- a/tox.ini +++ b/tox.ini @@ -32,7 +32,7 @@ match-dir = (?!migrations) [pytest] DJANGO_SETTINGS_MODULE = forum.settings.test addopts = --cov forum --cov tests --cov-report term-missing --cov-report xml -norecursedirs = .* docs requirements site-packages +norecursedirs = .* docs requirements site-packages e2e [testenv] deps = @@ -82,3 +82,12 @@ deps = commands = make test-pii +[testenv:e2e] +allowlist_externals = + make + rm + touch +deps = + -r{toxinidir}/requirements/test.txt +commands = + make test-e2e
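Note on e2e test setup: the e2e modules above rely on an api_client fixture and on search indices being reset between tests, but that glue code does not appear in the hunks shown here. The sketch below is a minimal, hypothetical illustration of what such a tests/e2e/conftest.py could look like. It assumes that APIClient can be constructed without arguments and that the object returned by get_search_backend() exposes the same initialize_indices() helper as the ElasticsearchBackend used by the removed unit-test fixture; only the names api_client, APIClient, and get_search_backend are taken from the diff itself.

# tests/e2e/conftest.py -- hypothetical sketch, not part of this patch
import pytest

from forum.search.backend import get_search_backend
from test_utils.client import APIClient


@pytest.fixture(name="api_client")
def fixture_api_client() -> APIClient:
    """Client used by the e2e tests to call the forum HTTP API."""
    return APIClient()  # assumption: no constructor arguments are required


@pytest.fixture(autouse=True)
def reset_search_indices() -> None:
    """Recreate the search indices so every e2e test starts from an empty index."""
    backend = get_search_backend()
    # Assumption: initialize_indices() drops and recreates the indices, as the
    # removed ElasticsearchBackend-based fixture in tests/test_views/test_search.py did.
    backend.initialize_indices()

The exact fixture bodies are an assumption; if the real conftest differs, the tests above are unaffected as long as an api_client fixture exists and the indices are clean at the start of each test.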