From 8fde7af1443c5932b49a3e2573358ef2bf7855ea Mon Sep 17 00:00:00 2001 From: Safwan Rahman Date: Sun, 20 May 2018 03:06:01 +0600 Subject: [PATCH 01/18] First phase of test for search --- readthedocs/search/indexes.py | 4 ++ readthedocs/search/tests/__init__.py | 0 readthedocs/search/tests/conftest.py | 85 ++++++++++++++++++++++++++ readthedocs/search/tests/test_views.py | 17 ++++++ requirements/testing.txt | 2 + 5 files changed, 108 insertions(+) create mode 100644 readthedocs/search/tests/__init__.py create mode 100644 readthedocs/search/tests/conftest.py create mode 100644 readthedocs/search/tests/test_views.py diff --git a/readthedocs/search/indexes.py b/readthedocs/search/indexes.py index 1b2ede6aaa9..e74aa900a99 100644 --- a/readthedocs/search/indexes.py +++ b/readthedocs/search/indexes.py @@ -155,6 +155,10 @@ def index_document(self, data, index=None, parent=None, routing=None): kwargs['routing'] = routing self.es.index(**kwargs) + def delete_index(self, index_name): + + self.es.indices.delete(index=index_name) + def delete_document(self, body, index=None, parent=None, routing=None): kwargs = { 'index': index or self._index, diff --git a/readthedocs/search/tests/__init__.py b/readthedocs/search/tests/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/readthedocs/search/tests/conftest.py b/readthedocs/search/tests/conftest.py new file mode 100644 index 00000000000..136c80619af --- /dev/null +++ b/readthedocs/search/tests/conftest.py @@ -0,0 +1,85 @@ +import pytest +from django_dynamic_fixture import G +from faker import Faker + +from readthedocs.builds.models import Version +from readthedocs.search.indexes import Index, ProjectIndex, PageIndex, SectionIndex + +fake = Faker() + + +@pytest.fixture(autouse=True) +def mock_elastic_index(mocker): + mocker.patch.object(Index, '_index', fake.word()) + + +@pytest.fixture +def search(): + # Create the index. 
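+    # timestamped_index() yields a unique physical index name; update_aliases()
+    # then points the search alias at it, so each test run gets an isolated index.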
+ index = Index() + index_name = index.timestamped_index() + index.create_index(index_name) + index.update_aliases(index_name) + # Update mapping + proj = ProjectIndex() + proj.put_mapping() + page = PageIndex() + page.put_mapping() + sec = SectionIndex() + sec.put_mapping() + + yield + index.delete_index(index_name=index_name) + + +@pytest.fixture +def make_page_content(): + + def make_content(): + data = { + 'title': fake.sentence(), + 'body': fake.paragraphs(), + 'toc': fake.word() + } + return data + + yield make_content + + +@pytest.fixture +def make_page_file(make_page_content, make_temp_json_dir): + def make_file(): + import json + page_content = make_page_content() + file_name = fake.file_name(extension='fjson') + directory = make_temp_json_dir() + file_path = directory.join(file_name) + json.dump(page_content, file_path) + return directory + return make_file + + +@pytest.fixture +def make_temp_json_dir(tmpdir_factory): + def make_dir(): + return tmpdir_factory.mktemp('json') + + return make_dir + + +@pytest.mark.django_db +@pytest.fixture +def version(): + name = fake.name() + return G(Version, project__name=str(name)) + + +@pytest.fixture +def project(version, mocker, make_page_file): + project = version.project + directory = make_page_file() + print directory + media_path = mocker.patch('readthedocs.projects.models.Project.get_production_media_path') + media_path.side_effect = make_page_content + print project.get_production_media_path() + return version.project \ No newline at end of file diff --git a/readthedocs/search/tests/test_views.py b/readthedocs/search/tests/test_views.py new file mode 100644 index 00000000000..a2ca5452ed9 --- /dev/null +++ b/readthedocs/search/tests/test_views.py @@ -0,0 +1,17 @@ +import pytest +from django.core.management import call_command +from django.core.urlresolvers import reverse + + +@pytest.mark.django_db +@pytest.mark.search +class TestElasticSearch(object): + + @pytest.fixture(autouse=True) + def elastic_index(self, project, search): + call_command('reindex_elasticsearch') + + def test_search_by_project_name(self, search, client, project): + url = reverse('search') + resp = client.get(url, {'q': project.name}) + assert project.name in resp.content \ No newline at end of file diff --git a/requirements/testing.txt b/requirements/testing.txt index 110106df658..46cb7f4c09a 100644 --- a/requirements/testing.txt +++ b/requirements/testing.txt @@ -8,6 +8,8 @@ pytest-xdist==1.22.0 apipkg==1.4 execnet==1.5.0 Mercurial==4.4.2 +Faker==0.8.15 +pytest-mock==1.10.0 # local debugging tools datadiff From ef2396df53a86cf0b23e46a69c7a041a3ed6429e Mon Sep 17 00:00:00 2001 From: Safwan Rahman Date: Sun, 20 May 2018 08:53:53 +0600 Subject: [PATCH 02/18] fixing up mocking path --- readthedocs/search/parse_json.py | 1 + readthedocs/search/tests/conftest.py | 15 ++++++++------- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/readthedocs/search/parse_json.py b/readthedocs/search/parse_json.py index 4583822ac5e..ead3c4144b3 100644 --- a/readthedocs/search/parse_json.py +++ b/readthedocs/search/parse_json.py @@ -38,6 +38,7 @@ def process_all_json_files(version, build_dir=True): # pylint: disable=bare-except except: pass + print page_list return page_list diff --git a/readthedocs/search/tests/conftest.py b/readthedocs/search/tests/conftest.py index 136c80619af..75a479fa139 100644 --- a/readthedocs/search/tests/conftest.py +++ b/readthedocs/search/tests/conftest.py @@ -1,3 +1,4 @@ +import json import pytest from django_dynamic_fixture import G from faker 
import Faker @@ -37,9 +38,10 @@ def make_page_content(): def make_content(): data = { + 'current_page_name': fake.sentence(), 'title': fake.sentence(), - 'body': fake.paragraphs(), - 'toc': fake.word() + 'body': fake.text(), + 'toc': fake.text() } return data @@ -49,12 +51,13 @@ def make_content(): @pytest.fixture def make_page_file(make_page_content, make_temp_json_dir): def make_file(): - import json page_content = make_page_content() file_name = fake.file_name(extension='fjson') directory = make_temp_json_dir() file_path = directory.join(file_name) - json.dump(page_content, file_path) + with open(str(file_path), 'w') as f: + json.dump(page_content, f) + return directory return make_file @@ -77,9 +80,7 @@ def version(): @pytest.fixture def project(version, mocker, make_page_file): project = version.project - directory = make_page_file() - print directory media_path = mocker.patch('readthedocs.projects.models.Project.get_production_media_path') - media_path.side_effect = make_page_content + media_path.return_value = str(make_page_file()) print project.get_production_media_path() return version.project \ No newline at end of file From 238f13667fed6359cb0cf29d90a383054379f965 Mon Sep 17 00:00:00 2001 From: Safwan Rahman Date: Sun, 20 May 2018 09:05:51 +0600 Subject: [PATCH 03/18] adding elasticsearch to travis --- .travis.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.travis.yml b/.travis.yml index b08e0cc0319..9c36c4b92d9 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,6 +3,8 @@ python: - 2.7 - 3.6 sudo: false +env: + - ES_VERSION=1.3.9 ES_DOWNLOAD_URL=https://download.elastic.co/elasticsearch/elasticsearch/elasticsearch-${ES_VERSION}.tar.gz matrix: include: - python: 2.7 @@ -18,6 +20,9 @@ cache: - ~/.cache/pip - ~/.nvm/nvm.sh install: + - wget ${ES_DOWNLOAD_URL} + - tar -xzf elasticsearch-${ES_VERSION}.tar.gz + - ./elasticsearch-${ES_VERSION}/bin/elasticsearch & - pip install tox-travis - curl -o- https://raw.githubusercontent.com/creationix/nvm/v0.33.2/install.sh | bash - source ~/.nvm/nvm.sh From 91de9064de49c4de0bf5aeabf2844a4baae22000 Mon Sep 17 00:00:00 2001 From: Safwan Rahman Date: Sun, 20 May 2018 09:14:12 +0600 Subject: [PATCH 04/18] install plugin --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index 9c36c4b92d9..f70d1acd275 100644 --- a/.travis.yml +++ b/.travis.yml @@ -23,6 +23,7 @@ install: - wget ${ES_DOWNLOAD_URL} - tar -xzf elasticsearch-${ES_VERSION}.tar.gz - ./elasticsearch-${ES_VERSION}/bin/elasticsearch & + - ./elasticsearch-${ES_VERSION}/bin/plugin -install elasticsearch/elasticsearch-analysis-icu/2.3.0 - pip install tox-travis - curl -o- https://raw.githubusercontent.com/creationix/nvm/v0.33.2/install.sh | bash - source ~/.nvm/nvm.sh From 8e74ff73d504e95ae439d384763250f04ca7af26 Mon Sep 17 00:00:00 2001 From: Safwan Rahman Date: Sun, 20 May 2018 09:20:28 +0600 Subject: [PATCH 05/18] fixup --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index f70d1acd275..fd343160ac2 100644 --- a/.travis.yml +++ b/.travis.yml @@ -22,8 +22,8 @@ cache: install: - wget ${ES_DOWNLOAD_URL} - tar -xzf elasticsearch-${ES_VERSION}.tar.gz - - ./elasticsearch-${ES_VERSION}/bin/elasticsearch & - ./elasticsearch-${ES_VERSION}/bin/plugin -install elasticsearch/elasticsearch-analysis-icu/2.3.0 + - ./elasticsearch-${ES_VERSION}/bin/elasticsearch & - pip install tox-travis - curl -o- https://raw.githubusercontent.com/creationix/nvm/v0.33.2/install.sh | bash - source ~/.nvm/nvm.sh From 
7339ee609e78830fdb272a49c2c0719e1e2ab81d Mon Sep 17 00:00:00 2001 From: Safwan Rahman Date: Sun, 20 May 2018 10:55:05 +0600 Subject: [PATCH 06/18] fixing test --- readthedocs/search/parse_json.py | 1 - 1 file changed, 1 deletion(-) diff --git a/readthedocs/search/parse_json.py b/readthedocs/search/parse_json.py index ead3c4144b3..4583822ac5e 100644 --- a/readthedocs/search/parse_json.py +++ b/readthedocs/search/parse_json.py @@ -38,7 +38,6 @@ def process_all_json_files(version, build_dir=True): # pylint: disable=bare-except except: pass - print page_list return page_list From d705f6c0bde0a10f95e1439d81ff3d1bc07cafb1 Mon Sep 17 00:00:00 2001 From: Safwan Rahman Date: Sun, 20 May 2018 11:15:46 +0600 Subject: [PATCH 07/18] fixup --- .travis.yml | 5 +---- readthedocs/search/tests/conftest.py | 1 - readthedocs/search/tests/test_views.py | 2 +- scripts/travis/install_elasticsearch.sh | 7 +++++++ 4 files changed, 9 insertions(+), 6 deletions(-) create mode 100755 scripts/travis/install_elasticsearch.sh diff --git a/.travis.yml b/.travis.yml index fd343160ac2..496e6de8243 100644 --- a/.travis.yml +++ b/.travis.yml @@ -20,10 +20,7 @@ cache: - ~/.cache/pip - ~/.nvm/nvm.sh install: - - wget ${ES_DOWNLOAD_URL} - - tar -xzf elasticsearch-${ES_VERSION}.tar.gz - - ./elasticsearch-${ES_VERSION}/bin/plugin -install elasticsearch/elasticsearch-analysis-icu/2.3.0 - - ./elasticsearch-${ES_VERSION}/bin/elasticsearch & + - ./scripts/travis/install_elasticsearch.sh - pip install tox-travis - curl -o- https://raw.githubusercontent.com/creationix/nvm/v0.33.2/install.sh | bash - source ~/.nvm/nvm.sh diff --git a/readthedocs/search/tests/conftest.py b/readthedocs/search/tests/conftest.py index 75a479fa139..cca3caf0514 100644 --- a/readthedocs/search/tests/conftest.py +++ b/readthedocs/search/tests/conftest.py @@ -82,5 +82,4 @@ def project(version, mocker, make_page_file): project = version.project media_path = mocker.patch('readthedocs.projects.models.Project.get_production_media_path') media_path.return_value = str(make_page_file()) - print project.get_production_media_path() return version.project \ No newline at end of file diff --git a/readthedocs/search/tests/test_views.py b/readthedocs/search/tests/test_views.py index a2ca5452ed9..59822b3de41 100644 --- a/readthedocs/search/tests/test_views.py +++ b/readthedocs/search/tests/test_views.py @@ -14,4 +14,4 @@ def elastic_index(self, project, search): def test_search_by_project_name(self, search, client, project): url = reverse('search') resp = client.get(url, {'q': project.name}) - assert project.name in resp.content \ No newline at end of file + assert project.name.encode('utf-8') in resp.content diff --git a/scripts/travis/install_elasticsearch.sh b/scripts/travis/install_elasticsearch.sh new file mode 100755 index 00000000000..f63f3ae6168 --- /dev/null +++ b/scripts/travis/install_elasticsearch.sh @@ -0,0 +1,7 @@ +if [ $ES_DOWNLOAD_URL ] +then + wget ${ES_DOWNLOAD_URL} + tar -xzf elasticsearch-${ES_VERSION}.tar.gz + ./elasticsearch-${ES_VERSION}/bin/plugin -install elasticsearch/elasticsearch-analysis-icu/2.3.0 + ./elasticsearch-${ES_VERSION}/bin/elasticsearch & +fi From 656db3ca176724530c569b360a4503346b68e4b5 Mon Sep 17 00:00:00 2001 From: Safwan Rahman Date: Mon, 21 May 2018 09:03:08 +0600 Subject: [PATCH 08/18] implement test for file search --- readthedocs/search/indexes.py | 5 ++ readthedocs/search/tests/conftest.py | 71 ++++++++++++-------------- readthedocs/search/tests/test_views.py | 39 ++++++++++++-- 3 files changed, 71 insertions(+), 44 deletions(-) 
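A note on the mocking approach in this patch: process_all_json_files has to be
patched where it is looked up (readthedocs.projects.tasks), not where it is
defined (readthedocs.search.parse_json). A minimal sketch of the idea, reusing
the page_json fixture introduced below:

    # projects/tasks.py imports process_all_json_files directly, so it holds
    # its own reference to the function; patch that reference, not the original.
    mocked = mocker.patch('readthedocs.projects.tasks.process_all_json_files')
    mocked.side_effect = page_json  # deterministic page dicts instead of reading .fjson files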
diff --git a/readthedocs/search/indexes.py b/readthedocs/search/indexes.py index e74aa900a99..6b5b2ee9c36 100644 --- a/readthedocs/search/indexes.py +++ b/readthedocs/search/indexes.py @@ -107,6 +107,11 @@ def create_index(self, index=None): } self.es.indices.create(index=index, body=body) + def refresh_index(self, index=None): + index = index or self._index + + self.es.indices.refresh(index=index) + def put_mapping(self, index=None): index = index or self._index self.es.indices.put_mapping(self._type, self.get_mapping(), index) diff --git a/readthedocs/search/tests/conftest.py b/readthedocs/search/tests/conftest.py index cca3caf0514..be0231f2a09 100644 --- a/readthedocs/search/tests/conftest.py +++ b/readthedocs/search/tests/conftest.py @@ -3,7 +3,7 @@ from django_dynamic_fixture import G from faker import Faker -from readthedocs.builds.models import Version +from readthedocs.projects.models import Project from readthedocs.search.indexes import Index, ProjectIndex, PageIndex, SectionIndex fake = Faker() @@ -11,9 +11,10 @@ @pytest.fixture(autouse=True) def mock_elastic_index(mocker): - mocker.patch.object(Index, '_index', fake.word()) + mocker.patch.object(Index, '_index', fake.word().lower()) +@pytest.fixture(autouse=True) @pytest.fixture def search(): # Create the index. @@ -29,57 +30,49 @@ def search(): sec = SectionIndex() sec.put_mapping() - yield + yield index index.delete_index(index_name=index_name) @pytest.fixture -def make_page_content(): +def project(): + return G(Project) + + +@pytest.fixture +def page_json(): + version_contents = {} - def make_content(): + def create_dummy_json(): data = { - 'current_page_name': fake.sentence(), + 'path': fake.word(), 'title': fake.sentence(), - 'body': fake.text(), - 'toc': fake.text() + 'content': fake.text(), + 'sections': fake.sentences(), + 'headers': fake.sentences() } return data - yield make_content + def get_dummy_json(version, *args, **kwargs): + """Get dummy json content for a version page""" + # Check existing content of that version + # If not exist, generate new dummy content + content = version_contents.get(version.id) + if not content: + content = create_dummy_json() + # save in order to not regenerate dummy content for same version + version_contents[version.id] = content -@pytest.fixture -def make_page_file(make_page_content, make_temp_json_dir): - def make_file(): - page_content = make_page_content() - file_name = fake.file_name(extension='fjson') - directory = make_temp_json_dir() - file_path = directory.join(file_name) - with open(str(file_path), 'w') as f: - json.dump(page_content, f) + return [content] - return directory - return make_file + return get_dummy_json @pytest.fixture -def make_temp_json_dir(tmpdir_factory): - def make_dir(): - return tmpdir_factory.mktemp('json') - - return make_dir +def mock_parse_json(mocker, page_json): - -@pytest.mark.django_db -@pytest.fixture -def version(): - name = fake.name() - return G(Version, project__name=str(name)) - - -@pytest.fixture -def project(version, mocker, make_page_file): - project = version.project - media_path = mocker.patch('readthedocs.projects.models.Project.get_production_media_path') - media_path.return_value = str(make_page_file()) - return version.project \ No newline at end of file + # patch the function from `projects.tasks` because it has been point to there + # http://www.voidspace.org.uk/python/mock/patch.html#where-to-patch + mocked_function = mocker.patch('readthedocs.projects.tasks.process_all_json_files') + mocked_function.side_effect = page_json diff 
--git a/readthedocs/search/tests/test_views.py b/readthedocs/search/tests/test_views.py index 59822b3de41..fced55a1405 100644 --- a/readthedocs/search/tests/test_views.py +++ b/readthedocs/search/tests/test_views.py @@ -1,17 +1,46 @@ import pytest from django.core.management import call_command from django.core.urlresolvers import reverse +from django_dynamic_fixture import G +from pyquery import PyQuery as pq + +from readthedocs.builds.models import Version +from readthedocs.projects.models import Project +from readthedocs.search import parse_json @pytest.mark.django_db @pytest.mark.search class TestElasticSearch(object): + url = reverse('search') @pytest.fixture(autouse=True) - def elastic_index(self, project, search): + def elastic_index(self, mock_parse_json, project, search): call_command('reindex_elasticsearch') + search.refresh_index() + + def test_search_by_project_name(self, client, project): + resp = client.get(self.url, {'q': project.name}) + assert resp.status_code == 200 + + page = pq(resp.content) + content = page.find('.module-list-wrapper .module-item-title') + assert project.name.encode('utf-8') in content.text() + + def test_search_by_file_content(self, client, page_json, project): + + versions = project.versions.all() + # There should be only one version of the project + assert len(versions) == 1 + + data = page_json(version=versions[0])[0] + # Query with the first word of title + title = data['title'] + query = title.split()[0] + + resp = client.get(self.url, {'q': query, 'type': 'file'}) + assert resp.status_code == 200 - def test_search_by_project_name(self, search, client, project): - url = reverse('search') - resp = client.get(url, {'q': project.name}) - assert project.name.encode('utf-8') in resp.content + page = pq(resp.content) + content = page.find('.module-list-wrapper .module-item-title') + assert title in content.text() From d1fe6ba6b9117d4e3d71ffa08f970f71e7e18662 Mon Sep 17 00:00:00 2001 From: Safwan Rahman Date: Mon, 21 May 2018 13:26:50 +0600 Subject: [PATCH 09/18] fixup --- readthedocs/search/tests/test_views.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/readthedocs/search/tests/test_views.py b/readthedocs/search/tests/test_views.py index fced55a1405..b8d3b2b921f 100644 --- a/readthedocs/search/tests/test_views.py +++ b/readthedocs/search/tests/test_views.py @@ -25,7 +25,7 @@ def test_search_by_project_name(self, client, project): page = pq(resp.content) content = page.find('.module-list-wrapper .module-item-title') - assert project.name.encode('utf-8') in content.text() + assert project.name.encode('utf-8') in content.text().encode('utf-8') def test_search_by_file_content(self, client, page_json, project): From 47ff8d942a5563a4783805717de43c1d97dda1c0 Mon Sep 17 00:00:00 2001 From: Safwan Rahman Date: Tue, 22 May 2018 03:08:01 +0600 Subject: [PATCH 10/18] run search test only if --including-search passed --- .travis.yml | 2 +- conftest.py | 9 +++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) create mode 100644 conftest.py diff --git a/.travis.yml b/.travis.yml index 496e6de8243..f4943c9f2ca 100644 --- a/.travis.yml +++ b/.travis.yml @@ -14,7 +14,7 @@ matrix: - python: 2.7 env: TOXENV=eslint script: - - tox + - tox "'--including-search'" cache: directories: - ~/.cache/pip diff --git a/conftest.py b/conftest.py new file mode 100644 index 00000000000..c455660670e --- /dev/null +++ b/conftest.py @@ -0,0 +1,9 @@ +def pytest_addoption(parser): + parser.addoption('--including-search', action='store_true', dest="searchtests", + 
default=False, help="enable search tests") + + +def pytest_configure(config): + if not config.option.searchtests: + # Include `not search` to parameters so that search test do not perform + setattr(config.option, 'markexpr', 'not search') \ No newline at end of file From c6f39e76803fca03c37fcb97c518c9c36d8edbe9 Mon Sep 17 00:00:00 2001 From: Safwan Rahman Date: Tue, 22 May 2018 04:16:48 +0600 Subject: [PATCH 11/18] fixing travis --- .travis.yml | 5 ++--- scripts/travis/run_tests.sh | 5 +++++ 2 files changed, 7 insertions(+), 3 deletions(-) create mode 100755 scripts/travis/run_tests.sh diff --git a/.travis.yml b/.travis.yml index f4943c9f2ca..4ab6bfd7fa8 100644 --- a/.travis.yml +++ b/.travis.yml @@ -11,10 +11,9 @@ matrix: env: TOXENV=docs - python: 2.7 env: TOXENV=lint + script: tox - python: 2.7 env: TOXENV=eslint -script: - - tox "'--including-search'" cache: directories: - ~/.cache/pip @@ -30,7 +29,7 @@ install: - npm install - bower install script: - - tox + - ./scripts/travis/run_tests.sh notifications: slack: rooms: diff --git a/scripts/travis/run_tests.sh b/scripts/travis/run_tests.sh new file mode 100755 index 00000000000..99c56387033 --- /dev/null +++ b/scripts/travis/run_tests.sh @@ -0,0 +1,5 @@ +if ! [[ "$TOXENV" =~ ^(docs|lint|eslint) ]]; +then + args="'--including-search'" +fi +tox $args From 085d5baf42c6724583165c5c60952b94114b0ef4 Mon Sep 17 00:00:00 2001 From: Safwan Rahman Date: Mon, 28 May 2018 01:40:59 +0600 Subject: [PATCH 12/18] implementing tests for facets --- readthedocs/search/tests/conftest.py | 42 ++++-------- .../search/tests/data/kuma/docker.json | 25 ++++++++ .../search/tests/data/kuma/documentation.json | 22 +++++++ .../tests/data/pipeline/installation.json | 33 ++++++++++ .../search/tests/data/pipeline/signals.json | 27 ++++++++ readthedocs/search/tests/dummy_data.py | 27 ++++++++ readthedocs/search/tests/test_views.py | 64 ++++++++++++++++++- readthedocs/search/views.py | 1 + readthedocs/settings/dev.py | 4 +- .../templates/search/elastic_search.html | 4 +- 10 files changed, 212 insertions(+), 37 deletions(-) create mode 100644 readthedocs/search/tests/data/kuma/docker.json create mode 100644 readthedocs/search/tests/data/kuma/documentation.json create mode 100644 readthedocs/search/tests/data/pipeline/installation.json create mode 100644 readthedocs/search/tests/data/pipeline/signals.json create mode 100644 readthedocs/search/tests/dummy_data.py diff --git a/readthedocs/search/tests/conftest.py b/readthedocs/search/tests/conftest.py index be0231f2a09..50febb56d85 100644 --- a/readthedocs/search/tests/conftest.py +++ b/readthedocs/search/tests/conftest.py @@ -1,10 +1,10 @@ -import json import pytest from django_dynamic_fixture import G from faker import Faker from readthedocs.projects.models import Project from readthedocs.search.indexes import Index, ProjectIndex, PageIndex, SectionIndex +from .dummy_data import DUMMY_PAGE_JSON, ALL_PROJECTS fake = Faker() @@ -35,44 +35,26 @@ def search(): @pytest.fixture -def project(): - return G(Project) +def all_projects(): + return [G(Project, slug=project_name, name=project_name) for project_name in ALL_PROJECTS] @pytest.fixture -def page_json(): - version_contents = {} +def project(all_projects): + # Return a single project + return all_projects[0] - def create_dummy_json(): - data = { - 'path': fake.word(), - 'title': fake.sentence(), - 'content': fake.text(), - 'sections': fake.sentences(), - 'headers': fake.sentences() - } - return data - def get_dummy_json(version, *args, **kwargs): - """Get dummy json content 
for a version page""" - - # Check existing content of that version - # If not exist, generate new dummy content - content = version_contents.get(version.id) - if not content: - content = create_dummy_json() - # save in order to not regenerate dummy content for same version - version_contents[version.id] = content - - return [content] - - return get_dummy_json +def get_dummy_page_json(version, *args, **kwargs): + dummy_page_json = DUMMY_PAGE_JSON + project_name = version.project.name + return dummy_page_json.get(project_name) @pytest.fixture -def mock_parse_json(mocker, page_json): +def mock_parse_json(mocker): # patch the function from `projects.tasks` because it has been point to there # http://www.voidspace.org.uk/python/mock/patch.html#where-to-patch mocked_function = mocker.patch('readthedocs.projects.tasks.process_all_json_files') - mocked_function.side_effect = page_json + mocked_function.side_effect = get_dummy_page_json diff --git a/readthedocs/search/tests/data/kuma/docker.json b/readthedocs/search/tests/data/kuma/docker.json new file mode 100644 index 00000000000..43972c7d945 --- /dev/null +++ b/readthedocs/search/tests/data/kuma/docker.json @@ -0,0 +1,25 @@ +{ + "content": "Docker\nDocker is used for development and (soon) for deployment.\nDocker Images\nDocker images are used in development, usually with the local working files mounted in the images to set behaviour.\nImages are built by Jenkins, after tests pass, and are published to quay.io. We try to store the configuration in the environment, so that the published images can be used in deployments by setting environment variables to deployment-specific values.\nHere are some of the images used in the Kuma project:\nkuma\nThe kuma Docker image builds on the kuma_base image, installing a kuma branch and building the assets needed for running as a webservice. The environment can be customized for different deployments.\nThe image can be recreated locally with make build-kuma.\nThe image tagged latest is used by default for development. It can be created locally with make build-kuma VERSION=latest. The official latest image is created from the master branch in Jenkins and published to quay.io.\nkuma_base\nThe kuma_base Docker image contains the OS and libraries (C, Python, and Node.js) that support the kuma project. The kuma image extends this by installing the kuma source and building assets needed for production.\nThe image can be recreated locally with make build-base.\nThe image tagged latest is used by default for development. It can be created localled with make build-base VERSION=latest. The official latest image is created from the master branch in Jenkins and published to quay.io\nkumascript\nThe kumascript Docker image contains the kumascript rendering engine and support files. The environment can be customized for different deployments.\nThe image can be recreated locally with make build-kumascript.\nThe image tagged latest is used by default for development. It can be created locally with make build-kumascript KS_VERSION=latest. The official latest image is created from the master branch in Jenkins and published to quay.io.\nintegration-tests\nThe integration-tests Docker image contains browser-based integration tests that check the functionality of a running Kuma deployment.\nThe image can be recreated locally with docker build -f docker/images/integration-tests/ ., but this is only necessary for image development. 
Most developer will follow the Client-side testing documentation to develop and run these integration tests.\nThe image is built and used in Jenkins in the stage-integration-tests and prod-integration-tests pipelines, configured by scripts in the Jenkinsfiles folder. It is not published to quay.io.", + "headers": [ + "Docker", + "Docker Images", + "kuma", + "kuma_base", + "kumascript", + "integration-tests" + ], + "title": "Docker", + "sections": [ + { + "content": "\nDocker is used for development and (soon) for deployment.\n", + "id": "docker", + "title": "Docker" + }, + { + "content": "\n

Docker Images\u00b6\nDocker images are used in development, usually with the local\nworking files mounted in the images to set behaviour.\nImages are built by Jenkins, after tests pass, and are\npublished to quay.io. We try to\nstore the configuration in the environment, so that the\npublished images can be used in deployments by setting\nenvironment variables to deployment-specific values.\nHere are some of the images used in the Kuma project:\n\nkuma\u00b6\nThe kuma Docker image builds on the kuma_base image, installing a kuma branch\nand building the assets needed for running as a webservice. The environment\ncan be customized for different deployments.\nThe image can be recreated locally with make build-kuma.\nThe image tagged latest is used by default for development. It can be\ncreated locally with make build-kuma VERSION=latest. The official latest\nimage is created from the master branch in Jenkins and published to\nquay.io.\n\n\nkuma_base\u00b6\nThe kuma_base Docker image contains the OS and libraries (C, Python, and\nNode.js) that support the kuma project. The kuma image extends this by\ninstalling the kuma source and building assets needed for production.\nThe image can be recreated locally with make build-base.\nThe image tagged latest is used by default for development. It can be\ncreated localled with make build-base VERSION=latest. The official\nlatest image is created from the master branch in Jenkins and published to\nquay.io\n\n\nkumascript\u00b6\nThe kumascript Docker image contains the kumascript rendering engine and\nsupport files. The environment can be customized for different deployments.\nThe image can be recreated locally with make build-kumascript.\nThe image tagged latest is used by default for development. It can be\ncreated locally with make build-kumascript KS_VERSION=latest. The official\nlatest image is created from the master branch in Jenkins and published to\nquay.io.\n\n\nintegration-tests\u00b6\nThe integration-tests Docker image contains browser-based integration tests\nthat check the functionality of a running Kuma deployment.\nThe image can be recreated locally with\ndocker build -f docker/images/integration-tests/ ., but this is only\nnecessary for image development. Most developer will follow the\nClient-side testing documentation to develop and run these integration tests.\nThe image is built and used in Jenkins in the stage-integration-tests and\nprod-integration-tests pipelines, configured by scripts in the\nJenkinsfiles folder. It is not published to quay.io.\n
\n", + "id": "docker-images", + "title": "Docker Images" + } + ], + "path": "docker" +} \ No newline at end of file diff --git a/readthedocs/search/tests/data/kuma/documentation.json b/readthedocs/search/tests/data/kuma/documentation.json new file mode 100644 index 00000000000..cb9a42e2706 --- /dev/null +++ b/readthedocs/search/tests/data/kuma/documentation.json @@ -0,0 +1,22 @@ +{ + "content": "Documentation\nThis documentation is generated and published at Read the Docs whenever the master branch is updated.\nGitHub can render our .rst documents as ReStructuredText, which is close enough to Sphinx for most code reviews, without features like links between documents.\nIt is occasionally necessary to generate the documentation locally. It is easiest to do this with a virtualenv on the host system, using Docker only to regenerate the MDN Sphinx template. If you are not comfortable with that style of development, it can be done entirely in Docker using docker-compose.\nGenerating documentation\nSphinx uses a Makefile in the docs subfolder to build documentation in several formats. MDN only uses the HTML format, and the generated document index is at docs/_build/html/index.html.\nTo generate the documentation in a virtualenv on the host machine, first install the requirements:\npip install -r requirements/docs.txt\nThen switch to the docs folder to use the Makefile:\ncd docs make html python -m webbrowser file://${PWD}/_build/html/index.html\nTo generate the documentation with Docker:\ndocker-compose run --rm --user $(id -u) web sh -c \"\\ virtualenv /tmp/.venvs/docs && \\ . /tmp/.venvs/docs/bin/activate && \\ pip install -r /app/requirements/docs.txt && \\ cd /app/docs && \\ make html\" python -m webbrowser file://${PWD}/docs/_build/html/index.html\nA virtualenv is required, to avoid a pip bug when changing the version of a system-installed package.", + "headers": [ + "Documentation", + "Generating documentation", + "Installation" + ], + "title": "Documentation", + "sections": [ + { + "content": "\nThis documentation is generated and published at\nRead the Docs whenever the master branch is updated.\n\nGitHub can render our .rst documents as ReStructuredText, which is\nclose enough to Sphinx for most code reviews, without features like links\nbetween documents.\n\nIt is occasionally necessary to generate the documentation locally. It is\neasiest to do this with a virtualenv on the host system, using Docker only to\nregenerate the MDN Sphinx template. If you are not comfortable with that style\nof development, it can be done entirely in Docker using docker-compose.\n", + "id": "documentation", + "title": "Documentation" + }, + { + "content": "\n

Generating documentation\u00b6\nSphinx uses a Makefile in the docs subfolder to build documentation in\nseveral formats. MDN only uses the HTML format, and the generated document\nindex is at docs/_build/html/index.html.\nTo generate the documentation in a virtualenv on the host machine, first\ninstall the requirements:\npip install -r requirements/docs.txt\n\n\nThen switch to the docs folder to use the Makefile:\ncd docs\nmake html\npython -m webbrowser file://${PWD}/_build/html/index.html\n\n\nTo generate the documentation with Docker:\ndocker-compose run --rm --user $(id -u) web sh -c \"\\\n  virtualenv /tmp/.venvs/docs && \\\n  . /tmp/.venvs/docs/bin/activate && \\\n  pip install -r /app/requirements/docs.txt && \\\n  cd /app/docs && \\\n  make html\"\npython -m webbrowser file://${PWD}/docs/_build/html/index.html\n\n\nA virtualenv is required, to avoid a pip bug when changing the version\nof a system-installed package.
\n", + "id": "generating-documentation", + "title": "Generating documentation" + } + ], + "path": "documentation" +} \ No newline at end of file diff --git a/readthedocs/search/tests/data/pipeline/installation.json b/readthedocs/search/tests/data/pipeline/installation.json new file mode 100644 index 00000000000..3fc9f0b00a9 --- /dev/null +++ b/readthedocs/search/tests/data/pipeline/installation.json @@ -0,0 +1,33 @@ +{ + "content": "Installation\nEither check out Pipeline from GitHub or to pull a release off PyPI\npip install django-pipeline\nAdd \u2018pipeline\u2019 to your INSTALLED_APPS\nINSTALLED_APPS = ( 'pipeline', )\nUse a pipeline storage for STATICFILES_STORAGE\nSTATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'\nAdd the PipelineFinder to STATICFILES_FINDERS\nSTATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', 'pipeline.finders.PipelineFinder', )\nNote\nYou need to use Django>=1.7 to be able to use this version of pipeline.\nUpgrading from 1.3\nTo upgrade from pipeline 1.3, you will need to follow these steps:\nUpdate templates to use the new syntax\n{# pipeline<1.4 #} {% load compressed %} {% compressed_js 'group' %} {% compressed_css 'group' %}\n{# pipeline>=1.4 #} {% load pipeline %} {% javascript 'group' %} {% stylesheet 'group' %}\nAdd the PipelineFinder to STATICFILES_FINDERS\nSTATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', 'pipeline.finders.PipelineFinder', )\nUpgrading from 1.5\nTo upgrade from pipeline 1.5, you will need update all your PIPELINE_* settings and move them under the new PIPELINE setting. See Configuration.\nRecommendations\nPipeline\u2019s default CSS and JS compressor is Yuglify. Yuglify wraps UglifyJS and cssmin, applying the default YUI configurations to them. It can be downloaded from: https://github.com/yui/yuglify/.\nIf you do not install yuglify, make sure to disable the compressor in your settings.", + "headers": [ + "Installation", + "Upgrading from 1.3", + "Upgrading from 1.5", + "Recommendations" + ], + "title": "Installation", + "sections": [ + { + "content": "\n\n
• Either check out Pipeline from GitHub or to pull a release off PyPI\npip install django-pipeline\n\n\n• Add \u2018pipeline\u2019 to your INSTALLED_APPS\nINSTALLED_APPS = (\n    'pipeline',\n)\n\n\n• Use a pipeline storage for STATICFILES_STORAGE\nSTATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'\n\n\n• Add the PipelineFinder to STATICFILES_FINDERS\nSTATICFILES_FINDERS = (\n    'django.contrib.staticfiles.finders.FileSystemFinder',\n    'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n    'pipeline.finders.PipelineFinder',\n)\n\n\nNote\nYou need to use Django>=1.7 to be able to use this version of pipeline.
    \n\n", + "id": "installation", + "title": "Installation" + }, + { + "content": "\n

Upgrading from 1.3\u00b6\nTo upgrade from pipeline 1.3, you will need to follow these steps:\n1. Update templates to use the new syntax\n{# pipeline<1.4 #}\n{% load compressed %}\n{% compressed_js 'group' %}\n{% compressed_css 'group' %}\n\n{# pipeline>=1.4 #}\n{% load pipeline %}\n{% javascript 'group' %}\n{% stylesheet 'group' %}\n\n2. Add the PipelineFinder to STATICFILES_FINDERS\nSTATICFILES_FINDERS = (\n    'django.contrib.staticfiles.finders.FileSystemFinder',\n    'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n    'pipeline.finders.PipelineFinder',\n)\n
    \n", + "id": "upgrading-from-1-3", + "title": "Upgrading from 1.3" + }, + { + "content": "\n

Upgrading from 1.5\u00b6\nTo upgrade from pipeline 1.5, you will need update all your PIPELINE_*\nsettings and move them under the new PIPELINE setting.\nSee Configuration.
    \n", + "id": "upgrading-from-1-5", + "title": "Upgrading from 1.5" + }, + { + "content": "\n

Recommendations\u00b6\nPipeline\u2019s default CSS and JS compressor is Yuglify.\nYuglify wraps UglifyJS and cssmin, applying the default YUI configurations to them.\nIt can be downloaded from: https://github.com/yui/yuglify/.\nIf you do not install yuglify, make sure to disable the compressor in your settings.
    \n", + "id": "recommendations", + "title": "Recommendations" + } + ], + "path": "installation" +} \ No newline at end of file diff --git a/readthedocs/search/tests/data/pipeline/signals.json b/readthedocs/search/tests/data/pipeline/signals.json new file mode 100644 index 00000000000..3bdd02e32d2 --- /dev/null +++ b/readthedocs/search/tests/data/pipeline/signals.json @@ -0,0 +1,27 @@ +{ + "content": "Signals\nList of all signals sent by pipeline.\ncss_compressed\npipeline.signals.css_compressed\nWhenever a css package is compressed, this signal is sent after the compression.\nArguments sent with this signal :\nsender:\nThe Packager class that compressed the group.\npackage:\nThe package actually compressed.\njs_compressed\npipeline.signals.js_compressed\nWhenever a js package is compressed, this signal is sent after the compression.\nArguments sent with this signal :\nsender:\nThe Packager class that compressed the group.\npackage:\nThe package actually compressed.", + "headers": [ + "Signals", + "css_compressed", + "js_compressed" + ], + "title": "Signals", + "sections": [ + { + "content": "\nList of all signals sent by pipeline.\n", + "id": "signals", + "title": "Signals" + }, + { + "content": "\n

css_compressed\u00b6\npipeline.signals.css_compressed\n\nWhenever a css package is compressed, this signal is sent after the compression.\nArguments sent with this signal :\nsender: The Packager class that compressed the group.\npackage: The package actually compressed.\n
    \n", + "id": "css-compressed", + "title": "css_compressed" + }, + { + "content": "\n

js_compressed\u00b6\npipeline.signals.js_compressed\n\nWhenever a js package is compressed, this signal is sent after the compression.\nArguments sent with this signal :\nsender: The Packager class that compressed the group.\npackage: The package actually compressed.\n
    \n", + "id": "js-compressed", + "title": "js_compressed" + } + ], + "path": "signals" +} \ No newline at end of file diff --git a/readthedocs/search/tests/dummy_data.py b/readthedocs/search/tests/dummy_data.py new file mode 100644 index 00000000000..884ed4c821d --- /dev/null +++ b/readthedocs/search/tests/dummy_data.py @@ -0,0 +1,27 @@ +import json +import os + +_DATA_FILES = { + 'pipeline': ['installation.json', 'signals.json'], + 'kuma': ['documentation.json', 'docker.json'], +} + + +def _get_dummy_json(): + dictionary = {} + for key, value in _DATA_FILES.items(): + data = [] + for file_name in value: + current_path = os.path.abspath(os.path.dirname(__file__)) + path = os.path.join(current_path, "data", key, file_name) + with open(path) as f: + content = json.load(f) + data.append(content) + + dictionary[key] = data + + return dictionary + + +DUMMY_PAGE_JSON = _get_dummy_json() +ALL_PROJECTS = DUMMY_PAGE_JSON.keys() diff --git a/readthedocs/search/tests/test_views.py b/readthedocs/search/tests/test_views.py index b8d3b2b921f..0311781064c 100644 --- a/readthedocs/search/tests/test_views.py +++ b/readthedocs/search/tests/test_views.py @@ -4,9 +4,11 @@ from django_dynamic_fixture import G from pyquery import PyQuery as pq +from readthedocs.builds.constants import LATEST from readthedocs.builds.models import Version from readthedocs.projects.models import Project from readthedocs.search import parse_json +from .dummy_data import DUMMY_PAGE_JSON @pytest.mark.django_db @@ -15,7 +17,7 @@ class TestElasticSearch(object): url = reverse('search') @pytest.fixture(autouse=True) - def elastic_index(self, mock_parse_json, project, search): + def elastic_index(self, mock_parse_json, all_projects, search): call_command('reindex_elasticsearch') search.refresh_index() @@ -27,13 +29,13 @@ def test_search_by_project_name(self, client, project): content = page.find('.module-list-wrapper .module-item-title') assert project.name.encode('utf-8') in content.text().encode('utf-8') - def test_search_by_file_content(self, client, page_json, project): + def test_search_by_file_content(self, client, project): versions = project.versions.all() # There should be only one version of the project assert len(versions) == 1 - data = page_json(version=versions[0])[0] + data = DUMMY_PAGE_JSON[project.slug][0] # Query with the first word of title title = data['title'] query = title.split()[0] @@ -44,3 +46,59 @@ def test_search_by_file_content(self, client, page_json, project): page = pq(resp.content) content = page.find('.module-list-wrapper .module-item-title') assert title in content.text() + + def test_file_search_show_projects(self, client): + """Test that search result page shows list of projects while searching for files""" + + # `Installation` word is present both in `kuma` and `pipeline` files + # so search with this phrase + resp = client.get(self.url, {'q': "Installation", 'type': 'file'}) + assert resp.status_code == 200 + + page = pq(resp.content) + content = page.find('.module-list-wrapper .module-item-title') + + # There should be 2 search result + assert len(content) == 2 + + # there should be 2 projects in the left side column + content = page.find('.navigable .project-list') + assert len(content) == 2 + text = content.text() + + # kuma and pipeline should be there + assert 'kuma' and 'pipeline' in text + + @pytest.mark.xfail(reason="Versions are not showing correctly! 
Fixme while rewrite!") + def test_file_search_show_versions(self, client, all_projects, search, settings): + # override the settings to index all versions + settings.INDEX_ONLY_LATEST = False + + project = all_projects[0] + # Create some versions of the project + versions = [G(Version, project=project) for _ in range(3)] + call_command('reindex_elasticsearch') + search.refresh_index() + + data = DUMMY_PAGE_JSON[project.slug][0] + title = data['title'] + query = title.split()[0] + + resp = client.get(self.url, {'q': query, 'type': 'file'}) + assert resp.status_code == 200 + + page = pq(resp.content) + content = page.find('.navigable .version-list') + # There should be total 4 versions + # one is latest, and other 3 that we created above + assert len(content) == 4 + + project_versions = [v.slug for v in versions] + [LATEST] + content_versions = [] + for element in content: + text = element.text_content() + # strip and split to keep the version slug only + slug = text.strip().split('\n')[0] + content_versions.append(slug) + + assert sorted(project_versions) == sorted(content_versions) diff --git a/readthedocs/search/views.py b/readthedocs/search/views.py index 7d3a51d5fc2..661febe2b35 100644 --- a/readthedocs/search/views.py +++ b/readthedocs/search/views.py @@ -66,6 +66,7 @@ def elastic_search(request): for term in results['facets'][facet_type]['terms']: facets[facet_type][term['term']] = term['count'] + if settings.DEBUG: print(pprint(results)) print(pprint(facets)) diff --git a/readthedocs/settings/dev.py b/readthedocs/settings/dev.py index 9cbf4e9f5fc..d74fde24058 100644 --- a/readthedocs/settings/dev.py +++ b/readthedocs/settings/dev.py @@ -29,8 +29,8 @@ def DATABASES(self): # noqa SLUMBER_USERNAME = 'test' SLUMBER_PASSWORD = 'test' # noqa: ignore dodgy check - SLUMBER_API_HOST = 'http://localhost:8000' - PUBLIC_API_URL = 'http://localhost:8000' + SLUMBER_API_HOST = 'http://127.0.0.1:8000' + PUBLIC_API_URL = 'http://127.0.0.1:8000' BROKER_URL = 'redis://localhost:6379/0' CELERY_RESULT_BACKEND = 'redis://localhost:6379/0' diff --git a/readthedocs/templates/search/elastic_search.html b/readthedocs/templates/search/elastic_search.html index 165279670bb..8e07fc37553 100644 --- a/readthedocs/templates/search/elastic_search.html +++ b/readthedocs/templates/search/elastic_search.html @@ -46,7 +46,7 @@
Language
{% if facets.project %}
Projects
{% for name, count in facets.project.items %}
-  •
+  •
{% if project == name %} {{ name }} {% else %} @@ -63,7 +63,7 @@
Projects
Version
{% for name, count in facets.version.items %}
-  •
+
  • {% if version == name %} {{ name }} {% else %} From e095687f21727cba7ecaa8d2eed6f49671cca3b5 Mon Sep 17 00:00:00 2001 From: Safwan Rahman Date: Mon, 28 May 2018 01:48:06 +0600 Subject: [PATCH 13/18] fixup --- readthedocs/search/tests/conftest.py | 1 - readthedocs/search/tests/test_views.py | 2 -- 2 files changed, 3 deletions(-) diff --git a/readthedocs/search/tests/conftest.py b/readthedocs/search/tests/conftest.py index 50febb56d85..fcdc3a42d21 100644 --- a/readthedocs/search/tests/conftest.py +++ b/readthedocs/search/tests/conftest.py @@ -15,7 +15,6 @@ def mock_elastic_index(mocker): @pytest.fixture(autouse=True) -@pytest.fixture def search(): # Create the index. index = Index() diff --git a/readthedocs/search/tests/test_views.py b/readthedocs/search/tests/test_views.py index 0311781064c..6e19fcc82c3 100644 --- a/readthedocs/search/tests/test_views.py +++ b/readthedocs/search/tests/test_views.py @@ -6,8 +6,6 @@ from readthedocs.builds.constants import LATEST from readthedocs.builds.models import Version -from readthedocs.projects.models import Project -from readthedocs.search import parse_json from .dummy_data import DUMMY_PAGE_JSON From e1e9653a2f0bca0ce037821b2623a8498c1fbc83 Mon Sep 17 00:00:00 2001 From: Safwan Rahman Date: Sat, 2 Jun 2018 01:34:08 +0600 Subject: [PATCH 14/18] adding test subprojects --- readthedocs/search/indexes.py | 1 - readthedocs/search/tests/test_views.py | 26 ++++++++++++++++++++++++-- readthedocs/search/views.py | 1 - 3 files changed, 24 insertions(+), 4 deletions(-) diff --git a/readthedocs/search/indexes.py b/readthedocs/search/indexes.py index 6b5b2ee9c36..19c4b2ba772 100644 --- a/readthedocs/search/indexes.py +++ b/readthedocs/search/indexes.py @@ -109,7 +109,6 @@ def create_index(self, index=None): def refresh_index(self, index=None): index = index or self._index - self.es.indices.refresh(index=index) def put_mapping(self, index=None): diff --git a/readthedocs/search/tests/test_views.py b/readthedocs/search/tests/test_views.py index 6e19fcc82c3..ed950b70197 100644 --- a/readthedocs/search/tests/test_views.py +++ b/readthedocs/search/tests/test_views.py @@ -14,10 +14,13 @@ class TestElasticSearch(object): url = reverse('search') + def _reindex_elasticsearch(self, es_index): + call_command('reindex_elasticsearch') + es_index.refresh_index() + @pytest.fixture(autouse=True) def elastic_index(self, mock_parse_json, all_projects, search): - call_command('reindex_elasticsearch') - search.refresh_index() + self._reindex_elasticsearch(es_index=search) def test_search_by_project_name(self, client, project): resp = client.get(self.url, {'q': project.name}) @@ -100,3 +103,22 @@ def test_file_search_show_versions(self, client, all_projects, search, settings) content_versions.append(slug) assert sorted(project_versions) == sorted(content_versions) + + def test_file_search_subprojects(self, client, all_projects, search): + """File search should return results from subprojects also""" + project = all_projects[0] + subproject = all_projects[1] + project.add_subproject(subproject) + self._reindex_elasticsearch(es_index=search) + # Add another project as subproject of the project + + # Now search with subproject content but explicitly filter by the parent project + data = DUMMY_PAGE_JSON[subproject.slug][1] + title = data['title'] + print(title) + resp = client.get(self.url, {'q': title.split()[0], 'type': 'file', 'project': project.slug}) + assert resp.status_code == 200 + + page = pq(resp.content) + content = page.find('.module-list-wrapper .module-item-title') + 
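# the subproject's page title must surface even though the query was filtered by the parent project slug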
assert title in content.text() diff --git a/readthedocs/search/views.py b/readthedocs/search/views.py index 661febe2b35..7d3a51d5fc2 100644 --- a/readthedocs/search/views.py +++ b/readthedocs/search/views.py @@ -66,7 +66,6 @@ def elastic_search(request): for term in results['facets'][facet_type]['terms']: facets[facet_type][term['term']] = term['count'] - if settings.DEBUG: print(pprint(results)) print(pprint(facets)) From ce78ab9040aed51e7d64df615c506b6258da73a1 Mon Sep 17 00:00:00 2001 From: Safwan Rahman Date: Mon, 4 Jun 2018 00:25:06 +0600 Subject: [PATCH 15/18] adding tests and fixup comments --- conftest.py | 3 + docs/tests.rst | 5 + .../commands/reindex_elasticsearch.py | 2 +- readthedocs/search/tests/conftest.py | 16 ++- .../search/tests/data/kuma/docker.json | 4 +- .../search/tests/data/kuma/documentation.json | 9 +- .../tests/data/pipeline/installation.json | 2 +- .../search/tests/data/pipeline/signals.json | 2 +- readthedocs/search/tests/test_views.py | 114 +++++++++++------- readthedocs/search/tests/utils.py | 14 +++ requirements/testing.txt | 1 - 11 files changed, 112 insertions(+), 60 deletions(-) create mode 100644 readthedocs/search/tests/utils.py diff --git a/conftest.py b/conftest.py index c455660670e..319819bedc2 100644 --- a/conftest.py +++ b/conftest.py @@ -1,3 +1,6 @@ +import logging + + def pytest_addoption(parser): parser.addoption('--including-search', action='store_true', dest="searchtests", default=False, help="enable search tests") diff --git a/docs/tests.rst b/docs/tests.rst index b817205ce24..3902a7bc5a7 100644 --- a/docs/tests.rst +++ b/docs/tests.rst @@ -17,6 +17,11 @@ environments by running:: tox +In order to run all test including the search tests, include `"'--including-search'"` +argument:: + + tox "'--including-search'" + To target a specific environment:: tox -e py27 diff --git a/readthedocs/core/management/commands/reindex_elasticsearch.py b/readthedocs/core/management/commands/reindex_elasticsearch.py index b736a1cd426..d5b9e43ac6e 100644 --- a/readthedocs/core/management/commands/reindex_elasticsearch.py +++ b/readthedocs/core/management/commands/reindex_elasticsearch.py @@ -53,4 +53,4 @@ def handle(self, *args, **options): update_search(version.pk, commit, delete_non_commit_files=False) except Exception: - log.exception('Reindex failed for %s', version) + log.debug('Reindex failed for %s', version) diff --git a/readthedocs/search/tests/conftest.py b/readthedocs/search/tests/conftest.py index fcdc3a42d21..c4f1431340f 100644 --- a/readthedocs/search/tests/conftest.py +++ b/readthedocs/search/tests/conftest.py @@ -1,3 +1,8 @@ +import random +import string +from copy import deepcopy +from random import shuffle + import pytest from django_dynamic_fixture import G from faker import Faker @@ -11,11 +16,12 @@ @pytest.fixture(autouse=True) def mock_elastic_index(mocker): - mocker.patch.object(Index, '_index', fake.word().lower()) + index_name = ''.join([random.choice(string.ascii_letters) for _ in xrange(5)]) + mocker.patch.object(Index, '_index', index_name.lower()) @pytest.fixture(autouse=True) -def search(): +def es_index(mock_elastic_index): # Create the index. 
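# es_index is autouse: every test gets a fresh index under the randomised name patched in by mock_elastic_index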
index = Index() index_name = index.timestamped_index() @@ -35,7 +41,9 @@ def search(): @pytest.fixture def all_projects(): - return [G(Project, slug=project_name, name=project_name) for project_name in ALL_PROJECTS] + projects = [G(Project, slug=project_slug, name=project_slug) for project_slug in ALL_PROJECTS] + shuffle(projects) + return projects @pytest.fixture @@ -50,7 +58,7 @@ def get_dummy_page_json(version, *args, **kwargs): return dummy_page_json.get(project_name) -@pytest.fixture +@pytest.fixture(autouse=True) def mock_parse_json(mocker): # patch the function from `projects.tasks` because it has been point to there diff --git a/readthedocs/search/tests/data/kuma/docker.json b/readthedocs/search/tests/data/kuma/docker.json index 43972c7d945..cc2317c4e41 100644 --- a/readthedocs/search/tests/data/kuma/docker.json +++ b/readthedocs/search/tests/data/kuma/docker.json @@ -1,5 +1,5 @@ { - "content": "Docker\nDocker is used for development and (soon) for deployment.\nDocker Images\nDocker images are used in development, usually with the local working files mounted in the images to set behaviour.\nImages are built by Jenkins, after tests pass, and are published to quay.io. We try to store the configuration in the environment, so that the published images can be used in deployments by setting environment variables to deployment-specific values.\nHere are some of the images used in the Kuma project:\nkuma\nThe kuma Docker image builds on the kuma_base image, installing a kuma branch and building the assets needed for running as a webservice. The environment can be customized for different deployments.\nThe image can be recreated locally with make build-kuma.\nThe image tagged latest is used by default for development. It can be created locally with make build-kuma VERSION=latest. The official latest image is created from the master branch in Jenkins and published to quay.io.\nkuma_base\nThe kuma_base Docker image contains the OS and libraries (C, Python, and Node.js) that support the kuma project. The kuma image extends this by installing the kuma source and building assets needed for production.\nThe image can be recreated locally with make build-base.\nThe image tagged latest is used by default for development. It can be created localled with make build-base VERSION=latest. The official latest image is created from the master branch in Jenkins and published to quay.io\nkumascript\nThe kumascript Docker image contains the kumascript rendering engine and support files. The environment can be customized for different deployments.\nThe image can be recreated locally with make build-kumascript.\nThe image tagged latest is used by default for development. It can be created locally with make build-kumascript KS_VERSION=latest. The official latest image is created from the master branch in Jenkins and published to quay.io.\nintegration-tests\nThe integration-tests Docker image contains browser-based integration tests that check the functionality of a running Kuma deployment.\nThe image can be recreated locally with docker build -f docker/images/integration-tests/ ., but this is only necessary for image development. Most developer will follow the Client-side testing documentation to develop and run these integration tests.\nThe image is built and used in Jenkins in the stage-integration-tests and prod-integration-tests pipelines, configured by scripts in the Jenkinsfiles folder. 
It is not published to quay.io.", + "content": "kuma-Docker Docker is used for development and (soon) for deployment.\nDocker Images\nDocker images are used in development, usually with the local working files mounted in the images to set behaviour.\nImages are built by Jenkins, after tests pass, and are published to quay.io. We try to store the configuration in the environment, so that the published images can be used in deployments by setting environment variables to deployment-specific values.\nHere are some of the images used in the Kuma project:\nkuma\nThe kuma Docker image builds on the kuma_base image, installing a kuma branch and building the assets needed for running as a webservice. The environment can be customized for different deployments.\nThe image can be recreated locally with make build-kuma.\nThe image tagged latest is used by default for development. It can be created locally with make build-kuma VERSION=latest. The official latest image is created from the master branch in Jenkins and published to quay.io.\nkuma_base\nThe kuma_base Docker image contains the OS and libraries (C, Python, and Node.js) that support the kuma project. The kuma image extends this by installing the kuma source and building assets needed for production.\nThe image can be recreated locally with make build-base.\nThe image tagged latest is used by default for development. It can be created localled with make build-base VERSION=latest. The official latest image is created from the master branch in Jenkins and published to quay.io\nkumascript\nThe kumascript Docker image contains the kumascript rendering engine and support files. The environment can be customized for different deployments.\nThe image can be recreated locally with make build-kumascript.\nThe image tagged latest is used by default for development. It can be created locally with make build-kumascript KS_VERSION=latest. The official latest image is created from the master branch in Jenkins and published to quay.io.\nintegration-tests\nThe integration-tests Docker image contains browser-based integration tests that check the functionality of a running Kuma deployment.\nThe image can be recreated locally with docker build -f docker/images/integration-tests/ ., but this is only necessary for image development. Most developer will follow the Client-side testing to develop and run these integration tests.\nThe image is built and used in Jenkins in the stage-integration-tests and prod-integration-tests pipelines, configured by scripts in the Jenkinsfiles folder. It is not published to quay.io.", "headers": [ "Docker", "Docker Images", @@ -16,7 +16,7 @@ "title": "Docker" }, { - "content": "\n

    Docker Images\u00b6

    \n

    Docker images are used in development, usually with the local\nworking files mounted in the images to set behaviour.

    \n

    Images are built by Jenkins, after tests pass, and are\npublished to quay.io. We try to\nstore the configuration in the environment, so that the\npublished images can be used in deployments by setting\nenvironment variables to deployment-specific values.

    \n

    Here are some of the images used in the Kuma project:

    \n
    \n

    kuma\u00b6

    \n

    The kuma Docker image builds on the kuma_base image, installing a kuma branch\nand building the assets needed for running as a webservice. The environment\ncan be customized for different deployments.

    \n

    The image can be recreated locally with make build-kuma.

    \n

    The image tagged latest is used by default for development. It can be\ncreated locally with make build-kuma VERSION=latest. The official latest\nimage is created from the master branch in Jenkins and published to\nquay.io.

    \n
    \n
    \n

    kuma_base\u00b6

    \n

    The kuma_base Docker image contains the OS and libraries (C, Python, and\nNode.js) that support the kuma project. The kuma image extends this by\ninstalling the kuma source and building assets needed for production.

    \n

    The image can be recreated locally with make build-base.

    \n

    The image tagged latest is used by default for development. It can be\ncreated locally with make build-base VERSION=latest. The official\nlatest image is created from the master branch in Jenkins and published to\nquay.io

    \n
    \n
    \n

    kumascript\u00b6

    \n

    The kumascript Docker image contains the kumascript rendering engine and\nsupport files. The environment can be customized for different deployments.

    \n

    The image can be recreated locally with make build-kumascript.

    \n

    The image tagged latest is used by default for development. It can be\ncreated locally with make build-kumascript KS_VERSION=latest. The official\nlatest image is created from the master branch in Jenkins and published to\nquay.io.

    \n
    \n
    \n

    integration-tests\u00b6

    \n

    The integration-tests Docker image contains browser-based integration tests\nthat check the functionality of a running Kuma deployment.

    \n

    The image can be recreated locally with\ndocker build -f docker/images/integration-tests/ ., but this is only\nnecessary for image development. Most developers will follow the\nClient-side testing documentation to develop and run these integration tests.

    \n

    The image is built and used in Jenkins in the stage-integration-tests and\nprod-integration-tests pipelines, configured by scripts in the\nJenkinsfiles folder. It is not published to quay.io.

    \n
    \n", + "content": "\n

    Docker Images\u00b6

    \n

    Docker images are used in development, usually with the local\nworking files mounted in the images to set behaviour.

    \n

    Images are built by Jenkins, after tests pass, and are\npublished to quay.io. We try to\nstore the configuration in the environment, so that the\npublished images can be used in deployments by setting\nenvironment variables to deployment-specific values.

    \n

    Here are some of the images used in the Kuma project:

    \n
    \n

    kuma\u00b6

    \n

    The kuma Docker image builds on the kuma_base image, installing a kuma branch\nand building the assets needed for running as a webservice. The environment\ncan be customized for different deployments.

    \n

    The image can be recreated locally with make build-kuma.

    \n

    The image tagged latest is used by default for development. It can be\ncreated locally with make build-kuma VERSION=latest. The official latest\nimage is created from the master branch in Jenkins and published to\nquay.io.

    \n
    \n
    \n

    kuma_base\u00b6

    \n

    The kuma_base Docker image contains the OS and libraries (C, Python, and\nNode.js) that support the kuma project. The kuma image extends this by\ninstalling the kuma source and building assets needed for production.

    \n

    The image can be recreated locally with make build-base.

    \n

    The image tagged latest is used by default for development. It can be\ncreated locally with make build-base VERSION=latest. The official\nlatest image is created from the master branch in Jenkins and published to\nquay.io

    \n
    \n
    \n

    kumascript\u00b6

    \n

    The kumascript Docker image contains the kumascript rendering engine and\nsupport files. The environment can be customized for different deployments.

    \n

    The image can be recreated locally with make build-kumascript.

    \n

    The image tagged latest is used by default for development. It can be\ncreated locally with make build-kumascript KS_VERSION=latest. The official\nlatest image is created from the master branch in Jenkins and published to\nquay.io.

    \n
    \n
    \n

    integration-tests\u00b6

    \n

    The integration-tests Docker image contains browser-based integration tests\nthat check the functionality of a running Kuma deployment.

    \n

    The image can be recreated locally with\ndocker build -f docker/images/integration-tests/ ., but this is only\nnecessary for image development. Most developers will follow the\nClient-side testing to develop and run these integration tests.

    \n

    The image is built and used in Jenkins in the stage-integration-tests and\nprod-integration-tests pipelines, configured by scripts in the\nJenkinsfiles folder. It is not published to quay.io.

    \n
    \n", "id": "docker-images", "title": "Docker Images" } diff --git a/readthedocs/search/tests/data/kuma/documentation.json b/readthedocs/search/tests/data/kuma/documentation.json index cb9a42e2706..310a01d05c8 100644 --- a/readthedocs/search/tests/data/kuma/documentation.json +++ b/readthedocs/search/tests/data/kuma/documentation.json @@ -1,19 +1,18 @@ { - "content": "Documentation\nThis documentation is generated and published at Read the Docs whenever the master branch is updated.\nGitHub can render our .rst documents as ReStructuredText, which is close enough to Sphinx for most code reviews, without features like links between documents.\nIt is occasionally necessary to generate the documentation locally. It is easiest to do this with a virtualenv on the host system, using Docker only to regenerate the MDN Sphinx template. If you are not comfortable with that style of development, it can be done entirely in Docker using docker-compose.\nGenerating documentation\nSphinx uses a Makefile in the docs subfolder to build documentation in several formats. MDN only uses the HTML format, and the generated document index is at docs/_build/html/index.html.\nTo generate the documentation in a virtualenv on the host machine, first install the requirements:\npip install -r requirements/docs.txt\nThen switch to the docs folder to use the Makefile:\ncd docs make html python -m webbrowser file://${PWD}/_build/html/index.html\nTo generate the documentation with Docker:\ndocker-compose run --rm --user $(id -u) web sh -c \"\\ virtualenv /tmp/.venvs/docs && \\ . /tmp/.venvs/docs/bin/activate && \\ pip install -r /app/requirements/docs.txt && \\ cd /app/docs && \\ make html\" python -m webbrowser file://${PWD}/docs/_build/html/index.html\nA virtualenv is required, to avoid a pip bug when changing the version of a system-installed package.", + "content": "kuma-Documentation This documentation is generated and published at Read the Docs whenever the master branch is updated. GitHub can render our .rst documents as ReStructuredText, which is close enough to Sphinx for most code reviews, without features like links between documents.\nIt is occasionally necessary to generate the documentation locally. It is easiest to do this with a virtualenv on the host system, using only to regenerate the MDN Sphinx template. If you are not comfortable with that style of development, it can be done entirely in using -compose.\nGenerating documentation\nSphinx uses a Makefile in the docs subfolder to build documentation in several formats. MDN only uses the HTML format, and the generated document index is at docs/_build/html/index.html.\nTo generate the documentation in a virtualenv on the host machine, first install the requirements:\npip install -r requirements/docs.txt\nThen switch to the docs folder to use the Makefile:\ncd docs make html python -m webbrowser file://${PWD}/_build/html/index.html\nTo generate the documentation with :\n-compose run --rm --user $(id -u) web sh -c \"\\ virtualenv /tmp/.venvs/docs && \\ . 
/tmp/.venvs/docs/bin/activate && \\ pip install -r /app/requirements/docs.txt && \\ cd /app/docs && \\ make html\" python -m webbrowser file://${PWD}/docs/_build/html/index.html\nA virtualenv is required, to avoid a pip bug when changing the version of a system-installed package.", "headers": [ "Documentation", - "Generating documentation", - "Installation" + "Generating documentation" ], "title": "Documentation", "sections": [ { - "content": "\nThis documentation is generated and published at\nRead the Docs whenever the master branch is updated.\n\nGitHub can render our .rst documents as ReStructuredText, which is\nclose enough to Sphinx for most code reviews, without features like links\nbetween documents.\n\nIt is occasionally necessary to generate the documentation locally. It is\neasiest to do this with a virtualenv on the host system, using Docker only to\nregenerate the MDN Sphinx template. If you are not comfortable with that style\nof development, it can be done entirely in Docker using docker-compose.\n", + "content": "\nThis documentation is generated and published at\nRead the Docs whenever the master branch is updated.\n\nGitHub can render our .rst documents as ReStructuredText, which is\nclose enough to Sphinx for most code reviews, without features like links\nbetween documents.\n\nIt is occasionally necessary to generate the documentation locally. It is\neasiest to do this with a virtualenv on the host system, using only to\nregenerate the MDN Sphinx template. If you are not comfortable with that style\nof development, it can be done entirely in using -compose.\n", "id": "documentation", "title": "Documentation" }, { - "content": "\n

    Generating documentation\u00b6

    \n

    Sphinx uses a Makefile in the docs subfolder to build documentation in\nseveral formats. MDN only uses the HTML format, and the generated document\nindex is at docs/_build/html/index.html.

    \n

    To generate the documentation in a virtualenv on the host machine, first\ninstall the requirements:

    \n
    pip install -r requirements/docs.txt\n
    \n
    \n

    Then switch to the docs folder to use the Makefile:

    \n
    cd docs\nmake html\npython -m webbrowser file://${PWD}/_build/html/index.html\n
    \n
    \n

    To generate the documentation with Docker:

    \n
    docker-compose run --rm --user $(id -u) web sh -c \"\\\n  virtualenv /tmp/.venvs/docs && \\\n  . /tmp/.venvs/docs/bin/activate && \\\n  pip install -r /app/requirements/docs.txt && \\\n  cd /app/docs && \\\n  make html\"\npython -m webbrowser file://${PWD}/docs/_build/html/index.html\n
    \n
    \n

    A virtualenv is required, to avoid a pip bug when changing the version\nof a system-installed package.

    \n", + "content": "\n

    Generating documentation\u00b6

    \n

    Sphinx uses a Makefile in the docs subfolder to build documentation in\nseveral formats. MDN only uses the HTML format, and the generated document\nindex is at docs/_build/html/index.html.

    \n

    To generate the documentation in a virtualenv on the host machine, first\ninstall the requirements:

    \n
    pip install -r requirements/docs.txt\n
    \n
    \n

    Then switch to the docs folder to use the Makefile:

    \n
    cd docs\nmake html\npython -m webbrowser file://${PWD}/_build/html/index.html\n
    \n
    \n

    To generate the documentation with :

    \n
    -compose run --rm --user $(id -u) web sh -c \"\\\n  virtualenv /tmp/.venvs/docs && \\\n  . /tmp/.venvs/docs/bin/activate && \\\n  pip install -r /app/requirements/docs.txt && \\\n  cd /app/docs && \\\n  make html\"\npython -m webbrowser file://${PWD}/docs/_build/html/index.html\n
    \n
    \n

    A virtualenv is required, to avoid a pip bug when changing the version\nof a system-installed package.

    \n", "id": "generating-documentation", "title": "Generating documentation" } diff --git a/readthedocs/search/tests/data/pipeline/installation.json b/readthedocs/search/tests/data/pipeline/installation.json index 3fc9f0b00a9..30fb78d1d78 100644 --- a/readthedocs/search/tests/data/pipeline/installation.json +++ b/readthedocs/search/tests/data/pipeline/installation.json @@ -1,5 +1,5 @@ { - "content": "Installation\nEither check out Pipeline from GitHub or to pull a release off PyPI\npip install django-pipeline\nAdd \u2018pipeline\u2019 to your INSTALLED_APPS\nINSTALLED_APPS = ( 'pipeline', )\nUse a pipeline storage for STATICFILES_STORAGE\nSTATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'\nAdd the PipelineFinder to STATICFILES_FINDERS\nSTATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', 'pipeline.finders.PipelineFinder', )\nNote\nYou need to use Django>=1.7 to be able to use this version of pipeline.\nUpgrading from 1.3\nTo upgrade from pipeline 1.3, you will need to follow these steps:\nUpdate templates to use the new syntax\n{# pipeline<1.4 #} {% load compressed %} {% compressed_js 'group' %} {% compressed_css 'group' %}\n{# pipeline>=1.4 #} {% load pipeline %} {% javascript 'group' %} {% stylesheet 'group' %}\nAdd the PipelineFinder to STATICFILES_FINDERS\nSTATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', 'pipeline.finders.PipelineFinder', )\nUpgrading from 1.5\nTo upgrade from pipeline 1.5, you will need update all your PIPELINE_* settings and move them under the new PIPELINE setting. See Configuration.\nRecommendations\nPipeline\u2019s default CSS and JS compressor is Yuglify. Yuglify wraps UglifyJS and cssmin, applying the default YUI configurations to them. It can be downloaded from: https://github.com/yui/yuglify/.\nIf you do not install yuglify, make sure to disable the compressor in your settings.", + "content": "Pipeline-Installation Either check out Pipeline from GitHub or to pull a release off PyPI\npip install django-pipeline\nAdd \u2018pipeline\u2019 to your INSTALLED_APPS\nINSTALLED_APPS = ( 'pipeline', )\nUse a pipeline storage for STATICFILES_STORAGE\nSTATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'\nAdd the PipelineFinder to STATICFILES_FINDERS\nSTATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', 'pipeline.finders.PipelineFinder', )\nNote\nYou need to use Django>=1.7 to be able to use this version of pipeline.\nUpgrading from 1.3\nTo upgrade from pipeline 1.3, you will need to follow these steps:\nUpdate templates to use the new syntax\n{# pipeline<1.4 #} {% load compressed %} {% compressed_js 'group' %} {% compressed_css 'group' %}\n{# pipeline>=1.4 #} {% load pipeline %} {% javascript 'group' %} {% stylesheet 'group' %}\nAdd the PipelineFinder to STATICFILES_FINDERS\nSTATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', 'pipeline.finders.PipelineFinder', )\nUpgrading from 1.5\nTo upgrade from pipeline 1.5, you will need update all your PIPELINE_* settings and move them under the new PIPELINE setting. See Configuration.\nRecommendations\nPipeline\u2019s default CSS and JS compressor is Yuglify. Yuglify wraps UglifyJS and cssmin, applying the default YUI configurations to them. 
It can be downloaded from: https://github.com/yui/yuglify/.\nIf you do not install yuglify, make sure to disable the compressor in your settings.", "headers": [ "Installation", "Upgrading from 1.3", diff --git a/readthedocs/search/tests/data/pipeline/signals.json b/readthedocs/search/tests/data/pipeline/signals.json index 3bdd02e32d2..3bf3a80537c 100644 --- a/readthedocs/search/tests/data/pipeline/signals.json +++ b/readthedocs/search/tests/data/pipeline/signals.json @@ -1,5 +1,5 @@ { - "content": "Signals\nList of all signals sent by pipeline.\ncss_compressed\npipeline.signals.css_compressed\nWhenever a css package is compressed, this signal is sent after the compression.\nArguments sent with this signal :\nsender:\nThe Packager class that compressed the group.\npackage:\nThe package actually compressed.\njs_compressed\npipeline.signals.js_compressed\nWhenever a js package is compressed, this signal is sent after the compression.\nArguments sent with this signal :\nsender:\nThe Packager class that compressed the group.\npackage:\nThe package actually compressed.", + "content": "pipeline-Signals List of all signals sent by pipeline.\ncss_compressed\npipeline.signals.css_compressed\nWhenever a css package is compressed, this signal is sent after the compression.\nArguments sent with this signal :\nsender:\nThe Packager class that compressed the group.\npackage:\nThe package actually compressed.\njs_compressed\npipeline.signals.js_compressed\nWhenever a js package is compressed, this signal is sent after the compression.\nArguments sent with this signal :\nsender:\nThe Packager class that compressed the group.\npackage:\nThe package actually compressed.", "headers": [ "Signals", "css_compressed", diff --git a/readthedocs/search/tests/test_views.py b/readthedocs/search/tests/test_views.py index ed950b70197..ea9a5931747 100644 --- a/readthedocs/search/tests/test_views.py +++ b/readthedocs/search/tests/test_views.py @@ -6,6 +6,7 @@ from readthedocs.builds.constants import LATEST from readthedocs.builds.models import Version +from readthedocs.search.tests.utils import get_search_query from .dummy_data import DUMMY_PAGE_JSON @@ -18,49 +19,49 @@ def _reindex_elasticsearch(self, es_index): call_command('reindex_elasticsearch') es_index.refresh_index() + def _get_search_result(self, url, client, search_params): + resp = client.get(url, search_params) + assert resp.status_code == 200 + + page = pq(resp.content) + result = page.find('.module-list-wrapper .module-item-title') + return result, page + @pytest.fixture(autouse=True) - def elastic_index(self, mock_parse_json, all_projects, search): - self._reindex_elasticsearch(es_index=search) + def elastic_index(self, mock_parse_json, all_projects, es_index): + self._reindex_elasticsearch(es_index=es_index) def test_search_by_project_name(self, client, project): - resp = client.get(self.url, {'q': project.name}) - assert resp.status_code == 200 + result, _ = self._get_search_result(url=self.url, client=client, + search_params={'q': project.name}) - page = pq(resp.content) - content = page.find('.module-list-wrapper .module-item-title') - assert project.name.encode('utf-8') in content.text().encode('utf-8') + assert project.name.encode('utf-8') in result.text().encode('utf-8') - def test_search_by_file_content(self, client, project): + @pytest.mark.parametrize('data_type', ['content', 'headers', 'title']) + @pytest.mark.parametrize('page_num', [0, 1]) + def test_search_by_file_content(self, client, project, data_type, page_num): + query = 
get_search_query(project_slug=project.slug, page_num=page_num, + data_type=data_type) versions = project.versions.all() # There should be only one version of the project assert len(versions) == 1 + print(query, page_num, data_type) - data = DUMMY_PAGE_JSON[project.slug][0] - # Query with the first word of title - title = data['title'] - query = title.split()[0] - - resp = client.get(self.url, {'q': query, 'type': 'file'}) - assert resp.status_code == 200 - - page = pq(resp.content) - content = page.find('.module-list-wrapper .module-item-title') - assert title in content.text() + result, _ = self._get_search_result(url=self.url, client=client, + search_params={'q': query, 'type': 'file'}) + assert len(result) == 1 def test_file_search_show_projects(self, client): """Test that search result page shows list of projects while searching for files""" - # `Installation` word is present both in `kuma` and `pipeline` files + # `Github` word is present both in `kuma` and `pipeline` files # so search with this phrase - resp = client.get(self.url, {'q': "Installation", 'type': 'file'}) - assert resp.status_code == 200 - - page = pq(resp.content) - content = page.find('.module-list-wrapper .module-item-title') + result, page = self._get_search_result(url=self.url, client=client, + search_params={'q': 'GitHub', 'type': 'file'}) # There should be 2 search result - assert len(content) == 2 + assert len(result) == 2 # there should be 2 projects in the left side column content = page.find('.navigable .project-list') @@ -70,25 +71,51 @@ def test_file_search_show_projects(self, client): # kuma and pipeline should be there assert 'kuma' and 'pipeline' in text + def test_file_search_filter_by_project(self, client): + """Test that search result are filtered according to project""" + + # `Github` word is present both in `kuma` and `pipeline` files + # so search with this phrase but filter through `kuma` project + search_params = {'q': 'GitHub', 'type': 'file', 'project': 'kuma'} + result, page = self._get_search_result(url=self.url, client=client, + search_params=search_params) + + # There should be 1 search result as we have filtered + assert len(result) == 1 + content = page.find('.navigable .project-list') + + # kuma should should be there only + assert 'kuma' in result.text() + assert 'pipeline' not in result.text() + + # But there should be 2 projects in the left side column + # as the query is preset in both projects + content = page.find('.navigable .project-list') + if len(content) != 2: + pytest.xfail("failing because currently project list not show") + else: + assert 'kuma' and 'pipeline' in content.text() + + @pytest.mark.xfail(reason="Versions are not showing correctly! 
Fixme while rewrite!") - def test_file_search_show_versions(self, client, all_projects, search, settings): + def test_file_search_show_versions(self, client, all_projects, es_index, settings): # override the settings to index all versions settings.INDEX_ONLY_LATEST = False project = all_projects[0] # Create some versions of the project versions = [G(Version, project=project) for _ in range(3)] - call_command('reindex_elasticsearch') - search.refresh_index() + self._reindex_elasticsearch(es_index=es_index) - data = DUMMY_PAGE_JSON[project.slug][0] - title = data['title'] - query = title.split()[0] + query = get_search_query(project_slug=project.slug) - resp = client.get(self.url, {'q': query, 'type': 'file'}) - assert resp.status_code == 200 + result, page = self._get_search_result(url=self.url, client=client, + search_params={'q': query, 'type': 'file'}) + + # There should be only one result because by default + # only latest version result should be there + assert len(result) == 1 - page = pq(resp.content) content = page.find('.navigable .version-list') # There should be total 4 versions # one is latest, and other 3 that we created above @@ -104,21 +131,18 @@ def test_file_search_show_versions(self, client, all_projects, search, settings) assert sorted(project_versions) == sorted(content_versions) - def test_file_search_subprojects(self, client, all_projects, search): + def test_file_search_subprojects(self, client, all_projects, es_index): """File search should return results from subprojects also""" project = all_projects[0] subproject = all_projects[1] project.add_subproject(subproject) - self._reindex_elasticsearch(es_index=search) + self._reindex_elasticsearch(es_index=es_index) # Add another project as subproject of the project # Now search with subproject content but explicitly filter by the parent project - data = DUMMY_PAGE_JSON[subproject.slug][1] - title = data['title'] - print(title) - resp = client.get(self.url, {'q': title.split()[0], 'type': 'file', 'project': project.slug}) - assert resp.status_code == 200 + query = get_search_query(project_slug=subproject.slug) + search_params = {'q': query, 'type': 'file', 'project': project.slug} + result, page = self._get_search_result(url=self.url, client=client, + search_params=search_params) - page = pq(resp.content) - content = page.find('.module-list-wrapper .module-item-title') - assert title in content.text() + assert len(result) == 1 diff --git a/readthedocs/search/tests/utils.py b/readthedocs/search/tests/utils.py new file mode 100644 index 00000000000..bd7a1ecd152 --- /dev/null +++ b/readthedocs/search/tests/utils.py @@ -0,0 +1,14 @@ +from readthedocs.search.tests.dummy_data import DUMMY_PAGE_JSON + + +def get_search_query(project_slug, page_num=0, data_type='title'): + all_pages = DUMMY_PAGE_JSON[project_slug] + file_data = all_pages[page_num] + query_data = file_data[data_type] + + if data_type in ['headers']: + # The data is in list. 
slice in order to get the text + query_data = query_data[0] + + query = query_data.split()[0] + return query diff --git a/requirements/testing.txt b/requirements/testing.txt index 46cb7f4c09a..8c871f90c8f 100644 --- a/requirements/testing.txt +++ b/requirements/testing.txt @@ -8,7 +8,6 @@ pytest-xdist==1.22.0 apipkg==1.4 execnet==1.5.0 Mercurial==4.4.2 -Faker==0.8.15 pytest-mock==1.10.0 # local debugging tools From 65d49e9cbe9c85a32e8c0d1ef60bb8909e2f4a0a Mon Sep 17 00:00:00 2001 From: Safwan Rahman Date: Mon, 4 Jun 2018 00:37:25 +0600 Subject: [PATCH 16/18] fixup --- readthedocs/search/tests/conftest.py | 6 +----- readthedocs/search/tests/test_views.py | 1 - 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/readthedocs/search/tests/conftest.py b/readthedocs/search/tests/conftest.py index c4f1431340f..59961f3a7e2 100644 --- a/readthedocs/search/tests/conftest.py +++ b/readthedocs/search/tests/conftest.py @@ -1,22 +1,18 @@ import random import string -from copy import deepcopy from random import shuffle import pytest from django_dynamic_fixture import G -from faker import Faker from readthedocs.projects.models import Project from readthedocs.search.indexes import Index, ProjectIndex, PageIndex, SectionIndex from .dummy_data import DUMMY_PAGE_JSON, ALL_PROJECTS -fake = Faker() - @pytest.fixture(autouse=True) def mock_elastic_index(mocker): - index_name = ''.join([random.choice(string.ascii_letters) for _ in xrange(5)]) + index_name = ''.join([random.choice(string.ascii_letters) for _ in range(5)]) mocker.patch.object(Index, '_index', index_name.lower()) diff --git a/readthedocs/search/tests/test_views.py b/readthedocs/search/tests/test_views.py index ea9a5931747..ec8af63d1c3 100644 --- a/readthedocs/search/tests/test_views.py +++ b/readthedocs/search/tests/test_views.py @@ -7,7 +7,6 @@ from readthedocs.builds.constants import LATEST from readthedocs.builds.models import Version from readthedocs.search.tests.utils import get_search_query -from .dummy_data import DUMMY_PAGE_JSON @pytest.mark.django_db From fcef7464b8b49d68e985c40a5848c056b2d9286a Mon Sep 17 00:00:00 2001 From: Safwan Rahman Date: Tue, 5 Jun 2018 01:48:52 +0600 Subject: [PATCH 17/18] Adding another project and test for languages --- conftest.py | 9 +++- .../commands/reindex_elasticsearch.py | 4 +- readthedocs/search/tests/data/docs/story.json | 32 ++++++++++++++ .../search/tests/data/docs/wiping.json | 15 +++++++ .../search/tests/data/kuma/docker.json | 4 +- readthedocs/search/tests/dummy_data.py | 1 + readthedocs/search/tests/test_views.py | 43 +++++++++++++++---- .../templates/search/elastic_search.html | 4 +- 8 files changed, 97 insertions(+), 15 deletions(-) create mode 100644 readthedocs/search/tests/data/docs/story.json create mode 100644 readthedocs/search/tests/data/docs/wiping.json diff --git a/conftest.py b/conftest.py index 319819bedc2..73474ea6726 100644 --- a/conftest.py +++ b/conftest.py @@ -1,5 +1,7 @@ import logging +import pytest + def pytest_addoption(parser): parser.addoption('--including-search', action='store_true', dest="searchtests", @@ -9,4 +11,9 @@ def pytest_addoption(parser): def pytest_configure(config): if not config.option.searchtests: # Include `not search` to parameters so that search test do not perform - setattr(config.option, 'markexpr', 'not search') \ No newline at end of file + setattr(config.option, 'markexpr', 'not search') + + +@pytest.fixture(autouse=True) +def settings_modification(settings): + settings.CELERY_ALWAYS_EAGER = True diff --git 
a/readthedocs/core/management/commands/reindex_elasticsearch.py b/readthedocs/core/management/commands/reindex_elasticsearch.py index d5b9e43ac6e..f5f8d35b8be 100644 --- a/readthedocs/core/management/commands/reindex_elasticsearch.py +++ b/readthedocs/core/management/commands/reindex_elasticsearch.py @@ -52,5 +52,5 @@ def handle(self, *args, **options): try: update_search(version.pk, commit, delete_non_commit_files=False) - except Exception: - log.debug('Reindex failed for %s', version) + except Exception as e: + log.exception('Reindex failed for %s, %s', version, e) diff --git a/readthedocs/search/tests/data/docs/story.json b/readthedocs/search/tests/data/docs/story.json new file mode 100644 index 00000000000..5b7c0ebda37 --- /dev/null +++ b/readthedocs/search/tests/data/docs/story.json @@ -0,0 +1,32 @@ +{ + "content": "Philosophy\nRead the Docs is Open Source software. We have licensed the code base as MIT, which provides almost no restrictions on the use of the code.\nHowever, as a project there are things that we care about more than others. We built Read the Docs to support documentation in the Open Source community. The code is open for people to contribute to, so that they may build features into https://readthedocs.org that they want. We also believe sharing the code openly is a valuable learning tool, especially for demonsrating how to collaborate and maintain an enormous website.\nOfficial Support\nThe time of the core developers of Read the Docs is limited. We provide official support for the following things:\nLocal development on the Python code base\nUsage of https://readthedocs.org for Open Source projects\nBug fixes in the code base, as it applies to running it on https://readthedocs.org\nUnsupported\nThere are use cases that we don\u2019t support, because it doesn\u2019t further our goal of promoting documentation in the Open Source Community.\nWe do not support:\nSpecific usage of Sphinx and Mkdocs, that don\u2019t affect our hosting\nCustom s of Read the Docs at your company\n of Read the Docs on other platforms\nAny issues outside of the Read the Docs Python Code\nRationale\nRead the Docs was founded to improve documentation in the Open Source Community. We fully recognize and allow the code to be used for internal installs at companies, but we will not spend our time supporting it. Our time is limited, and we want to spend it on the mission that we set out to originally support.\nIf you feel strongly about installing Read the Docs internal to a company, we will happily link to third party resources on this topic. Please open an issue with a proposal if you want to take on this task.", + "headers": [ + "Official Support", + "Unsupported", + "Rationale" + ], + "title": "Philosophy", + "sections": [ + { + "content": "\nRead the Docs is Open Source software.\nWe have licensed the code base as MIT,\nwhich provides almost no restrictions on the use of the code.\n\nHowever,\nas a project there are things that we care about more than others.\nWe built Read the Docs to support documentation in the Open Source community.\nThe code is open for people to contribute to,\nso that they may build features into https://readthedocs.org that they want.\nWe also believe sharing the code openly is a valuable learning tool,\nespecially for demonsrating how to collaborate and maintain an enormous website.\n", + "id": "read-the-docs-open-source-philosophy", + "title": "Read the Docs Open Source Philosophy" + }, + { + "content": "\n

    Official Support\u00b6

    \n

    The time of the core developers of Read the Docs is limited.\nWe provide official support for the following things:

    \n\n", + "id": "official-support", + "title": "Official Support" + }, + { + "content": "\n

    Unsupported\u00b6

    \n

    There are use cases that we don\u2019t support,\nbecause it doesn\u2019t further our goal of promoting documentation in the Open Source Community.

    \n

    We do not support:

    \n
      \n
    • Specific usage of Sphinx and Mkdocs, that don\u2019t affect our hosting
    • \n
    • Custom of Read the Docs at your company
    • \n
    • of Read the Docs on other platforms
    • \n
    • Any issues outside of the Read the Docs Python Code
    • \n
    \n", + "id": "unsupported", + "title": "Unsupported" + }, + { + "content": "\n

    Rationale\u00b6

    \n

    Read the Docs was founded to improve documentation in the Open Source Community.\nWe fully recognize and allow the code to be used for internal installs at companies,\nbut we will not spend our time supporting it.\nOur time is limited,\nand we want to spend it on the mission that we set out to originally support.

    \n

    If you feel strongly about installing Read the Docs internal to a company,\nwe will happily link to third party resources on this topic.\nPlease open an issue with a proposal if you want to take on this task.

    \n", + "id": "rationale", + "title": "Rationale" + } + ], + "path": "open-source-philosophy" +} \ No newline at end of file diff --git a/readthedocs/search/tests/data/docs/wiping.json b/readthedocs/search/tests/data/docs/wiping.json new file mode 100644 index 00000000000..76d22318189 --- /dev/null +++ b/readthedocs/search/tests/data/docs/wiping.json @@ -0,0 +1,15 @@ +{ + "content": "Wiping a Build Environment\nSometimes it happen that your Builds start failing because the build environment where the documentation is created is stale or broken. This could happen for a couple of different reasons like pip not upgrading a package properly or a corrupted cached Python package.\nIn any of these cases (and many others), the solution could be just wiping out the existing build environment files and allow Read the Docs to create a new fresh one.\nFollow these steps to wipe the build environment:\nGo to Versions\nClick on the Edit button of the version you want to wipe on the right side of the page\nGo to the bottom of the page and click the wipe link, next to the \u201cSave\u201d button\nNote\nBy wiping the documentation build environment, all the rst, md, and code files associated with it will be removed but not the documentation already built (HTML and PDF files). Your documentation will still online after wiping the build environment.\nNow you can re-build the version with a fresh build environment!", + "headers": [ + "Wiping a Build Environment" + ], + "title": "Wiping a Build Environment", + "sections": [ + { + "content": "\nSometimes it happen that your Builds start failing because the build\nenvironment where the documentation is created is stale or\nbroken. This could happen for a couple of different reasons like pip\nnot upgrading a package properly or a corrupted cached Python package.\n\nIn any of these cases (and many others), the solution could be just\nwiping out the existing build environment files and allow Read the\nDocs to create a new fresh one.\n\nFollow these steps to wipe the build environment:\n\n\n
  • Go to Versions
  • \n
  • Click on the Edit button of the version you want to wipe on the\nright side of the page
  • \n
  • Go to the bottom of the page and click the wipe link, next to\nthe \u201cSave\u201d button
  • \n\n\n\n

    Note

    \n

    By wiping the documentation build environment, all the rst, md,\nand code files associated with it will be removed but not the\ndocumentation already built (HTML and PDF files). Your\ndocumentation will still be online after wiping the build environment.

    \n\n\nNow you can re-build the version with a fresh build environment!\n", + "id": "wiping-a-build-environment", + "title": "Wiping a Build Environment" + } + ], + "path": "guides/wipe-environment" +} \ No newline at end of file diff --git a/readthedocs/search/tests/data/kuma/docker.json b/readthedocs/search/tests/data/kuma/docker.json index cc2317c4e41..3f86764073a 100644 --- a/readthedocs/search/tests/data/kuma/docker.json +++ b/readthedocs/search/tests/data/kuma/docker.json @@ -1,5 +1,5 @@ { - "content": "kuma-Docker Docker is used for development and (soon) for deployment.\nDocker Images\nDocker images are used in development, usually with the local working files mounted in the images to set behaviour.\nImages are built by Jenkins, after tests pass, and are published to quay.io. We try to store the configuration in the environment, so that the published images can be used in deployments by setting environment variables to deployment-specific values.\nHere are some of the images used in the Kuma project:\nkuma\nThe kuma Docker image builds on the kuma_base image, installing a kuma branch and building the assets needed for running as a webservice. The environment can be customized for different deployments.\nThe image can be recreated locally with make build-kuma.\nThe image tagged latest is used by default for development. It can be created locally with make build-kuma VERSION=latest. The official latest image is created from the master branch in Jenkins and published to quay.io.\nkuma_base\nThe kuma_base Docker image contains the OS and libraries (C, Python, and Node.js) that support the kuma project. The kuma image extends this by installing the kuma source and building assets needed for production.\nThe image can be recreated locally with make build-base.\nThe image tagged latest is used by default for development. It can be created localled with make build-base VERSION=latest. The official latest image is created from the master branch in Jenkins and published to quay.io\nkumascript\nThe kumascript Docker image contains the kumascript rendering engine and support files. The environment can be customized for different deployments.\nThe image can be recreated locally with make build-kumascript.\nThe image tagged latest is used by default for development. It can be created locally with make build-kumascript KS_VERSION=latest. The official latest image is created from the master branch in Jenkins and published to quay.io.\nintegration-tests\nThe integration-tests Docker image contains browser-based integration tests that check the functionality of a running Kuma deployment.\nThe image can be recreated locally with docker build -f docker/images/integration-tests/ ., but this is only necessary for image development. Most developer will follow the Client-side testing to develop and run these integration tests.\nThe image is built and used in Jenkins in the stage-integration-tests and prod-integration-tests pipelines, configured by scripts in the Jenkinsfiles folder. It is not published to quay.io.", + "content": "kuma-Docker Docker is used for development and (soon) for deployment.\nDocker Images\nDocker images are used in development, usually with the local working files mounted in the images to set behaviour.\nImages are built by Jenkins, after tests pass, and are published to quay.io. 
We try to store the configuration in the environment, so that the published images can be used in deployments by setting environment variables to deployment-specific values.\nHere are some of the images used in the Kuma project:\nkuma\nThe kuma Docker image builds on the kuma_base image, installing a kuma branch and building the assets needed for running as a webservice. The environment can be customized for different deployments.\nThe image can be recreated locally with make build-kuma.\nThe image tagged latest is used by default for development. It can be created locally with make build-kuma VERSION=latest. The latest image is created from the master branch in Jenkins and published to quay.io.\nkuma_base\nThe kuma_base Docker image contains the OS and libraries (C, Python, and Node.js) that support the kuma project. The kuma image extends this by installing the kuma source and building assets needed for production.\nThe image can be recreated locally with make build-base.\nThe image tagged latest is used by default for development. It can be created localled with make build-base VERSION=latest. The latest image is created from the master branch in Jenkins and published to quay.io\nkumascript\nThe kumascript Docker image contains the kumascript rendering engine and support files. The environment can be customized for different deployments.\nThe image can be recreated locally with make build-kumascript.\nThe image tagged latest is used by default for development. It can be created locally with make build-kumascript KS_VERSION=latest. The latest image is created from the master branch in Jenkins and published to quay.io.\nintegration-tests\nThe integration-tests Docker image contains browser-based integration tests that check the functionality of a running Kuma deployment.\nThe image can be recreated locally with docker build -f docker/images/integration-tests/ ., but this is only necessary for image development. Most developer will follow the Client-side testing to develop and run these integration tests.\nThe image is built and used in Jenkins in the stage-integration-tests and prod-integration-tests pipelines, configured by scripts in the Jenkinsfiles folder. It is not published to quay.io.", "headers": [ "Docker", "Docker Images", @@ -16,7 +16,7 @@ "title": "Docker" }, { - "content": "\n

    Docker Images\u00b6

    \n

    Docker images are used in development, usually with the local\nworking files mounted in the images to set behaviour.

    \n

    Images are built by Jenkins, after tests pass, and are\npublished to quay.io. We try to\nstore the configuration in the environment, so that the\npublished images can be used in deployments by setting\nenvironment variables to deployment-specific values.

    \n

    Here are some of the images used in the Kuma project:

    \n
    \n

    kuma\u00b6

    \n

    The kuma Docker image builds on the kuma_base image, installing a kuma branch\nand building the assets needed for running as a webservice. The environment\ncan be customized for different deployments.

    \n

    The image can be recreated locally with make build-kuma.

    \n

    The image tagged latest is used by default for development. It can be\ncreated locally with make build-kuma VERSION=latest. The official latest\nimage is created from the master branch in Jenkins and published to\nquay.io.

    \n
    \n
    \n

    kuma_base\u00b6

    \n

    The kuma_base Docker image contains the OS and libraries (C, Python, and\nNode.js) that support the kuma project. The kuma image extends this by\ninstalling the kuma source and building assets needed for production.

    \n

    The image can be recreated locally with make build-base.

    \n

    The image tagged latest is used by default for development. It can be\ncreated locally with make build-base VERSION=latest. The official\nlatest image is created from the master branch in Jenkins and published to\nquay.io

    \n
    \n
    \n

    kumascript\u00b6

    \n

    The kumascript Docker image contains the kumascript rendering engine and\nsupport files. The environment can be customized for different deployments.

    \n

    The image can be recreated locally with make build-kumascript.

    \n

    The image tagged latest is used by default for development. It can be\ncreated locally with make build-kumascript KS_VERSION=latest. The official\nlatest image is created from the master branch in Jenkins and published to\nquay.io.

    \n
    \n
    \n

    integration-tests\u00b6

    \n

    The integration-tests Docker image contains browser-based integration tests\nthat check the functionality of a running Kuma deployment.

    \n

    The image can be recreated locally with\ndocker build -f docker/images/integration-tests/ ., but this is only\nnecessary for image development. Most developers will follow the\nClient-side testing to develop and run these integration tests.

    \n

    The image is built and used in Jenkins in the stage-integration-tests and\nprod-integration-tests pipelines, configured by scripts in the\nJenkinsfiles folder. It is not published to quay.io.

    \n
    \n", + "content": "\n

    Docker Images\u00b6

    \n

    Docker images are used in development, usually with the local\nworking files mounted in the images to set behaviour.

    \n

    Images are built by Jenkins, after tests pass, and are\npublished to quay.io. We try to\nstore the configuration in the environment, so that the\npublished images can be used in deployments by setting\nenvironment variables to deployment-specific values.

    \n

    Here are some of the images used in the Kuma project:

    \n
    \n

    kuma\u00b6

    \n

    The kuma Docker image builds on the kuma_base image, installing a kuma branch\nand building the assets needed for running as a webservice. The environment\ncan be customized for different deployments.

    \n

    The image can be recreated locally with make build-kuma.

    \n

    The image tagged latest is used by default for development. It can be\ncreated locally with make build-kuma VERSION=latest. The latest\nimage is created from the master branch in Jenkins and published to\nquay.io.

    \n
    \n
    \n

    kuma_base\u00b6

    \n

    The kuma_base Docker image contains the OS and libraries (C, Python, and\nNode.js) that support the kuma project. The kuma image extends this by\ninstalling the kuma source and building assets needed for production.

    \n

    The image can be recreated locally with make build-base.

    \n

    The image tagged latest is used by default for development. It can be\ncreated locally with make build-base VERSION=latest. The \nlatest image is created from the master branch in Jenkins and published to\nquay.io

    \n
    \n
    \n

    kumascript\u00b6

    \n

    The kumascript Docker image contains the kumascript rendering engine and\nsupport files. The environment can be customized for different deployments.

    \n

    The image can be recreated locally with make build-kumascript.

    \n

    The image tagged latest is used by default for development. It can be\ncreated locally with make build-kumascript KS_VERSION=latest. The \nlatest image is created from the master branch in Jenkins and published to\nquay.io.

    \n
    \n
    \n

    integration-tests\u00b6

    \n

    The integration-tests Docker image contains browser-based integration tests\nthat check the functionality of a running Kuma deployment.

    \n

    The image can be recreated locally with\ndocker build -f docker/images/integration-tests/ ., but this is only\nnecessary for image development. Most developers will follow the\nClient-side testing to develop and run these integration tests.

    \n

    The image is built and used in Jenkins in the stage-integration-tests and\nprod-integration-tests pipelines, configured by scripts in the\nJenkinsfiles folder. It is not published to quay.io.

    \n
    \n", "id": "docker-images", "title": "Docker Images" } diff --git a/readthedocs/search/tests/dummy_data.py b/readthedocs/search/tests/dummy_data.py index 884ed4c821d..fbd4eed1f11 100644 --- a/readthedocs/search/tests/dummy_data.py +++ b/readthedocs/search/tests/dummy_data.py @@ -4,6 +4,7 @@ _DATA_FILES = { 'pipeline': ['installation.json', 'signals.json'], 'kuma': ['documentation.json', 'docker.json'], + 'docs': ['story.json', 'wiping.json'], } diff --git a/readthedocs/search/tests/test_views.py b/readthedocs/search/tests/test_views.py index ec8af63d1c3..354e40e9939 100644 --- a/readthedocs/search/tests/test_views.py +++ b/readthedocs/search/tests/test_views.py @@ -6,6 +6,7 @@ from readthedocs.builds.constants import LATEST from readthedocs.builds.models import Version +from readthedocs.projects.models import Project from readthedocs.search.tests.utils import get_search_query @@ -36,17 +37,44 @@ def test_search_by_project_name(self, client, project): assert project.name.encode('utf-8') in result.text().encode('utf-8') + def test_search_project_show_languages(self, client, project, es_index): + """Test that searching project should show all available languages""" + # Create a project in bn and add it as a translation + G(Project, language='bn', name=project.name) + self._reindex_elasticsearch(es_index=es_index) + + result, page = self._get_search_result(url=self.url, client=client, + search_params={'q': project.name}) + + content = page.find('.navigable .language-list') + # There should be 2 languages + assert len(content) == 2 + assert 'bn' in content.text() + + def test_search_project_filter_language(self, client, project, es_index): + """Test that searching project filtered according to language""" + # Create a project in bn and add it as a translation + translate = G(Project, language='bn', name=project.name) + self._reindex_elasticsearch(es_index=es_index) + search_params = {'q': project.name, 'language': 'bn'} + + result, page = self._get_search_result(url=self.url, client=client, + search_params=search_params) + + # There should be only 1 result + assert len(result) == 1 + + content = page.find('.navigable .language-list') + # There should be 1 languages + assert len(content) == 1 + assert 'bn' in content.text() + @pytest.mark.parametrize('data_type', ['content', 'headers', 'title']) @pytest.mark.parametrize('page_num', [0, 1]) def test_search_by_file_content(self, client, project, data_type, page_num): query = get_search_query(project_slug=project.slug, page_num=page_num, data_type=data_type) - versions = project.versions.all() - # There should be only one version of the project - assert len(versions) == 1 - print(query, page_num, data_type) - result, _ = self._get_search_result(url=self.url, client=client, search_params={'q': query, 'type': 'file'}) assert len(result) == 1 @@ -88,14 +116,13 @@ def test_file_search_filter_by_project(self, client): assert 'pipeline' not in result.text() # But there should be 2 projects in the left side column - # as the query is preset in both projects + # as the query is present in both projects content = page.find('.navigable .project-list') if len(content) != 2: pytest.xfail("failing because currently project list not show") else: assert 'kuma' and 'pipeline' in content.text() - @pytest.mark.xfail(reason="Versions are not showing correctly! 
Fixme while rewrite!") def test_file_search_show_versions(self, client, all_projects, es_index, settings): # override the settings to index all versions @@ -134,9 +161,9 @@ def test_file_search_subprojects(self, client, all_projects, es_index): """File search should return results from subprojects also""" project = all_projects[0] subproject = all_projects[1] + # Add another project as subproject of the project project.add_subproject(subproject) self._reindex_elasticsearch(es_index=es_index) - # Add another project as subproject of the project # Now search with subproject content but explicitly filter by the parent project query = get_search_query(project_slug=subproject.slug) diff --git a/readthedocs/templates/search/elastic_search.html b/readthedocs/templates/search/elastic_search.html index 8e07fc37553..1a3dd0211e8 100644 --- a/readthedocs/templates/search/elastic_search.html +++ b/readthedocs/templates/search/elastic_search.html @@ -27,7 +27,7 @@ {% if facets.language %}
    Language
    {% for name, count in facets.language.items %} -
  • +
  • {% if language == name %} {{ name }} {% else %} @@ -61,7 +61,7 @@
    Projects
    {% if facets.version %} -
    Version
    +
    Version
    {% for name, count in facets.version.items %}
     {% if version == name %}

From e443a5b68c9f6f1f81ea37ccda1af3ed01f0f246 Mon Sep 17 00:00:00 2001
From: Safwan Rahman
Date: Tue, 5 Jun 2018 07:25:12 +0600
Subject: [PATCH 18/18] fixing up according to review

---
 readthedocs/search/tests/data/docs/story.json  |  8 ++++----
 readthedocs/search/tests/data/docs/wiping.json |  4 ++--
 readthedocs/search/tests/test_views.py         | 12 ++++++------
 readthedocs/search/tests/utils.py              |  6 +++++-
 4 files changed, 17 insertions(+), 13 deletions(-)

diff --git a/readthedocs/search/tests/data/docs/story.json b/readthedocs/search/tests/data/docs/story.json
index 5b7c0ebda37..69226b65209 100644
--- a/readthedocs/search/tests/data/docs/story.json
+++ b/readthedocs/search/tests/data/docs/story.json
@@ -1,5 +1,5 @@
 {
-    "content": "Philosophy\nRead the Docs is Open Source software. We have licensed the code base as MIT, which provides almost no restrictions on the use of the code.\nHowever, as a project there are things that we care about more than others. We built Read the Docs to support documentation in the Open Source community. The code is open for people to contribute to, so that they may build features into https://readthedocs.org that they want. We also believe sharing the code openly is a valuable learning tool, especially for demonsrating how to collaborate and maintain an enormous website.\nOfficial Support\nThe time of the core developers of Read the Docs is limited. We provide official support for the following things:\nLocal development on the Python code base\nUsage of https://readthedocs.org for Open Source projects\nBug fixes in the code base, as it applies to running it on https://readthedocs.org\nUnsupported\nThere are use cases that we don\u2019t support, because it doesn\u2019t further our goal of promoting documentation in the Open Source Community.\nWe do not support:\nSpecific usage of Sphinx and Mkdocs, that don\u2019t affect our hosting\nCustom s of Read the Docs at your company\n of Read the Docs on other platforms\nAny issues outside of the Read the Docs Python Code\nRationale\nRead the Docs was founded to improve documentation in the Open Source Community. We fully recognize and allow the code to be used for internal installs at companies, but we will not spend our time supporting it. Our time is limited, and we want to spend it on the mission that we set out to originally support.\nIf you feel strongly about installing Read the Docs internal to a company, we will happily link to third party resources on this topic. Please open an issue with a proposal if you want to take on this task.",
+    "content": "Philosophy\nRead the Docs is Open Source software. We have licensed the code base as MIT, which provides almost no restrictions on the use of the code.\nHowever, as a project there are things that we care about more than others. We built Read the Docs to support in the Open Source community. The code is open for people to contribute to, so that they may build features into https://readthedocs.org that they want. We also believe sharing the code openly is a valuable learning tool, especially for demonsrating how to collaborate and maintain an enormous website.\nOfficial Support\nThe time of the core developers of Read the Docs is limited. We provide official support for the following things:\nLocal development on the Python code base\nUsage of https://readthedocs.org for Open Source projects\nBug fixes in the code base, as it applies to running it on https://readthedocs.org\nUnsupported\nThere are use cases that we don\u2019t support, because it doesn\u2019t further our goal of promoting in the Open Source Community.\nWe do not support:\nSpecific usage of Sphinx and Mkdocs, that don\u2019t affect our hosting\nCustom s of Read the Docs at your company\n of Read the Docs on other platforms\nAny issues outside of the Read the Docs Python Code\nRationale\nRead the Docs was founded to improve in the Open Source Community. We fully recognize and allow the code to be used for internal installs at companies, but we will not spend our time supporting it. Our time is limited, and we want to spend it on the mission that we set out to originally support.\nIf you feel strongly about installing Read the Docs internal to a company, we will happily link to third party resources on this topic. Please open an issue with a proposal if you want to take on this task.",
     "headers": [
         "Official Support",
         "Unsupported",
@@ -8,7 +8,7 @@
     "title": "Philosophy",
     "sections": [
         {
-            "content": "\nRead the Docs is Open Source software.\nWe have licensed the code base as MIT,\nwhich provides almost no restrictions on the use of the code.\n\nHowever,\nas a project there are things that we care about more than others.\nWe built Read the Docs to support documentation in the Open Source community.\nThe code is open for people to contribute to,\nso that they may build features into https://readthedocs.org that they want.\nWe also believe sharing the code openly is a valuable learning tool,\nespecially for demonsrating how to collaborate and maintain an enormous website.\n",
+            "content": "\nRead the Docs is Open Source software.\nWe have licensed the code base as MIT,\nwhich provides almost no restrictions on the use of the code.\n\nHowever,\nas a project there are things that we care about more than others.\nWe built Read the Docs to support in the Open Source community.\nThe code is open for people to contribute to,\nso that they may build features into https://readthedocs.org that they want.\nWe also believe sharing the code openly is a valuable learning tool,\nespecially for demonsrating how to collaborate and maintain an enormous website.\n",
             "id": "read-the-docs-open-source-philosophy",
             "title": "Read the Docs Open Source Philosophy"
         },
@@ -18,12 +18,12 @@
             "title": "Official Support"
         },
         {
-            "content": "\nUnsupported\u00b6\nThere are use cases that we don\u2019t support,\nbecause it doesn\u2019t further our goal of promoting documentation in the Open Source Community.\nWe do not support:\n\nSpecific usage of Sphinx and Mkdocs, that don\u2019t affect our hosting\nCustom of Read the Docs at your company\n of Read the Docs on other platforms\nAny issues outside of the Read the Docs Python Code\n\n",
+            "content": "\nUnsupported\u00b6\nThere are use cases that we don\u2019t support,\nbecause it doesn\u2019t further our goal of promoting in the Open Source Community.\nWe do not support:\n\nSpecific usage of Sphinx and Mkdocs, that don\u2019t affect our hosting\nCustom of Read the Docs at your company\n of Read the Docs on other platforms\nAny issues outside of the Read the Docs Python Code\n\n",
             "id": "unsupported",
             "title": "Unsupported"
         },
         {
-            "content": "\nRationale\u00b6\nRead the Docs was founded to improve documentation in the Open Source Community.\nWe fully recognize and allow the code to be used for internal installs at companies,\nbut we will not spend our time supporting it.\nOur time is limited,\nand we want to spend it on the mission that we set out to originally support.\nIf you feel strongly about installing Read the Docs internal to a company,\nwe will happily link to third party resources on this topic.\nPlease open an issue with a proposal if you want to take on this task.\n",
+            "content": "\nRationale\u00b6\nRead the Docs was founded to improve in the Open Source Community.\nWe fully recognize and allow the code to be used for internal installs at companies,\nbut we will not spend our time supporting it.\nOur time is limited,\nand we want to spend it on the mission that we set out to originally support.\nIf you feel strongly about installing Read the Docs internal to a company,\nwe will happily link to third party resources on this topic.\nPlease open an issue with a proposal if you want to take on this task.\n",
             "id": "rationale",
             "title": "Rationale"
         }
diff --git a/readthedocs/search/tests/data/docs/wiping.json b/readthedocs/search/tests/data/docs/wiping.json
index 76d22318189..a54889e05fa 100644
--- a/readthedocs/search/tests/data/docs/wiping.json
+++ b/readthedocs/search/tests/data/docs/wiping.json
@@ -1,12 +1,12 @@
 {
-    "content": "Wiping a Build Environment\nSometimes it happen that your Builds start failing because the build environment where the documentation is created is stale or broken. This could happen for a couple of different reasons like pip not upgrading a package properly or a corrupted cached Python package.\nIn any of these cases (and many others), the solution could be just wiping out the existing build environment files and allow Read the Docs to create a new fresh one.\nFollow these steps to wipe the build environment:\nGo to Versions\nClick on the Edit button of the version you want to wipe on the right side of the page\nGo to the bottom of the page and click the wipe link, next to the \u201cSave\u201d button\nNote\nBy wiping the documentation build environment, all the rst, md, and code files associated with it will be removed but not the documentation already built (HTML and PDF files). Your documentation will still online after wiping the build environment.\nNow you can re-build the version with a fresh build environment!",
+    "content": "Wiping a Build Environment\nSometimes it happen that your Builds start failing because the build environment where the is created is stale or broken. This could happen for a couple of different reasons like pip not upgrading a package properly or a corrupted cached Python package.\nIn any of these cases (and many others), the solution could be just wiping out the existing build environment files and allow Read the Docs to create a new fresh one.\nFollow these steps to wipe the build environment:\nGo to Versions\nClick on the Edit button of the version you want to wipe on the right side of the page\nGo to the bottom of the page and click the wipe link, next to the \u201cSave\u201d button\nNote\nBy wiping the build environment, all the rst, md, and code files associated with it will be removed but not the already built (HTML and PDF files). Your will still online after wiping the build environment.\nNow you can re-build the version with a fresh build environment!",
     "headers": [
         "Wiping a Build Environment"
     ],
     "title": "Wiping a Build Environment",
     "sections": [
         {
-            "content": "\nSometimes it happen that your Builds start failing because the build\nenvironment where the documentation is created is stale or\nbroken. This could happen for a couple of different reasons like pip\nnot upgrading a package properly or a corrupted cached Python package.\n\nIn any of these cases (and many others), the solution could be just\nwiping out the existing build environment files and allow Read the\nDocs to create a new fresh one.\n\nFollow these steps to wipe the build environment:\n\n\nGo to Versions\nClick on the Edit button of the version you want to wipe on the\nright side of the page\nGo to the bottom of the page and click the wipe link, next to\nthe \u201cSave\u201d button\n\n\n\nNote\nBy wiping the documentation build environment, all the rst, md,\nand code files associated with it will be removed but not the\ndocumentation already built (HTML and PDF files). Your\ndocumentation will still online after wiping the build environment.\n\n\nNow you can re-build the version with a fresh build environment!\n",
+            "content": "\nSometimes it happen that your Builds start failing because the build\nenvironment where the is created is stale or\nbroken. This could happen for a couple of different reasons like pip\nnot upgrading a package properly or a corrupted cached Python package.\n\nIn any of these cases (and many others), the solution could be just\nwiping out the existing build environment files and allow Read the\nDocs to create a new fresh one.\n\nFollow these steps to wipe the build environment:\n\n\nGo to Versions\nClick on the Edit button of the version you want to wipe on the\nright side of the page\nGo to the bottom of the page and click the wipe link, next to\nthe \u201cSave\u201d button\n\n\n\nNote\nBy wiping the build environment, all the rst, md,\nand code files associated with it will be removed but not the\n already built (HTML and PDF files). Your\n will still online after wiping the build environment.\n\n\nNow you can re-build the version with a fresh build environment!\n",
             "id": "wiping-a-build-environment",
             "title": "Wiping a Build Environment"
         }
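`get_search_query_from_project_file` in utils.py (further down) indexes `DUMMY_PAGE_JSON[project_slug][page_num]`, so `DUMMY_PAGE_JSON` evidently maps each project slug to a list of page documents like the two above. The `dummy_data` module itself is not part of this patch; a hypothetical reconstruction, with all names assumed, could look like:

```python
import json
import os

# Assumed layout: each directory under tests/data/ is a project slug
# ('docs' holds story.json and wiping.json) and every *.json file in it
# is one page document. This is a guess, not the real dummy_data.py.
DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')


def _load_pages(project_dir):
    """Parse every JSON page file in a project's data directory."""
    pages = []
    for file_name in sorted(os.listdir(project_dir)):
        if file_name.endswith('.json'):
            with open(os.path.join(project_dir, file_name)) as f:
                pages.append(json.load(f))
    return pages


DUMMY_PAGE_JSON = {
    slug: _load_pages(os.path.join(DATA_DIR, slug))
    for slug in os.listdir(DATA_DIR)
    if os.path.isdir(os.path.join(DATA_DIR, slug))
}
```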
diff --git a/readthedocs/search/tests/test_views.py b/readthedocs/search/tests/test_views.py
index 354e40e9939..096e67adb03 100644
--- a/readthedocs/search/tests/test_views.py
+++ b/readthedocs/search/tests/test_views.py
@@ -7,7 +7,7 @@
 from readthedocs.builds.constants import LATEST
 from readthedocs.builds.models import Version
 from readthedocs.projects.models import Project
-from readthedocs.search.tests.utils import get_search_query
+from readthedocs.search.tests.utils import get_search_query_from_project_file
 
 
 @pytest.mark.django_db
@@ -72,8 +72,8 @@ def test_search_project_filter_language(self, client, project, es_index):
     @pytest.mark.parametrize('data_type', ['content', 'headers', 'title'])
     @pytest.mark.parametrize('page_num', [0, 1])
     def test_search_by_file_content(self, client, project, data_type, page_num):
-        query = get_search_query(project_slug=project.slug, page_num=page_num,
-                                 data_type=data_type)
+        query = get_search_query_from_project_file(project_slug=project.slug, page_num=page_num,
+                                                   data_type=data_type)
 
         result, _ = self._get_search_result(url=self.url, client=client,
                                             search_params={'q': query, 'type': 'file'})
@@ -119,7 +119,7 @@ def test_file_search_filter_by_project(self, client):
         # as the query is present in both projects
         content = page.find('.navigable .project-list')
         if len(content) != 2:
-            pytest.xfail("failing because currently project list not show")
+            pytest.xfail("failing because currently not all projects are showing in the project list")
         else:
             assert 'kuma' and 'pipeline' in content.text()
 
@@ -133,7 +133,7 @@ def test_file_search_show_versions(self, client, all_projects, es_index, setting
         versions = [G(Version, project=project) for _ in range(3)]
         self._reindex_elasticsearch(es_index=es_index)
 
-        query = get_search_query(project_slug=project.slug)
+        query = get_search_query_from_project_file(project_slug=project.slug)
 
         result, page = self._get_search_result(url=self.url, client=client,
                                                search_params={'q': query, 'type': 'file'})
@@ -166,7 +166,7 @@ def test_file_search_subprojects(self, client, all_projects, es_index):
         self._reindex_elasticsearch(es_index=es_index)
 
         # Now search with subproject content but explicitly filter by the parent project
-        query = get_search_query(project_slug=subproject.slug)
+        query = get_search_query_from_project_file(project_slug=subproject.slug)
         search_params = {'q': query, 'type': 'file', 'project': project.slug}
         result, page = self._get_search_result(url=self.url, client=client,
                                                search_params=search_params)
diff --git a/readthedocs/search/tests/utils.py b/readthedocs/search/tests/utils.py
index bd7a1ecd152..a48ea83dd74 100644
--- a/readthedocs/search/tests/utils.py
+++ b/readthedocs/search/tests/utils.py
@@ -1,7 +1,11 @@
 from readthedocs.search.tests.dummy_data import DUMMY_PAGE_JSON
 
 
-def get_search_query(project_slug, page_num=0, data_type='title'):
+def get_search_query_from_project_file(project_slug, page_num=0, data_type='title'):
+    """Return a search query from the project's page file.
+    The query is generated from the value of ``data_type``.
+    """
+
     all_pages = DUMMY_PAGE_JSON[project_slug]
     file_data = all_pages[page_num]
     query_data = file_data[data_type]
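The hunk ends at `query_data = file_data[data_type]`; the helper's remaining lines fall outside the diff context. Assuming it ultimately returns a query string derived from `query_data`, a call site in the style of the tests above would read like this (test name and assertion are illustrative only):

```python
import pytest
from django.core.urlresolvers import reverse

from readthedocs.search.tests.utils import get_search_query_from_project_file


@pytest.mark.django_db
def test_query_built_from_page_file(client, project):
    # 'title' draws the query from the page's title field; 'content' and
    # 'headers' work the same way, as the parametrized test above shows.
    query = get_search_query_from_project_file(project_slug=project.slug,
                                               data_type='title')
    resp = client.get(reverse('search'), {'q': query, 'type': 'file'})
    assert resp.status_code == 200
```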