From ebb8043a8d2e91f0a4775dc6f15ebeccb00694f0 Mon Sep 17 00:00:00 2001
From: Gang Li
Date: Mon, 29 Nov 2021 22:36:56 +0800
Subject: [PATCH] Refactor: change indexing generation function

The old index generation used a .index file as a cache for each changed
directory to avoid a full scan of the whole s3 bucket. This brought a bunch
of complicated steps, such as recursive folder scanning and reading, plus
reading the local .index files to merge them. The new way uses a function
that scans and retrieves the contents of a single folder, which is a cheap
way to get the contents without a full scan.
---
 charon/pkgs/indexing.py | 323 +++++++++++++-------------------
 charon/pkgs/maven.py | 84 +++++-----
 charon/pkgs/npm.py | 70 ++++++---
 tests/test_maven_index.py | 28 ++--
 tests/test_npm_index.py | 13 +-
 tests/test_pkgs_dryrun.py | 4 +-
 6 files changed, 221 insertions(+), 301 deletions(-)

diff --git a/charon/pkgs/indexing.py b/charon/pkgs/indexing.py index e44ac055..447904f8 100644 --- a/charon/pkgs/indexing.py +++ b/charon/pkgs/indexing.py @@ -13,7 +13,6 @@ See the License for the specific language governing permissions and limitations under the License. """ -from botocore.exceptions import ClientError from charon.config import get_template from charon.storage import S3Client from charon.constants import INDEX_HTML_TEMPLATE @@ -50,225 +49,92 @@ def generate_index_file_content(self) -> str: return template.render(index=self) -def handle_create_index( - top_level: str, uploaded_files: List[str], s3_client: S3Client, bucket: str -): +def generate_indexes( + top_level: str, changed_dirs: List[str], s3_client: S3Client, bucket: str +) -> List[str]: if top_level[-1] != '/': top_level += '/' - repos, updated_indexes, temp_dirs = set(), set(), set() + s3_folders = set() # chopp down every lines, left s3 client key format - for path in uploaded_files: + for path in changed_dirs: path = path.replace(top_level, '') - repos.add(path) - for repo in repos: - repo_index = os.path.join(top_level, os.path.dirname(repo), '.index') - os.makedirs(os.path.dirname(repo_index), exist_ok=True) - with open(repo_index, 'a+', encoding='utf-8') as f: - f.write(os.path.basename(repo) + '\n') - updated_indexes.add(os.path.join(os.path.dirname(repo), '.index')) - - # updated_indexes containes only objects not in s3, record them on disk - for index in updated_indexes: - items = load_s3_index(s3_client, bucket, index) - if items != set(): - with open(os.path.join(top_level, index), 'a+', encoding='utf-8') as f: - _items = set(_.replace('\n', '') for _ in f.readlines()) - for item in items.difference(_items): - f.write(item + '\n') - elif index != '.index': - temp_dirs.add(os.path.dirname(index)) - - # the function will also merge indexes on disk - for temp_dir in temp_dirs: - virtual_folder_create(temp_dir, top_level, s3_client, bucket, updated_indexes) - - updated_indexes = {os.path.join(top_level, _) for _ in updated_indexes} - html_files = index_to_html(updated_indexes, top_level) - return updated_indexes.union(html_files) - - -def handle_delete_index( - top_level: str, deleted_files: List[str], s3_client: S3Client, bucket: str -): - if top_level[-1] != '/': - top_level += '/' - - repos, delete_indexes, updated_indexes, temp_dirs = set(), set(), set(), set() - - for path in deleted_files: - path = path.replace(top_level, '') - repos.add(path) - for repo in repos: - repo_index = os.path.join(top_level, os.path.dirname(repo), '.index') - os.makedirs(os.path.dirname(repo_index), exist_ok=True) - with 
open(repo_index, 'a+', encoding='utf-8') as f: - f.write(os.path.basename(repo) + '\n') - updated_indexes.add(os.path.join(os.path.dirname(repo), '.index')) - - # It's certain the index is not placed locally, load them from s3 - for index in set(updated_indexes): - items = load_s3_index(s3_client, bucket, index) - with open(os.path.join(top_level, index), 'r+', encoding='utf-8') as f: - _items = set(_.replace('\n', '') for _ in f.readlines()) - left_items = items.difference(_items) - if left_items != set(): - # cleans everthing locally - f.seek(0) - f.truncate() - for item in left_items: - f.write(item + '\n') - else: - temp_dirs.add(os.path.dirname(index)) - updated_indexes.remove(index) - delete_indexes.add(index) - - for temp_dir in temp_dirs: - virtual_folder_delete(temp_dir, top_level, s3_client, bucket, - updated_indexes, delete_indexes) - - html_files = set() - if updated_indexes != set(): - updated_indexes = {os.path.join(top_level, _) for _ in updated_indexes} - html_files = index_to_html(updated_indexes, top_level) - if delete_indexes != set(): - for index in set(delete_indexes): - delete_indexes.add(os.path.join(os.path.dirname(index), 'index.html')) - return delete_indexes, updated_indexes.union(html_files) - - -# e.g path: org/apache/httpcomponents/httpclient/4.5.6, updated_indexes contains every local index -def virtual_folder_create( - path: str, base_dir: str, s3_client: S3Client, bucket: str, updated_indexes: Set[str] -): - item = os.path.basename(path) + '/' - dir_index = os.path.join(os.path.dirname(path), '.index') - local_index_file = os.path.join(base_dir, dir_index) - updated_indexes.add(dir_index) - rec_flag = False - - # first load from disk to see if .index file exists that should contain current path - if os.path.exists(local_index_file): - items = load_local_index(local_index_file) - if item in items: - return - else: - # only appends line, no truncate and no overwrite - with open(local_index_file, 'a', encoding='utf-8') as f: - f.write(item + '\n') - else: - # if the .index file does not exist on local, try load it from s3 - items = load_s3_index(s3_client, bucket, dir_index) - # items will be empty if the s3 does not contain this .index file - if items == set(): - with open(local_index_file, 'a+', encoding='utf-8') as f: - f.write(item + '\n') - rec_flag = True - # if we load something from s3, that means the upper folder is present on their .index file - # then write everthing to local disk and our path as well + if path.startswith("/"): + path = path[1:] + if not path.endswith("/"): + path = path + "/" + s3_folders.add(path) + + generated_htmls = [] + s3_folders = sorted(s3_folders, key=FolderLenCompareKey) + for folder_ in s3_folders: + index_html = __generate_index_html( + s3_client, bucket, folder_, top_level + ) + if index_html: + generated_htmls.append(index_html) + + root_index = __generate_index_html( + s3_client, bucket, "/", top_level + ) + if root_index: + generated_htmls.append(root_index) + + return generated_htmls + + +def __generate_index_html( + s3_client: S3Client, + bucket: str, + folder_: str, + top_level: str +) -> str: + contents = s3_client.list_folder_content( + bucket_name=bucket, + folder=folder_ + ) + index = None + if len(contents) == 1 and contents[0].endswith("index.html"): + logger.info("The folder %s only contains index.html, " + "will remove it.", folder_) + if folder_ == "/": + removed_index = os.path.join(top_level, "index.html") else: - with open(local_index_file, 'a+', encoding='utf-8') as f: - for _ in items: - f.write(_ + 
'\n') - if item not in items: - f.write(item + '\n') - # when this is not root '.index' file, pass the upper folder to recursive - # this creates it's upper folder - if rec_flag and dir_index != '.index': - virtual_folder_create(os.path.dirname(path), base_dir, s3_client, bucket, - updated_indexes) - - return - - -def virtual_folder_delete( - path: str, base_dir: str, s3_client: S3Client, bucket: str, - updated_indexes: Set[str], delete_indexes: Set[str] -): - item = os.path.basename(path) + '/' - dir_index = os.path.join(os.path.dirname(path), '.index') - local_index_file = os.path.join(base_dir, dir_index) - updated_indexes.add(dir_index) - rec_flag = False - - if os.path.exists(local_index_file): - with open(local_index_file, 'r+', encoding='utf-8') as f: - items = set(_.replace('\n', '') for _ in f.readlines()) - if items == set(): - return - letf_items = items.difference({item}) - if letf_items == set(): - updated_indexes.remove(dir_index) - delete_indexes.add(dir_index) - rec_flag = True - else: - f.seek(0) - f.truncate() - for i in letf_items: - f.write(i + '\n') - else: - items = load_s3_index(s3_client, bucket, dir_index) - with open(local_index_file, 'w+', encoding='utf-8') as f: - letf_items = items.difference({item}) - if letf_items == set(): - updated_indexes.remove(dir_index) - delete_indexes.add(dir_index) - rec_flag = True - else: - for i in letf_items: - f.write(i + '\n') - - if rec_flag and dir_index != '.index': - virtual_folder_delete(os.path.dirname(path), base_dir, s3_client, bucket, - updated_indexes, delete_indexes) - - return - - -def index_to_html(items_files: Set[str], base_dir: str): - html_files = [] - for file in items_files: - with open(file, 'r', encoding='utf-8') as f: - items = set(_.replace('\n', '') for _ in f.readlines()) - if file != os.path.join(base_dir, '.index'): - path = os.path.dirname(file).replace(base_dir, '') - html_location = os.path.join(os.path.dirname(file), 'index.html') - items.add('../') - else: - path = '/' - html_location = os.path.join(base_dir, 'index.html') - items = sort_index_items(items) - html_files.append(html_location) - index = IndexedHTML(title=path, header=path, items=items) - with open(os.path.join(base_dir, html_location), 'w', encoding='utf-8') as index_html_file: - index_html_file.write(index.generate_index_file_content()) - return html_files - - -def load_s3_index(s3_client: S3Client, bucket: str, path: str) -> Set[str]: - try: - content = s3_client.read_file_content(bucket_name=bucket, key=path) - except ClientError as ex: - if ex.response['Error']['Code'] == 'NoSuchKey': - return set() - else: - raise - - stored_items = set(content.split('\n')[:-1]) - return stored_items - - -def load_local_index(local_index_file: str) -> Set[str]: - if os.path.exists(local_index_file): - with open(local_index_file, 'r', encoding='utf-8') as f: - items = set(_.replace('\n', '') for _ in f.readlines()) - return items + removed_index = os.path.join(top_level, folder_, "index.html") + s3_client.delete_files( + file_paths=[removed_index], + bucket_name=bucket, + product=None, + root=top_level + ) + elif len(contents) >= 1: + index = __to_html(contents, folder_, top_level) + + return index + + +def __to_html(contents: List[str], folder: str, top_level: str) -> str: + items = [] + if folder != "/": + items.append("../") + for c in contents: + if not c.endswith("index.html"): + items.append(c[len(folder):]) else: - return set() - - -def sort_index_items(items): + items.extend(contents) + items = __sort_index_items(items) + index = 
IndexedHTML(title=folder, header=folder, items=items) + html_path = os.path.join(top_level, folder, "index.html") + if folder == "/": + html_path = os.path.join(top_level, "index.html") + os.makedirs(os.path.dirname(html_path), exist_ok=True) + with open(html_path, 'w', encoding='utf-8') as html: + html.write(index.generate_index_file_content()) + return html_path + + +def __sort_index_items(items): sorted_items = sorted(items) # make sure metadata is the last element if 'maven-metadata.xml' in sorted_items: @@ -279,3 +145,36 @@ def sort_index_items(items): sorted_items.append('package.json') return sorted_items + + +class FolderLenCompareKey: + """Used as key function for folder sorting, will give DESC order + based on the length of the parts splitted by slash of the folder + path + """ + + def __init__(self, obj): + self.obj = obj + + def __lt__(self, other): + return self.__compare(other) < 0 + + def __gt__(self, other): + return self.__compare(other) > 0 + + def __le__(self, other): + return self.__compare(other) <= 0 + + def __ge__(self, other): + return self.__compare(other) >= 0 + + def __eq__(self, other): + return self.__compare(other) == 0 + + def __hash__(self) -> int: + return self.obj.__hash__() + + def __compare(self, other) -> int: + xitems = self.obj.split("/") + yitems = other.obj.split("/") + return len(yitems) - len(xitems) diff --git a/charon/pkgs/maven.py b/charon/pkgs/maven.py index 3ca0a3bf..f1c04b6c 100644 --- a/charon/pkgs/maven.py +++ b/charon/pkgs/maven.py @@ -205,7 +205,8 @@ def handle_maven_uploading( # and also collect poms for later metadata generation (top_level, valid_mvn_paths, - valid_poms) = _scan_paths(tmp_root, ignore_patterns, root) + valid_poms, + valid_dirs) = _scan_paths(tmp_root, ignore_patterns, root) # This prefix is a subdir under top-level directory in tarball # or root before real GAV dir structure @@ -224,11 +225,9 @@ def handle_maven_uploading( logger.info("Start uploading files to s3") s3_client = S3Client(dry_run=dry_run) bucket = bucket_name if bucket_name else AWS_DEFAULT_BUCKET - uploaded_files = [] - _uploaded_files, failed_files = s3_client.upload_files( + (_, failed_files) = s3_client.upload_files( file_paths=valid_mvn_paths, bucket_name=bucket, product=prod_key, root=top_level ) - uploaded_files.extend(_uploaded_files) logger.info("Files uploading done\n") # 5. Use uploaded poms to scan s3 for metadata refreshment @@ -240,28 +239,27 @@ def handle_maven_uploading( # 6. 
Upload all maven-metadata.xml if META_FILE_GEN_KEY in meta_files: logger.info("Start updating maven-metadata.xml to s3") - _uploaded_files, _failed_metas = s3_client.upload_metadatas( + (_, _failed_metas) = s3_client.upload_metadatas( meta_file_paths=meta_files[META_FILE_GEN_KEY], bucket_name=bucket, product=prod_key, root=top_level ) failed_metas.extend(_failed_metas) - uploaded_files.extend(_uploaded_files) logger.info("maven-metadata.xml updating done\n") # this step generates index.html for each dir and add them to file list # index is similar to metadata, it will be overwritten everytime if do_index: logger.info("Start generating index files to s3") - index_files = uploaded_files - if META_FILE_GEN_KEY in meta_files: - index_files = index_files + meta_files[META_FILE_GEN_KEY] - created_files = indexing.handle_create_index(top_level, index_files, s3_client, bucket) + created_indexes = indexing.generate_indexes(top_level, valid_dirs, s3_client, bucket) logger.info("Index files generation done.\n") + logger.info("Start updating index files to s3") - _uploaded_files, _failed_metas = s3_client.upload_metadatas( - meta_file_paths=created_files, bucket_name=bucket, product=None, root=top_level + (_, _failed_metas) = s3_client.upload_metadatas( + meta_file_paths=created_indexes, + bucket_name=bucket, + product=None, root=top_level ) failed_metas.extend(_failed_metas) logger.info("Index files updating done\n") @@ -301,7 +299,8 @@ def handle_maven_del( # and also collect poms for later metadata generation (top_level, valid_mvn_paths, - valid_poms) = _scan_paths(tmp_root, ignore_patterns, root) + valid_poms, + valid_dirs) = _scan_paths(tmp_root, ignore_patterns, root) # 3. Parse GA from valid_poms for later maven metadata refreshing logger.info("Start generating maven-metadata.xml files for all artifacts") @@ -317,7 +316,7 @@ def handle_maven_del( logger.info("Start deleting files from s3") s3_client = S3Client(dry_run=dry_run) bucket = bucket_name if bucket_name else AWS_DEFAULT_BUCKET - deleted_files, failed_files = s3_client.delete_files( + (_, failed_files) = s3_client.delete_files( valid_mvn_paths, bucket_name=bucket, product=prod_key, @@ -336,45 +335,33 @@ def handle_maven_del( all_meta_files = [] for _, files in meta_files.items(): all_meta_files.extend(files) - (deleted_metas, _) = s3_client.delete_files( + s3_client.delete_files( file_paths=all_meta_files, bucket_name=bucket, product=prod_key, root=top_level ) - deleted_files += deleted_metas failed_metas = meta_files.get(META_FILE_FAILED, []) if META_FILE_GEN_KEY in meta_files: - _uploaded_files, _failed_metas = s3_client.upload_metadatas( + (_, _failed_metas) = s3_client.upload_metadatas( meta_file_paths=meta_files[META_FILE_GEN_KEY], bucket_name=bucket, product=None, root=top_level ) failed_metas.extend(_failed_metas) - for m_file in _uploaded_files: - if m_file.replace(top_level, '') in deleted_files: - deleted_files.remove(m_file.replace(top_level, '')) - elif m_file.replace(top_level + '/', '') in deleted_files: - deleted_files.remove(m_file.replace(top_level + '/', '')) logger.info("maven-metadata.xml updating done\n") if do_index: logger.info("Start generating index files for all changed entries") - delete_index, update_index = indexing.handle_delete_index( - top_level, deleted_files, s3_client, bucket) + created_indexes = indexing.generate_indexes(top_level, valid_dirs, s3_client, bucket) logger.info("Index files generation done.\n") logger.info("Start updating index to s3") - if update_index != []: - _, _failed_metas = 
s3_client.upload_metadatas( - meta_file_paths=update_index, - bucket_name=bucket, - product=None, - root=top_level - ) - failed_metas.extend(_failed_metas) - - s3_client.delete_files( - file_paths=delete_index, bucket_name=bucket, product=None, root=top_level + (_, _failed_index_files) = s3_client.upload_metadatas( + meta_file_paths=created_indexes, + bucket_name=bucket, + product=None, + root=top_level ) + failed_metas.extend(_failed_index_files) logger.info("Index files updating done.\n") else: logger.info("Bypassing indexing") @@ -398,23 +385,24 @@ def _extract_tarball(repo: str, prefix="", dir__=None) -> str: def _scan_paths(files_root: str, ignore_patterns: List[str], - root: str) -> Tuple[str, List, List]: + root: str) -> Tuple[str, List[str], List[str], List[str]]: # 2. scan for paths and filter out the ignored paths, # and also collect poms for later metadata generation logger.info("Scan %s to collect files", files_root) top_level = root - valid_mvn_paths, non_mvn_paths, ignored_paths, valid_poms = [], [], [], [] + valid_mvn_paths, non_mvn_paths, ignored_paths, valid_poms, valid_dirs = [], [], [], [], [] + changed_dirs = set() top_found = False for root_dir, dirs, names in os.walk(files_root): for directory in dirs: - if directory == top_level: - top_level = os.path.join(root_dir, directory) - top_found = True - break - if os.path.join(root_dir, directory) == os.path.join(files_root, top_level): - top_level = os.path.join(files_root, top_level) - top_found = True - break + changed_dirs.add(os.path.join(root_dir, directory)) + if not top_found: + if directory == top_level: + top_level = os.path.join(root_dir, directory) + top_found = True + if os.path.join(root_dir, directory) == os.path.join(files_root, top_level): + top_level = os.path.join(files_root, top_level) + top_found = True for name in names: path = os.path.join(root_dir, name) @@ -441,6 +429,10 @@ def _scan_paths(files_root: str, ignore_patterns: List[str], top_level ) top_level = files_root + else: + for c in changed_dirs: + if c.startswith(top_level): + valid_dirs.append(c) logger.info("Files scanning done.\n") if ignore_patterns and len(ignore_patterns) > 0: @@ -449,7 +441,7 @@ def _scan_paths(files_root: str, ignore_patterns: List[str], ignore_patterns, "\n".join(ignored_paths) ) - return (top_level, valid_mvn_paths, valid_poms) + return (top_level, valid_mvn_paths, valid_poms, valid_dirs) def _generate_metadatas( diff --git a/charon/pkgs/npm.py b/charon/pkgs/npm.py index 5d12f7ee..e837d8e5 100644 --- a/charon/pkgs/npm.py +++ b/charon/pkgs/npm.py @@ -19,7 +19,7 @@ from json import load, loads, dump, JSONDecodeError import tarfile from tempfile import mkdtemp -from typing import Tuple +from typing import Set, Tuple from semantic_version import compare @@ -83,6 +83,8 @@ def handle_npm_uploading( logger.error("Error: the extracted target_dir path %s does not exist.", target_dir) sys.exit(1) + valid_dirs = __get_path_tree(valid_paths, target_dir) + logger.info("Start uploading files to s3") client = S3Client(dry_run=dry_run) bucket = bucket_name if bucket_name else AWS_DEFAULT_BUCKET @@ -114,16 +116,18 @@ def handle_npm_uploading( # this step generates index.html for each dir and add them to file list # index is similar to metadata, it will be overwritten everytime if do_index: - index_files = uploaded_files - if META_FILE_GEN_KEY in meta_files: - index_files += [meta_files[META_FILE_GEN_KEY]] - created_files = indexing.handle_create_index(target_dir, index_files, client, bucket) - logger.info("Start uploading index files 
to s3") - _, _failed_metas = client.upload_metadatas( - meta_file_paths=created_files, bucket_name=bucket, product=None, root=target_dir + logger.info("Start generating index files to s3") + created_indexes = indexing.generate_indexes(target_dir, valid_dirs, client, bucket) + logger.info("Index files generation done.\n") + + logger.info("Start updating index files to s3") + (_, _failed_metas) = client.upload_metadatas( + meta_file_paths=created_indexes, + bucket_name=bucket, + product=None, root=target_dir ) failed_metas.extend(_failed_metas) - logger.info("Index files uploading done\n") + logger.info("Index files updating done\n") else: logger.info("Bypass indexing\n") @@ -147,6 +151,8 @@ def handle_npm_del( tarball_path, prefix=product, dir__=dir_ ) + valid_dirs = __get_path_tree(valid_paths, target_dir) + logger.info("Start deleting files from s3") client = S3Client(dry_run=dry_run) bucket = bucket_name if bucket_name else AWS_DEFAULT_BUCKET @@ -184,23 +190,19 @@ def handle_npm_del( logger.info("package.json uploading done") if do_index: - logger.info("Start uploading index to s3") - delete_index, update_index = indexing.handle_delete_index( - target_dir, deleted_files, client, bucket) - - if update_index != []: - _, _failed_metas = client.upload_metadatas( - meta_file_paths=update_index, - bucket_name=bucket, - product=None, - root=target_dir - ) - failed_metas.extend(_failed_metas) - - client.delete_files( - file_paths=delete_index, bucket_name=bucket, product=None, root=target_dir + logger.info("Start generating index files for all changed entries") + created_indexes = indexing.generate_indexes(target_dir, valid_dirs, client, bucket) + logger.info("Index files generation done.\n") + + logger.info("Start updating index to s3") + (_, _failed_index_files) = client.upload_metadatas( + meta_file_paths=created_indexes, + bucket_name=bucket, + product=None, + root=target_dir ) - logger.info("index uploading done") + failed_metas.extend(_failed_index_files) + logger.info("Index files updating done.\n") else: logger.info("Bypassing indexing\n") @@ -423,3 +425,21 @@ def _del_none(d): elif isinstance(value, dict): _del_none(value) return d + + +def __get_path_tree(paths: str, prefix: str) -> Set[str]: + valid_dirs = set() + for f in paths: + dir_ = os.path.dirname(f) + if dir_.startswith(prefix): + dir_ = dir_[len(prefix):] + if dir_.startswith("/"): + dir_ = dir_[1:] + temp = "" + for d in dir_.split("/"): + temp = os.path.join(temp, d) + if f.startswith(prefix): + valid_dirs.add(os.path.join(prefix, temp)) + else: + valid_dirs.add(temp) + return valid_dirs diff --git a/tests/test_maven_index.py b/tests/test_maven_index.py index 67e4fa75..8e021a79 100644 --- a/tests/test_maven_index.py +++ b/tests/test_maven_index.py @@ -77,14 +77,15 @@ def test_uploading_index(self): test_zip = os.path.join(os.getcwd(), "tests/input/commons-client-4.5.6.zip") product = "commons-client-4.5.6" handle_maven_uploading( - test_zip, product, bucket_name=TEST_BUCKET, dir_=self.tempdir + test_zip, product, + bucket_name=TEST_BUCKET, + dir_=self.tempdir ) test_bucket = self.mock_s3.Bucket(TEST_BUCKET) objs = list(test_bucket.objects.all()) - self.assertEqual(30, len(objs)) - actual_files = [obj.key for obj in objs] + self.assertEqual(21, len(actual_files)) for f in COMMONS_LOGGING_INDEXES: self.assertIn(f, actual_files) @@ -125,12 +126,14 @@ def test_overlap_upload_index(self): test_zip = os.path.join(os.getcwd(), "tests/input/commons-client-4.5.9.zip") product_459 = "commons-client-4.5.9" handle_maven_uploading( - 
test_zip, product_459, bucket_name=TEST_BUCKET, dir_=self.tempdir + test_zip, product_459, + bucket_name=TEST_BUCKET, + dir_=self.tempdir ) test_bucket = self.mock_s3.Bucket(TEST_BUCKET) objs = list(test_bucket.objects.all()) - self.assertEqual(36, len(objs)) + self.assertEqual(26, len(objs)) indedx_obj = test_bucket.Object(COMMONS_CLIENT_INDEX) index_content = str(indedx_obj.get()["Body"].read(), "utf-8") @@ -165,14 +168,15 @@ def test_deletion_index(self): test_zip = os.path.join(os.getcwd(), "tests/input/commons-client-4.5.6.zip") product_456 = "commons-client-4.5.6" handle_maven_del( - test_zip, product_456, bucket_name=TEST_BUCKET, dir_=self.tempdir + test_zip, product_456, + bucket_name=TEST_BUCKET, + dir_=self.tempdir ) test_bucket = self.mock_s3.Bucket(TEST_BUCKET) objs = list(test_bucket.objects.all()) - self.assertEqual(30, len(objs)) - actual_files = [obj.key for obj in objs] + self.assertEqual(21, len(actual_files)) for assert_file in COMMONS_CLIENT_459_INDEXES: self.assertIn(assert_file, actual_files) @@ -218,11 +222,15 @@ def __prepare_content(self): test_zip = os.path.join(os.getcwd(), "tests/input/commons-client-4.5.6.zip") product_456 = "commons-client-4.5.6" handle_maven_uploading( - test_zip, product_456, bucket_name=TEST_BUCKET, dir_=self.tempdir + test_zip, product_456, + bucket_name=TEST_BUCKET, + dir_=self.tempdir ) test_zip = os.path.join(os.getcwd(), "tests/input/commons-client-4.5.9.zip") product_459 = "commons-client-4.5.9" handle_maven_uploading( - test_zip, product_459, bucket_name=TEST_BUCKET, dir_=self.tempdir + test_zip, product_459, + bucket_name=TEST_BUCKET, + dir_=self.tempdir ) diff --git a/tests/test_npm_index.py b/tests/test_npm_index.py index 8d3fc8ad..7b77b57b 100644 --- a/tests/test_npm_index.py +++ b/tests/test_npm_index.py @@ -72,7 +72,8 @@ def test_uploading_index(self): test_bucket = self.mock_s3.Bucket(TEST_BUCKET) objs = list(test_bucket.objects.all()) - self.assertEqual(13, len(objs)) + actual_files = [obj.key for obj in objs] + self.assertEqual(8, len(actual_files)) actual_files = [obj.key for obj in objs] @@ -99,9 +100,8 @@ def test_overlap_upload_index(self): test_bucket = self.mock_s3.Bucket(TEST_BUCKET) objs = list(test_bucket.objects.all()) - self.assertEqual(17, len(objs)) - actual_files = [obj.key for obj in objs] + self.assertEqual(11, len(objs)) for assert_file in CODE_FRAME_7_14_5_INDEXES: self.assertIn(assert_file, actual_files) @@ -122,14 +122,15 @@ def test_deletion_index(self): test_tgz = os.path.join(os.getcwd(), "tests/input/code-frame-7.14.5.tgz") product_7_14_5 = "code-frame-7.14.5" handle_npm_del( - test_tgz, product_7_14_5, bucket_name=TEST_BUCKET, dir_=self.tempdir + test_tgz, product_7_14_5, + bucket_name=TEST_BUCKET, + dir_=self.tempdir ) test_bucket = self.mock_s3.Bucket(TEST_BUCKET) objs = list(test_bucket.objects.all()) - self.assertEqual(13, len(objs)) - actual_files = [obj.key for obj in objs] + self.assertEqual(8, len(objs)) for assert_file in CODE_FRAME_7_15_8_INDEXES: self.assertIn(assert_file, actual_files) diff --git a/tests/test_pkgs_dryrun.py b/tests/test_pkgs_dryrun.py index be21a552..11a978b3 100644 --- a/tests/test_pkgs_dryrun.py +++ b/tests/test_pkgs_dryrun.py @@ -56,7 +56,7 @@ def test_maven_delete_dry_run(self): test_bucket = self.mock_s3.Bucket(TEST_BUCKET) objs = list(test_bucket.objects.all()) - self.assertEqual(36, len(objs)) + self.assertEqual(26, len(objs)) def test_npm_upload_dry_run(self): test_tgz = os.path.join(os.getcwd(), "tests/input/code-frame-7.14.5.tgz") @@ -86,7 +86,7 @@ def 
test_npm_deletion_dry_run(self): test_bucket = self.mock_s3.Bucket(TEST_BUCKET) objs = list(test_bucket.objects.all()) - self.assertEqual(17, len(objs)) + self.assertEqual(11, len(objs)) def __prepare_maven_content(self): test_zip = os.path.join(os.getcwd(), "tests/input/commons-client-4.5.6.zip")
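
As an illustration of the listing-based approach described in the commit message, the sketch below renders an index page for one folder from a single S3 prefix listing. It is only a rough sketch, not the charon implementation: the boto3 list_objects_v2 call with Delimiter="/" stands in for whatever S3Client.list_folder_content does internally, pagination is ignored, and the HTML is a plain list instead of the project's INDEX_HTML_TEMPLATE.

import boto3

def list_folder_content(bucket: str, folder: str) -> list:
    # One delimiter-based listing returns only the direct children of the
    # prefix: objects come back in 'Contents', sub-folders in
    # 'CommonPrefixes'. Pagination (more than 1000 keys) is ignored here.
    client = boto3.client("s3")
    prefix = "" if folder == "/" else folder
    resp = client.list_objects_v2(Bucket=bucket, Prefix=prefix, Delimiter="/")
    files = [c["Key"] for c in resp.get("Contents", [])]
    subdirs = [p["Prefix"] for p in resp.get("CommonPrefixes", [])]
    return files + subdirs

def render_index(folder: str, contents: list) -> str:
    # Roughly what the patch's __to_html does: strip the folder prefix from
    # each key, skip any existing index.html, and add "../" for non-root
    # folders, then render a plain list (the real code uses a Jinja2 template).
    prefix_len = 0 if folder == "/" else len(folder)
    items = [] if folder == "/" else ["../"]
    items += [c[prefix_len:] for c in contents if not c.endswith("index.html")]
    rows = "\n".join('<li><a href="{0}">{0}</a></li>'.format(i) for i in sorted(items))
    return "<html><body><ul>\n" + rows + "\n</ul></body></html>"

Calling a pair of helpers like these once per changed folder is the essence of generate_indexes: nothing ever walks the whole bucket, and no .index cache file is needed.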
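
A small usage example of the FolderLenCompareKey sort key added in indexing.py (the folder names below are made up). Splitting on "/" and comparing the part counts in reverse puts the deepest folders first in the loop inside generate_indexes:

from charon.pkgs.indexing import FolderLenCompareKey

folders = [
    "org/",
    "org/apache/httpcomponents/httpclient/4.5.6/",
    "org/apache/",
    "org/apache/httpcomponents/",
]
print(sorted(folders, key=FolderLenCompareKey))
# ['org/apache/httpcomponents/httpclient/4.5.6/',
#  'org/apache/httpcomponents/',
#  'org/apache/',
#  'org/']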
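
The npm side derives the changed directories from the uploaded file paths rather than from os.walk; the helper below is a simplified stand-in for the __get_path_tree function added in npm.py, and the package path in the example is made up.

import os

def path_tree(paths, prefix):
    # Collect every ancestor directory of each file path relative to prefix,
    # then re-anchor it under prefix; these are the folders whose index.html
    # needs to be regenerated.
    dirs = set()
    for f in paths:
        rel = os.path.dirname(f)[len(prefix):].strip("/")
        if not rel:
            continue
        cur = prefix
        for part in rel.split("/"):
            cur = os.path.join(cur, part)
            dirs.add(cur)
    return dirs

print(sorted(path_tree(
    ["/tmp/repo/@babel/code-frame/7.14.5/package.json"], "/tmp/repo")))
# ['/tmp/repo/@babel', '/tmp/repo/@babel/code-frame',
#  '/tmp/repo/@babel/code-frame/7.14.5']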