diff --git a/README.md b/README.md
new file mode 100644
index 0000000..4300f95
--- /dev/null
+++ b/README.md
@@ -0,0 +1,28 @@
+# Kaggle SIIM-ACR Pneumothorax Segmentation
+## 8th Place
+
+## Hardware
+Ubuntu 16.04 LTS
+64 GB RAM / 2 TB HDD
+1x NVIDIA Tesla V100 32GB
+1x NVIDIA Titan V 12GB
+
+## Software
+Python 3.7.4
+CUDA 10.0
+cuDNN 7.6
+PyTorch 1.1
+
+## Model checkpoints
+Download from here: 
+
+Models should be unzipped into `./segment/checkpoints/` in order to run the code as is. There should be 3 folders:
+```
+./segment/checkpoints/TRAIN_V100/
+./segment/checkpoints/TRAIN_SEGMENT/
+./segment/checkpoints/TRAIN_DEEPLABXY/
+```
+
+See `entry_points.md` for instructions on reproducing the results. Relative filepaths and directories are used throughout, so no paths should need to be edited.
+
+Note that the `TRAIN_V100` and `TRAIN_DEEPLABXY` models require V100 32GB GPUs to train with the current configurations. If you wish to train these models on a lower-capacity GPU, I suggest using the following flag options: `--grad-accum 8 --batch-size 2` or `--grad-accum 16 --batch-size 1`. Model performance is not guaranteed to be the same with these modifications.
diff --git a/directory_structure.txt b/directory_structure.txt
new file mode 100644
index 0000000..3cdd329
--- /dev/null
+++ b/directory_structure.txt
@@ -0,0 +1,9 @@
+.
+./submit
+./etl
+./segment
+./segment/loss
+./segment/utils
+./segment/scripts
+./segment/model
+./segment/data
diff --git a/entry_points.md b/entry_points.md
new file mode 100644
index 0000000..1b127af
--- /dev/null
+++ b/entry_points.md
@@ -0,0 +1,62 @@
+## Setup environment
+
+```
+conda create -n siim-ptx python=3.7 pip
+conda activate siim-ptx
+
+conda install pytorch=1.1 torchvision cudatoolkit=10.0 -c pytorch
+
+# Install mmdetection
+git clone https://github.com/open-mmlab/mmdetection/
+cd mmdetection
+pip install Cython
+python setup.py develop
+# pip install -v -e .
+
+conda install pandas scikit-learn scikit-image
+pip install albumentations pretrainedmodels pydicom adabound
+```
+
+## Download data
+
+Data should be downloaded into:
+`./data/dicom-images-train/`
+`./data/dicom-images-stage2/`
+
+Scripts to help with downloading the data are available in `./etl/`, but make sure the data end up in the directories above. Note that we did not retrain models on the stage 2 train data. A list of image IDs to exclude from the stage 2 train data is available in `./stage1test.txt`.
+
+## Process data
+
+```
+cd ./etl/
+python 0_convert_data_to_png.py
+python 1_get_png_masks_and_assign_labels.py
+python 2_create_data_splits.py
+```
+
+## Train models
+
+```
+cd ./segment/scripts/
+bash TRAIN_V100.sh
+bash TRAIN_SEGMENT.sh
+bash TRAIN_DEEPLABXY.sh
+```
+
+## Predict on stage 2 test data
+
+```
+cd ./segment/scripts/
+bash STAGE2_PREDICT_V100.sh
+bash STAGE2_PREDICT_SEGMENT.sh
+bash STAGE2_PREDICT_DEEPLABXY.sh
+```
+
+## Create submission
+
+```
+cd ./submit/
+python create_submission_partitioned.py
+```
+
+Submissions will be written to `./submissions/` as `submission0.csv` (best) and `submission1.csv`.
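The `--grad-accum`/`--batch-size` flags suggested in the README keep the effective batch size constant (8 × 2 = 16 × 1 = 16). As a rough illustration of what gradient accumulation does — this is a generic PyTorch sketch, not the repo's actual training loop; `model`, `loader`, `optimizer`, and `criterion` are placeholders:

```python
import torch

def train_one_epoch(model, loader, optimizer, criterion, grad_accum=8):
    """Generic sketch: one optimizer step per `grad_accum` mini-batches."""
    model.train()
    optimizer.zero_grad()
    for step, (x, y) in enumerate(loader):
        loss = criterion(model(x), y)
        # Scale the loss so the accumulated gradient matches one large batch
        (loss / grad_accum).backward()
        if (step + 1) % grad_accum == 0:
            optimizer.step()
            optimizer.zero_grad()
```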
diff --git a/etl/.DS_Store b/etl/.DS_Store new file mode 100644 index 0000000..5008ddf Binary files /dev/null and b/etl/.DS_Store differ diff --git a/etl/0_convert_data_to_png.py b/etl/0_convert_data_to_png.py new file mode 100644 index 0000000..81f9653 --- /dev/null +++ b/etl/0_convert_data_to_png.py @@ -0,0 +1,89 @@ +import pydicom +import cv2 +import os +import re + +import pandas as pd +import numpy as np + +from tqdm import tqdm + +def extract_meta(dicom): + return {'view': dicom.ViewPosition, + 'sex': dicom.PatientSex, + 'age': dicom.PatientAge, + 'monochrome': dicom.PhotometricInterpretation, + 'sop': dicom.SOPInstanceUID, + 'series': dicom.SeriesInstanceUID, + 'study': dicom.StudyInstanceUID} + +def listify(dct): + for key in dct.keys(): + dct[key] = [dct[key]] + return dct + +def convert_and_extract(dicoms, image_save_dir, df_savefile): + list_of_dicom_df = [] + for dcmfile in tqdm(dicoms, total=len(dicoms)): + tmp_dcm = pydicom.read_file(dcmfile, force=True) + tmp_meta = extract_meta(tmp_dcm) + tmp_meta['filename'] = dcmfile + tmp_meta['height'] = tmp_dcm.pixel_array.shape[0] + tmp_meta['width'] = tmp_dcm.pixel_array.shape[1] + tmp_meta_df = pd.DataFrame(listify(tmp_meta)) + list_of_dicom_df.append(tmp_meta_df) + tmp_array = tmp_dcm.pixel_array + assert tmp_array.dtype == 'uint8' + if tmp_meta['monochrome'] == 'MONOCHROME1': + print('Inverting image ...') + tmp_array = np.invert(tmp_array) + status = cv2.imwrite(os.path.join(image_save_dir, tmp_meta['sop'][0] + '.png'), tmp_array) + # + dicom_df = pd.concat(list_of_dicom_df) + dicom_df.to_csv(df_savefile, index=False) + +# Convert train +TRAIN_DICOM_DIR = '../data/dicom-images-train/' +TRAIN_IMAGE_DIR = '../data/pngs/train/' +TRAIN_DF_SAVEFILE = '../data/train_meta.csv' + +if not os.path.exists(TRAIN_IMAGE_DIR): os.makedirs(TRAIN_IMAGE_DIR) + +train_dicoms = [] +for root, dirs, files in os.walk(TRAIN_DICOM_DIR): + for fi in files: + if re.search('dcm', fi): + train_dicoms.append(os.path.join(root, fi)) + +convert_and_extract(train_dicoms, TRAIN_IMAGE_DIR, TRAIN_DF_SAVEFILE) + +# Convert test +TEST_DICOM_DIR = '../data/dicom-images-test/' +TEST_IMAGE_DIR = '../data/pngs/test/' +TEST_DF_SAVEFILE = '../data/test_meta.csv' + +if not os.path.exists(TEST_IMAGE_DIR): os.makedirs(TEST_IMAGE_DIR) + +test_dicoms = [] +for root, dirs, files in os.walk(TEST_DICOM_DIR): + for fi in files: + if re.search('dcm', fi): + test_dicoms.append(os.path.join(root, fi)) + +convert_and_extract(test_dicoms, TEST_IMAGE_DIR, TEST_DF_SAVEFILE) + +# Convert stage 2 test +TEST_DICOM_DIR = '../data/dicom-images-stage2/' +TEST_IMAGE_DIR = '../data/pngs/stage2/' +TEST_DF_SAVEFILE = '../data/stage2_meta.csv' + +if not os.path.exists(TEST_IMAGE_DIR): os.makedirs(TEST_IMAGE_DIR) + +test_dicoms = [] +for root, dirs, files in os.walk(TEST_DICOM_DIR): + for fi in files: + if re.search('dcm', fi): + test_dicoms.append(os.path.join(root, fi)) + +convert_and_extract(test_dicoms, TEST_IMAGE_DIR, TEST_DF_SAVEFILE) + diff --git a/etl/1_get_png_masks_and_assign_labels.py b/etl/1_get_png_masks_and_assign_labels.py new file mode 100644 index 0000000..49e45a4 --- /dev/null +++ b/etl/1_get_png_masks_and_assign_labels.py @@ -0,0 +1,67 @@ +import pandas as pd +import numpy as np +import cv2 +import os + +from tqdm import tqdm + +def rle2mask(rle, width, height): + mask = np.zeros(width * height) + array = np.asarray([int(x) for x in rle.split()]) + starts = array[0::2] + lengths = array[1::2] + # + current_position = 0 + for index, start in enumerate(starts): + current_position += 
start
+        mask[current_position:current_position+lengths[index]] = 1
+        current_position += lengths[index]
+    # Rotate 90 degrees clockwise, then flip left-right to match the image orientation
+    return np.fliplr(np.rot90(mask.reshape(width, height), 3)).astype('uint8')
+
+train_meta = pd.read_csv('../data/train_meta.csv')
+train_rle = pd.read_csv('../data/train-rle.csv')
+
+train = train_meta.merge(train_rle, left_on='sop', right_on='ImageId')
+
+# Create binary labels for pneumothorax
+train['ptx_binary'] = [0 if _ == ' -1' else 1 for _ in train[' EncodedPixels']]
+
+TRAIN_MASKS_DIR = '../data/masks/train/'
+TRAIN_MASKS_255_DIR = '../data/masks_255/train/'
+if not os.path.exists(TRAIN_MASKS_DIR): os.makedirs(TRAIN_MASKS_DIR)
+
+
+if not os.path.exists(TRAIN_MASKS_255_DIR): os.makedirs(TRAIN_MASKS_255_DIR)
+
+# Generate masks from RLE and save to PNG files
+# Include empty masks
+mask_size_dict = {}
+for pid, df in tqdm(train.groupby('ImageId'), total=len(np.unique(train['ImageId']))):
+    if df[' EncodedPixels'].iloc[0] == ' -1':
+        # If empty, image should only have 1 row
+        # Create empty mask
+        mask = np.zeros((df['width'].iloc[0], df['height'].iloc[0])).astype('uint8')
+    else:
+        mask = np.zeros((df['width'].iloc[0], df['height'].iloc[0])).astype('uint8')
+        for rownum, row in df.iterrows():
+            mask += rle2mask(row[' EncodedPixels'], df['width'].iloc[0], df['height'].iloc[0])
+        mask[mask > 1] = 1
+    mask_size_dict[pid] = np.sum(mask)
+    status = cv2.imwrite(os.path.join(TRAIN_MASKS_DIR, df['sop'].iloc[0] + '.png'), mask)
+    mask[mask == 1] = 255
+    status = cv2.imwrite(os.path.join(TRAIN_MASKS_255_DIR, df['sop'].iloc[0] + '.png'), mask)
+# Mask files and image files should share the same name in different folders
+
+del train[' EncodedPixels']
+
+train = train.drop_duplicates()
+size_df = pd.DataFrame({'ImageId': list(mask_size_dict.keys()),
+                        'mask_size': [mask_size_dict[pid] for pid in mask_size_dict.keys()]})
+train = train.merge(size_df, on='ImageId')
+
+train.to_csv('../data/train_labels.csv', index=False)
+
+
+
+
diff --git a/etl/2_create_data_splits.py b/etl/2_create_data_splits.py
new file mode 100644
index 0000000..33716bd
--- /dev/null
+++ b/etl/2_create_data_splits.py
@@ -0,0 +1,34 @@
+# We will create 10 inner and 10 outer folds
+# We will probably not use all of them
+NSPLITS = 10
+
+import pandas as pd
+import numpy as np
+
+from sklearn.model_selection import StratifiedKFold, KFold
+
+np.random.seed(88)
+
+train_df = pd.read_csv('../data/train_labels.csv')
+
+# Stratify based on mask size
+train_df['strata'] = 0
+train_df.loc[train_df['mask_size'] > 0, 'strata'] = pd.qcut(train_df['mask_size'][train_df['mask_size'] > 0], 10, labels=range(1, 11))
+
+train_df['outer'] = 888
+outer_skf = StratifiedKFold(n_splits=NSPLITS, shuffle=True, random_state=88)
+outer_counter = 0
+for outer_train, outer_test in outer_skf.split(train_df, train_df['strata']):
+    train_df.loc[outer_test, 'outer'] = outer_counter
+    inner_skf = StratifiedKFold(n_splits=NSPLITS, shuffle=True, random_state=88)
+    inner_counter = 0
+    train_df['inner{}'.format(outer_counter)] = 888
+    inner_df = train_df[train_df['outer'] != outer_counter].reset_index(drop=True)
+    # Assign each non-holdout ImageId to an inner validation fold
+    for inner_train, inner_valid in inner_skf.split(inner_df, inner_df['strata']):
+        inner_valid_ids = inner_df.loc[inner_valid, 'ImageId']
+        train_df.loc[train_df['ImageId'].isin(inner_valid_ids), 'inner{}'.format(outer_counter)] = inner_counter
+        inner_counter += 1
+    outer_counter += 1
+
+train_df.to_csv('../data/train_labels_with_splits.csv', index=False)
\ No newline at end of file
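The split columns written above use 888 as a "not assigned" sentinel, and for `inner{o}` the stored fold number marks the inner *validation* assignment. A hypothetical downstream sketch — not a script in this repo — of how one (outer, inner) fold would be recovered from the CSV:

```python
import pandas as pd

df = pd.read_csv('../data/train_labels_with_splits.csv')
outer, inner = 0, 0

test_df  = df[df['outer'] == outer]                    # held-out outer fold
valid_df = df[df['inner{}'.format(outer)] == inner]    # inner validation fold
train_df = df[(df['outer'] != outer) &
              (df['inner{}'.format(outer)] != inner)]  # remaining images train
```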
diff --git a/etl/download_data.py b/etl/download_data.py
new file mode 100644
index 0000000..4d7eaab
--- /dev/null
+++ b/etl/download_data.py
@@ -0,0 +1,90 @@
+"""Script to download all instances in a DICOM Store."""
+import os
+import posixpath
+from concurrent import futures
+from retrying import retry
+import google.auth
+from google.auth.transport.requests import AuthorizedSession
+
+import time
+# URL of CHC API
+CHC_API_URL = 'https://healthcare.googleapis.com/v1beta1'
+PROJECT_ID = 'kaggle-siim-healthcare'
+REGION = 'us-central1'
+DATASET_ID = 'siim-pneumothorax'
+TRAIN_DICOM_STORE_ID = 'dicom-images-train'
+TEST_DICOM_STORE_ID = 'dicom-images-test'
+
+#SLEEP = None
+SLEEP = 0.1
+
+@retry(wait_exponential_multiplier=1000, wait_exponential_max=10000)
+def download_instance(dicom_web_url, dicom_store_id, study_uid, series_uid,
+                      instance_uid, credentials):
+    """Downloads a DICOM instance and saves it under the current folder."""
+    instance_url = posixpath.join(dicom_web_url, 'studies', study_uid, 'series',
+                                  series_uid, 'instances', instance_uid)
+    if SLEEP: time.sleep(SLEEP)
+    authed_session = AuthorizedSession(credentials)
+    if SLEEP: time.sleep(SLEEP)
+    response = authed_session.get(
+        instance_url, headers={'Accept': 'application/dicom; transfer-syntax=*'})
+    file_path = posixpath.join(dicom_store_id, study_uid, series_uid,
+                               instance_uid)
+    filename = '%s.dcm' % file_path
+    # exist_ok avoids a crash (and endless @retry loop) once the series directory exists
+    os.makedirs(os.path.dirname(filename), exist_ok=True)
+    with open(filename, 'wb') as f:
+        f.write(response.content)
+
+
+def download_all_instances(dicom_store_id, credentials):
+    """Downloads all DICOM instances in the specified DICOM store."""
+    # Get a list of all instances.
+    dicom_web_url = posixpath.join(CHC_API_URL, 'projects', PROJECT_ID,
+                                   'locations', REGION, 'datasets', DATASET_ID,
+                                   'dicomStores', dicom_store_id, 'dicomWeb')
+    qido_url = posixpath.join(dicom_web_url, 'instances')
+    authed_session = AuthorizedSession(credentials)
+    response = authed_session.get(qido_url, params={'limit': '15000'})
+    if response.status_code != 200:
+        print(response.text)
+        return
+    content = response.json()
+    # DICOM Tag numbers
+    study_instance_uid_tag = '0020000D'
+    series_instance_uid_tag = '0020000E'
+    sop_instance_uid_tag = '00080018'
+    value_key = 'Value'
+    with futures.ThreadPoolExecutor() as executor:
+        future_to_study_uid = {}
+        for instance in content:
+            study_uid = instance[study_instance_uid_tag][value_key][0]
+            series_uid = instance[series_instance_uid_tag][value_key][0]
+            instance_uid = instance[sop_instance_uid_tag][value_key][0]
+            if SLEEP: time.sleep(SLEEP)
+            future = executor.submit(download_instance, dicom_web_url, dicom_store_id,
+                                     study_uid, series_uid, instance_uid, credentials)
+            future_to_study_uid[future] = study_uid
+        processed_count = 0
+        for future in futures.as_completed(future_to_study_uid):
+            try:
+                future.result()
+                processed_count += 1
+                if not processed_count % 100 or processed_count == len(content):
+                    print('Processed instance %d out of %d' %
+                          (processed_count, len(content)))
+            except Exception as e:
+                print('Failed to download a study. 
UID: %s \n exception: %s' % + (future_to_study_uid[future], e)) + + +def main(argv=None): + credentials, _ = google.auth.default() + print('Downloading all instances in %s DICOM store' % TRAIN_DICOM_STORE_ID) + download_all_instances(TRAIN_DICOM_STORE_ID, credentials) + print('Downloading all instances in %s DICOM store' % TEST_DICOM_STORE_ID) + download_all_instances(TEST_DICOM_STORE_ID, credentials) + + +main() \ No newline at end of file diff --git a/etl/get_annotations.sh b/etl/get_annotations.sh new file mode 100644 index 0000000..c849921 --- /dev/null +++ b/etl/get_annotations.sh @@ -0,0 +1,9 @@ +PROJECT_ID="kaggle-siim-healthcare" +REGION="us-central1" +DATASET_ID="siim-pneumothorax" +FHIR_STORE_ID="fhir-masks-train" +DOCUMENT_REFERENCE_ID="d70d8f3e-990a-4bc0-b11f-c87349f5d4eb" + +curl -X GET \ +-H "Authorization: Bearer "$(gcloud auth print-access-token) \ +"https://healthcare.googleapis.com/v1beta1/projects/${PROJECT_ID}/locations/${REGION}/datasets/${DATASET_ID}/fhirStores/${FHIR_STORE_ID}/fhir/DocumentReference/${DOCUMENT_REFERENCE_ID}" \ No newline at end of file diff --git a/reproducibility.py b/reproducibility.py new file mode 100644 index 0000000..4b943b3 --- /dev/null +++ b/reproducibility.py @@ -0,0 +1,19 @@ +import random, torch, numpy as np +# From: https://github.com/liaopeiyuan/ml-arsenal-public/blob/master/reproducibility.py + +def set_reproducibility(SEED): + print("Fixing random seed for reproducibility ...") + random.seed(SEED) + np.random.seed(SEED) + torch.manual_seed(SEED) + torch.cuda.manual_seed_all(SEED) + print ('\tSetting random seed to {} !'.format(SEED)) + print('') + # + torch.backends.cudnn.benchmark = True ##uses the inbuilt cudnn auto-tuner to find the fastest convolution algorithms. - + torch.backends.cudnn.enabled = True + print ('PyTorch environment ...') + print ('\ttorch.__version__ =', torch.__version__) + print ('\ttorch.version.cuda =', torch.version.cuda) + print ('\ttorch.backends.cudnn.version() =', torch.backends.cudnn.version()) + print ('\n') \ No newline at end of file diff --git a/segment/.DS_Store b/segment/.DS_Store new file mode 100644 index 0000000..669eb11 Binary files /dev/null and b/segment/.DS_Store differ diff --git a/segment/data/__init__.py b/segment/data/__init__.py new file mode 100644 index 0000000..b3f3ff8 --- /dev/null +++ b/segment/data/__init__.py @@ -0,0 +1 @@ +from .loader import * \ No newline at end of file diff --git a/segment/data/loader.py b/segment/data/loader.py new file mode 100644 index 0000000..6bd608a --- /dev/null +++ b/segment/data/loader.py @@ -0,0 +1,353 @@ +""" +Loaders for different datasets. 
+""" +import numpy as np +import cv2 +import torch +from torch.utils.data import Dataset, DataLoader, Sampler + +from utils.helper import channels_last_to_first, get_image_from_dicom + +class RatioSampler(Sampler): + # + def __init__(self, data_source, num_samples, pos_neg_ratio): + self.data_source = data_source + self.num_samples = num_samples + self.pos_neg_ratio = pos_neg_ratio + # + def __iter__(self): + pos_indices = [i for i, _ in enumerate(self.data_source.labels) if _ == 1] + neg_indices = [i for i, _ in enumerate(self.data_source.labels) if _ == 0] + ratio = 1 / self.pos_neg_ratio if self.pos_neg_ratio < 1 else self.pos_neg_ratio + if self.pos_neg_ratio > 1: + pos_num_samples = int(self.num_samples * (ratio / (1 + ratio))) + neg_num_samples = self.num_samples - pos_num_samples + else: + neg_num_samples = int(self.num_samples * (ratio / (1 + ratio))) + pos_num_samples = self.num_samples - neg_num_samples + pos_replace = False if len(pos_indices) >= pos_num_samples else True + neg_replace = False if len(neg_indices) >= neg_num_samples else True + pos = np.random.choice(pos_indices, pos_num_samples, replace=pos_replace) + neg = np.random.choice(neg_indices, neg_num_samples, replace=neg_replace) + combined = np.concatenate((pos, neg)) + np.random.shuffle(combined) + return iter(list(combined)) + # + def __len__(self): + return self.num_samples + +class XrayDataset(Dataset): + """ + Basic loader. + """ + def __init__(self, imgfiles, labels, dicom=True, grayscale=True, preprocess=None, pad=None, resize=None, transform=None, tta=None, test_mode=False): + self.imgfiles = imgfiles + self.labels = labels + self.dicom = dicom + self.grayscale = grayscale + self.preprocess = preprocess + self.pad = pad + self.resize = resize + self.transform = transform + self.tta = tta + self.test_mode = test_mode + + if transform and tta: + raise Exception('Cannot use both `transform` and `tta`') + + def __len__(self): + return len(self.imgfiles) + + def load_image(self, imgfile): + if self.dicom: + X = get_image_from_dicom(imgfile) + else: + if self.grayscale: + mode = cv2.IMREAD_GRAYSCALE + else: + mode = cv2.IMREAD_COLOR + X = cv2.imread(imgfile, mode) + # while X is None: + # i = np.random.choice(len(self.imgfiles)) + # if self.dicom: + # X = get_image_from_dicom(self.imgfiles[i]) + # else: + # X = cv2.imread(self.imgfiles[i], mode) + if self.grayscale: + X = np.repeat(np.expand_dims(X, axis=-1), 3, axis=-1) + return X + + def process_image(self, X): + if not self.test_mode: + # Randomly flip with probability 0.5 + if np.random.binomial(1, 0.5): X = np.fliplr(X) + # Randomly invert with probability 0.5 + if np.random.binomial(1, 0.5): X = np.invert(X) + # 3- Apply data augmentation + if self.transform: + X = self.transform(image=X)['image'] + if self.preprocess: X = self.preprocess(X) + X = channels_last_to_first(X) + elif self.tta: + X = np.asarray([ind_tta(image=X)['image'] for ind_tta in self.tta]) + if self.preprocess: X = [self.preprocess(_) for _ in X] + X = np.asarray([channels_last_to_first(_) for _ in X]) + else: + if self.preprocess: X = self.preprocess(X) + X = channels_last_to_first(X) + return X + + def __getitem__(self, i): + """ + Returns: x, y + - x: tensorized input + - y: tensorized label + """ + X = self.load_image(self.imgfiles[i]) + # 2- Pad and resize image + if self.pad: X = self.pad(X) + if self.resize: X = self.resize(image=X)['image'] + X = self.process_image(X) + y = np.asarray(self.labels[i]) + return torch.from_numpy(X.copy()).type('torch.FloatTensor'), \ + 
torch.from_numpy(y) + + +class XrayMaskDataset(Dataset): + """ + Basic loader. + """ + def __init__(self, + imgfiles, + maskfiles, + labels, + multiclass=True, + dicom=True, + grayscale=True, + preprocess=None, + pad=None, + resize=None, + transform=None, + crop=None, + inversion=False, + test_mode=False): + self.imgfiles = imgfiles + self.maskfiles = maskfiles + self.labels = labels + self.multiclass = multiclass + self.dicom = dicom + self.grayscale = grayscale + self.preprocess = preprocess + self.pad = pad + self.resize = resize + self.transform = transform + self.crop = crop + self.inversion = inversion + self.test_mode = test_mode + + def __len__(self): + return len(self.imgfiles) + + def load_image(self, imgfile): + if self.dicom: + X = get_image_from_dicom(imgfile) + else: + if self.grayscale: + mode = cv2.IMREAD_GRAYSCALE + else: + mode = cv2.IMREAD_COLOR + X = cv2.imread(imgfile, mode) + if self.grayscale: + X = np.repeat(np.expand_dims(X, axis=-1), 3, axis=-1) + return X + + def __getitem__(self, i): + """ + Returns: x, y + - x: tensorized input + - y: tensorized label + """ + # 1- Load image + X = self.load_image(self.imgfiles[i]) + y = cv2.imread(self.maskfiles[i], 0) + assert np.max(y) <= 1 + if self.inversion: + X = 255 - X + assert np.max(X) <= 255 and np.min(X) >= 0 + if not self.test_mode: + # Randomly flip with probability 0.5 + if np.random.binomial(1, 0.5): + X = np.fliplr(X) + y = np.fliplr(y) + # Randomly invert with probability 0.5 + # if np.random.binomial(1, 0.5): X = np.invert(X) + # 2- Pad and resize image + if self.pad: + X = self.pad(X) + y = self.pad(y) + if self.resize: + resized = self.resize(image=X, mask=y) + X = resized['image'] + y = resized['mask'] + # 3- Apply data augmentation + if self.transform: + transformed = self.transform(image=X, mask=y) + X = transformed['image'] + y = transformed['mask'] + # 4- Apply crop + if self.crop: + transformed = self.crop(image=X, mask=y) + X = transformed['image'] + y = transformed['mask'] + # 5- Apply preprocessing + if self.preprocess: X = self.preprocess(X) + X = channels_last_to_first(X) + torch_tensor_type = 'torch.FloatTensor' + if self.labels is None: + return torch.from_numpy(X).type(torch_tensor_type), \ + torch.from_numpy(y).type(torch_tensor_type), \ + torch.from_numpy(np.expand_dims(0, axis=0)).type(torch_tensor_type) + else: + return torch.from_numpy(X).type(torch_tensor_type), \ + torch.from_numpy(y).type(torch_tensor_type), \ + torch.from_numpy(np.expand_dims(self.labels[i], axis=0)).type(torch_tensor_type) + +def grid_patches(img, patch_size=512, num_rows=3, num_cols=3, return_coords=False): + """ + Assumes image shape is (C, H, W) + Generates * patches from an image. + Centers of patches gridded evenly length-/width-wise. 
+ """ + if np.min(img.shape[1:]) < patch_size: + raise Exception('Patch size {} is greater than image size {}'.format(patch_size, img.shape)) + row_start = patch_size // 2 + row_end = img.shape[1] - patch_size // 2 + col_start = patch_size // 2 + col_end = img.shape[2] - patch_size // 2 + row_inc = (row_end - row_start) // (num_rows - 1) + col_inc = (col_end - col_start) // (num_cols - 1) + if row_inc == 0: row_inc = 1 + if col_inc == 0: col_inc = 1 + patch_list = [] + coord_list = [] + patch_masks = [] + for i in range(row_start, row_end+1, row_inc): + for j in range(col_start, col_end+1, col_inc): + patch_mask = np.zeros_like(img[0]) + x0 = i-patch_size//2 ; x1 = i+patch_size//2 + y0 = j-patch_size//2 ; y1 = j+patch_size//2 + patch = img[:, x0:x1, y0:y1] + patch_mask[x0:x1, y0:y1] = 1 + assert patch.shape == (img.shape[0], patch_size, patch_size) + patch_list.append(patch) + patch_masks.append(patch_mask) + coord_list.append([x0,x1,y0,y1]) + if return_coords: + return np.asarray(patch_list), coord_list + else: + return np.asarray(patch_list), np.asarray(patch_masks) + +class XrayCropStitchDataset(Dataset): + """ + Basic loader. + """ + def __init__(self, + imgfiles, + maskfiles, + labels, + multiclass=True, + dicom=True, + grayscale=True, + preprocess=None, + pad=None, + resize=None, + transform=None, + crop=None, + inversion=False, + test_mode=False): + self.imgfiles = imgfiles + self.maskfiles = maskfiles + self.labels = labels + self.multiclass = multiclass + self.dicom = dicom + self.grayscale = grayscale + self.preprocess = preprocess + self.pad = pad + self.resize = resize + self.transform = transform + self.crop = crop + self.inversion = inversion + self.test_mode = test_mode + + def __len__(self): + return len(self.imgfiles) + + def load_image(self, imgfile): + if self.dicom: + X = get_image_from_dicom(imgfile) + else: + if self.grayscale: + mode = cv2.IMREAD_GRAYSCALE + else: + mode = cv2.IMREAD_COLOR + X = cv2.imread(imgfile, mode) + if self.grayscale: + X = np.repeat(np.expand_dims(X, axis=-1), 3, axis=-1) + return X + + def __getitem__(self, i): + """ + Returns: x, y + - x: tensorized input + - y: tensorized label + """ + # 1- Load image + X = self.load_image(self.imgfiles[i]) + y = cv2.imread(self.maskfiles[i], 0) + assert np.max(y) <= 1 + if self.inversion: + X = 255 - X + assert np.max(X) <= 255 and np.min(X) >= 0 + if not self.test_mode: + # Randomly flip with probability 0.5 + if np.random.binomial(1, 0.5): + X = np.fliplr(X) + y = np.fliplr(y) + # Randomly invert with probability 0.5 + # if np.random.binomial(1, 0.5): X = np.invert(X) + # 2- Pad and resize image + if self.pad: + X = self.pad(X) + y = self.pad(y) + if self.resize: + resized = self.resize(image=X, mask=y) + X = resized['image'] + y = resized['mask'] + # 3- Apply data augmentation + if self.transform: + transformed = self.transform(image=X, mask=y) + X = transformed['image'] + y = transformed['mask'] + # 4- Apply crop + if self.crop: + transformed = self.crop(image=X, mask=y) + X = transformed['image'] + y = transformed['mask'] + # 5- Apply preprocessing + if self.preprocess: X = self.preprocess(X) + X = channels_last_to_first(X) + patches, patch_masks = grid_patches(X) + torch_tensor_type = 'torch.FloatTensor' + if self.labels is None: + return torch.from_numpy(patches).type(torch_tensor_type), \ + torch.from_numpy(patch_masks).type(torch_tensor_type), \ + torch.from_numpy(y).type(torch_tensor_type), \ + torch.from_numpy(np.expand_dims(0, axis=0)).type(torch_tensor_type) + else: + return 
torch.from_numpy(patches).type(torch_tensor_type), \
+                   torch.from_numpy(patch_masks).type(torch_tensor_type), \
+                   torch.from_numpy(y).type(torch_tensor_type), \
+                   torch.from_numpy(np.expand_dims(self.labels[i], axis=0)).type(torch_tensor_type)
+
+
+
diff --git a/segment/loss/__init__.py b/segment/loss/__init__.py
new file mode 100644
index 0000000..1c03f51
--- /dev/null
+++ b/segment/loss/__init__.py
@@ -0,0 +1 @@
+from .lovasz_losses import *
\ No newline at end of file
diff --git a/segment/loss/lovasz_losses.py b/segment/loss/lovasz_losses.py
new file mode 100644
index 0000000..20b68c1
--- /dev/null
+++ b/segment/loss/lovasz_losses.py
@@ -0,0 +1,292 @@
+"""
+Lovasz-Softmax and Jaccard hinge loss in PyTorch
+Maxim Berman 2018 ESAT-PSI KU Leuven (MIT License)
+"""
+
+from __future__ import print_function, division
+from torch import nn
+import torch
+from torch.autograd import Variable
+import torch.nn.functional as F
+import numpy as np
+try:
+    from itertools import ifilterfalse
+except ImportError:  # py3k
+    from itertools import filterfalse as ifilterfalse
+
+
+def lovasz_grad(gt_sorted):
+    """
+    Computes gradient of the Lovasz extension w.r.t sorted errors
+    See Alg. 1 in paper
+    """
+    p = len(gt_sorted)
+    gts = gt_sorted.sum()
+    intersection = gts - gt_sorted.float().cumsum(0)
+    union = gts + (1 - gt_sorted).float().cumsum(0)
+    jaccard = 1. - intersection / union
+    if p > 1:  # cover 1-pixel case
+        jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
+    return jaccard
+
+
+def iou_binary(preds, labels, EMPTY=1., ignore=None, per_image=True):
+    """
+    IoU for foreground class
+    binary: 1 foreground, 0 background
+    """
+    if not per_image:
+        preds, labels = (preds,), (labels,)
+    ious = []
+    for pred, label in zip(preds, labels):
+        intersection = ((label == 1) & (pred == 1)).sum()
+        union = ((label == 1) | ((pred == 1) & (label != ignore))).sum()
+        if not union:
+            iou = EMPTY
+        else:
+            iou = float(intersection) / union
+        ious.append(iou)
+    iou = mean(ious)    # mean across images if per_image
+    return 100 * iou
+
+
+def iou(preds, labels, C, EMPTY=1., ignore=None, per_image=False):
+    """
+    Array of IoU for each (non ignored) class
+    """
+    if not per_image:
+        preds, labels = (preds,), (labels,)
+    ious = []
+    for pred, label in zip(preds, labels):
+        iou = []
+        for i in range(C):
+            if i != ignore:  # The ignored label is sometimes among predicted classes (ENet - CityScapes)
+                intersection = ((label == i) & (pred == i)).sum()
+                union = ((label == i) | ((pred == i) & (label != ignore))).sum()
+                if not union:
+                    iou.append(EMPTY)
+                else:
+                    iou.append(float(intersection) / union)
+        ious.append(iou)
+    ious = [mean(class_ious) for class_ious in zip(*ious)]  # mean across images if per_image
+    return 100 * np.array(ious)
+
+
+# --------------------------- BINARY LOSSES ---------------------------
+
+
+def lovasz_hinge(logits, labels, per_image=True, ignore=None):
+    """
+    Binary Lovasz hinge loss
+      logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty)
+      labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)
+      per_image: compute the loss per image instead of per batch
+      ignore: void class id
+    """
+    if per_image:
+        loss = mean(lovasz_hinge_flat(*flatten_binary_scores(log.unsqueeze(0), lab.unsqueeze(0), ignore))
+                    for log, lab in zip(logits, labels))
+    else:
+        loss = lovasz_hinge_flat(*flatten_binary_scores(logits, labels, ignore))
+    return loss
+
+
+def lovasz_hinge_flat(logits, labels):
+    """
+    Binary Lovasz hinge loss
+      logits: [P] Variable, logits at each prediction (between -\infty and +\infty)
+      labels: [P] Tensor, binary 
ground truth labels (0 or 1) + ignore: label to ignore + """ + if len(labels) == 0: + # only void pixels, the gradients should be 0 + return logits.sum() * 0. + signs = 2. * labels.float() - 1. + errors = (1. - logits * Variable(signs)) + errors_sorted, perm = torch.sort(errors, dim=0, descending=True) + perm = perm.data + gt_sorted = labels[perm] + grad = lovasz_grad(gt_sorted) + loss = torch.dot(F.relu(errors_sorted), Variable(grad)) + return loss + +class LovaszHinge(nn.Module): + # + def __init__(self): + super(LovaszHinge, self).__init__() + # + def forward(self, probas, labels, per_image=False, ignore=None): + probas = probas[:,1,...] + return (lovasz_hinge(probas, labels, per_image, ignore) + lovasz_hinge(-probas, 1-labels, per_image, ignore))/2 + +def lovasz_hinge2(logits, labels, per_image=True, ignore=None): + """ + Binary Lovasz hinge loss + logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty) + labels: [B, H, W] Tensor, binary ground truth masks (0 or 1) + per_image: compute the loss per image instead of per batch + ignore: void class id + """ + if per_image: + loss = mean(lovasz_hinge_flat2(*flatten_binary_scores(log.unsqueeze(0), lab.unsqueeze(0), ignore)) + for log, lab in zip(logits, labels)) + else: + loss = lovasz_hinge_flat2(*flatten_binary_scores(logits, labels, ignore)) + return loss + + +def lovasz_hinge_flat2(logits, labels): + """ + Binary Lovasz hinge loss + logits: [P] Variable, logits at each prediction (between -\infty and +\infty) + labels: [P] Tensor, binary ground truth labels (0 or 1) + ignore: label to ignore + """ + if len(labels) == 0: + # only void pixels, the gradients should be 0 + return logits.sum() * 0. + signs = 2. * labels.float() - 1. + errors = (1. - logits * Variable(signs)) + errors_sorted, perm = torch.sort(errors, dim=0, descending=True) + perm = perm.data + gt_sorted = labels[perm] + grad = lovasz_grad(gt_sorted) + weight = 1 + if labels.sum() == 0: + weight = 0 + loss = torch.dot(F.relu(errors_sorted), Variable(grad)) * weight + return loss + + +def flatten_binary_scores(scores, labels, ignore=None): + """ + Flattens predictions in the batch (binary case) + Remove labels equal to 'ignore' + """ + try: + scores = scores.view(-1) + labels = labels.view(-1) + except RuntimeError: + scores = scores.contiguous().view(-1) + labels = labels.contiguous().view(-1) + if ignore is None: + return scores, labels + valid = (labels != ignore) + vscores = scores[valid] + vlabels = labels[valid] + return vscores, vlabels + + +class StableBCELoss(torch.nn.modules.Module): + def __init__(self): + super(StableBCELoss, self).__init__() + def forward(self, input, target): + neg_abs = - input.abs() + loss = input.clamp(min=0) - input * target + (1 + neg_abs.exp()).log() + return loss.mean() + + +def binary_xloss(logits, labels, ignore=None): + """ + Binary Cross entropy loss + logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty) + labels: [B, H, W] Tensor, binary ground truth masks (0 or 1) + ignore: void class id + """ + logits, labels = flatten_binary_scores(logits, labels, ignore) + loss = StableBCELoss()(logits, Variable(labels.float())) + return loss + + +# --------------------------- MULTICLASS LOSSES --------------------------- + +def lovasz_softmax(probas, labels, only_present=False, per_image=False, ignore=None): + """ + Multi-class Lovasz-Softmax loss + probas: [B, C, H, W] Variable, class probabilities at each prediction (between 0 and 1) + labels: [B, H, W] Tensor, ground truth labels (between 0 and 
C - 1) + only_present: average only on classes present in ground truth + per_image: compute the loss per image instead of per batch + ignore: void class labels + """ + if per_image: + loss = mean(lovasz_softmax_flat(*flatten_probas(prob.unsqueeze(0), lab.unsqueeze(0), ignore), only_present=only_present) + for prob, lab in zip(probas, labels)) + else: + loss = lovasz_softmax_flat(*flatten_probas(probas, labels, ignore), only_present=only_present) + return loss + +class LovaszSoftmax(nn.Module): + # + def __init__(self): + super(LovaszSoftmax, self).__init__() + # + def forward(self, probas, labels, only_present=False, per_image=False, ignore=None): + probas = torch.softmax(probas, dim=1) + assert probas.min() >= 0 and probas.max() <= 1 + return lovasz_softmax(probas, labels, only_present, per_image, ignore) + + +def lovasz_softmax_flat(probas, labels, only_present=False): + """ + Multi-class Lovasz-Softmax loss + probas: [P, C] Variable, class probabilities at each prediction (between 0 and 1) + labels: [P] Tensor, ground truth labels (between 0 and C - 1) + only_present: average only on classes present in ground truth + """ + C = probas.size(1) + losses = [] + for c in range(C): + fg = (labels == c).float() # foreground for class c + if only_present and fg.sum() == 0: + continue + errors = (Variable(fg) - probas[:, c]).abs() + errors_sorted, perm = torch.sort(errors, 0, descending=True) + perm = perm.data + fg_sorted = fg[perm] + losses.append(torch.dot(errors_sorted, Variable(lovasz_grad(fg_sorted)))) + return mean(losses) + + +def flatten_probas(probas, labels, ignore=None): + """ + Flattens predictions in the batch + """ + B, C, H, W = probas.size() + probas = probas.permute(0, 2, 3, 1).contiguous().view(-1, C) # B * H * W, C = P, C + labels = labels.view(-1) + if ignore is None: + return probas, labels + valid = (labels != ignore) + vprobas = probas[valid.nonzero().squeeze()] + vlabels = labels[valid] + return vprobas, vlabels + +def xloss(logits, labels, ignore=None): + """ + Cross entropy loss + """ + return F.cross_entropy(logits, Variable(labels), ignore_index=255) + + +# --------------------------- HELPER FUNCTIONS --------------------------- + +def mean(l, ignore_nan=False, empty=0): + """ + nanmean compatible with generators. + """ + l = iter(l) + if ignore_nan: + l = ifilterfalse(np.isnan, l) + try: + n = 1 + acc = next(l) + except StopIteration: + if empty == 'raise': + raise ValueError('Empty mean') + return empty + for n, v in enumerate(l, 2): + acc += v + if n == 1: + return acc + return acc / n diff --git a/segment/loss/other_losses.py b/segment/loss/other_losses.py new file mode 100644 index 0000000..07302fc --- /dev/null +++ b/segment/loss/other_losses.py @@ -0,0 +1,263 @@ +from torch import nn +import numpy as np + +import torch +from torch.nn import functional as F + +from .lovasz_losses import lovasz_softmax + +class KLDivergence(nn.Module): + # + def __init__(self): + super(KLDivergence, self).__init__() + # + def forward(self, N, mu, logvar): + return 1. / N * torch.sum(mu ** 2. + logvar.exp() - 1. - logvar) + +class SoftDiceLoss(nn.Module): + # + def __init__(self, epsilon=1e-12, per_image=True): + super(SoftDiceLoss, self).__init__() + self.epsilon = epsilon + self.per_image = per_image + # + def forward(self, y_pred, y_true): + y_pred = torch.softmax(y_pred, dim=1)[:,1] + try: + y_pred = y_pred.view(-1) + except RuntimeError: + y_pred = y_pred.contiguous().view(-1) + y_true = y_true.view(-1) + assert y_pred.shape == y_true.shape + if self.per_image: + loss = 1. 
- (2. * torch.sum(y_true * y_pred, dim=-1) + self.epsilon) / (torch.sum(y_true ** 2., dim=-1) + torch.sum(y_pred ** 2., dim=-1) + self.epsilon)
+            loss = loss.mean()
+        else:
+            loss = 1. - (2. * torch.sum(y_true * y_pred) + self.epsilon) / (torch.sum(y_true ** 2.) + torch.sum(y_pred ** 2.) + self.epsilon)
+        return loss
+
+class SoftDiceLossV2(nn.Module):
+    #
+    def __init__(self, smooth=1., per_image=False):
+        super(SoftDiceLossV2, self).__init__()
+        self.smooth = smooth
+        self.per_image = per_image
+    #
+    def forward(self, y_pred, y_true):
+        y_pred = torch.softmax(y_pred, dim=1)[:,1]
+        try:
+            y_pred = y_pred.view(-1)
+        except RuntimeError:
+            y_pred = y_pred.contiguous().view(-1)
+        y_true = y_true.view(-1)
+        assert y_pred.shape == y_true.shape
+        if self.per_image:
+            loss = 1. - (2. * torch.sum(y_true * y_pred, dim=-1) + self.smooth) / (torch.sum(y_true, dim=-1) + torch.sum(y_pred, dim=-1) + self.smooth)
+            loss = loss.mean()
+        else:
+            loss = 1. - (2. * torch.sum(y_true * y_pred) + self.smooth) / (torch.sum(y_true) + torch.sum(y_pred) + self.smooth)
+        return loss
+
+class DiceBCELoss(nn.Module):
+    #
+    def __init__(self, dice_weight=0.5, bce_weight=0.5):
+        super(DiceBCELoss, self).__init__()
+        self.dice_weight = dice_weight
+        self.bce_weight = bce_weight
+    #
+    def forward(self, y_prob, y_true):
+        y_prob = y_prob[:,1]
+        # Uses hard Dice on thresholded logits (logit > 0 <=> probability > 0.5)
+        y_pred = y_prob > 0
+        y_pred = y_pred.float()
+        dice_loss = 1. - (2. * torch.sum(y_true * y_pred) + 1) / (torch.sum(y_true) + torch.sum(y_pred) + 1)
+        bce_loss = F.binary_cross_entropy_with_logits(y_prob.flatten(), y_true.flatten())
+        return (self.dice_weight * dice_loss + self.bce_weight * bce_loss) / (self.dice_weight + self.bce_weight)
+
+class DiceBCELossV2(nn.Module):
+    #
+    def __init__(self, dice_weight=0.5, bce_weight=0.5, epsilon=1e-12):
+        super(DiceBCELossV2, self).__init__()
+        weights_sum = dice_weight + bce_weight
+        self.dice_weight = dice_weight / weights_sum
+        self.bce_weight = bce_weight / weights_sum
+        self.epsilon = epsilon
+    #
+    def forward(self, y_pred, y_true):
+        y_prob = torch.softmax(y_pred, dim=1)[:,1].view(-1)
+        y_pred = y_pred[:,1].view(-1)
+        y_true = y_true.view(-1)
+        bce_loss = F.binary_cross_entropy_with_logits(y_pred, y_true)
+        dice_loss = 1. - (2. * torch.sum(y_true * y_prob) + self.epsilon) / (torch.sum(y_true ** 2.) + torch.sum(y_prob ** 2.) + self.epsilon)
+        return self.dice_weight * dice_loss + self.bce_weight * bce_loss
+
+class SoftDiceLovasz(nn.Module):
+    #
+    def __init__(self, dice_wt=0.5, lovasz_wt=0.5):
+        super(SoftDiceLovasz, self).__init__()
+        self.dice_wt = dice_wt
+        self.lovasz_wt = lovasz_wt
+    #
+    def forward(self, y_pred, y_true, only_present=False, per_image=False, ignore=None):
+        y_pred = torch.softmax(y_pred, dim=1)
+        dice = 1. - 2. * torch.sum(y_true * y_pred[:,1]) / (torch.sum(y_true ** 2.) + torch.sum(y_pred[:,1] ** 2.) 
+ 1e-7) + lovasz = lovasz_softmax(y_pred, y_true, only_present, per_image, ignore) + return self.dice_wt * dice + self.lovasz_wt * lovasz + +class BCELoss(nn.Module): + # + def __init__(self, pos_weight=1., neg_weight=1.): + super(BCELoss, self).__init__() + self.pos_weight = pos_weight + self.neg_weight = neg_weight + # + def forward(self, y_pred, y_true, reduction='mean'): + try: + y_pred = y_pred[:,1].view(-1) + except RuntimeError: + y_pred = y_pred[:,1].contiguous().view(-1) + y_true = y_true.view(-1) + assert(y_pred.shape==y_true.shape) + loss = F.binary_cross_entropy_with_logits(y_pred, y_true, reduction='none') + pos = (y_true>0.5).float() + neg = (y_true<0.5).float() + loss = (self.pos_weight * pos * loss) + (self.neg_weight * neg * loss) + return loss.mean() + +class WeightedBCE(nn.Module): + # From Heng + def __init__(self, pos_frac=0.25, neg_frac=0.75): + super(WeightedBCE, self).__init__() + self.pos_frac = pos_frac + self.neg_frac = neg_frac + # + def forward(self, y_pred, y_true, reduction='mean'): + try: + y_pred = y_pred[:,1].view(-1) + except RuntimeError: + y_pred = y_pred[:,1].contiguous().view(-1) + y_true = y_true.view(-1) + assert(y_pred.shape==y_true.shape) + # + loss = F.binary_cross_entropy_with_logits(y_pred, y_true, reduction='none') + # + pos = (y_true>0.5).float() + neg = (y_true<0.5).float() + pos_weight = pos.sum().item() + 1e-12 + neg_weight = neg.sum().item() + 1e-12 + loss = (self.pos_frac*pos*loss/pos_weight + self.neg_frac*neg*loss/neg_weight).sum() + # + return loss + +class WeightedBCEv2(nn.Module): + def __init__(self): + super(WeightedBCEv2, self).__init__() + # + def forward(self, y_pred, y_true, reduction='mean'): + y_pred = y_pred[:,1].view(-1) + y_true = y_true.view(-1) + assert(y_pred.shape==y_true.shape) + + loss = F.binary_cross_entropy_with_logits(y_pred, y_true, reduction='none') + + pos = (y_true>0.5).float() + neg = (y_true<0.5).float() + pos_weight = (pos.sum().item() + 1) / len(y_true) + neg_weight = (neg.sum().item() + 1) / len(y_true) + pos_weight = 1 / pos_weight + neg_weight = 1 / neg_weight + pos_weight = np.log(pos_weight) + 1 + neg_weight = np.log(neg_weight) + 1 + pos_weight = pos_weight / (pos_weight + neg_weight) + neg_weight = neg_weight / (pos_weight + neg_weight) + loss = (pos*loss*pos_weight + neg*loss*neg_weight).mean() + return loss + +# class FocalLoss(nn.Module): +# # +# def __init__(self, alpha=0.25, gamma=2): +# super(FocalLoss, self).__init__() +# self.alpha = alpha +# self.gamma = gamma +# # +# def focal_loss_with_logits(y_pred, y_true): + +# def forward(self, y_pred, y_true): +# y_pred = y_pred[:,1].view(-1) +# y_true = y_true.view(-1) +# assert(y_pred.shape==y_true.shape) + +# bce_loss = F.binary_cross_entropy_with_logits(y_pred, y_true, reduction='none') +# print(bce_loss) +# pt = torch.exp(-bce_loss) +# print(pt) +# focal_loss = self.alpha * (1-pt) ** self.gamma * bce_loss +# print(focal_loss) +# return focal_loss.sum() + +class FocalLoss(nn.Module): + def __init__(self, gamma=1): + super().__init__() + self.gamma = gamma + + def forward(self, y_pred, y_true): + # Inspired by the implementation of binary_cross_entropy_with_logits + y_pred = y_pred[:,1].contiguous().view(-1) + y_true = y_true.contiguous().view(-1) + if not (y_true.size() == y_pred.size()): + raise ValueError("Target size ({}) must be the same as y_pred size ({})".format(y_true.size(), y_pred.size())) + + max_val = (-y_pred).clamp(min=0) + loss = y_pred - y_pred * y_true + max_val + ((-max_val).exp() + (-y_pred - max_val).exp()).log() + + # This 
formula gives us the log sigmoid of 1-p if y is 0 and of p if y is 1 + invprobs = F.logsigmoid(-y_pred * (y_true * 2 - 1)) + loss = (invprobs * self.gamma).exp() * loss + + return loss.mean() + +class FocalLoss2d(nn.Module): + + def __init__(self, gamma=2, size_average=False): + super(FocalLoss2d, self).__init__() + self.gamma = gamma + self.size_average = size_average + + + def forward(self, logit, target, class_weight=None, type='softmax'): + target = target.view(-1, 1).long() + + + if type=='sigmoid': + if class_weight is None: + class_weight = [1]*2 #[0.5, 0.5] + + prob = F.sigmoid(logit) + prob = prob.view(-1, 1) + prob = torch.cat((1-prob, prob), 1) + select = torch.FloatTensor(len(prob), 2).zero_().cuda() + select.scatter_(1, target, 1.) + + elif type=='softmax': + B,C,H,W = logit.size() + if class_weight is None: + class_weight =[1]*C #[1/C]*C + + logit = logit.permute(0, 2, 3, 1).contiguous().view(-1, C) + prob = F.softmax(logit,1) + select = torch.FloatTensor(len(prob), C).zero_().cuda() + select.scatter_(1, target, 1.) + + class_weight = torch.FloatTensor(class_weight).cuda().view(-1,1) + class_weight = torch.gather(class_weight, 0, target) + + prob = (prob*select).sum(1).view(-1,1) + prob = torch.clamp(prob,1e-8,1-1e-8) + batch_loss = - class_weight *(torch.pow((1-prob), self.gamma))*prob.log() + + if self.size_average: + loss = batch_loss.mean() + else: + loss = batch_loss + + return loss diff --git a/segment/model/backbones_deeplab.py b/segment/model/backbones_deeplab.py new file mode 100644 index 0000000..23bd5dc --- /dev/null +++ b/segment/model/backbones_deeplab.py @@ -0,0 +1,213 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .resnext import ResNeXt, ResNet + +class SpatialAttention2d(nn.Module): + def __init__(self, channel, conv_layer): + super(SpatialAttention2d, self).__init__() + self.squeeze = nn.Conv2d(channel, 1, kernel_size=1, bias=False) + self.sigmoid = nn.Sigmoid() + + def forward(self, x): + z = self.squeeze(x) + z = self.sigmoid(z) + return x * z + +class GAB(nn.Module): + def __init__(self, input_dim, conv_layer, reduction=4): + super(GAB, self).__init__() + self.global_avgpool = nn.AdaptiveAvgPool2d(1) + self.conv1 = nn.Conv2d(input_dim, input_dim // reduction, kernel_size=1, stride=1) + self.conv2 = nn.Conv2d(input_dim // reduction, input_dim, kernel_size=1, stride=1) + self.relu = nn.ReLU(inplace=True) + self.sigmoid = nn.Sigmoid() + + def forward(self, x): + z = self.global_avgpool(x) + z = self.relu(self.conv1(z)) + z = self.sigmoid(self.conv2(z)) + return x * z + +class scSE(nn.Module): + def __init__(self, dim, conv_layer, reduction=4): + super(scSE, self).__init__() + self.satt = SpatialAttention2d(dim, conv_layer) + self.catt = GAB(dim, conv_layer, reduction) + + def forward(self, x): + return self.satt(x) + self.catt(x) + +def resnet50_gn_ws(output_stride=16, use_scse=True): + if output_stride == 16: + strides = (1, 2, 2, 1) + dilations = (1, 1, 1, 2) + elif output_stride == 8: + strides = (1, 2, 1, 1) + dilations = (1, 1, 2, 4) + backbone = dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + mg_rates=(1, 2, 4), + strides=strides, + dilations=dilations, + frozen_stages=0, + style='pytorch', + norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), + conv_cfg=dict(type='ConvWS') + ) + # + model = backbone.pop('type') + model = eval(model)(**backbone) + model.init_weights('open-mmlab://jhu/resnet50_gn_ws') + model.input_range = [0, 255] + model.mean = [102.9801, 115.9465, 122.7717] + 
model.std = [1.0, 1.0, 1.0] + # + low_level = nn.Sequential(model.conv1, model.gn1, model.relu, model.maxpool, model.layer1) + if use_scse: + encoder = nn.Sequential(low_level, + scSE(256, nn.Conv2d), + model.layer2, + scSE(512, nn.Conv2d), + model.layer3, + scSE(1024, nn.Conv2d), + model.layer4, + scSE(2048, nn.Conv2d)) + else: + encoder = model + return (encoder, low_level), [256, 2048], model + +def resnet101_gn_ws(output_stride=16, use_scse=True): + if output_stride == 16: + strides = (1, 2, 2, 1) + dilations = (1, 1, 1, 2) + elif output_stride == 8: + strides = (1, 2, 1, 1) + dilations = (1, 1, 2, 4) + backbone = dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(0, 1, 2, 3), + mg_rates=(1, 2, 4), + strides=strides, + dilations=dilations, + frozen_stages=0, + style='pytorch', + norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), + conv_cfg=dict(type='ConvWS') + ) + # + model = backbone.pop('type') + model = eval(model)(**backbone) + model.init_weights('open-mmlab://jhu/resnet101_gn_ws') + model.input_range = [0, 255] + model.mean = [123.675, 116.28, 103.53] + model.std = [58.395, 57.12, 57.375] + # + low_level = nn.Sequential(model.conv1, model.gn1, model.relu, model.maxpool, model.layer1) + if use_scse: + encoder = nn.Sequential(low_level, + scSE(256, nn.Conv2d), + model.layer2, + scSE(512, nn.Conv2d), + model.layer3, + scSE(1024, nn.Conv2d), + model.layer4, + scSE(2048, nn.Conv2d)) + else: + encoder = model + return (encoder, low_level), [256, 2048], model + + +def resnext50_gn_ws(output_stride=16, use_scse=True): + if output_stride == 16: + strides = (1, 2, 2, 1) + dilations = (1, 1, 1, 2) + elif output_stride == 8: + strides = (1, 2, 1, 1) + dilations = (1, 1, 2, 4) + backbone = dict( + type='ResNeXt', + depth=50, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + mg_rates=(1, 2, 4), + strides=strides, + dilations=dilations, + frozen_stages=0, + style='pytorch', + norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), + conv_cfg=dict(type='ConvWS') + ) + # + model = backbone.pop('type') + model = eval(model)(**backbone) + model.init_weights('open-mmlab://jhu/resnext50_32x4d_gn_ws') + model.input_range = [0, 255] + model.mean = [123.675, 116.28, 103.53] + model.std = [58.395, 57.12, 57.375] + # + low_level = nn.Sequential(model.conv1, model.gn1, model.relu, model.maxpool, model.layer1) + if use_scse: + encoder = nn.Sequential(low_level, + scSE(256, nn.Conv2d), + model.layer2, + scSE(512, nn.Conv2d), + model.layer3, + scSE(1024, nn.Conv2d), + model.layer4, + scSE(2048, nn.Conv2d)) + else: + encoder = model + return (encoder, low_level), [256, 2048], model + +def resnext101_gn_ws(output_stride=16, use_scse=True): + if output_stride == 16: + strides = (1, 2, 2, 1) + dilations = (1, 1, 1, 2) + elif output_stride == 8: + strides = (1, 2, 1, 1) + dilations = (1, 1, 2, 4) + backbone = dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + mg_rates=(1, 2, 4), + strides=strides, + dilations=dilations, + frozen_stages=0, + style='pytorch', + norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), + conv_cfg=dict(type='ConvWS') + ) + # + model = backbone.pop('type') + model = eval(model)(**backbone) + model.init_weights('open-mmlab://jhu/resnext101_32x4d_gn_ws') + model.input_range = [0, 255] + model.mean = [123.675, 116.28, 103.53] + model.std = [58.395, 57.12, 57.375] + # + low_level = nn.Sequential(model.conv1, model.gn1, model.relu, model.maxpool, model.layer1) + if 
use_scse: + encoder = nn.Sequential(low_level, + scSE(256, nn.Conv2d), + model.layer2, + scSE(512, nn.Conv2d), + model.layer3, + scSE(1024, nn.Conv2d), + model.layer4, + scSE(2048, nn.Conv2d)) + else: + encoder = model + return (encoder, low_level), [256, 2048], model \ No newline at end of file diff --git a/segment/model/backbones_deeplab_jpu.py b/segment/model/backbones_deeplab_jpu.py new file mode 100644 index 0000000..3b9680e --- /dev/null +++ b/segment/model/backbones_deeplab_jpu.py @@ -0,0 +1,662 @@ +from .resnext import ResNeXt, ResNet +from mmdet.models.backbones import HRNet + +import pretrainedmodels +import pretrainedmodels.utils +import torch + +from torch import nn +from torch.nn import functional as F + +class SpatialAttention2d(nn.Module): + def __init__(self, channel): + super(SpatialAttention2d, self).__init__() + self.squeeze = nn.Conv2d(channel, 1, kernel_size=1, bias=False) + self.sigmoid = nn.Sigmoid() + + def forward(self, x): + z = self.squeeze(x) + z = self.sigmoid(z) + return x * z + +class GAB(nn.Module): + def __init__(self, input_dim, reduction=4): + super(GAB, self).__init__() + self.global_avgpool = nn.AdaptiveAvgPool2d(1) + self.conv1 = nn.Conv2d(input_dim, input_dim // reduction, kernel_size=1, stride=1) + self.conv2 = nn.Conv2d(input_dim // reduction, input_dim, kernel_size=1, stride=1) + self.relu = nn.ReLU(inplace=True) + self.sigmoid = nn.Sigmoid() + + def forward(self, x): + z = self.global_avgpool(x) + z = self.relu(self.conv1(z)) + z = self.sigmoid(self.conv2(z)) + return x * z + +class scSE(nn.Module): + def __init__(self, dim, reduction=4): + super(scSE, self).__init__() + self.satt = SpatialAttention2d(dim) + self.catt = GAB(dim, reduction) + + def forward(self, x): + return self.satt(x) + self.catt(x) + +strides_and_dilations = { + 16 : { + 'strides': (1, 2, 2, 1), + 'dilations': (1, 1, 1, 2), + 'mg_rates': (1, 2, 4) + }, + 8 : { + 'strides': (1, 2, 1, 1), + 'dilations': (1, 1, 2, 4), + 'mg_rates': (1, 2, 4) + }, + 'jpu': { + 'strides': (1, 2, 2, 2), + 'dilations': (1, 1, 1, 1), + 'mg_rates': (1, 1, 1) + } +} + +def build_mmdet_res_model(output_stride, jpu, + use_scse, use_maxpool, + backbone, pretrained, preprocessing, + encoder_channels, group_norm): + model = backbone.pop('type') + model = eval(model)(**backbone) + model.init_weights(pretrained) + model.input_range = preprocessing['input_range'] + model.mean = preprocessing['mean'] + model.std = preprocessing['std'] + if group_norm: + low_level_list = [model.conv1, model.gn1, model.relu] + else: + low_level_list = [model.conv1, model.bn1, model.relu] + if use_maxpool: + low_level_list.append(model.maxpool) + low_level_list.append(model.layer1) + if use_scse: + low_level_list.append(scSE(encoder_channels[0])) + low_level = nn.Sequential(*low_level_list) + if jpu: + # If using JPU, need to return a list of encoder blocks + # so that final 3 encoder feature maps can be returned + if use_scse: + encoder = [nn.Sequential(model.layer2, scSE(encoder_channels[1])), + nn.Sequential(model.layer3, scSE(encoder_channels[2])), + nn.Sequential(model.layer4, scSE(encoder_channels[3]))] + else: + encoder = [model.layer2, model.layer3, model.layer4] + else: + if use_scse: + encoder = nn.Sequential(model.layer2, scSE(encoder_channels[1]), + model.layer3, scSE(encoder_channels[2]), + model.layer4, scSE(encoder_channels[3])) + else: + encoder = nn.Sequential(model.layer2, + model.layer3, + model.layer4) + return (encoder, low_level), encoder_channels, model + +########################## +# GROUPNORM + WEIGHT STD # 
+########################## + +def resnet50_gn_ws(output_stride=16, + jpu=True, + use_scse=True, + use_maxpool=True): + os = output_stride if not jpu else 'jpu' + backbone = dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + mg_rates=strides_and_dilations[os]['mg_rates'], + strides=strides_and_dilations[os]['strides'], + dilations=strides_and_dilations[os]['dilations'], + frozen_stages=0, + style='pytorch', + norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), + conv_cfg=dict(type='ConvWS') + ) + encoder_channels = [256, 512, 1024, 2048] + preprocessing = {'input_range': [0,255], 'mean': [123.675,116.28,103.53], 'std': [58.395,57.12,57.375]} + enc, ch, mod = build_mmdet_res_model( + output_stride, jpu, use_scse, use_maxpool, + backbone, 'open-mmlab://jhu/resnet50_gn_ws', + preprocessing, encoder_channels, group_norm=True + ) + return (enc[0], enc[1]), ch, mod + +def resnet101_gn_ws(output_stride=16, + jpu=True, + use_scse=True, + use_maxpool=True): + os = output_stride if not jpu else 'jpu' + backbone = dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(0, 1, 2, 3), + mg_rates=strides_and_dilations[os]['mg_rates'], + strides=strides_and_dilations[os]['strides'], + dilations=strides_and_dilations[os]['dilations'], + frozen_stages=0, + style='pytorch', + norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), + conv_cfg=dict(type='ConvWS') + ) + encoder_channels = [256, 512, 1024, 2048] + preprocessing = {'input_range': [0,255], 'mean': [123.675,116.28,103.53], 'std': [58.395,57.12,57.375]} + enc, ch, mod = build_mmdet_res_model( + output_stride, jpu, use_scse, use_maxpool, + backbone, 'open-mmlab://jhu/resnet101_gn_ws', + preprocessing, encoder_channels, group_norm=True + ) + return (enc[0], enc[1]), ch, mod + +def resnext50_gn_ws(output_stride=16, + jpu=True, + use_scse=True, + use_maxpool=True): + os = output_stride if not jpu else 'jpu' + backbone = dict( + type='ResNeXt', + depth=50, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + mg_rates=strides_and_dilations[os]['mg_rates'], + strides=strides_and_dilations[os]['strides'], + dilations=strides_and_dilations[os]['dilations'], + frozen_stages=0, + style='pytorch', + norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), + conv_cfg=dict(type='ConvWS') + ) + encoder_channels = [256, 512, 1024, 2048] + preprocessing = {'input_range': [0,255], 'mean': [123.675,116.28,103.53], 'std': [58.395,57.12,57.375]} + enc, ch, mod = build_mmdet_res_model( + output_stride, jpu, use_scse, use_maxpool, + backbone, 'open-mmlab://jhu/resnext50_32x4d_gn_ws', + preprocessing, encoder_channels, group_norm=True + ) + return (enc[0], enc[1]), ch, mod + +def resnext101_gn_ws(output_stride=16, + jpu=True, + use_scse=True, + use_maxpool=True): + os = output_stride if not jpu else 'jpu' + backbone = dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + mg_rates=strides_and_dilations[os]['mg_rates'], + strides=strides_and_dilations[os]['strides'], + dilations=strides_and_dilations[os]['dilations'], + frozen_stages=0, + style='pytorch', + norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), + conv_cfg=dict(type='ConvWS') + ) + encoder_channels = [256, 512, 1024, 2048] + preprocessing = {'input_range': [0,255], 'mean': [123.675,116.28,103.53], 'std': [58.395,57.12,57.375]} + enc, ch, mod = build_mmdet_res_model( + output_stride, jpu, use_scse, use_maxpool, + backbone, 'open-mmlab://jhu/resnext101_32x4d_gn_ws', + 
preprocessing, encoder_channels, group_norm=True + ) + return (enc[0], enc[1]), ch, mod + +############# +# GROUPNORM # +############# + +def resnet50_gn(output_stride=16, + jpu=True, + use_scse=True, + use_maxpool=True): + os = output_stride if not jpu else 'jpu' + backbone = dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + mg_rates=strides_and_dilations[os]['mg_rates'], + strides=strides_and_dilations[os]['strides'], + dilations=strides_and_dilations[os]['dilations'], + frozen_stages=0, + style='pytorch', + norm_cfg=dict(type='GN', num_groups=32, requires_grad=True) + ) + encoder_channels = [256, 512, 1024, 2048] + preprocessing = {'input_range': [0,255], 'mean': [102.9801,115.9465,122.7717], 'std': [1.0,1.0,1.0]} + enc, ch, mod = build_mmdet_res_model( + output_stride, jpu, use_scse, use_maxpool, + backbone, 'open-mmlab://detectron/resnet50_gn', + preprocessing, encoder_channels, group_norm=True + ) + return (enc[0], enc[1]), ch, mod + +def resnet101_gn(output_stride=16, + jpu=True, + use_scse=True, + use_maxpool=True): + os = output_stride if not jpu else 'jpu' + backbone = dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(0, 1, 2, 3), + mg_rates=strides_and_dilations[os]['mg_rates'], + strides=strides_and_dilations[os]['strides'], + dilations=strides_and_dilations[os]['dilations'], + frozen_stages=0, + style='pytorch', + norm_cfg=dict(type='GN', num_groups=32, requires_grad=True) + ) + encoder_channels = [256, 512, 1024, 2048] + preprocessing = {'input_range': [0,255], 'mean': [102.9801,115.9465,122.7717], 'std': [1.0,1.0,1.0]} + enc, ch, mod = build_mmdet_res_model( + output_stride, jpu, use_scse, use_maxpool, + backbone, 'open-mmlab://detectron/resnet101_gn', + preprocessing, encoder_channels, group_norm=True + ) + return (enc[0], enc[1]), ch, mod + +def resnext50_gn(output_stride=16, + jpu=True, + use_scse=True, + use_maxpool=True): + os = output_stride if not jpu else 'jpu' + backbone = dict( + type='ResNeXt', + depth=50, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + mg_rates=strides_and_dilations[os]['mg_rates'], + strides=strides_and_dilations[os]['strides'], + dilations=strides_and_dilations[os]['dilations'], + frozen_stages=0, + style='pytorch', + norm_cfg=dict(type='GN', num_groups=32, requires_grad=True) + ) + encoder_channels = [256, 512, 1024, 2048] + preprocessing = {'input_range': [0,255], 'mean': [123.675,116.28,103.53], 'std': [58.395,57.12,57.375]} + enc, ch, mod = build_mmdet_res_model( + output_stride, jpu, use_scse, use_maxpool, + backbone, 'open-mmlab://jhu/resnext50_32x4d_gn', + preprocessing, encoder_channels, group_norm=True + ) + return (enc[0], enc[1]), ch, mod + +def resnext101_gn(output_stride=16, + jpu=True, + use_scse=True, + use_maxpool=True): + os = output_stride if not jpu else 'jpu' + backbone = dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + mg_rates=strides_and_dilations[os]['mg_rates'], + strides=strides_and_dilations[os]['strides'], + dilations=strides_and_dilations[os]['dilations'], + frozen_stages=0, + style='pytorch', + norm_cfg=dict(type='GN', num_groups=32, requires_grad=True) + ) + preprocessing = {'input_range': [0,255], 'mean': [123.675,116.28,103.53], 'std': [58.395,57.12,57.375]} + encoder_channels = [256, 512, 1024, 2048] + enc, ch, mod = build_mmdet_res_model( + output_stride, jpu, use_scse, use_maxpool, + backbone, 'open-mmlab://jhu/resnext101_32x4d_gn', + preprocessing, encoder_channels, 
group_norm=True + ) + return (enc[0], enc[1]), ch, mod + +def dcn_resnext101_gn(output_stride=16, + jpu=True, + use_scse=True, + use_maxpool=True): + os = output_stride if not jpu else 'jpu' + backbone = dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + mg_rates=strides_and_dilations[os]['mg_rates'], + strides=strides_and_dilations[os]['strides'], + dilations=strides_and_dilations[os]['dilations'], + stage_with_dcn=(False, True, True, True), + dcn=dict(modulated=True, groups=32, deformable_groups=1, fallback_on_stride=False), + frozen_stages=0, + style='pytorch', + norm_cfg=dict(type='GN', num_groups=32, requires_grad=True) + ) + encoder_channels = [256, 512, 1024, 2048] + preprocessing = {'input_range': [0,255], 'mean': [123.675,116.28,103.53], 'std': [58.395,57.12,57.375]} + enc, ch, mod = build_mmdet_res_model( + output_stride, jpu, use_scse, use_maxpool, + backbone, 'open-mmlab://jhu/resnext101_32x4d_gn', + preprocessing, encoder_channels, group_norm=True + ) + return (enc[0], enc[1]), ch, mod + + +############# +# BATCHNORM # +############# +# BatchNorm is NOT frozen +# i.e., requires_grad + train mode + +def resnext101_64x4d(output_stride=16, + jpu=True, + use_scse=True, + use_maxpool=True): + os = output_stride if not jpu else 'jpu' + backbone = dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + mg_rates=strides_and_dilations[os]['mg_rates'], + strides=strides_and_dilations[os]['strides'], + dilations=strides_and_dilations[os]['dilations'], + frozen_stages=0, + style='pytorch', + norm_eval=False + ) + encoder_channels = [256, 512, 1024, 2048] + preprocessing = {'input_range': [0,255], 'mean': [123.675,116.28,103.53], 'std': [58.395,57.12,57.375]} + enc, ch, mod = build_mmdet_res_model( + output_stride, jpu, use_scse, use_maxpool, + backbone, 'open-mmlab://resnext101_64x4d', + preprocessing, encoder_channels, group_norm=False + ) + return (enc[0], enc[1]), ch, mod + +def resnext101_32x4d(output_stride=16, + jpu=True, + use_scse=True, + use_maxpool=True): + os = output_stride if not jpu else 'jpu' + backbone = dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + mg_rates=strides_and_dilations[os]['mg_rates'], + strides=strides_and_dilations[os]['strides'], + dilations=strides_and_dilations[os]['dilations'], + frozen_stages=0, + style='pytorch', + norm_eval=False + ) + encoder_channels = [256, 512, 1024, 2048] + preprocessing = {'input_range': [0,255], 'mean': [123.675,116.28,103.53], 'std': [58.395,57.12,57.375]} + enc, ch, mod = build_mmdet_res_model( + output_stride, jpu, use_scse, use_maxpool, + backbone, 'open-mmlab://resnext101_32x4d', + preprocessing, encoder_channels, group_norm=False + ) + return (enc[0], enc[1]), ch, mod + +# TODO: keeps throwing an error +def dcn_resnext101_32x4d(output_stride=16, + jpu=True, + use_scse=True, + use_maxpool=True): + os = output_stride if not jpu else 'jpu' + backbone = dict( + type='ResNeXt', + depth=101, + groups=32, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + mg_rates=strides_and_dilations[os]['mg_rates'], + strides=strides_and_dilations[os]['strides'], + dilations=strides_and_dilations[os]['dilations'], + stage_with_dcn=(False, True, True, True), + dcn=dict(modulated=True, groups=32, deformable_groups=1, fallback_on_stride=False), + frozen_stages=0, + style='pytorch', + norm_eval=False + ) + encoder_channels = [256, 512, 1024, 2048] + 
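# stage output channels of Bottleneck ResNe(X)t (expansion 4); the decoder taps channels[0], the center block channels[-1] +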
preprocessing = {'input_range': [0,255], 'mean': [123.675,116.28,103.53], 'std': [58.395,57.12,57.375]} + enc, ch, mod = build_mmdet_res_model( + output_stride, jpu, use_scse, use_maxpool, + backbone, 'open-mmlab://resnext101_32x4d', + preprocessing, encoder_channels, group_norm=False + ) + return (enc[0], enc[1]), ch, mod + +######### +# HRNET # +######### + +# HRNet will not work with JPU +# HRNet does not have dilations + +class HREncoder(nn.Module): + def __init__(self, stages, stage_cfgs, transitions): + super(HREncoder, self).__init__() + self.stage2 = stages[0] + self.stage3 = stages[1] + self.stage4 = stages[2] + self.stage2_cfg = stage_cfgs[0] + self.stage3_cfg = stage_cfgs[1] + self.stage4_cfg = stage_cfgs[2] + self.transition1 = transitions[0] + self.transition2 = transitions[1] + self.transition3 = transitions[2] + def forward(self, x): + x_list = [] + for i in range(self.stage2_cfg['num_branches']): + if self.transition1[i] is not None: + x_list.append(self.transition1[i](x)) + else: + x_list.append(x) + y_list = self.stage2(x_list) + x_list = [] + for i in range(self.stage3_cfg['num_branches']): + if self.transition2[i] is not None: + if i < self.stage2_cfg['num_branches']: + x_list.append(self.transition2[i](y_list[i])) + else: + x_list.append(self.transition2[i](y_list[-1])) + else: + x_list.append(y_list[i]) + y_list = self.stage3(x_list) + x_list = [] + for i in range(self.stage4_cfg['num_branches']): + if self.transition3[i] is not None: + if i < self.stage3_cfg['num_branches']: + x_list.append(self.transition3[i](y_list[i])) + else: + x_list.append(self.transition3[i](y_list[-1])) + else: + x_list.append(y_list[i]) + x = self.stage4(x_list) + # Upsampling + x0_h, x0_w = x[0].size(2), x[0].size(3) + x1 = F.interpolate(x[1], size=(x0_h, x0_w), mode='bilinear') + x2 = F.interpolate(x[2], size=(x0_h, x0_w), mode='bilinear') + x3 = F.interpolate(x[3], size=(x0_h, x0_w), mode='bilinear') + x = torch.cat([x[0], x1, x2, x3], 1) + return x + +def hrnetv2_w18(output_stride=None, + jpu=None, + use_scse=True, + use_maxpool=None): + backbone=dict( + type='HRNet', + norm_eval=False, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4,), + num_channels=(64,)), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(18, 36)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(18, 36, 72)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(18, 36, 72, 144)))) + encoder_channels = [256, None, None, 270] + preprocessing = {'input_range': [0,255], 'mean': [123.675,116.28,103.53], 'std': [58.395,57.12,57.375]} + model = backbone.pop('type') + model = eval(model)(**backbone) + model.init_weights('open-mmlab://msra/hrnetv2_w18') + model.input_range = preprocessing['input_range'] + model.mean = preprocessing['mean'] + model.std = preprocessing['std'] + low_level_list = [model.conv1, model.norm1, model.relu, model.conv2, model.norm2, model.relu, model.layer1] + if use_scse: + low_level_list.append(scSE(encoder_channels[0])) + low_level = nn.Sequential(*low_level_list) + encoder = HREncoder(stages=[model.stage2,model.stage3,model.stage4], + stage_cfgs=[model.stage2_cfg,model.stage3_cfg,model.stage4_cfg], + transitions=[model.transition1,model.transition2,model.transition3]) + if use_scse: + encoder = nn.Sequential(encoder, scSE(encoder_channels[3])) + return (encoder, low_level), 
encoder_channels, model + +def hrnetv2_w32(output_stride=None, + jpu=None, + use_scse=True, + use_maxpool=None): + backbone=dict( + type='HRNet', + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(32, 64, 128, 256)))) + encoder_channels = [256, None, None, 480] + preprocessing = {'input_range': [0,255], 'mean': [123.675,116.28,103.53], 'std': [58.395,57.12,57.375]} + model = backbone.pop('type') + model = eval(model)(**backbone) + model.init_weights('open-mmlab://msra/hrnetv2_w32') + model.input_range = preprocessing['input_range'] + model.mean = preprocessing['mean'] + model.std = preprocessing['std'] + low_level_list = [model.conv1, model.norm1, model.relu, model.conv2, model.norm2, model.relu, model.layer1] + if use_scse: + low_level_list.append(scSE(encoder_channels[0])) + low_level = nn.Sequential(*low_level_list) + encoder = HREncoder(stages=[model.stage2,model.stage3,model.stage4], + stage_cfgs=[model.stage2_cfg,model.stage3_cfg,model.stage4_cfg], + transitions=[model.transition1,model.transition2,model.transition3]) + if use_scse: + encoder = nn.Sequential(encoder, scSE(encoder_channels[3])) + return (encoder, low_level), encoder_channels, model + + +def hrnetv2_w40(output_stride=None, + jpu=None, + use_scse=True, + use_maxpool=None): + backbone=dict( + type='HRNet', + norm_eval=False, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4,), + num_channels=(64,)), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(40, 80)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(40, 80, 160)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(40, 80, 160, 320)))) + encoder_channels = [256, None, None, 600] + preprocessing = {'input_range': [0,255], 'mean': [123.675,116.28,103.53], 'std': [58.395,57.12,57.375]} + model = backbone.pop('type') + model = eval(model)(**backbone) + model.init_weights('open-mmlab://msra/hrnetv2_w40') + model.input_range = preprocessing['input_range'] + model.mean = preprocessing['mean'] + model.std = preprocessing['std'] + low_level_list = [model.conv1, model.norm1, model.relu, model.conv2, model.norm2, model.relu, model.layer1] + if use_scse: + low_level_list.append(scSE(encoder_channels[0])) + low_level = nn.Sequential(*low_level_list) + encoder = HREncoder(stages=[model.stage2,model.stage3,model.stage4], + stage_cfgs=[model.stage2_cfg,model.stage3_cfg,model.stage4_cfg], + transitions=[model.transition1,model.transition2,model.transition3]) + if use_scse: + encoder = nn.Sequential(encoder, scSE(encoder_channels[3])) + return (encoder, low_level), encoder_channels, model diff --git a/segment/model/deeplab.py b/segment/model/deeplab.py new file mode 100644 index 0000000..ebca9b4 --- /dev/null +++ b/segment/model/deeplab.py @@ -0,0 +1,171 @@ +# From https://github.com/jfzhang95/pytorch-deeplab-xception + +import peepdom.backbones_deeplab as backbones + +import torch + +from torch import nn +from torch.nn import functional as F + 
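+# Wiring below: the backbone yields low-level (stride-4) and high-level
+# features; ASPP adds multi-scale context; the decoder fuses the two and
+# predicts per-pixel logits, bilinearly upsampled back to the input size.
+# An optional image-level classifier head pools encoder + decoder features.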
+from torch.nn.modules.batchnorm import _BatchNorm + +class GroupNorm32(nn.GroupNorm): + def __init__(self, num_channels): + super(GroupNorm32, self).__init__(num_channels=num_channels, num_groups=32) + +class _ASPPModule(nn.Module): + def __init__(self, inplanes, planes, kernel_size, padding, dilation, norm_layer): + super(_ASPPModule, self).__init__() + self.norm = norm_layer(planes) + self.atrous_conv = nn.Conv2d(inplanes, planes, kernel_size=kernel_size, + stride=1, padding=padding, dilation=dilation, bias=False) + self.elu = nn.ELU(True) + + def forward(self, x): + x = self.atrous_conv(x) + x = self.norm(x) + return self.elu(x) + +class ASPP(nn.Module): + def __init__(self, dilations, inplanes, planes, norm_layer, dropout=0.5): + super(ASPP, self).__init__() + + self.aspp1 = _ASPPModule(inplanes, planes, 1, padding=0, dilation=dilations[0], norm_layer=norm_layer) + self.aspp2 = _ASPPModule(inplanes, planes, 3, padding=dilations[1], dilation=dilations[1], norm_layer=norm_layer) + self.aspp3 = _ASPPModule(inplanes, planes, 3, padding=dilations[2], dilation=dilations[2], norm_layer=norm_layer) + self.aspp4 = _ASPPModule(inplanes, planes, 3, padding=dilations[3], dilation=dilations[3], norm_layer=norm_layer) + + self.norm1 = norm_layer(planes) + self.global_avg_pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)), + nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, bias=False), + norm_layer(planes), + nn.ELU(True)) + self.conv1 = nn.Conv2d(5 * planes, planes, 1, bias=False) + self.elu = nn.ELU(True) + self.dropout = nn.Dropout2d(dropout) + + def forward(self, x): + x1 = self.aspp1(x) + x2 = self.aspp2(x) + x3 = self.aspp3(x) + x4 = self.aspp4(x) + x5 = self.global_avg_pool(x) + x5 = F.interpolate(x5, size=x4.size()[2:], mode='bilinear', align_corners=True) + x = torch.cat((x1, x2, x3, x4, x5), dim=1) + + x = self.conv1(x) + x = self.norm1(x) + x = self.elu(x) + + return self.dropout(x) + +class Decoder(nn.Module): + def __init__(self, num_classes, spp_inplanes, low_level_inplanes, inplanes, dropout, norm_layer): + super(Decoder, self).__init__() + + self.conv1 = nn.Conv2d(low_level_inplanes, inplanes, 1, bias=False) + self.norm1 = norm_layer(inplanes) + + self.elu = nn.ELU(True) + self.last_conv = nn.Sequential(nn.Conv2d(spp_inplanes + inplanes, spp_inplanes, kernel_size=3, stride=1, padding=1, bias=False), + norm_layer(spp_inplanes), + nn.ELU(True), + nn.Dropout2d(dropout[0]), + nn.Conv2d(spp_inplanes, spp_inplanes, kernel_size=3, stride=1, padding=1, bias=False), + norm_layer(spp_inplanes), + nn.ELU(True), + nn.Dropout2d(dropout[1]), + nn.Conv2d(spp_inplanes, num_classes, kernel_size=1, stride=1)) + + def forward(self, x, low_level_feat, classifier=False): + low_level_feat = self.conv1(low_level_feat) + low_level_feat = self.norm1(low_level_feat) + low_level_feat = self.elu(low_level_feat) + x = F.interpolate(x, size=low_level_feat.size()[2:], mode='bilinear', align_corners=False) + x = torch.cat((x, low_level_feat), dim=1) + decoder_output = x + x = self.last_conv(x) + if classifier: + return x, decoder_output + else: + return x + + +class DeepLab(nn.Module): + def __init__(self, backbone, + output_stride=16, + group_norm=True, + classifier=False, + dropout=dict( + spp=0.5, + cls=0.2, + dc0=0.5, + dc1=0.1 + ), + num_classes=2, + norm_eval=False): + super(DeepLab, self).__init__() + + layers, channels, backbone = getattr(backbones, backbone)(output_stride=output_stride) + + self.input_range = backbone.input_range + self.mean = backbone.mean + self.std = backbone.std + + self.classifier 
= classifier + + # default is freeze BatchNorm + self.norm_eval = norm_eval + + norm_layer = GroupNorm32 if group_norm else nn.BatchNorm2d + + self.backbone = layers[0] + self.low_level = layers[1] + + self.aspp_planes = 256 + + if output_stride == 16: + aspp_dilations = (1, 6, 12, 18) + elif output_stride == 8: + aspp_dilations = (1, 12, 24, 36) + + self.spp = ASPP(aspp_dilations, inplanes=channels[1], planes=self.aspp_planes, dropout=dropout['spp'], norm_layer=norm_layer) + self.decoder = Decoder(num_classes, self.aspp_planes, channels[0], 64, (dropout['dc0'], dropout['dc1']), norm_layer) + self.train_mode = True + + # classifier branch + if classifier: + self.logit_image = nn.Sequential(nn.Dropout(dropout['cls']), nn.Linear(channels[1]+self.aspp_planes+64, num_classes)) + + def forward(self, x_input): + low_level_feat = self.low_level(x_input) + features = self.backbone(x_input) + + x = self.spp(features) + if self.classifier: + x, decoder_output = self.decoder(x, low_level_feat, classifier=self.classifier) + else: + x = self.decoder(x, low_level_feat, classifier=self.classifier) + out_size = x_input.size()[2:] + x = F.interpolate(x, size=out_size, mode='bilinear', align_corners=False) + + # classifier branch + if self.classifier: + features = features.mean([2, 3]) + decoder_output = decoder_output.mean([2,3]) + features = torch.cat((features, decoder_output), dim=1) + c = self.logit_image(features) + return x, c + else: + return x + + def train(self, mode=True): + super(DeepLab, self).train(mode) + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() + return self + + diff --git a/segment/model/deeplab_jpu.py b/segment/model/deeplab_jpu.py new file mode 100644 index 0000000..7f32a68 --- /dev/null +++ b/segment/model/deeplab_jpu.py @@ -0,0 +1,352 @@ +# From https://github.com/jfzhang95/pytorch-deeplab-xception + +import peepdom.backbones_deeplab_jpu as backbones +try: + from model.encoding import * +except: + pass + +from torch import nn +from torch.nn import functional as F + +import torch + +# Wrapper for GroupNorm with 32 channels +class GroupNorm32(nn.GroupNorm): + def __init__(self, num_channels): + super(GroupNorm32, self).__init__(num_channels=num_channels, num_groups=32) + +######## +# ASPP # +######## + +class _ASPPModule(nn.Module): + def __init__(self, inplanes, planes, kernel_size, padding, dilation, norm_layer): + super(_ASPPModule, self).__init__() + self.norm = norm_layer(planes) + self.atrous_conv = nn.Conv2d(inplanes, planes, kernel_size=kernel_size, + stride=1, padding=padding, dilation=dilation, bias=False) + self.elu = nn.ELU(True) + + def forward(self, x): + x = self.atrous_conv(x) + x = self.norm(x) + return self.elu(x) + +class ASPP(nn.Module): + def __init__(self, dilations, inplanes, planes, norm_layer, dropout=0.5): + super(ASPP, self).__init__() + + self.aspp1 = _ASPPModule(inplanes, planes, 1, padding=0, dilation=dilations[0], norm_layer=norm_layer) + self.aspp2 = _ASPPModule(inplanes, planes, 3, padding=dilations[1], dilation=dilations[1], norm_layer=norm_layer) + self.aspp3 = _ASPPModule(inplanes, planes, 3, padding=dilations[2], dilation=dilations[2], norm_layer=norm_layer) + self.aspp4 = _ASPPModule(inplanes, planes, 3, padding=dilations[3], dilation=dilations[3], norm_layer=norm_layer) + + self.norm1 = norm_layer(planes) + self.global_avg_pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)), + nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, bias=False), + 
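# image-level context branch: global pooling followed by a 1x1 conv to 'planes' channels +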
norm_layer(planes), + nn.ELU(True)) + self.conv1 = nn.Conv2d(5 * planes, planes, 1, bias=False) + self.elu = nn.ELU(True) + self.dropout = nn.Dropout2d(dropout) + + def forward(self, x): + x1 = self.aspp1(x) + x2 = self.aspp2(x) + x3 = self.aspp3(x) + x4 = self.aspp4(x) + x5 = self.global_avg_pool(x) + x5 = F.interpolate(x5, size=x4.size()[2:], mode='bilinear') + x = torch.cat((x1, x2, x3, x4, x5), dim=1) + + x = self.conv1(x) + x = self.norm1(x) + x = self.elu(x) + + return self.dropout(x) + +####### +# FPA # +####### + +# From phalanx +class FPAv2(nn.Module): + def __init__(self, input_dim, output_dim, norm_layer): + super(FPAv2, self).__init__() + self.glob = nn.Sequential(nn.AdaptiveAvgPool2d(1), + nn.Conv2d(input_dim, output_dim, kernel_size=1, bias=False)) + + self.down2_1 = nn.Sequential(nn.Conv2d(input_dim, input_dim, kernel_size=5, stride=2, padding=2, bias=False), + norm_layer(input_dim)) + self.down2_2 = nn.Sequential(nn.Conv2d(input_dim, output_dim, kernel_size=5, padding=2, bias=False), + norm_layer(output_dim)) + + self.down3_1 = nn.Sequential(nn.Conv2d(input_dim, input_dim, kernel_size=3, stride=2, padding=1, bias=False), + norm_layer(input_dim)) + self.down3_2 = nn.Sequential(nn.Conv2d(input_dim, output_dim, kernel_size=3, padding=1, bias=False), + norm_layer(output_dim)) + + self.conv1 = nn.Sequential(nn.Conv2d(input_dim, output_dim, kernel_size=1, bias=False), + norm_layer(output_dim)) + + def forward(self, x): + x_glob = self.glob(x) + x_glob = F.interpolate(x_glob, scale_factor=int(x.size()[-1] / x_glob.size()[-1]), mode='bilinear') # 256, 16, 16 + + d2 = F.elu(self.down2_1(x)) + d3 = F.elu(self.down3_1(d2)) + d2 = F.elu(self.down2_2(d2)) + d3 = F.elu(self.down3_2(d3)) + d3 = F.interpolate(d3, scale_factor=2, mode='bilinear') # 256, 8, 8 + d2 = d2 + d3 + d2 = F.interpolate(d2, scale_factor=2, mode='bilinear') # 256, 16, 16 + + x = F.elu(self.conv1(x)) + x = x * d2 + x = x + x_glob + + return x + +####### +# JPU # +####### + +# From https://github.com/wuhuikai/FastFCN/ +class SeparableConv2d(nn.Module): + def __init__(self, inplanes, planes, kernel_size=3, stride=1, padding=1, dilation=1, bias=False, BatchNorm=nn.BatchNorm2d): + super(SeparableConv2d, self).__init__() + + self.conv1 = nn.Conv2d(inplanes, inplanes, kernel_size, stride, padding, dilation, groups=inplanes, bias=bias) + self.bn = BatchNorm(inplanes) + self.pointwise = nn.Conv2d(inplanes, planes, 1, 1, 0, 1, 1, bias=bias) + + def forward(self, x): + x = self.conv1(x) + x = self.bn(x) + x = self.pointwise(x) + return x + +class JPU16(nn.Module): + def __init__(self, in_channels, width=512, norm_layer=None): + super(JPU16, self).__init__() + + self.conv5 = nn.Sequential( + nn.Conv2d(in_channels[-1], width, 3, padding=1, bias=False), + norm_layer(width), + nn.ReLU(inplace=True)) + self.conv4 = nn.Sequential( + nn.Conv2d(in_channels[-2], width, 3, padding=1, bias=False), + norm_layer(width), + nn.ReLU(inplace=True)) + + self.dilation1 = nn.Sequential(SeparableConv2d(2*width, width, kernel_size=3, padding=1, dilation=1, bias=False), + norm_layer(width), + nn.ReLU(inplace=True)) + self.dilation2 = nn.Sequential(SeparableConv2d(2*width, width, kernel_size=3, padding=2, dilation=2, bias=False), + norm_layer(width), + nn.ReLU(inplace=True)) + self.dilation3 = nn.Sequential(SeparableConv2d(2*width, width, kernel_size=3, padding=4, dilation=4, bias=False), + norm_layer(width), + nn.ReLU(inplace=True)) + self.dilation4 = nn.Sequential(SeparableConv2d(2*width, width, kernel_size=3, padding=8, dilation=8, bias=False), + 
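# largest of four parallel dilation rates (1, 2, 4, 8), fused by concatenation in forward() +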
norm_layer(width), + nn.ReLU(inplace=True)) + + def forward(self, *inputs): + feats = [self.conv5(inputs[-1]), self.conv4(inputs[-2])] + _, _, h, w = feats[-1].size() + feats[-2] = F.interpolate(feats[-2], size=(h, w), mode='bilinear') + feat = torch.cat(feats, dim=1) + feat = torch.cat([self.dilation1(feat), self.dilation2(feat), self.dilation3(feat), self.dilation4(feat)], dim=1) + + return feat + +class JPU08(nn.Module): + def __init__(self, in_channels, width=512, norm_layer=None): + super(JPU08, self).__init__() + + self.conv5 = nn.Sequential( + nn.Conv2d(in_channels[-1], width, 3, padding=1, bias=False), + norm_layer(width), + nn.ReLU(inplace=True)) + self.conv4 = nn.Sequential( + nn.Conv2d(in_channels[-2], width, 3, padding=1, bias=False), + norm_layer(width), + nn.ReLU(inplace=True)) + self.conv3 = nn.Sequential( + nn.Conv2d(in_channels[-3], width, 3, padding=1, bias=False), + norm_layer(width), + nn.ReLU(inplace=True)) + + self.dilation1 = nn.Sequential(SeparableConv2d(3*width, width, kernel_size=3, padding=1, dilation=1, bias=False), + norm_layer(width), + nn.ReLU(inplace=True)) + self.dilation2 = nn.Sequential(SeparableConv2d(3*width, width, kernel_size=3, padding=2, dilation=2, bias=False), + norm_layer(width), + nn.ReLU(inplace=True)) + self.dilation3 = nn.Sequential(SeparableConv2d(3*width, width, kernel_size=3, padding=4, dilation=4, bias=False), + norm_layer(width), + nn.ReLU(inplace=True)) + self.dilation4 = nn.Sequential(SeparableConv2d(3*width, width, kernel_size=3, padding=8, dilation=8, bias=False), + norm_layer(width), + nn.ReLU(inplace=True)) + + def forward(self, *inputs): + feats = [self.conv5(inputs[-1]), self.conv4(inputs[-2]), self.conv3(inputs[-3])] + _, _, h, w = feats[-1].size() + feats[-2] = F.interpolate(feats[-2], size=(h, w), mode='bilinear') + feats[-3] = F.interpolate(feats[-3], size=(h, w), mode='bilinear') + feat = torch.cat(feats, dim=1) + feat = torch.cat([self.dilation1(feat), self.dilation2(feat), self.dilation3(feat), self.dilation4(feat)], dim=1) + + return feat + +####### +# PSP # +####### +# From https://github.com/Lextal/pspnet-pytorch + +class PSPModule(nn.Module): + def __init__(self, features, out_features=1024, sizes=(1, 2, 3, 6)): + super().__init__() + self.stages = [] + self.stages = nn.ModuleList([self._make_stage(features, size) for size in sizes]) + self.bottleneck = nn.Conv2d(features * (len(sizes) + 1), out_features, kernel_size=1) + self.relu = nn.ReLU() + + def _make_stage(self, features, size): + prior = nn.AdaptiveAvgPool2d(output_size=(size, size)) + conv = nn.Conv2d(features, features, kernel_size=1, bias=False) + return nn.Sequential(prior, conv) + + def forward(self, feats): + h, w = feats.size(2), feats.size(3) + priors = [F.interpolate(input=stage(feats), size=(h, w), mode='bilinear') for stage in self.stages] + [feats] + bottle = self.bottleneck(torch.cat(priors, 1)) + return self.relu(bottle) + +# Decoder for DeepLab +class Decoder(nn.Module): + def __init__(self, num_classes, spp_inplanes, low_level_inplanes, inplanes, dropout, norm_layer): + super(Decoder, self).__init__() + + self.conv1 = nn.Conv2d(low_level_inplanes, inplanes, 1, bias=False) + self.norm1 = norm_layer(inplanes) + + self.elu = nn.ELU(True) + self.last_conv = nn.Sequential(nn.Conv2d(spp_inplanes + inplanes, spp_inplanes, kernel_size=3, stride=1, padding=1, bias=False), + norm_layer(spp_inplanes), + nn.ELU(True), + nn.Dropout2d(dropout[0]), + nn.Conv2d(spp_inplanes, spp_inplanes, kernel_size=3, stride=1, padding=1, bias=False), + 
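# second 3x3 refinement conv; the final 1x1 conv below produces the num_classes logits +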
norm_layer(spp_inplanes), + nn.ELU(True), + nn.Dropout2d(dropout[1]), + nn.Conv2d(spp_inplanes, num_classes, kernel_size=1, stride=1)) + + def forward(self, x, low_level_feat): + low_level_feat = self.conv1(low_level_feat) + low_level_feat = self.norm1(low_level_feat) + low_level_feat = self.elu(low_level_feat) + x = F.interpolate(x, size=low_level_feat.size()[2:], mode='bilinear') + x = torch.cat((x, low_level_feat), dim=1) + decoder_output = x + x = self.last_conv(x) + return x + +class DeepLab(nn.Module): + def __init__(self, backbone, + output_stride=16, + group_norm=True, + dropout=dict( + spp=0.5, + dc0=0.5, + dc1=0.1 + ), + num_classes=2, + center='aspp', + jpu=True, + norm_eval=False, + use_maxpool=True): + super(DeepLab, self).__init__() + + layers, channels, backbone = getattr(backbones, backbone)(output_stride=output_stride, jpu=jpu, use_maxpool=use_maxpool) + + self.input_range = backbone.input_range + self.mean = backbone.mean + self.std = backbone.std + + # default is freeze BatchNorm + self.norm_eval = norm_eval + + norm_layer = GroupNorm32 if group_norm else nn.BatchNorm2d + + if jpu: + self.backbone1 = layers[0][0] + self.backbone2 = layers[0][1] + self.backbone3 = layers[0][2] + else: + self.backbone = layers[0] + self.low_level = layers[1] + self.center_type = center + self.use_jpu = jpu + + self.aspp_planes = 256 + self.output_stride = output_stride + + if output_stride == 16: + aspp_dilations = (1, 6, 12, 18) + elif output_stride == 8: + aspp_dilations = (1, 12, 24, 36) + + center_input_channels = channels[-1] + + if center == 'fpa': + self.center = FPAv2(center_input_channels, self.aspp_planes, norm_layer=norm_layer) + elif center == 'aspp': + self.center = ASPP(aspp_dilations, inplanes=center_input_channels, planes=self.aspp_planes, dropout=dropout['spp'], norm_layer=norm_layer) + elif center == 'psp': + self.center = PSPModule(center_input_channels, out_features=self.aspp_planes) + elif center == 'enc': + self.center = EncModule(center_input_channels, self.aspp_planes, norm_layer) + if jpu: + if output_stride == 16: + self.jpu = JPU16(channels[2:], norm_layer=norm_layer, width=center_input_channels // 4) + elif output_stride == 8: + self.jpu = JPU08(channels[1:], norm_layer=norm_layer, width=center_input_channels // 4) + + self.decoder = Decoder(num_classes, self.aspp_planes, channels[0], 64, (dropout['dc0'], dropout['dc1']), norm_layer) + self.train_mode = True + + def forward(self, x_input): + low_level_feat = self.low_level(x_input) + if self.use_jpu: + c2 = self.backbone1(low_level_feat) + c3 = self.backbone2(c2) + c4 = self.backbone3(c3) + else: + features = self.backbone(low_level_feat) + + if self.use_jpu: + if self.output_stride == 16: + x = self.center(self.jpu(c3, c4)) + elif self.output_stride == 8: + x = self.center(self.jpu(c2, c3, c4)) + else: + x = self.center(features) + x = self.decoder(x, low_level_feat) + out_size = x_input.size()[2:] + x = F.interpolate(x, size=out_size, mode='bilinear') + return x + + def train(self, mode=True): + super(DeepLab, self).train(mode) + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, nn.BatchNorm2d): + m.eval() + return self + + diff --git a/segment/model/resnet.py b/segment/model/resnet.py new file mode 100644 index 0000000..a1fb41a --- /dev/null +++ b/segment/model/resnet.py @@ -0,0 +1,603 @@ +import logging + +import torch.nn as nn +import torch.utils.checkpoint as cp +from torch.nn.modules.batchnorm import _BatchNorm + +from mmcv.cnn import 
constant_init, kaiming_init +from mmcv.runner import load_checkpoint + +from mmdet.ops import DeformConv, ModulatedDeformConv, ContextBlock +from mmdet.models.plugins import GeneralizedAttention + +from mmdet.models.utils import build_conv_layer, build_norm_layer + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, + inplanes, + planes, + stride=1, + dilation=1, + downsample=None, + style='pytorch', + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + dcn=None, + gcb=None, + gen_attention=None): + super(BasicBlock, self).__init__() + assert dcn is None, "Not implemented yet." + assert gen_attention is None, "Not implemented yet." + assert gcb is None, "Not implemented yet." + + self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1) + self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2) + + self.conv1 = build_conv_layer( + conv_cfg, + inplanes, + planes, + 3, + stride=stride, + padding=dilation, + dilation=dilation, + bias=False) + self.add_module(self.norm1_name, norm1) + self.conv2 = build_conv_layer( + conv_cfg, planes, planes, 3, padding=1, bias=False) + self.add_module(self.norm2_name, norm2) + + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + self.dilation = dilation + assert not with_cp + + @property + def norm1(self): + return getattr(self, self.norm1_name) + + @property + def norm2(self): + return getattr(self, self.norm2_name) + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.norm2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, + inplanes, + planes, + stride=1, + dilation=1, + downsample=None, + style='pytorch', + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + dcn=None, + gcb=None, + gen_attention=None): + """Bottleneck block for ResNet. + If style is "pytorch", the stride-two layer is the 3x3 conv layer, + if it is "caffe", the stride-two layer is the first 1x1 conv layer. 
+ """ + super(Bottleneck, self).__init__() + assert style in ['pytorch', 'caffe'] + assert dcn is None or isinstance(dcn, dict) + assert gcb is None or isinstance(gcb, dict) + assert gen_attention is None or isinstance(gen_attention, dict) + + self.inplanes = inplanes + self.planes = planes + self.stride = stride + self.dilation = dilation + self.style = style + self.with_cp = with_cp + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.dcn = dcn + self.with_dcn = dcn is not None + self.gcb = gcb + self.with_gcb = gcb is not None + self.gen_attention = gen_attention + self.with_gen_attention = gen_attention is not None + + if self.style == 'pytorch': + self.conv1_stride = 1 + self.conv2_stride = stride + else: + self.conv1_stride = stride + self.conv2_stride = 1 + + self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1) + self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2) + self.norm3_name, norm3 = build_norm_layer( + norm_cfg, planes * self.expansion, postfix=3) + + self.conv1 = build_conv_layer( + conv_cfg, + inplanes, + planes, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + fallback_on_stride = False + self.with_modulated_dcn = False + if self.with_dcn: + fallback_on_stride = dcn.get('fallback_on_stride', False) + self.with_modulated_dcn = dcn.get('modulated', False) + if not self.with_dcn or fallback_on_stride: + self.conv2 = build_conv_layer( + conv_cfg, + planes, + planes, + kernel_size=3, + stride=self.conv2_stride, + padding=dilation, + dilation=dilation, + bias=False) + else: + assert conv_cfg is None, 'conv_cfg must be None for DCN' + deformable_groups = dcn.get('deformable_groups', 1) + if not self.with_modulated_dcn: + conv_op = DeformConv + offset_channels = 18 + else: + conv_op = ModulatedDeformConv + offset_channels = 27 + self.conv2_offset = nn.Conv2d( + planes, + deformable_groups * offset_channels, + kernel_size=3, + stride=self.conv2_stride, + padding=dilation, + dilation=dilation) + self.conv2 = conv_op( + planes, + planes, + kernel_size=3, + stride=self.conv2_stride, + padding=dilation, + dilation=dilation, + deformable_groups=deformable_groups, + bias=False) + self.add_module(self.norm2_name, norm2) + self.conv3 = build_conv_layer( + conv_cfg, + planes, + planes * self.expansion, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + + if self.with_gcb: + gcb_inplanes = planes * self.expansion + self.context_block = ContextBlock(inplanes=gcb_inplanes, **gcb) + + # gen_attention + if self.with_gen_attention: + self.gen_attention_block = GeneralizedAttention( + planes, **gen_attention) + + @property + def norm1(self): + return getattr(self, self.norm1_name) + + @property + def norm2(self): + return getattr(self, self.norm2_name) + + @property + def norm3(self): + return getattr(self, self.norm3_name) + + def forward(self, x): + + def _inner_forward(x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + if not self.with_dcn: + out = self.conv2(out) + elif self.with_modulated_dcn: + offset_mask = self.conv2_offset(out) + offset = offset_mask[:, :18, :, :] + mask = offset_mask[:, -9:, :, :].sigmoid() + out = self.conv2(out, offset, mask) + else: + offset = self.conv2_offset(out) + out = self.conv2(out, offset) + out = self.norm2(out) + out = self.relu(out) + + if self.with_gen_attention: + out = self.gen_attention_block(out) + + out = 
self.conv3(out) + out = self.norm3(out) + + if self.with_gcb: + out = self.context_block(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +def make_multigrid(block, + inplanes, + planes, + blocks, + stride=1, + dilation=1, + style='pytorch', + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + dcn=None, + gcb=None, + gen_attention=None, + gen_attention_blocks=[]): + downsample = None + if stride != 1 or inplanes != planes * block.expansion: + downsample = nn.Sequential( + build_conv_layer( + conv_cfg, + inplanes, + planes * block.expansion, + kernel_size=1, + stride=stride, + bias=False), + build_norm_layer(norm_cfg, planes * block.expansion)[1], + ) + + layers = [] + layers.append( + block( + inplanes=inplanes, + planes=planes, + stride=stride, + dilation=blocks[0]*dilation, + downsample=downsample, + style=style, + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + dcn=dcn, + gcb=gcb, + gen_attention=gen_attention if + (0 in gen_attention_blocks) else None)) + inplanes = planes * block.expansion + for i in range(1, len(blocks)): + layers.append( + block( + inplanes=inplanes, + planes=planes, + stride=1, + dilation=blocks[i]*dilation, + style=style, + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + dcn=dcn, + gcb=gcb, + gen_attention=gen_attention if + (i in gen_attention_blocks) else None)) + + return nn.Sequential(*layers) + +def make_res_layer(block, + inplanes, + planes, + blocks, + stride=1, + dilation=1, + style='pytorch', + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + dcn=None, + gcb=None, + gen_attention=None, + gen_attention_blocks=[]): + downsample = None + if stride != 1 or inplanes != planes * block.expansion: + downsample = nn.Sequential( + build_conv_layer( + conv_cfg, + inplanes, + planes * block.expansion, + kernel_size=1, + stride=stride, + bias=False), + build_norm_layer(norm_cfg, planes * block.expansion)[1], + ) + + layers = [] + layers.append( + block( + inplanes=inplanes, + planes=planes, + stride=stride, + dilation=dilation, + downsample=downsample, + style=style, + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + dcn=dcn, + gcb=gcb, + gen_attention=gen_attention if + (0 in gen_attention_blocks) else None)) + inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append( + block( + inplanes=inplanes, + planes=planes, + stride=1, + dilation=dilation, + style=style, + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + dcn=dcn, + gcb=gcb, + gen_attention=gen_attention if + (i in gen_attention_blocks) else None)) + + return nn.Sequential(*layers) + +class ResNet(nn.Module): + """ResNet backbone. + + Args: + depth (int): Depth of resnet, from {18, 34, 50, 101, 152}. + num_stages (int): Resnet stages, normally 4. + strides (Sequence[int]): Strides of the first block of each stage. + dilations (Sequence[int]): Dilation of each stage. + out_indices (Sequence[int]): Output from which stages. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. + norm_cfg (dict): dictionary to construct and config norm layer. 
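+        mg_rates (Sequence[int]): multi-grid dilation multipliers applied to
+            the blocks of the last stage (added in this repo; see
+            make_multigrid above).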
+ norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + zero_init_residual (bool): whether to use zero init for last norm layer + in resblocks to let them behave as identity. + """ + + arch_settings = { + 18: (BasicBlock, (2, 2, 2, 2)), + 34: (BasicBlock, (3, 4, 6, 3)), + 50: (Bottleneck, (3, 4, 6, 3)), + 101: (Bottleneck, (3, 4, 23, 3)), + 152: (Bottleneck, (3, 8, 36, 3)) + } + + def __init__(self, + depth, + num_stages=4, + mg_rates=(1, 2, 4), + strides=(1, 2, 2, 2), + dilations=(1, 1, 1, 1), + out_indices=(0, 1, 2, 3), + style='pytorch', + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + dcn=None, + stage_with_dcn=(False, False, False, False), + gcb=None, + stage_with_gcb=(False, False, False, False), + gen_attention=None, + stage_with_gen_attention=((), (), (), ()), + with_cp=False, + zero_init_residual=True): + super(ResNet, self).__init__() + if depth not in self.arch_settings: + raise KeyError('invalid depth {} for resnet'.format(depth)) + self.depth = depth + self.num_stages = num_stages + self.mg_rates = mg_rates + assert num_stages >= 1 and num_stages <= 4 + self.strides = strides + self.dilations = dilations + assert len(strides) == len(dilations) == num_stages + self.out_indices = out_indices + assert max(out_indices) < num_stages + self.style = style + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.with_cp = with_cp + self.norm_eval = norm_eval + self.dcn = dcn + self.stage_with_dcn = stage_with_dcn + if dcn is not None: + assert len(stage_with_dcn) == num_stages + self.gen_attention = gen_attention + self.gcb = gcb + self.stage_with_gcb = stage_with_gcb + if gcb is not None: + assert len(stage_with_gcb) == num_stages + self.zero_init_residual = zero_init_residual + self.block, stage_blocks = self.arch_settings[depth] + self.stage_blocks = stage_blocks[:num_stages] + self.inplanes = 64 + + self._make_stem_layer() + + self.res_layers = [] + for i, num_blocks in enumerate(self.stage_blocks): + stride = strides[i] + dilation = dilations[i] + dcn = self.dcn if self.stage_with_dcn[i] else None + gcb = self.gcb if self.stage_with_gcb[i] else None + planes = 64 * 2**i + if i == len(self.stage_blocks) - 1 and not True in self.stage_with_dcn: + assert len(self.mg_rates) == num_blocks + res_layer = make_multigrid( + self.block, + self.inplanes, + planes, + self.mg_rates, + stride=stride, + dilation=dilation, + style=self.style, + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + dcn=dcn, + gcb=gcb, + gen_attention=gen_attention, + gen_attention_blocks=stage_with_gen_attention[i]) + else: + res_layer = make_res_layer( + self.block, + self.inplanes, + planes, + num_blocks, + stride=stride, + dilation=dilation, + style=self.style, + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + dcn=dcn, + gcb=gcb, + gen_attention=gen_attention, + gen_attention_blocks=stage_with_gen_attention[i]) + self.inplanes = planes * self.block.expansion + layer_name = 'layer{}'.format(i + 1) + self.add_module(layer_name, res_layer) + self.res_layers.append(layer_name) + + self._freeze_stages() + + self.feat_dim = self.block.expansion * 64 * 2**( + len(self.stage_blocks) - 1) + + @property + def norm1(self): + return getattr(self, self.norm1_name) + + def 
_make_stem_layer(self): + self.conv1 = build_conv_layer( + self.conv_cfg, + 3, + 64, + kernel_size=7, + stride=2, + padding=3, + bias=False) + self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1) + self.add_module(self.norm1_name, norm1) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + self.norm1.eval() + for m in [self.conv1, self.norm1]: + for param in m.parameters(): + param.requires_grad = False + + for i in range(1, self.frozen_stages + 1): + m = getattr(self, 'layer{}'.format(i)) + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def init_weights(self, pretrained=None): + if isinstance(pretrained, str): + logger = logging.getLogger() + load_checkpoint(self, pretrained, strict=False, logger=logger) + elif pretrained is None: + for m in self.modules(): + if isinstance(m, nn.Conv2d): + kaiming_init(m) + elif isinstance(m, (_BatchNorm, nn.GroupNorm)): + constant_init(m, 1) + + if self.dcn is not None: + for m in self.modules(): + if isinstance(m, Bottleneck) and hasattr( + m, 'conv2_offset'): + constant_init(m.conv2_offset, 0) + + if self.zero_init_residual: + for m in self.modules(): + if isinstance(m, Bottleneck): + constant_init(m.norm3, 0) + elif isinstance(m, BasicBlock): + constant_init(m.norm2, 0) + else: + raise TypeError('pretrained must be a str or None') + + def forward(self, x): + x = self.conv1(x) + x = self.norm1(x) + x = self.relu(x) + x = self.maxpool(x) + for i, layer_name in enumerate(self.res_layers): + res_layer = getattr(self, layer_name) + x = res_layer(x) + return x + + def train(self, mode=True): + super(ResNet, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() + diff --git a/segment/model/resnext.py b/segment/model/resnext.py new file mode 100644 index 0000000..cd8910a --- /dev/null +++ b/segment/model/resnext.py @@ -0,0 +1,302 @@ +import math + +import torch.nn as nn + +from mmdet.ops import DeformConv, ModulatedDeformConv +from .resnet import Bottleneck as _Bottleneck +from .resnet import ResNet +from mmdet.models.utils import build_conv_layer, build_norm_layer + + +class Bottleneck(_Bottleneck): + + def __init__(self, inplanes, planes, groups=1, base_width=4, **kwargs): + """Bottleneck block for ResNeXt. + If style is "pytorch", the stride-two layer is the 3x3 conv layer, + if it is "caffe", the stride-two layer is the first 1x1 conv layer. 
+ """ + super(Bottleneck, self).__init__(inplanes, planes, **kwargs) + + if groups == 1: + width = self.planes + else: + width = math.floor(self.planes * (base_width / 64)) * groups + + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, width, postfix=1) + self.norm2_name, norm2 = build_norm_layer( + self.norm_cfg, width, postfix=2) + self.norm3_name, norm3 = build_norm_layer( + self.norm_cfg, self.planes * self.expansion, postfix=3) + + self.conv1 = build_conv_layer( + self.conv_cfg, + self.inplanes, + width, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + fallback_on_stride = False + self.with_modulated_dcn = False + if self.with_dcn: + fallback_on_stride = self.dcn.get('fallback_on_stride', False) + self.with_modulated_dcn = self.dcn.get('modulated', False) + if not self.with_dcn or fallback_on_stride: + self.conv2 = build_conv_layer( + self.conv_cfg, + width, + width, + kernel_size=3, + stride=self.conv2_stride, + padding=self.dilation, + dilation=self.dilation, + groups=groups, + bias=False) + else: + assert self.conv_cfg is None, 'conv_cfg must be None for DCN' + groups = self.dcn.get('groups', 1) + deformable_groups = self.dcn.get('deformable_groups', 1) + if not self.with_modulated_dcn: + conv_op = DeformConv + offset_channels = 18 + else: + conv_op = ModulatedDeformConv + offset_channels = 27 + self.conv2_offset = nn.Conv2d( + width, + deformable_groups * offset_channels, + kernel_size=3, + stride=self.conv2_stride, + padding=self.dilation, + dilation=self.dilation) + self.conv2 = conv_op( + width, + width, + kernel_size=3, + stride=self.conv2_stride, + padding=self.dilation, + dilation=self.dilation, + groups=groups, + deformable_groups=deformable_groups, + bias=False) + self.add_module(self.norm2_name, norm2) + self.conv3 = build_conv_layer( + self.conv_cfg, + width, + self.planes * self.expansion, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + +def make_multigrid(block, + inplanes, + planes, + blocks, + stride=1, + dilation=1, + groups=1, + base_width=4, + style='pytorch', + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + dcn=None, + gcb=None): + downsample = None + if stride != 1 or inplanes != planes * block.expansion: + downsample = nn.Sequential( + build_conv_layer( + conv_cfg, + inplanes, + planes * block.expansion, + kernel_size=1, + stride=stride, + bias=False), + build_norm_layer(norm_cfg, planes * block.expansion)[1], + ) + + layers = [] + layers.append( + block( + inplanes=inplanes, + planes=planes, + stride=stride, + dilation=blocks[0]*dilation, + downsample=downsample, + groups=groups, + base_width=base_width, + style=style, + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + dcn=dcn, + gcb=gcb)) + inplanes = planes * block.expansion + for i in range(1, len(blocks)): + layers.append( + block( + inplanes=inplanes, + planes=planes, + stride=1, + dilation=blocks[i]*dilation, + groups=groups, + base_width=base_width, + style=style, + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + dcn=dcn, + gcb=gcb)) + + return nn.Sequential(*layers) + +def make_res_layer(block, + inplanes, + planes, + blocks, + stride=1, + dilation=1, + groups=1, + base_width=4, + style='pytorch', + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + dcn=None, + gcb=None): + downsample = None + if stride != 1 or inplanes != planes * block.expansion: + downsample = nn.Sequential( + build_conv_layer( + conv_cfg, + inplanes, + planes * block.expansion, + 
kernel_size=1, + stride=stride, + bias=False), + build_norm_layer(norm_cfg, planes * block.expansion)[1], + ) + + layers = [] + layers.append( + block( + inplanes=inplanes, + planes=planes, + stride=stride, + dilation=dilation, + downsample=downsample, + groups=groups, + base_width=base_width, + style=style, + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + dcn=dcn, + gcb=gcb)) + inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append( + block( + inplanes=inplanes, + planes=planes, + stride=1, + dilation=dilation, + groups=groups, + base_width=base_width, + style=style, + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + dcn=dcn, + gcb=gcb)) + + return nn.Sequential(*layers) + +class ResNeXt(ResNet): + """ResNeXt backbone. + + Args: + depth (int): Depth of resnet, from {18, 34, 50, 101, 152}. + num_stages (int): Resnet stages, normally 4. + groups (int): Group of resnext. + base_width (int): Base width of resnext. + strides (Sequence[int]): Strides of the first block of each stage. + dilations (Sequence[int]): Dilation of each stage. + out_indices (Sequence[int]): Output from which stages. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + frozen_stages (int): Stages to be frozen (all param fixed). -1 means + not freezing any parameters. + norm_cfg (dict): dictionary to construct and config norm layer. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + zero_init_residual (bool): whether to use zero init for last norm layer + in resblocks to let them behave as identity. 
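+        Note: unlike the ResNet base class, the final stage here is always
+        built with multi-grid dilations (mg_rates), even when DCN is enabled.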
+ """ + + arch_settings = { + 50: (Bottleneck, (3, 4, 6, 3)), + 101: (Bottleneck, (3, 4, 23, 3)), + 152: (Bottleneck, (3, 8, 36, 3)) + } + + def __init__(self, groups=1, base_width=4, **kwargs): + super(ResNeXt, self).__init__(**kwargs) + self.groups = groups + self.base_width = base_width + + self.inplanes = 64 + self.res_layers = [] + for i, num_blocks in enumerate(self.stage_blocks): + stride = self.strides[i] + dilation = self.dilations[i] + dcn = self.dcn if self.stage_with_dcn[i] else None + gcb = self.gcb if self.stage_with_gcb[i] else None + planes = 64 * 2**i + # Allow for DCN + multigrid final block + if i == len(self.stage_blocks) - 1: #and not True in self.stage_with_dcn: + assert len(self.mg_rates) == num_blocks + res_layer = make_multigrid( + self.block, + self.inplanes, + planes, + self.mg_rates, + stride=stride, + dilation=dilation, + groups=self.groups, + base_width=self.base_width, + style=self.style, + with_cp=self.with_cp, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + dcn=dcn, + gcb=gcb) + else: + res_layer = make_res_layer( + self.block, + self.inplanes, + planes, + num_blocks, + stride=stride, + dilation=dilation, + groups=self.groups, + base_width=self.base_width, + style=self.style, + with_cp=self.with_cp, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + dcn=dcn, + gcb=gcb) + self.inplanes = planes * self.block.expansion + layer_name = 'layer{}'.format(i + 1) + self.add_module(layer_name, res_layer) + self.res_layers.append(layer_name) + + self._freeze_stages() + diff --git a/segment/model/train.py b/segment/model/train.py new file mode 100644 index 0000000..034dfe9 --- /dev/null +++ b/segment/model/train.py @@ -0,0 +1,650 @@ +""" +Objects for training models. +""" + +import os +import datetime +import time +import pandas as pd +import numpy as np +from tqdm import tqdm +import torch +from torch import nn +from sklearn.metrics import roc_auc_score, f1_score +from functools import partial +from loss.other_losses import KLDivergence + +try: + from apex import amp + APEX_AVAILABLE = True +except: + APEX_AVAILABLE = False + +from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts, ReduceLROnPlateau +from utils.helper import to_categorical + +def _roc_auc_score(y_true, y_pred): + y_true = np.asarray(y_true) + y_pred = np.asarray(y_pred) + if len(np.unique(y_true)) == 2: + auc = roc_auc_score(y_true, y_pred[:,1]) + else: + auc = roc_auc_score(to_categorical(y_true), y_pred, average='macro') + return auc + +def _f1_score(y_true, y_pred): + y_true = np.asarray(y_true) + y_pred = np.asarray(y_pred) + if len(np.unique(y_true)) == 2: + f1 = f1_score(y_true, np.argmax(y_pred, axis=1)) + else: + f1 = f1_score(y_true, np.argmax(y_pred, axis=1), average='macro') + return f1 + +def _dice_score(y_true, y_pred, thresholds=[0.5]): + y_pred = y_pred[:,1] + # y_pred and y_true should be (N, w, h) + thresholds = [round(thr, 2) for thr in thresholds] + all_dice_list = [] + all_pos_dice_list = [] + for thr in thresholds: + dice_list = [] + pos_dice_list = [] + for i, img in enumerate(y_pred): + img = img.copy() + img[img >= thr] = 1 + img[img < thr] = 0 + # Calculate Dice per image + y_true_sum = np.sum(y_true[i]) + y_pred_sum = np.sum(img) + if y_true_sum == 0: + # For empty images: predicted empty mask = 1, otherwise 0 + if y_pred_sum == 0: + dsc = 1. + else: + dsc = 0. 
+ else: + dsc = f1_score(y_true[i].ravel(), img.ravel()) + pos_dice_list.append(dsc) + dice_list.append(dsc) + all_dice_list.append(np.mean(dice_list)) + all_pos_dice_list.append(np.mean(pos_dice_list)) + return np.max(all_dice_list), \ + thresholds[all_dice_list.index(np.max(all_dice_list))], \ + np.max(all_pos_dice_list), \ + thresholds[all_pos_dice_list.index(np.max(all_pos_dice_list))] + +def _faster_dice(y_true, y_pred, thresholds=[0.5]): + # From Heng + size = len(y_true) + y_pred = y_pred[:,1] + y_pred = y_pred.reshape(size,-1)[:,::16] + y_true = y_true.reshape(size,-1)[:,::16] + assert np.min(y_pred) >= 0 and np.max(y_pred) <= 1 + assert(y_pred.shape == y_true.shape) + + all_dice_list = [] + all_pos_dice_list = [] + all_cls_thresholds = [] + all_seg_thresholds = [] + for cls_thres in thresholds: + for seg_thres in thresholds: + p = (y_pred>cls_thres).astype('float32') + s = (y_pred>seg_thres).astype('float32') + #p = np.asarray([np.expand_dims((p[i].sum(-1)>0).astype('float32'), axis=-1)*s[i] for i in range(len(p))]) + p = np.expand_dims(p.sum(-1)>0, axis=-1).astype('float32')*s + t = (y_true>0.5).astype('float32') + + t_sum = t.sum(-1) + p_sum = p.sum(-1) + neg_index = np.nonzero(t_sum==0)[0] + pos_index = np.nonzero(t_sum>=1)[0] + + dice_neg = (p_sum==0).astype('float32') + dice_pos = 2*(p*t).sum(-1)/((p+t).sum(-1)) + + dice_neg = dice_neg[neg_index] + dice_pos = dice_pos[pos_index] + dice = np.concatenate([dice_pos,dice_neg]) + + dice_pos = np.nan_to_num(dice_pos.mean(), 0) + dice = dice.mean() + + all_dice_list.append(dice) + all_pos_dice_list.append(dice_pos) + all_cls_thresholds.append(cls_thres) + all_seg_thresholds.append(seg_thres) + + #print(pd.DataFrame({'threshold': all_thresholds, 'kaggle_dice': all_dice_list})) + + return np.max(all_dice_list), \ + all_cls_thresholds[all_dice_list.index(np.max(all_dice_list))], \ + np.max(all_pos_dice_list), \ + all_seg_thresholds[all_pos_dice_list.index(np.max(all_pos_dice_list))] + +def _ultimate_kaggle_metric(y_true, y_pred, thresholds): + # Modified from Heng + size = len(y_true) + y_pred = y_pred[:,1] + y_pred = y_pred.reshape(size,-1) + y_true = y_true.reshape(size,-1) + assert np.min(y_pred) >= 0 and np.max(y_pred) <= 1 + assert(y_pred.shape == y_true.shape) + + all_dice_list = [] + all_pos_dice_list = [] + for cls_thres in thresholds: + for seg_thres in thresholds: + p = (y_pred>cls_thres).astype('float32') + # Segmentation predictions + s = (y_pred>seg_thres).astype('float32') + + t = (y_true>0.5).astype('float32') + + t_sum = t.sum(-1) + p_sum = p.sum(-1) + neg_index = np.nonzero(t_sum==0)[0] + pos_index = np.nonzero(t_sum>=1)[0] + + dice_neg = (p_sum==0).astype('float32') + dice_pos = 2*(p*t).sum(-1)/((p+t).sum(-1)) + + dice_neg = dice_neg[neg_index] + dice_pos = dice_pos[pos_index] + dice = np.concatenate([dice_pos,dice_neg]) + + dice_pos = np.nan_to_num(dice_pos.mean(), 0) + dice = dice.mean() + + all_dice_list.append(dice) + all_pos_dice_list.append(dice_pos) + + print(pd.DataFrame({'threshold': thresholds, 'kaggle_dice': all_dice_list})) + + return np.max(all_dice_list), \ + thresholds[all_dice_list.index(np.max(all_dice_list))], \ + np.max(all_pos_dice_list), \ + thresholds[all_pos_dice_list.index(np.max(all_pos_dice_list))] + +class Trainer(object): + def __init__(self, model, architecture, optimizer, criterion, loss_tracker, save_checkpoint, save_best, multiclass=True, validate=1): + self.model = model + self.architecture = architecture + self.optimizer = optimizer + self.criterion = criterion + self.loss_tracker = 
loss_tracker + self.save_checkpoint = save_checkpoint + self.save_best = save_best + self.best_model = None + self.multiclass = multiclass + self.validate_interval = validate + self.grad_accum = 0 + self.use_amp = False + self.track_valid_metric = 'kag' + + def set_dataloaders(self, train, valid=None): + self.train_gen = train + self.valid_gen = valid + + def set_thresholds(self, thresholds=[0.1]): + assert type(thresholds) is list + self.thresholds = thresholds + + def check_end_train(self): + return True if self.current_epoch >= self.max_epochs else False + + def train_head(self, head_optimizer, head_steps_per_epoch, head_max_epochs=5): + print ('Training decoder for {} epochs ...'.format(head_max_epochs)) + head_current_epoch = 0 ; head_steps = 0 + while True: + for i, data in enumerate(self.train_gen): + batch, labels, class_label = data + head_optimizer.zero_grad() + output = self.model(batch.cuda()) + loss = self.criterion(output, labels.cuda()) + loss.backward() + head_optimizer.step() + head_steps += 1 + if head_steps % head_steps_per_epoch == 0: + head_current_epoch += 1 + head_steps = 0 + if head_current_epoch >= head_max_epochs: + break + if head_current_epoch >= head_max_epochs: + break + print ('Done training decoder !') + + def save_models(self, improvement, metrics): + cpt_name = '{arch}_{epoch}'.format(arch=self.architecture.upper(), epoch=str(self.current_epoch).zfill(len(str(self.max_epochs)))) + for met in metrics.keys(): + cpt_name += '_{name}-{value:.4f}'.format(name=met.upper(), value=metrics[met]) + cpt_name += '.pth' + if not self.save_best: + torch.save(self.model.state_dict(), os.path.join(self.save_checkpoint, cpt_name)) + elif improvement: + if self.best_model is not None: + os.system('rm {}'.format(os.path.join(self.save_checkpoint, self.best_model))) + self.best_model = cpt_name + torch.save(self.model.state_dict(), os.path.join(self.save_checkpoint, cpt_name)) + + def calculate_valid_metrics(self, y_true, y_pred, y_cls, loss): + valid_dsc, thr, pos_valid_dsc, pos_thr = _faster_dice(y_true, y_pred, thresholds=self.thresholds) + #pos_valid_dsc, pos_thr = _dice_score_pos_only(y_true, y_pred, thresholds=self.thresholds) + if len(np.unique(y_cls)) > 1: + y_cls_preds = np.max(y_pred[:,1], axis=(-2, -1)) + y_cls_preds = np.repeat(np.expand_dims(y_cls_preds, axis=-1), 2, axis=-1) + y_cls_preds[:,0] = 1. 
- y_cls_preds[:,0] + valid_auc = _roc_auc_score(y_cls, y_cls_preds) + print ('epoch {epoch} // VALIDATION : loss = {loss:.4f}, auc = {auc:.4f}, dsc = {dsc:.4f}, thr = {thr:.2f}, dsc (pos) = {pos_dsc:.4f}, thr (pos) = {pos_thr:.2f}' + .format(epoch=str(self.current_epoch).zfill(len(str(self.max_epochs))), \ + loss=loss, \ + auc=valid_auc, \ + dsc=valid_dsc, \ + thr=thr, + pos_dsc=pos_valid_dsc, + pos_thr=pos_thr)) + valid_metric = valid_dsc + valid_auc + metrics_dict = {'auc': valid_auc, 'dsc': valid_dsc, 'thr': thr, 'pos_dsc': pos_valid_dsc, 'pos_thr': pos_thr} + else: + print ('epoch {epoch} // VALIDATION : loss = {loss:.4f}, dsc = {dsc:.4f}, thr = {thr:.2f}, dsc (pos) = {pos_dsc:.4f}, thr (pos) = {pos_thr:.2f}' + .format(epoch=str(self.current_epoch).zfill(len(str(self.max_epochs))), \ + loss=loss, \ + dsc=valid_dsc, \ + thr=thr, + pos_dsc=pos_valid_dsc, + pos_thr=pos_thr)) + valid_metric = valid_dsc + metrics_dict = {'dsc': valid_dsc, 'thr': thr, 'pos_dsc': pos_valid_dsc, 'pos_thr': pos_thr} + return valid_metric, metrics_dict + + def post_validate(self, valid_metric, metrics_dict): + if self.lr_scheduler.mode == 'min': + improvement = valid_metric <= (self.best_valid_score - self.lr_scheduler.threshold) + else: + improvement = valid_metric >= (self.best_valid_score + self.lr_scheduler.threshold) + if improvement: + self.best_valid_score = valid_metric + self.stopping = 0 + else: + if isinstance(self.lr_scheduler, ReduceLROnPlateau): + self.lr_scheduler.step(valid_metric) + self.stopping += 1 + self.save_models(improvement, metrics_dict) + + def validate(self): + with torch.no_grad(): + self.model.eval() + self.model.cuda() + valid_loss = 0. + y_pred = [] ; y_true = [] ; y_cls = [] + for i, data in tqdm(enumerate(self.valid_gen), total=len(self.valid_gen)): + batch, labels, class_label = data + output = self.model(batch.cuda()) + # Loss computed on logits + loss = self.criterion(output, labels.cuda()) + # Softmax predictions + y_pred.append(torch.softmax(output, dim=1).cpu().numpy()) + y_true.append(labels.numpy()) + y_cls.extend(class_label.numpy()) + valid_loss += loss.item() + y_true = np.vstack(y_true) + y_pred = np.vstack(y_pred) + y_cls = np.asarray(y_cls) + valid_loss /= float(len(self.valid_gen)) + valid_metric, metrics_dict = self.calculate_valid_metrics(y_true, y_pred, y_cls, valid_loss) + self.post_validate(valid_metric, metrics_dict) + + def train_step(self, data): + batch, labels, class_label = data + output = self.model(batch.cuda()) + if self.grad_accum > 0: + self.loss = self.criterion(output, labels.cuda()) + self.tracker_loss += self.loss.item() + self.grad_iter += 1 + if self.use_amp: + with amp.scale_loss(self.loss/self.grad_accum, self.optimizer) as scaled_loss: + scaled_loss.backward() + else: + (self.loss/self.grad_accum).backward() + if self.grad_iter % self.grad_accum == 0: + self.loss_tracker.update_loss(self.tracker_loss/self.grad_accum) + self.tracker_loss = 0 + self.optimizer.step() + self.optimizer.zero_grad() + else: + self.loss = self.criterion(output, labels.cuda()) + self.loss_tracker.update_loss(self.loss.item()) + if self.use_amp: + with amp.scale_loss(self.loss, self.optimizer) as scaled_loss: + scaled_loss.backward() + else: + self.loss.backward() + self.optimizer.step() + self.optimizer.zero_grad() + + def train(self, max_epochs, steps_per_epoch, lr_scheduler=None, early_stopping=np.inf, freeze_bn=False, verbosity=100): + self.lr_scheduler = lr_scheduler + self.best_valid_score = 999. if lr_scheduler.mode == 'min' else 0. 
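+        # Sentinel starting score: any real loss beats 999. in 'min' mode, and
+        # any real Dice/AUC (both live in [0, 1]) beats 0. in 'max' mode. The
+        # scheduler passed in is assumed to expose a .mode attribute, as
+        # torch's ReduceLROnPlateau does.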
+        self.max_epochs = max_epochs
+        self.stopping = 0
+        start_time = datetime.datetime.now() ; steps = 0
+        # Timer for the periodic progress printout below; initialized here so
+        # the first report has a valid reference point
+        step_start_time = time.time()
+        print ('TRAINING : START')
+        self.current_epoch = 0
+        self.grad_iter = 0 ; self.tracker_loss = 0
+        while True:
+            self.optimizer.zero_grad()
+            for i, data in enumerate(self.train_gen):
+                self.train_step(data)
+                if self.grad_accum > 0:
+                    if self.grad_iter % self.grad_accum == 0:
+                        printed = False
+                        steps += 1
+                else:
+                    printed = False
+                    steps += 1
+                if isinstance(self.lr_scheduler, CosineAnnealingWarmRestarts):
+                    self.lr_scheduler.step(self.current_epoch + steps * 1./steps_per_epoch)
+                if steps % verbosity == 0 and steps > 0 and not printed:
+                    duration = time.time() - step_start_time
+                    duration = duration * self.grad_accum if self.grad_accum > 0 else duration
+                    print('epoch {epoch}, batch {batch} / {steps_per_epoch} : loss = {train_loss:.4f} ({duration:.3f} sec/batch)'
+                        .format(epoch=str(self.current_epoch + 1).zfill(len(str(max_epochs))), \
+                                batch=str(steps).zfill(len(str(steps_per_epoch))), \
+                                steps_per_epoch=steps_per_epoch, \
+                                train_loss=self.loss_tracker.get_avg_loss(), \
+                                duration=duration))
+                    printed = True
+                    step_start_time = time.time()
+                if (steps % steps_per_epoch) == 0 and steps > 0 and ((self.current_epoch + 1) % self.validate_interval) != 0:
+                    steps = 0
+                elif (steps % steps_per_epoch) == 0 and steps > 0 and ((self.current_epoch + 1) % self.validate_interval) == 0:
+                    self.current_epoch += 1
+                    print ('VALIDATING ...')
+                    self.model.train_mode = False
+                    validation_start_time = datetime.datetime.now()
+                    self.validate()
+                    print('Validation took {} !'.format(datetime.datetime.now() - validation_start_time))
+                    steps = 0
+                    # Reset best model when using CosineAnnealingWarmRestarts,
+                    # so each warm restart contributes its own snapshot
+                    if isinstance(self.lr_scheduler, CosineAnnealingWarmRestarts):
+                        if self.current_epoch % self.lr_scheduler.T_0 == 0:
+                            self.best_model = None
+                            self.best_valid_score = 999. if self.lr_scheduler.mode == 'min' else 0.
+                    self.model.train()
+                    self.model.cuda()
+                    self.model.train_mode = True
+                    if freeze_bn:
+                        for module in self.model.modules():
+                            if type(module) == nn.BatchNorm2d:
+                                module.eval()
+                    if self.stopping >= early_stopping:
+                        # Make sure to set number of epochs to max epochs
+                        self.current_epoch = max_epochs
+                    if self.check_end_train():
+                        # Break the for loop
+                        break
+            if self.check_end_train():
+                # Break the while loop
+                break
+        print ('TRAINING : END')
+        print ('Training took {}\n'.format(datetime.datetime.now() - start_time))
+
+def turn_into_softmax(y):
+    y = np.repeat(np.expand_dims(y, axis=-1), 2, axis=-1)
+    y[:,0] = 1. - y[:,0]
+    return y
+
+def _kaggle_score(y_true, y_prob, average_dice=0.6, thresholds=np.linspace(0.1, 0.9, 9)):
+    metric_list = []
+    y_prob = y_prob[:,1]
+    y_true = y_true[:,0]
+    assert y_true.shape == y_prob.shape
+    for thres in thresholds:
+        y_pred = np.asarray([1 if _ >= thres else 0 for _ in y_prob])
+        tp = np.sum(y_pred + y_true == 2)
+        tn = np.sum(y_pred + y_true == 0)
+        metric_list.append((tn + tp * average_dice) / len(y_true))
+    return np.max(metric_list)
+
+class AllTrainer(Trainer):
+    #
+    def class_score_from_pixels(self, y_cls, y_pred, top_sizes=[0, 0.1, 0.5, 1., 2.5, 5]):
+        y_cls_preds = y_pred[:,1]
+        y_cls_preds = y_cls_preds.reshape(y_cls_preds.shape[0], -1)
+        y_cls_preds = -np.sort(-y_cls_preds, axis=1)
+        auc_list = [] ; kag_list = []
+        for size in top_sizes:
+            tmp_size = int(y_cls_preds.shape[-1] * size / 100.) if size != 0 else 1
+            tmp_preds = np.mean(y_cls_preds[:,:tmp_size], axis=1)
+            tmp_preds = turn_into_softmax(tmp_preds)
+            auc_list.append(_roc_auc_score(y_cls, tmp_preds))
+            kag_list.append(_kaggle_score(y_cls, tmp_preds, thresholds=self.thresholds))
+        return np.max(auc_list), top_sizes[auc_list.index(np.max(auc_list))], \
+               np.max(kag_list), top_sizes[kag_list.index(np.max(kag_list))]
+
+    def calculate_valid_metrics(self, y_true, y_pred, y_cls, loss):
+        valid_dsc, thr, pos_valid_dsc, pos_thr = _faster_dice(y_true, y_pred, thresholds=self.thresholds)
+        #valid_dsc, thr, pos_valid_dsc, pos_thr = _dice_score(y_true, y_pred, thresholds=self.thresholds)
+        # Determine how to turn pixel scores into classification score
+        _scores = self.class_score_from_pixels(y_cls, y_pred)
+        valid_best_auc = _scores[0]
+        valid_best_auctop = _scores[1]
+        valid_best_kag = _scores[2]
+        valid_best_kagtop = _scores[3]
+        _toprint = 'epoch {epoch} // VALIDATION : loss = {loss:.4f}, ' + \
+                   'auc = {auc:.4f}, auctop = {auctop:.4f}, ' + \
+                   'kag = {kag:.4f}, kagtop = {kagtop:.4f}, ' + \
+                   'dsc = {dsc:.4f}, thr = {thr:.2f}, ' + \
+                   'dsc (pos) = {pos_dsc:.4f}, thr (pos) = {pos_thr:.2f}'
+        print (_toprint.format(
+            epoch=str(self.current_epoch).zfill(len(str(self.max_epochs))), \
+            loss=loss, \
+            auc=valid_best_auc, \
+            auctop=valid_best_auctop, \
+            kag=valid_best_kag, \
+            kagtop=valid_best_kagtop, \
+            dsc=valid_dsc, \
+            thr=thr, \
+            pos_dsc=pos_valid_dsc, \
+            pos_thr=pos_thr
+            )
+        )
+        # 'kag' tracks valid_dsc because _faster_dice already scores empty
+        # masks the way the Kaggle metric does
+        if self.track_valid_metric == 'kag':
+            valid_metric = valid_dsc
+        elif self.track_valid_metric == 'pos_dsc':
+            valid_metric = pos_valid_dsc
+        metrics_dict = {'auc': valid_best_auc,
+                        'auctop': valid_best_auctop,
+                        'kag': valid_best_kag,
+                        'kagtop': valid_best_kagtop,
+                        'dsc': valid_dsc,
+                        'thr': thr,
+                        'pos_dsc': pos_valid_dsc,
+                        'pos_thr': pos_thr}
+        return valid_metric, metrics_dict
+
+class StitchTrainer(AllTrainer):
+    def validate(self):
+        with torch.no_grad():
+            self.model.eval()
+            self.model.cuda()
+            valid_loss = 0.
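+            # Patch-based validation: each sample arrives as a set of
+            # overlapping crops plus binary masks marking where each crop sits
+            # in the full image. Per-pixel logits are summed onto a full-size
+            # canvas and divided by per-pixel patch coverage; a sketch of the
+            # idea behind the loop below:
+            #   canvas[c][patch_mask == 1] += patch_logits[c]
+            #   coverage += patch_mask
+            #   stitched_logits = canvas / coverage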
+            y_pred = [] ; y_true = [] ; y_cls = []
+            for i, data in tqdm(enumerate(self.valid_gen), total=len(self.valid_gen)):
+                # Batch size should be 1
+                patches, patch_masks, labels, class_label = data
+                patches = patches[0].cuda()
+                patch_masks = patch_masks[0].cuda()
+                output = self.model(patches)
+                # Stitch together: use repeat (not expand) so the two logit
+                # channels get independent memory rather than aliased views
+                stitches = torch.zeros_like(labels).repeat(output.shape[1], 1, 1).cuda() # (n, H, W)
+                stitched = torch.zeros_like(labels).cuda() # (1, H, W)
+                for idx, out in enumerate(output):
+                    stitches[0][patch_masks[idx].long() == 1] += out[0].view(-1)
+                    stitches[1][patch_masks[idx].long() == 1] += out[1].view(-1)
+                    stitched[0] += patch_masks[idx]
+                stitches = stitches / stitched
+                # Loss computed on logits
+                stitches = stitches.unsqueeze(0)
+                loss = self.criterion(stitches, labels.cuda())
+                # Softmax predictions
+                y_pred.append(torch.softmax(stitches, dim=1).cpu().numpy())
+                y_true.append(labels.numpy())
+                y_cls.extend(class_label.numpy())
+                valid_loss += loss.item()
+            y_true = np.vstack(y_true)
+            y_pred = np.vstack(y_pred)
+            y_cls = np.asarray(y_cls)
+            valid_loss /= float(len(self.valid_gen))
+            valid_metric, metrics_dict = self.calculate_valid_metrics(y_true, y_pred, y_cls, valid_loss)
+            self.post_validate(valid_metric, metrics_dict)
+
+
+class BalancedTrainer(AllTrainer):
+    #
+    def train_step(self, data):
+        pos_batch, neg_batch, pos_labels, neg_labels, pos_class_labels, neg_class_labels = data
+        batch = torch.cat((pos_batch, neg_batch), dim=0)
+        labels = torch.cat((pos_labels, neg_labels), dim=0)
+        output = self.model(batch.cuda())
+        if self.grad_accum > 0:
+            self.loss = self.criterion(output, labels.cuda())
+            self.tracker_loss += self.loss.item()
+            self.grad_iter += 1
+            (self.loss/self.grad_accum).backward()
+            if self.grad_iter % self.grad_accum == 0:
+                self.loss_tracker.update_loss(self.tracker_loss/self.grad_accum)
+                self.tracker_loss = 0
+                self.optimizer.step()
+                self.optimizer.zero_grad()
+        else:
+            self.loss = self.criterion(output, labels.cuda())
+            self.loss_tracker.update_loss(self.loss.item())
+            self.loss.backward()
+            self.optimizer.step()
+            self.optimizer.zero_grad()
+
+
+class EqualTrainer(Trainer):
+    #
+    def train_step(self, data):
+        pos_batch, neg_batch, pos_labels, neg_labels, pos_class_labels, neg_class_labels = data
+        batch = torch.cat((pos_batch, neg_batch), dim=0)
+        labels = torch.cat((pos_labels, neg_labels), dim=0)
+        class_labels = torch.cat((pos_class_labels, neg_class_labels), dim=0)
+        self.optimizer.zero_grad()
+        s_output = self.model(batch.cuda())
+        loss = self.criterion(s_output, labels.cuda())
+        self.loss_tracker.update_loss(loss.item())
+        loss.backward()
+        self.optimizer.step()
+    #
+    def calculate_valid_metrics(self, y_true, y_pred, y_c_true, y_c_pred, loss):
+        # _dice_score returns overall and positive-only Dice with their best
+        # thresholds in a single call
+        valid_dsc, thr, pos_valid_dsc, pos_thr = _dice_score(y_true, y_pred)
+        print ('epoch {epoch} // VALIDATION : loss = {loss:.4f}, dsc = {dsc:.4f}, thr = {thr:.2f}, dsc (pos) = {pos_dsc:.4f}, thr (pos) = {pos_thr:.2f}'
+            .format(epoch=str(self.current_epoch).zfill(len(str(self.max_epochs))), \
+                loss=loss, \
+                dsc=valid_dsc, \
+                thr=thr,
+                pos_dsc=pos_valid_dsc,
+                pos_thr=pos_thr))
+        valid_metric = pos_valid_dsc
+        metrics_dict = {'dsc': valid_dsc, 'thr': thr, 'pos_dsc': pos_valid_dsc, 'pos_thr': pos_thr}
+        return valid_metric, metrics_dict
+    #
+    def validate(self):
+        with torch.no_grad():
+            self.model = self.model.eval().cuda()
+            valid_loss = 0.
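+            # Training sees balanced positive/negative pairs, but validation
+            # runs over the unmodified validation loader, so Dice here reflects
+            # the true prevalence of pneumothorax in the split.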
+            y_pred = [] ; y_true = [] ; y_c_pred = [] ; y_c_true = []
+            for i, data in tqdm(enumerate(self.valid_gen), total=len(self.valid_gen)):
+                batch, labels, class_labels = data
+                s_output = self.model(batch.cuda())
+                loss = self.criterion(s_output, labels.cuda())
+                # Softmax predictions (the Dice helpers expect probabilities
+                # in [0, 1], not raw logits)
+                y_pred.append(torch.softmax(s_output, dim=1).cpu().numpy())
+                y_true.append(labels.numpy())
+                y_c_true.extend(class_labels.numpy())
+                valid_loss += loss.item()
+            y_true = np.vstack(y_true)
+            y_pred = np.vstack(y_pred)
+            y_c_true = np.asarray(y_c_true)
+            # Single-head model: there are no classifier outputs to collect,
+            # and calculate_valid_metrics does not use them
+            y_c_pred = None
+            valid_loss /= float(len(self.valid_gen))
+            valid_metric, metrics_dict = self.calculate_valid_metrics(y_true, y_pred, y_c_true, y_c_pred, valid_loss)
+            self.post_validate(valid_metric, metrics_dict)
+
+class EqualTrainerV2(Trainer):
+    #
+    def train_step(self, data):
+        pos_batch, neg_batch, pos_labels, neg_labels, pos_class_labels, neg_class_labels = data
+        batch = torch.cat((pos_batch, neg_batch), dim=0)
+        labels = torch.cat((pos_labels, neg_labels), dim=0)
+        class_labels = torch.cat((pos_class_labels, neg_class_labels), dim=0)
+        self.optimizer.zero_grad()
+        s_output, c_output = self.model(batch.cuda())
+        loss = 0.5 * self.criterion[0](s_output, labels.cuda()) + 0.5 * self.criterion[1](c_output, class_labels.long().cuda())
+        self.loss_tracker.update_loss(loss.item())
+        loss.backward()
+        self.optimizer.step()
+    #
+    def calculate_valid_metrics(self, y_true, y_pred, y_c_true, y_c_pred, loss):
+        #y_c_pred = np.repeat(np.expand_dims(y_c_pred, axis=-1), 2, axis=-1)
+        #y_c_pred[:,0] = 1. - y_c_pred[:,0]
+        valid_auc = _roc_auc_score(y_c_true, y_c_pred)
+        valid_dsc, thr, pos_valid_dsc, pos_thr = _dice_score(y_true, y_pred)
+        print ('epoch {epoch} // VALIDATION : loss = {loss:.4f}, auc = {auc:.4f}, dsc = {dsc:.4f}, thr = {thr:.2f}, dsc (pos) = {pos_dsc:.4f}, thr (pos) = {pos_thr:.2f}'
+            .format(epoch=str(self.current_epoch).zfill(len(str(self.max_epochs))), \
+                loss=loss, \
+                auc=valid_auc, \
+                dsc=valid_dsc, \
+                thr=thr,
+                pos_dsc=pos_valid_dsc,
+                pos_thr=pos_thr))
+        valid_metric = valid_auc
+        metrics_dict = {'auc': valid_auc, 'dsc': valid_dsc, 'thr': thr, 'pos_dsc': pos_valid_dsc, 'pos_thr': pos_thr}
+        return valid_metric, metrics_dict
+    #
+    def validate(self):
+        with torch.no_grad():
+            self.model = self.model.eval().cuda()
+            valid_loss = 0.
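+            # The two-head model returns (segmentation logits, classification
+            # logits), and self.criterion is assumed to be an indexable pair of
+            # losses, e.g. (segmentation_loss, nn.CrossEntropyLoss()); the
+            # exact losses are configured by the training scripts, not fixed
+            # here. The combined loss below mirrors train_step:
+            # 0.5 * seg + 0.5 * cls.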
+ y_pred = [] ; y_true = [] ; y_c_pred = [] ; y_c_true = [] + for i, data in tqdm(enumerate(self.valid_gen), total=len(self.valid_gen)): + batch, labels, class_labels = data + s_output, c_output = self.model(batch.cuda()) + loss = 0.5 * self.criterion[0](s_output, labels.cuda()) + 0.5 * self.criterion[1](c_output, class_labels[:,0].long().cuda()) + y_pred.append(s_output.cpu().numpy()) + y_true.append(labels.numpy()) + #y_c_pred.extend(torch.sigmoid(c_output).cpu().numpy()) #change + y_c_pred.append(c_output.cpu().numpy()) + y_c_true.extend(class_labels.numpy()) + valid_loss += loss.item() + y_true = np.vstack(y_true) + y_pred = np.vstack(y_pred) + y_c_true = np.asarray(y_c_true) + #y_c_pred = np.asarray(y_c_pred) + y_c_pred = np.vstack(y_c_pred) + valid_loss /= float(len(self.valid_gen)) + valid_metric, metrics_dict = self.calculate_valid_metrics(y_true, y_pred, y_c_true, y_c_pred, valid_loss) + self.post_validate(valid_metric, metrics_dict) + +kld = KLDivergence() +mse = nn.MSELoss() + +class VAETrainer(Trainer): + # + def train_step(self, data): + batch, labels, class_label = data + self.optimizer.zero_grad() + output, recon, mu, logvar = self.model(batch.cuda()) + rescaled_output = output - torch.min(output) + rescaled_output = rescaled_output / torch.max(rescaled_output) + loss = self.criterion(output, labels.cuda()) + 0.1 * kld(np.prod(np.asarray(recon.shape)), mu, logvar) + 0.1 * mse(rescaled_output[:,1], recon[:,0]) + self.loss_tracker.update_loss(loss.item()) + loss.backward() + self.optimizer.step() + diff --git a/segment/scripts/.DS_Store b/segment/scripts/.DS_Store new file mode 100644 index 0000000..ec91513 Binary files /dev/null and b/segment/scripts/.DS_Store differ diff --git a/segment/scripts/PREDICT_DEEPLABXY.sh b/segment/scripts/PREDICT_DEEPLABXY.sh new file mode 100644 index 0000000..f924a17 --- /dev/null +++ b/segment/scripts/PREDICT_DEEPLABXY.sh @@ -0,0 +1,40 @@ +module load anaconda/3-5.2.0 +source activate siim-ptx +source deactivate +source activate siim-ptx +PYTHONPATH='' +cd scratch/siim-ptx/segment/bash-scripts/ + + +python PredictDeepLabSnapshotV3.py resnext101_gn_ws \ + ../checkpoints/TRAIN_DEEPLABXY/i0o0/resnext101 \ + ../../data/pngs/test \ + ../lb-predictions/TRAIN_DEEPLABXYFlip/o0/i0_resnext101.pkl \ + --gn \ + --batch-size 1 --imsize-x 1024 --imsize-y 1024 \ + --gpu 1 + +python PredictDeepLabSnapshotV3.py resnext101_gn_ws \ + ../checkpoints/TRAIN_DEEPLABXY/i1o0/resnext101 \ + ../../data/pngs/test \ + ../lb-predictions/TRAIN_DEEPLABXYFlip/o0/i1_resnext101.pkl \ + --gn \ + --batch-size 1 --imsize-x 1024 --imsize-y 1024 \ + --gpu 1 + +python PredictDeepLabSnapshotV3.py resnext101_gn_ws \ + ../checkpoints/TRAIN_DEEPLABXY/i2o0/resnext101 \ + ../../data/pngs/test \ + ../lb-predictions/TRAIN_DEEPLABXYFlip/o0/i2_resnext101.pkl \ + --gn \ + --batch-size 1 --imsize-x 1024 --imsize-y 1024 \ + --gpu 1 + +python PredictDeepLabSnapshotV3.py resnext101_gn_ws \ + ../checkpoints/TRAIN_DEEPLABXY/i3o0/resnext101 \ + ../../data/pngs/test \ + ../lb-predictions/TRAIN_DEEPLABXYFlip/o0/i3_resnext101.pkl \ + --gn \ + --batch-size 1 --imsize-x 1024 --imsize-y 1024 \ + --gpu 1 + diff --git a/segment/scripts/PREDICT_SEGMENT.sh b/segment/scripts/PREDICT_SEGMENT.sh new file mode 100644 index 0000000..c67ec4f --- /dev/null +++ b/segment/scripts/PREDICT_SEGMENT.sh @@ -0,0 +1,39 @@ +module load anaconda/3-5.2.0 +source activate siim-ptx +source deactivate +source activate siim-ptx +PYTHONPATH='' +cd scratch/siim-ptx/segment/bash-scripts/ + + +python PredictDeepLabSnapshot.py 
resnet50_gn_ws \ + ../checkpoints/TRAIN_SEGMENT/i5o0/resnet50 \ + ../../data/pngs/test \ + ../lb-predictions/TRAIN_SEGMENTFlip/o0/i5_resnet50.pkl \ + --gn \ + --batch-size 1 --imsize-x 1280 --imsize-y 1280 \ + --gpu 0 + +python PredictDeepLabSnapshot.py resnet101_gn_ws \ + ../checkpoints/TRAIN_SEGMENT/i6o0/resnet101 \ + ../../data/pngs/test \ + ../lb-predictions/TRAIN_SEGMENTFlip/o0/i6_resnet101.pkl \ + --gn \ + --batch-size 1 --imsize-x 1280 --imsize-y 1280 \ + --gpu 0 + +python PredictDeepLabSnapshot.py resnext50_gn_ws \ + ../checkpoints/TRAIN_SEGMENT/i7o0/resnext50 \ + ../../data/pngs/test \ + ../lb-predictions/TRAIN_SEGMENTFlip/o0/i7_resnext50.pkl \ + --gn \ + --batch-size 1 --imsize-x 1280 --imsize-y 1280 \ + --gpu 0 + +python PredictDeepLabSnapshot.py resnext101_gn_ws \ + ../checkpoints/TRAIN_SEGMENT/i8o0/resnext101 \ + ../../data/pngs/test \ + ../lb-predictions/TRAIN_SEGMENTFlip/o0/i8_resnext101.pkl \ + --gn \ + --batch-size 1 --imsize-x 1024 --imsize-y 1024 \ + --gpu 0 \ No newline at end of file diff --git a/segment/scripts/PREDICT_V100.sh b/segment/scripts/PREDICT_V100.sh new file mode 100644 index 0000000..21da7f0 --- /dev/null +++ b/segment/scripts/PREDICT_V100.sh @@ -0,0 +1,43 @@ +module load anaconda/3-5.2.0 +source activate siim-ptx +source deactivate +source activate siim-ptx +PYTHONPATH='' +cd scratch/siim-ptx/segment/bash-scripts/ + + +python PredictDeepLabSnapshot.py resnet50_gn_ws \ + ../checkpoints/TRAIN_V100/i0o0/resnet50 \ + ../../data/pngs/test \ + ../lb-predictions/TRAIN_V100Flip/o0/i0_resnet50.csv \ + --gn \ + --class-mode \ + --batch-size 1 --imsize-x 1024 --imsize-y 1024 \ + --gpu 2 + +python PredictDeepLabSnapshot.py resnet101_gn_ws \ + ../checkpoints/TRAIN_V100/i1o0/resnet101 \ + ../../data/pngs/test \ + ../lb-predictions/TRAIN_V100Flip/o0/i1_resnet101.csv \ + --gn \ + --class-mode \ + --batch-size 1 --imsize-x 1024 --imsize-y 1024 \ + --gpu 2 + +python PredictDeepLabSnapshot.py resnext50_gn_ws \ + ../checkpoints/TRAIN_V100/i2o0/resnext50 \ + ../../data/pngs/test \ + ../lb-predictions/TRAIN_V100Flip/o0/i2_resnext50.csv \ + --gn \ + --class-mode \ + --batch-size 1 --imsize-x 1024 --imsize-y 1024 \ + --gpu 2 + +python PredictDeepLabSnapshot.py resnext101_gn_ws \ + ../checkpoints/TRAIN_V100/i3o0/resnext101 \ + ../../data/pngs/test \ + ../lb-predictions/TRAIN_V100Flip/o0/i3_resnext101.csv \ + --gn \ + --class-mode \ + --batch-size 1 --imsize-x 960 --imsize-y 960 \ + --gpu 2 \ No newline at end of file diff --git a/segment/scripts/PredictDeepLabSnapshot.py b/segment/scripts/PredictDeepLabSnapshot.py new file mode 100644 index 0000000..9cb201f --- /dev/null +++ b/segment/scripts/PredictDeepLabSnapshot.py @@ -0,0 +1,200 @@ +import sys ; sys.path.insert(0, '..') ; sys.path.insert(0, '../..') + +from model.deeplab import DeepLab + +from reproducibility import set_reproducibility + +from data.loader import XrayDataset +import pickle + +from tqdm import tqdm +import torch +from torch import optim +from torch import nn +import adabound + +import argparse +import pandas as pd +import numpy as np +import glob, os + +from utils.aug import simple_aug, resize_aug, pad_image +from utils.helper import LossTracker, preprocess_input + +from torch.utils.data import DataLoader +from functools import partial + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument('model', type=str) + parser.add_argument('model_folder', type=str, help="Path to folder containing snapsnot ensemble models.") + parser.add_argument('data_dir', type=str, help="Directory to load 
image data from.") + parser.add_argument('save_file', type=str) + parser.add_argument('--class-mode', action='store_true') + parser.add_argument('--gpu', type=int, default=0) + parser.add_argument('--imsize-x', type=int, default=384) + parser.add_argument('--imsize-y', type=int, default=384) + parser.add_argument('--imratio', type=float, default=1) + parser.add_argument('--batch-size', type=int, default=16) + parser.add_argument('--tta', action='store_true', help='Enable test-time augmentation') + parser.add_argument('--dropout-p', type=float, default=0.2) + parser.add_argument('--gn', action='store_true') + parser.add_argument('--output-stride', type=int, default=16) + parser.add_argument('--verbosity', type=int, default=100) + parser.add_argument('--num-workers', type=int, default=1) + parser.add_argument('--seed', type=int, default=88) + + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + + set_reproducibility(args.seed) + + resize_me = resize_aug(imsize_x=args.imsize_x, imsize_y=args.imsize_y) + pad_func = partial(pad_image, ratio=args.imratio) + + print ("Predicting the PNEUMOTHORAX SEGMENTATION model...") + + torch.cuda.set_device(args.gpu) ; torch.backends.cudnn.benchmark = True + + if not os.path.exists(os.path.dirname(args.save_file)): + os.makedirs(os.path.dirname(args.save_file)) + + print("Reading images from directory {}".format(args.data_dir)) + test_images = glob.glob(os.path.join(args.data_dir, '*.png')) + print ('TEST: n={}'.format(len(test_images))) + test_sops = [_.split('/')[-1].replace('.png', '') for _ in test_images] + num_classes = 2 + + # Get models in snapshot ensemble + snapshots = glob.glob(os.path.join(args.model_folder, '*.pth')) + + num_snapshots = 3 + weights = np.asarray([3.,1.,1.]) + weights = weights / np.sum(weights) + # Pick best 3 models, then weight based on Kaggle metric: 3, 1, 1 + # This assumes a certain formatting of the checkpoint file name + # in order to extract the Kaggle metric + if args.class_mode: + def extract_kag(ckpt): + ckpt = ckpt.split('/')[-1] + _kag = ckpt.split('_')[4] + _kag = _kag.split('-')[-1] + return float(_kag) + else: + def extract_kag(ckpt): + ckpt = ckpt.split('/')[-1] + _kag = ckpt.split('_')[2] + _kag = _kag.split('-')[-1] + return float(_kag) + + snapshot_kags = [extract_kag(_) for _ in snapshots] + kag_order = np.argsort(snapshot_kags)[::-1][:num_snapshots] + snapshots = list(np.asarray(snapshots)[kag_order]) + + def load_model(ckpt): + model = DeepLab(args.model, args.output_stride, args.gn, classifier=False) + model.load_state_dict(torch.load(ckpt)) + model = model.cuda() + model.eval() + return model + + # Get models + print ('Loading checkpoints ...') + model_list = [] + for ss in snapshots: + model_list.append(load_model(ss)) + + # Set up preprocessing function with model + ppi = partial(preprocess_input, model=model_list[0]) + + print ('Setting up data loaders ...') + + params = {'batch_size': 1 if args.tta else args.batch_size, + 'shuffle': False, + 'num_workers': args.num_workers} + + test_set = XrayDataset(imgfiles=test_images, + dicom=False, + labels=[0]*len(test_images), + preprocess=ppi, + pad=pad_func, + resize=resize_me, + test_mode=True) + test_gen = DataLoader(test_set, **params) + + # Test + def get_test_predictions(mod): + with torch.no_grad(): + list_of_pred_dicts = [] + for data in tqdm(test_gen, total=len(test_gen)): + pred_dict = {} + if args.tta: + # should be batch size = 1 + batch, _ = data + batch = batch[0] + output = mod(batch.cuda()) + pred_dict['pred_mask'] 
= torch.softmax(output, dim=1).cpu().numpy()[:,1] + else: + batch, _ = data + output = mod(batch.cuda()) + output_flipped = mod(torch.flip(batch, dims=(-1,)).cuda()) + output_flipped = torch.flip(output_flipped, dims=(-1,)) + pred_dict['pred_mask'] = (torch.softmax(output, dim=1).cpu().numpy()[:,1] + torch.softmax(output_flipped, dim=1).cpu().numpy()[:,1]) / 2. + list_of_pred_dicts.append(pred_dict) + return list_of_pred_dicts + + y_pred_list = [] + for model in tqdm(model_list, total=len(model_list)): + tmp_y_pred = get_test_predictions(model) + y_pred_list.append(tmp_y_pred) + + # Need to average predictions across models + for each_indiv_pred in range(len(y_pred_list[0])): + indiv_pred = np.zeros_like(y_pred_list[0][each_indiv_pred]['pred_mask']) + for each_model_pred in range(len(y_pred_list)): + indiv_pred += weights[each_model_pred]*y_pred_list[each_model_pred][each_indiv_pred]['pred_mask'] + #indiv_pred /= float(len(y_pred_list)) + assert np.min(indiv_pred) >= 0 and np.max(indiv_pred) <= 1 + y_pred_list[0][each_indiv_pred]['pred_mask'] = (indiv_pred * 100).astype('uint8') + + def get_top_X(segmentation, tops=[0,0.5,1.0,2.5,5.0]): + # Assumes segmentation.shape is (1, H, W) + assert segmentation.shape[0] == 1 + scores = [] + segmentation = segmentation.reshape(segmentation.shape[0], -1).astype('int8') + segmentation = -np.sort(-segmentation, axis=1) + for t in tops: + size = int(t / 100. * np.prod(segmentation.shape)) if t > 0 else 1 + scores.append(np.mean(segmentation[:,:size]) / 100.) + return scores + + if args.class_mode: + # Turn segmentation output into class scores + tops = [0,0.5,1.0,2.5,5.0] + class_scores = [] + for i in range(len(y_pred_list[0])): + class_scores.append(get_top_X(y_pred_list[0][i]['pred_mask'], tops)) + # Make a DataFrame + class_scores = np.vstack(class_scores) + class_scores = pd.DataFrame(class_scores) + class_scores.columns = ['Top{}'.format(t) for t in tops] + class_scores['sop'] = test_sops + class_scores.to_csv(args.save_file, index=False) + else: + + y_pred_to_pickle = y_pred_list[0] + y_pred_to_pickle = {test_sops[_] : y_pred_to_pickle[_] for _ in range(len(test_sops))} + + with open(args.save_file, 'wb') as f: + pickle.dump(y_pred_to_pickle, f) + +if __name__ == '__main__': + main() + + + diff --git a/segment/scripts/PredictDeepLabSnapshotV3.py b/segment/scripts/PredictDeepLabSnapshotV3.py new file mode 100644 index 0000000..c59e38c --- /dev/null +++ b/segment/scripts/PredictDeepLabSnapshotV3.py @@ -0,0 +1,215 @@ +import sys ; sys.path.insert(0, '..') ; sys.path.insert(0, '../..') + +from model.deeplab_jpu import DeepLab + +from reproducibility import set_reproducibility + +from data.loader import XrayDataset +#import loss.lovasz_losses as LL +from loss.other_losses import * +import pickle + +from tqdm import tqdm +import torch +from torch import optim +from torch import nn +import adabound + +from model.train import EqualTrainerV2 + +import argparse +import pandas as pd +import numpy as np +import glob, os + +from utils.aug import simple_aug, resize_aug, pad_image +from utils.helper import LossTracker, preprocess_input + +from torch.utils.data import DataLoader +from functools import partial + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument('model', type=str) + parser.add_argument('model_folder', type=str, help="Path to folder containing snapsnot ensemble models.") + parser.add_argument('data_dir', type=str, help="Directory to load image data from.") + parser.add_argument('save_file', type=str) + 
parser.add_argument('--class-mode', action='store_true') + parser.add_argument('--pos-only', action='store_true') + parser.add_argument('--num-snapshots', type=int, default=3) + parser.add_argument('--ss-weights', type=lambda s: [float(_) for _ in s.split(',')], default=[3.,1.,1.]) + parser.add_argument('--no-maxpool', action='store_true') + parser.add_argument('--center', type=str, default='aspp') + parser.add_argument('--jpu', action='store_true') + parser.add_argument('--gpu', type=int, default=0) + parser.add_argument('--labels-df', type=str, default='../../data/train_labels_with_splits.csv') + parser.add_argument('--imsize-x', type=int, default=384) + parser.add_argument('--imsize-y', type=int, default=384) + parser.add_argument('--imratio', type=float, default=1) + parser.add_argument('--batch-size', type=int, default=16) + parser.add_argument('--tta', action='store_true', help='Enable test-time augmentation') + parser.add_argument('--dropout-p', type=float, default=0.2) + parser.add_argument('--gn', action='store_true') + parser.add_argument('--output-stride', type=int, default=16) + parser.add_argument('--verbosity', type=int, default=100) + parser.add_argument('--num-workers', type=int, default=1) + parser.add_argument('--seed', type=int, default=88) + + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + + set_reproducibility(args.seed) + + resize_me = resize_aug(imsize_x=args.imsize_x, imsize_y=args.imsize_y) + pad_func = partial(pad_image, ratio=args.imratio) + + print ("Testing the PNEUMOTHORAX SEGMENTATION model...") + + torch.cuda.set_device(args.gpu) ; torch.backends.cudnn.benchmark = True + + if not os.path.exists(os.path.dirname(args.save_file)): + os.makedirs(os.path.dirname(args.save_file)) + + print("Reading images from directory {}".format(args.data_dir)) + test_images = glob.glob(os.path.join(args.data_dir, '*.png')) + print ('TEST: n={}'.format(len(test_images))) + test_sops = [_.split('/')[-1].replace('.png', '') for _ in test_images] + num_classes = 2 + + # Get models in snapshot ensemble + snapshots = glob.glob(os.path.join(args.model_folder, '*.pth')) + + num_snapshots = args.num_snapshots + snapshot_weights = args.ss_weights + # Pick best 3 models, then weight based on Kaggle metric: 3, 1, 1 + # This assumes a certain formatting of the checkpoint file name + # in order to extract the Kaggle metric + if args.class_mode: + def extract_kag(ckpt): + ckpt = ckpt.split('/')[-1] + _kag = ckpt.split('_')[4] + _kag = _kag.split('-')[-1] + return float(_kag) + elif args.pos_only: + def extract_kag(ckpt): + ckpt = ckpt.split('/')[-1] + _kag = ckpt.split('_')[2] + _kag = _kag.split('-')[-1] + return float(_kag) + else: + def extract_kag(ckpt): + ckpt = ckpt.split('/')[-1] + _kag = ckpt.split('_')[6] + _kag = _kag.split('-')[-1] + return float(_kag) + + snapshot_kags = [extract_kag(_) for _ in snapshots] + kag_order = np.argsort(snapshot_kags)[::-1][:num_snapshots] + snapshots = list(np.asarray(snapshots)[kag_order]) + + def load_model(ckpt): + model = DeepLab(args.model, args.output_stride, args.gn, center=args.center, jpu=args.jpu, use_maxpool=not args.no_maxpool) + model.load_state_dict(torch.load(ckpt)) + model = model.cuda() + model.eval() + return model + + # Get models + print ('Loading checkpoints ...') + model_list = [] + for ss in snapshots: + model_list.append(load_model(ss)) + + # Set up preprocessing function with model + ppi = partial(preprocess_input, model=model_list[0]) + + print ('Setting up data loaders ...') + + params = 
{'batch_size': 1 if args.tta else args.batch_size, + 'shuffle': False, + 'num_workers': args.num_workers} + + test_set = XrayDataset(imgfiles=test_images, + dicom=False, + labels=[0]*len(test_images), + preprocess=ppi, + pad=pad_func, + resize=resize_me, + test_mode=True) + test_gen = DataLoader(test_set, **params) + + # Test + def get_test_predictions(mod): + with torch.no_grad(): + list_of_pred_dicts = [] + for data in tqdm(test_gen, total=len(test_gen)): + pred_dict = {} + if args.tta: + # should be batch size = 1 + batch, _ = data + batch = batch[0] + output = mod(batch.cuda()) + pred_dict['pred_mask'] = torch.softmax(output, dim=1).cpu().numpy()[:,1] + else: + batch, _ = data + output = mod(batch.cuda()) + output_flipped = mod(torch.flip(batch, dims=(-1,)).cuda()) + output_flipped = torch.flip(output_flipped, dims=(-1,)) + pred_dict['pred_mask'] = (torch.softmax(output, dim=1).cpu().numpy()[:,1] + torch.softmax(output_flipped, dim=1).cpu().numpy()[:,1]) / 2. + list_of_pred_dicts.append(pred_dict) + return list_of_pred_dicts + + y_pred_list = [] + for model in tqdm(model_list, total=len(model_list)): + tmp_y_pred = get_test_predictions(model) + y_pred_list.append(tmp_y_pred) + + # Need to average predictions across models + for each_indiv_pred in range(len(y_pred_list[0])): + indiv_pred = np.zeros_like(y_pred_list[0][each_indiv_pred]['pred_mask']) + for each_model_pred in range(len(y_pred_list)): + indiv_pred += snapshot_weights[each_model_pred]*y_pred_list[each_model_pred][each_indiv_pred]['pred_mask'] + indiv_pred /= float(np.sum(snapshot_weights)) + assert np.min(indiv_pred) >= 0 and np.max(indiv_pred) <= 1 + y_pred_list[0][each_indiv_pred]['pred_mask'] = (indiv_pred * 100).astype('uint8') + + def get_top_X(segmentation, tops=[0,0.5,1.0,2.5,5.0]): + # Assumes segmentation.shape is (1, H, W) + assert segmentation.shape[0] == 1 + scores = [] + segmentation = segmentation.reshape(segmentation.shape[0], -1).astype('int8') + segmentation = -np.sort(-segmentation, axis=1) + for t in tops: + size = int(t / 100. * np.prod(segmentation.shape)) if t > 0 else 1 + scores.append(np.mean(segmentation[:,:size]) / 100.) 
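+        # Worked example, assuming masks stored as integer percentage scores
+        # (0-100) as produced above: for a 1024x1024 mask, t = 0.5 averages
+        # roughly the top 0.5% of pixel scores (~5,200 pixels), while t = 0
+        # falls back to the single hottest pixel. Higher pooled values indicate
+        # a confident, reasonably large predicted pneumothorax.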
+ return scores + + if args.class_mode: + # Turn segmentation output into class scores + tops = [0,0.5,1.0,2.5,5.0] + class_scores = [] + for i in range(len(y_pred_list[0])): + class_scores.append(get_top_X(y_pred_list[0][i]['pred_mask'], tops)) + # Make a DataFrame + class_scores = np.vstack(class_scores) + class_scores = pd.DataFrame(class_scores) + class_scores.columns = ['Top{}'.format(t) for t in tops] + class_scores['sop'] = test_sops + class_scores.to_csv(args.save_file, index=False) + else: + y_pred_to_pickle = y_pred_list[0] + y_pred_to_pickle = {test_sops[_] : y_pred_to_pickle[_] for _ in range(len(test_sops))} + + with open(args.save_file, 'wb') as f: + pickle.dump(y_pred_to_pickle, f) + +if __name__ == '__main__': + main() + + + diff --git a/segment/scripts/STAGE2_PREDICT_DEEPLABXY.sh b/segment/scripts/STAGE2_PREDICT_DEEPLABXY.sh new file mode 100644 index 0000000..f431464 --- /dev/null +++ b/segment/scripts/STAGE2_PREDICT_DEEPLABXY.sh @@ -0,0 +1,95 @@ +python Stage2PredictDeepLabSnapshotV3.py resnext101_gn_ws \ + ../checkpoints/TRAIN_DEEPLABXY/i0o0/resnext101 \ + ../../data/pngs/stage2 \ + ../stage2-predictions/TRAIN_DEEPLABXYFlip/o0/i0_resnext101.pkl \ + --gn --group 0 \ + --batch-size 1 --imsize-x 1024 --imsize-y 1024 \ + --gpu 0 + +python Stage2PredictDeepLabSnapshotV3.py resnext101_gn_ws \ + ../checkpoints/TRAIN_DEEPLABXY/i1o0/resnext101 \ + ../../data/pngs/stage2 \ + ../stage2-predictions/TRAIN_DEEPLABXYFlip/o0/i1_resnext101.pkl \ + --gn --group 0 \ + --batch-size 1 --imsize-x 1024 --imsize-y 1024 \ + --gpu 0 + +python Stage2PredictDeepLabSnapshotV3.py resnext101_gn_ws \ + ../checkpoints/TRAIN_DEEPLABXY/i2o0/resnext101 \ + ../../data/pngs/stage2 \ + ../stage2-predictions/TRAIN_DEEPLABXYFlip/o0/i2_resnext101.pkl \ + --gn --group 0 \ + --batch-size 1 --imsize-x 1024 --imsize-y 1024 \ + --gpu 0 + +python Stage2PredictDeepLabSnapshotV3.py resnext101_gn_ws \ + ../checkpoints/TRAIN_DEEPLABXY/i3o0/resnext101 \ + ../../data/pngs/stage2 \ + ../stage2-predictions/TRAIN_DEEPLABXYFlip/o0/i3_resnext101.pkl \ + --gn --group 0 \ + --batch-size 1 --imsize-x 1024 --imsize-y 1024 \ + --gpu 0 + +python Stage2PredictDeepLabSnapshotV3.py resnext101_gn_ws \ + ../checkpoints/TRAIN_DEEPLABXY/i0o0/resnext101 \ + ../../data/pngs/stage2 \ + ../stage2-predictions/TRAIN_DEEPLABXYFlip/o0/i0_resnext101.pkl \ + --gn --group 1 \ + --batch-size 1 --imsize-x 1024 --imsize-y 1024 \ + --gpu 0 + +python Stage2PredictDeepLabSnapshotV3.py resnext101_gn_ws \ + ../checkpoints/TRAIN_DEEPLABXY/i1o0/resnext101 \ + ../../data/pngs/stage2 \ + ../stage2-predictions/TRAIN_DEEPLABXYFlip/o0/i1_resnext101.pkl \ + --gn --group 1 \ + --batch-size 1 --imsize-x 1024 --imsize-y 1024 \ + --gpu 0 + +python Stage2PredictDeepLabSnapshotV3.py resnext101_gn_ws \ + ../checkpoints/TRAIN_DEEPLABXY/i2o0/resnext101 \ + ../../data/pngs/stage2 \ + ../stage2-predictions/TRAIN_DEEPLABXYFlip/o0/i2_resnext101.pkl \ + --gn --group 1 \ + --batch-size 1 --imsize-x 1024 --imsize-y 1024 \ + --gpu 0 + +python Stage2PredictDeepLabSnapshotV3.py resnext101_gn_ws \ + ../checkpoints/TRAIN_DEEPLABXY/i3o0/resnext101 \ + ../../data/pngs/stage2 \ + ../stage2-predictions/TRAIN_DEEPLABXYFlip/o0/i3_resnext101.pkl \ + --gn --group 1 \ + --batch-size 1 --imsize-x 1024 --imsize-y 1024 \ + --gpu 0 + +python Stage2PredictDeepLabSnapshotV3.py resnext101_gn_ws \ + ../checkpoints/TRAIN_DEEPLABXY/i0o0/resnext101 \ + ../../data/pngs/stage2 \ + ../stage2-predictions/TRAIN_DEEPLABXYFlip/o0/i0_resnext101.pkl \ + --gn --group 2 \ + --batch-size 1 --imsize-x 1024 --imsize-y 
1024 \ + --gpu 0 + +python Stage2PredictDeepLabSnapshotV3.py resnext101_gn_ws \ + ../checkpoints/TRAIN_DEEPLABXY/i1o0/resnext101 \ + ../../data/pngs/stage2 \ + ../stage2-predictions/TRAIN_DEEPLABXYFlip/o0/i1_resnext101.pkl \ + --gn --group 2 \ + --batch-size 1 --imsize-x 1024 --imsize-y 1024 \ + --gpu 0 + +python Stage2PredictDeepLabSnapshotV3.py resnext101_gn_ws \ + ../checkpoints/TRAIN_DEEPLABXY/i2o0/resnext101 \ + ../../data/pngs/stage2 \ + ../stage2-predictions/TRAIN_DEEPLABXYFlip/o0/i2_resnext101.pkl \ + --gn --group 2 \ + --batch-size 1 --imsize-x 1024 --imsize-y 1024 \ + --gpu 0 + +python Stage2PredictDeepLabSnapshotV3.py resnext101_gn_ws \ + ../checkpoints/TRAIN_DEEPLABXY/i3o0/resnext101 \ + ../../data/pngs/stage2 \ + ../stage2-predictions/TRAIN_DEEPLABXYFlip/o0/i3_resnext101.pkl \ + --gn --group 2 \ + --batch-size 1 --imsize-x 1024 --imsize-y 1024 \ + --gpu 0 diff --git a/segment/scripts/STAGE2_PREDICT_SEGMENT.sh b/segment/scripts/STAGE2_PREDICT_SEGMENT.sh new file mode 100644 index 0000000..f273577 --- /dev/null +++ b/segment/scripts/STAGE2_PREDICT_SEGMENT.sh @@ -0,0 +1,95 @@ +python Stage2PredictDeepLabSnapshot.py resnet50_gn_ws \ + ../checkpoints/TRAIN_SEGMENT/i5o0/resnet50 \ + ../../data/pngs/stage2 \ + ../stage2-predictions/TRAIN_SEGMENTFlip/o0/i5_resnet50.pkl \ + --gn --group 0 \ + --batch-size 1 --imsize-x 1280 --imsize-y 1280 \ + --gpu 1 + +python Stage2PredictDeepLabSnapshot.py resnet101_gn_ws \ + ../checkpoints/TRAIN_SEGMENT/i6o0/resnet101 \ + ../../data/pngs/stage2 \ + ../stage2-predictions/TRAIN_SEGMENTFlip/o0/i6_resnet101.pkl \ + --gn --group 0 \ + --batch-size 1 --imsize-x 1280 --imsize-y 1280 \ + --gpu 1 + +python Stage2PredictDeepLabSnapshot.py resnext50_gn_ws \ + ../checkpoints/TRAIN_SEGMENT/i7o0/resnext50 \ + ../../data/pngs/stage2 \ + ../stage2-predictions/TRAIN_SEGMENTFlip/o0/i7_resnext50.pkl \ + --gn --group 0 \ + --batch-size 1 --imsize-x 1280 --imsize-y 1280 \ + --gpu 1 + +python Stage2PredictDeepLabSnapshot.py resnext101_gn_ws \ + ../checkpoints/TRAIN_SEGMENT/i8o0/resnext101 \ + ../../data/pngs/stage2 \ + ../stage2-predictions/TRAIN_SEGMENTFlip/o0/i8_resnext101.pkl \ + --gn --group 0 \ + --batch-size 1 --imsize-x 1024 --imsize-y 1024 \ + --gpu 1 + +python Stage2PredictDeepLabSnapshot.py resnet50_gn_ws \ + ../checkpoints/TRAIN_SEGMENT/i5o0/resnet50 \ + ../../data/pngs/stage2 \ + ../stage2-predictions/TRAIN_SEGMENTFlip/o0/i5_resnet50.pkl \ + --gn --group 1 \ + --batch-size 1 --imsize-x 1280 --imsize-y 1280 \ + --gpu 1 + +python Stage2PredictDeepLabSnapshot.py resnet101_gn_ws \ + ../checkpoints/TRAIN_SEGMENT/i6o0/resnet101 \ + ../../data/pngs/stage2 \ + ../stage2-predictions/TRAIN_SEGMENTFlip/o0/i6_resnet101.pkl \ + --gn --group 1 \ + --batch-size 1 --imsize-x 1280 --imsize-y 1280 \ + --gpu 1 + +python Stage2PredictDeepLabSnapshot.py resnext50_gn_ws \ + ../checkpoints/TRAIN_SEGMENT/i7o0/resnext50 \ + ../../data/pngs/stage2 \ + ../stage2-predictions/TRAIN_SEGMENTFlip/o0/i7_resnext50.pkl \ + --gn --group 1 \ + --batch-size 1 --imsize-x 1280 --imsize-y 1280 \ + --gpu 1 + +python Stage2PredictDeepLabSnapshot.py resnext101_gn_ws \ + ../checkpoints/TRAIN_SEGMENT/i8o0/resnext101 \ + ../../data/pngs/stage2 \ + ../stage2-predictions/TRAIN_SEGMENTFlip/o0/i8_resnext101.pkl \ + --gn --group 1 \ + --batch-size 1 --imsize-x 1024 --imsize-y 1024 \ + --gpu 1 + +python Stage2PredictDeepLabSnapshot.py resnet50_gn_ws \ + ../checkpoints/TRAIN_SEGMENT/i5o0/resnet50 \ + ../../data/pngs/stage2 \ + ../stage2-predictions/TRAIN_SEGMENTFlip/o0/i5_resnet50.pkl \ + --gn --group 2 \ + 
--batch-size 1 --imsize-x 1280 --imsize-y 1280 \ + --gpu 1 + +python Stage2PredictDeepLabSnapshot.py resnet101_gn_ws \ + ../checkpoints/TRAIN_SEGMENT/i6o0/resnet101 \ + ../../data/pngs/stage2 \ + ../stage2-predictions/TRAIN_SEGMENTFlip/o0/i6_resnet101.pkl \ + --gn --group 2 \ + --batch-size 1 --imsize-x 1280 --imsize-y 1280 \ + --gpu 1 + +python Stage2PredictDeepLabSnapshot.py resnext50_gn_ws \ + ../checkpoints/TRAIN_SEGMENT/i7o0/resnext50 \ + ../../data/pngs/stage2 \ + ../stage2-predictions/TRAIN_SEGMENTFlip/o0/i7_resnext50.pkl \ + --gn --group 2 \ + --batch-size 1 --imsize-x 1280 --imsize-y 1280 \ + --gpu 1 + +python Stage2PredictDeepLabSnapshot.py resnext101_gn_ws \ + ../checkpoints/TRAIN_SEGMENT/i8o0/resnext101 \ + ../../data/pngs/stage2 \ + ../stage2-predictions/TRAIN_SEGMENTFlip/o0/i8_resnext101.pkl \ + --gn --group 2 \ + --batch-size 1 --imsize-x 1024 --imsize-y 1024 \ + --gpu 1 \ No newline at end of file diff --git a/segment/scripts/STAGE2_PREDICT_V100.sh b/segment/scripts/STAGE2_PREDICT_V100.sh new file mode 100644 index 0000000..1aeb8db --- /dev/null +++ b/segment/scripts/STAGE2_PREDICT_V100.sh @@ -0,0 +1,107 @@ +python Stage2PredictDeepLabSnapshot.py resnet50_gn_ws \ + ../checkpoints/TRAIN_V100/i0o0/resnet50 \ + ../../data/pngs/stage2 \ + ../stage2-predictions/TRAIN_V100Flip/o0/i0_resnet50.csv \ + --gn --group 0 \ + --class-mode \ + --batch-size 1 --imsize-x 1024 --imsize-y 1024 \ + --gpu 2 + +python Stage2PredictDeepLabSnapshot.py resnet101_gn_ws \ + ../checkpoints/TRAIN_V100/i1o0/resnet101 \ + ../../data/pngs/stage2 \ + ../stage2-predictions/TRAIN_V100Flip/o0/i1_resnet101.csv \ + --gn --group 0 \ + --class-mode \ + --batch-size 1 --imsize-x 1024 --imsize-y 1024 \ + --gpu 2 + +python Stage2PredictDeepLabSnapshot.py resnext50_gn_ws \ + ../checkpoints/TRAIN_V100/i2o0/resnext50 \ + ../../data/pngs/stage2 \ + ../stage2-predictions/TRAIN_V100Flip/o0/i2_resnext50.csv \ + --gn --group 0 \ + --class-mode \ + --batch-size 1 --imsize-x 1024 --imsize-y 1024 \ + --gpu 2 + +python Stage2PredictDeepLabSnapshot.py resnext101_gn_ws \ + ../checkpoints/TRAIN_V100/i3o0/resnext101 \ + ../../data/pngs/stage2 \ + ../stage2-predictions/TRAIN_V100Flip/o0/i3_resnext101.csv \ + --gn --group 0 \ + --class-mode \ + --batch-size 1 --imsize-x 960 --imsize-y 960 \ + --gpu 2 + +python Stage2PredictDeepLabSnapshot.py resnet50_gn_ws \ + ../checkpoints/TRAIN_V100/i0o0/resnet50 \ + ../../data/pngs/stage2 \ + ../stage2-predictions/TRAIN_V100Flip/o0/i0_resnet50.csv \ + --gn --group 1 \ + --class-mode \ + --batch-size 1 --imsize-x 1024 --imsize-y 1024 \ + --gpu 2 + +python Stage2PredictDeepLabSnapshot.py resnet101_gn_ws \ + ../checkpoints/TRAIN_V100/i1o0/resnet101 \ + ../../data/pngs/stage2 \ + ../stage2-predictions/TRAIN_V100Flip/o0/i1_resnet101.csv \ + --gn --group 1 \ + --class-mode \ + --batch-size 1 --imsize-x 1024 --imsize-y 1024 \ + --gpu 2 + +python Stage2PredictDeepLabSnapshot.py resnext50_gn_ws \ + ../checkpoints/TRAIN_V100/i2o0/resnext50 \ + ../../data/pngs/stage2 \ + ../stage2-predictions/TRAIN_V100Flip/o0/i2_resnext50.csv \ + --gn --group 1 \ + --class-mode \ + --batch-size 1 --imsize-x 1024 --imsize-y 1024 \ + --gpu 2 + +python Stage2PredictDeepLabSnapshot.py resnext101_gn_ws \ + ../checkpoints/TRAIN_V100/i3o0/resnext101 \ + ../../data/pngs/stage2 \ + ../stage2-predictions/TRAIN_V100Flip/o0/i3_resnext101.csv \ + --gn --group 1 \ + --class-mode \ + --batch-size 1 --imsize-x 960 --imsize-y 960 \ + --gpu 2 + +python Stage2PredictDeepLabSnapshot.py resnet50_gn_ws \ + 
../checkpoints/TRAIN_V100/i0o0/resnet50 \ + ../../data/pngs/stage2 \ + ../stage2-predictions/TRAIN_V100Flip/o0/i0_resnet50.csv \ + --gn --group 2 \ + --class-mode \ + --batch-size 1 --imsize-x 1024 --imsize-y 1024 \ + --gpu 2 + +python Stage2PredictDeepLabSnapshot.py resnet101_gn_ws \ + ../checkpoints/TRAIN_V100/i1o0/resnet101 \ + ../../data/pngs/stage2 \ + ../stage2-predictions/TRAIN_V100Flip/o0/i1_resnet101.csv \ + --gn --group 2 \ + --class-mode \ + --batch-size 1 --imsize-x 1024 --imsize-y 1024 \ + --gpu 2 + +python Stage2PredictDeepLabSnapshot.py resnext50_gn_ws \ + ../checkpoints/TRAIN_V100/i2o0/resnext50 \ + ../../data/pngs/stage2 \ + ../stage2-predictions/TRAIN_V100Flip/o0/i2_resnext50.csv \ + --gn --group 2 \ + --class-mode \ + --batch-size 1 --imsize-x 1024 --imsize-y 1024 \ + --gpu 2 + +python Stage2PredictDeepLabSnapshot.py resnext101_gn_ws \ + ../checkpoints/TRAIN_V100/i3o0/resnext101 \ + ../../data/pngs/stage2 \ + ../stage2-predictions/TRAIN_V100Flip/o0/i3_resnext101.csv \ + --gn --group 2 \ + --class-mode \ + --batch-size 1 --imsize-x 960 --imsize-y 960 \ + --gpu 2 \ No newline at end of file diff --git a/segment/scripts/Stage2PredictDeepLabSnapshot.py b/segment/scripts/Stage2PredictDeepLabSnapshot.py new file mode 100644 index 0000000..8c9d6b7 --- /dev/null +++ b/segment/scripts/Stage2PredictDeepLabSnapshot.py @@ -0,0 +1,209 @@ +import sys ; sys.path.insert(0, '..') ; sys.path.insert(0, '../..') + +from model.deeplab import DeepLab + +from reproducibility import set_reproducibility + +from data.loader import XrayDataset +import pickle + +from tqdm import tqdm +import torch +from torch import optim +from torch import nn +import adabound + +import argparse +import pandas as pd +import numpy as np +import glob, os + +from utils.aug import simple_aug, resize_aug, pad_image +from utils.helper import LossTracker, preprocess_input + +from torch.utils.data import DataLoader +from functools import partial + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument('model', type=str) + parser.add_argument('model_folder', type=str, help="Path to folder containing snapsnot ensemble models.") + parser.add_argument('data_dir', type=str, help="Directory to load image data from.") + parser.add_argument('save_file', type=str) + parser.add_argument('--df', type=str, default='../../data/grouped_stage2.csv') + parser.add_argument('--group', type=int, default=-1) + parser.add_argument('--class-mode', action='store_true') + parser.add_argument('--gpu', type=int, default=0) + parser.add_argument('--imsize-x', type=int, default=384) + parser.add_argument('--imsize-y', type=int, default=384) + parser.add_argument('--imratio', type=float, default=1) + parser.add_argument('--batch-size', type=int, default=16) + parser.add_argument('--tta', action='store_true', help='Enable test-time augmentation') + parser.add_argument('--dropout-p', type=float, default=0.2) + parser.add_argument('--gn', action='store_true') + parser.add_argument('--output-stride', type=int, default=16) + parser.add_argument('--verbosity', type=int, default=100) + parser.add_argument('--num-workers', type=int, default=1) + parser.add_argument('--seed', type=int, default=88) + + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + + set_reproducibility(args.seed) + + resize_me = resize_aug(imsize_x=args.imsize_x, imsize_y=args.imsize_y) + pad_func = partial(pad_image, ratio=args.imratio) + + print ("Predicting the PNEUMOTHORAX SEGMENTATION model...") + + 
torch.cuda.set_device(args.gpu) ; torch.backends.cudnn.benchmark = True + + if not os.path.exists(os.path.dirname(args.save_file)): + os.makedirs(os.path.dirname(args.save_file)) + + print("Reading images from directory {}".format(args.data_dir)) + test_df = pd.read_csv(args.df) + if args.group >= 0: + test_df = test_df[test_df['group'] == args.group] + test_images = [os.path.join(args.data_dir, '{}.png'.format(_)) for _ in test_df['pid']] + print ('TEST: n={}'.format(len(test_images))) + test_sops = [_.split('/')[-1].replace('.png', '') for _ in test_images] + num_classes = 2 + + # Get models in snapshot ensemble + snapshots = glob.glob(os.path.join(args.model_folder, '*.pth')) + + num_snapshots = 3 + weights = np.asarray([3.,1.,1.]) + weights = weights / np.sum(weights) + # Pick best 3 models, then weight based on Kaggle metric: 3, 1, 1 + # This assumes a certain formatting of the checkpoint file name + # in order to extract the Kaggle metric + if args.class_mode: + def extract_kag(ckpt): + ckpt = ckpt.split('/')[-1] + _kag = ckpt.split('_')[4] + _kag = _kag.split('-')[-1] + return float(_kag) + else: + def extract_kag(ckpt): + ckpt = ckpt.split('/')[-1] + _kag = ckpt.split('_')[2] + _kag = _kag.split('-')[-1] + return float(_kag) + + snapshot_kags = [extract_kag(_) for _ in snapshots] + kag_order = np.argsort(snapshot_kags)[::-1][:num_snapshots] + snapshots = list(np.asarray(snapshots)[kag_order]) + + def load_model(ckpt): + model = DeepLab(args.model, args.output_stride, args.gn, classifier=False) + model.load_state_dict(torch.load(ckpt)) + model = model.cuda() + model.eval() + return model + + # Get models + print ('Loading checkpoints ...') + model_list = [] + for ss in snapshots: + model_list.append(load_model(ss)) + + # Set up preprocessing function with model + ppi = partial(preprocess_input, model=model_list[0]) + + print ('Setting up data loaders ...') + + params = {'batch_size': 1 if args.tta else args.batch_size, + 'shuffle': False, + 'num_workers': args.num_workers} + + test_set = XrayDataset(imgfiles=test_images, + dicom=False, + labels=[0]*len(test_images), + preprocess=ppi, + pad=pad_func, + resize=resize_me, + test_mode=True) + test_gen = DataLoader(test_set, **params) + + # Test + def get_test_predictions(mod): + with torch.no_grad(): + list_of_pred_dicts = [] + for data in tqdm(test_gen, total=len(test_gen)): + pred_dict = {} + if args.tta: + # should be batch size = 1 + batch, _ = data + batch = batch[0] + output = mod(batch.cuda()) + pred_dict['pred_mask'] = torch.softmax(output, dim=1).cpu().numpy()[:,1] + else: + batch, _ = data + output = mod(batch.cuda()) + output_flipped = mod(torch.flip(batch, dims=(-1,)).cuda()) + output_flipped = torch.flip(output_flipped, dims=(-1,)) + pred_dict['pred_mask'] = (torch.softmax(output, dim=1).cpu().numpy()[:,1] + torch.softmax(output_flipped, dim=1).cpu().numpy()[:,1]) / 2. 
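+                    # Horizontal-flip TTA: the mirrored image is predicted, the
+                    # output is un-flipped back to the original orientation,
+                    # and the two softmax maps are averaged pixel-wise.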
+ list_of_pred_dicts.append(pred_dict) + return list_of_pred_dicts + + y_pred_list = [] + for model in tqdm(model_list, total=len(model_list)): + tmp_y_pred = get_test_predictions(model) + y_pred_list.append(tmp_y_pred) + + # Need to average predictions across models + for each_indiv_pred in range(len(y_pred_list[0])): + indiv_pred = np.zeros_like(y_pred_list[0][each_indiv_pred]['pred_mask']) + for each_model_pred in range(len(y_pred_list)): + indiv_pred += weights[each_model_pred]*y_pred_list[each_model_pred][each_indiv_pred]['pred_mask'] + #indiv_pred /= float(len(y_pred_list)) + assert np.min(indiv_pred) >= 0 and np.max(indiv_pred) <= 1 + y_pred_list[0][each_indiv_pred]['pred_mask'] = (indiv_pred * 100).astype('uint8') + + def get_top_X(segmentation, tops=[0,0.5,1.0,2.5,5.0]): + # Assumes segmentation.shape is (1, H, W) + assert segmentation.shape[0] == 1 + scores = [] + segmentation = segmentation.reshape(segmentation.shape[0], -1).astype('int8') + segmentation = -np.sort(-segmentation, axis=1) + for t in tops: + size = int(t / 100. * np.prod(segmentation.shape)) if t > 0 else 1 + scores.append(np.mean(segmentation[:,:size]) / 100.) + return scores + + SAVE_FILE = args.save_file + if args.group >= 0: + SAVE_FILE = '{}{}'.format(SAVE_FILE, args.group) + + if args.class_mode: + # Turn segmentation output into class scores + tops = [0,0.5,1.0,2.5,5.0] + class_scores = [] + for i in range(len(y_pred_list[0])): + class_scores.append(get_top_X(y_pred_list[0][i]['pred_mask'], tops)) + # Make a DataFrame + class_scores = np.vstack(class_scores) + class_scores = pd.DataFrame(class_scores) + class_scores.columns = ['Top{}'.format(t) for t in tops] + class_scores['sop'] = test_sops + class_scores.to_csv(SAVE_FILE, index=False) + else: + + y_pred_to_pickle = y_pred_list[0] + y_pred_to_pickle = {test_sops[_] : y_pred_to_pickle[_] for _ in range(len(test_sops))} + + with open(SAVE_FILE, 'wb') as f: + pickle.dump(y_pred_to_pickle, f) + +if __name__ == '__main__': + main() + + + diff --git a/segment/scripts/Stage2PredictDeepLabSnapshotV3.py b/segment/scripts/Stage2PredictDeepLabSnapshotV3.py new file mode 100644 index 0000000..c69183b --- /dev/null +++ b/segment/scripts/Stage2PredictDeepLabSnapshotV3.py @@ -0,0 +1,224 @@ +import sys ; sys.path.insert(0, '..') ; sys.path.insert(0, '../..') + +from model.deeplab_jpu import DeepLab + +from reproducibility import set_reproducibility + +from data.loader import XrayDataset +import loss.lovasz_losses as LL +from loss.other_losses import * +import pickle + +from tqdm import tqdm +import torch +from torch import optim +from torch import nn +import adabound + +from model.train import EqualTrainerV2 + +import argparse +import pandas as pd +import numpy as np +import glob, os + +from utils.aug import simple_aug, resize_aug, pad_image +from utils.helper import LossTracker, preprocess_input + +from torch.utils.data import DataLoader +from functools import partial + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument('model', type=str) + parser.add_argument('model_folder', type=str, help="Path to folder containing snapsnot ensemble models.") + parser.add_argument('data_dir', type=str, help="Directory to load image data from.") + parser.add_argument('save_file', type=str) + parser.add_argument('--df', type=str, default='../../data/grouped_stage2.csv') + parser.add_argument('--group', type=int, default=-1) + parser.add_argument('--class-mode', action='store_true') + parser.add_argument('--pos-only', action='store_true') + 
parser.add_argument('--num-snapshots', type=int, default=3) + parser.add_argument('--ss-weights', type=lambda s: [float(_) for _ in s.split(',')], default=[3.,1.,1.]) + parser.add_argument('--no-maxpool', action='store_true') + parser.add_argument('--center', type=str, default='aspp') + parser.add_argument('--jpu', action='store_true') + parser.add_argument('--gpu', type=int, default=0) + parser.add_argument('--labels-df', type=str, default='../../data/train_labels_with_splits.csv') + parser.add_argument('--imsize-x', type=int, default=384) + parser.add_argument('--imsize-y', type=int, default=384) + parser.add_argument('--imratio', type=float, default=1) + parser.add_argument('--batch-size', type=int, default=16) + parser.add_argument('--tta', action='store_true', help='Enable test-time augmentation') + parser.add_argument('--dropout-p', type=float, default=0.2) + parser.add_argument('--gn', action='store_true') + parser.add_argument('--output-stride', type=int, default=16) + parser.add_argument('--verbosity', type=int, default=100) + parser.add_argument('--num-workers', type=int, default=1) + parser.add_argument('--seed', type=int, default=88) + + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + + set_reproducibility(args.seed) + + resize_me = resize_aug(imsize_x=args.imsize_x, imsize_y=args.imsize_y) + pad_func = partial(pad_image, ratio=args.imratio) + + print ("Testing the PNEUMOTHORAX SEGMENTATION model...") + + torch.cuda.set_device(args.gpu) ; torch.backends.cudnn.benchmark = True + + if not os.path.exists(os.path.dirname(args.save_file)): + os.makedirs(os.path.dirname(args.save_file)) + + print("Reading images from directory {}".format(args.data_dir)) + test_df = pd.read_csv(args.df) + if args.group >= 0: + test_df = test_df[test_df['group'] == args.group] + test_images = [os.path.join(args.data_dir, '{}.png'.format(_)) for _ in test_df['pid']] + print ('TEST: n={}'.format(len(test_images))) + test_sops = [_.split('/')[-1].replace('.png', '') for _ in test_images] + num_classes = 2 + + # Get models in snapshot ensemble + snapshots = glob.glob(os.path.join(args.model_folder, '*.pth')) + + num_snapshots = args.num_snapshots + snapshot_weights = args.ss_weights + # Pick best 3 models, then weight based on Kaggle metric: 3, 1, 1 + # This assumes a certain formatting of the checkpoint file name + # in order to extract the Kaggle metric + if args.class_mode: + def extract_kag(ckpt): + ckpt = ckpt.split('/')[-1] + _kag = ckpt.split('_')[4] + _kag = _kag.split('-')[-1] + return float(_kag) + elif args.pos_only: + def extract_kag(ckpt): + ckpt = ckpt.split('/')[-1] + _kag = ckpt.split('_')[2] + _kag = _kag.split('-')[-1] + return float(_kag) + else: + def extract_kag(ckpt): + ckpt = ckpt.split('/')[-1] + _kag = ckpt.split('_')[6] + _kag = _kag.split('-')[-1] + return float(_kag) + + snapshot_kags = [extract_kag(_) for _ in snapshots] + kag_order = np.argsort(snapshot_kags)[::-1][:num_snapshots] + snapshots = list(np.asarray(snapshots)[kag_order]) + + def load_model(ckpt): + model = DeepLab(args.model, args.output_stride, args.gn, center=args.center, jpu=args.jpu, use_maxpool=not args.no_maxpool) + model.load_state_dict(torch.load(ckpt)) + model = model.cuda() + model.eval() + return model + + # Get models + print ('Loading checkpoints ...') + model_list = [] + for ss in snapshots: + model_list.append(load_model(ss)) + + # Set up preprocessing function with model + ppi = partial(preprocess_input, model=model_list[0]) + + print ('Setting up data loaders ...') 
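+    # With --tta the loader batch size is pinned to 1; each dataset item appears to carry its own
+    # stack of augmented views (hence the batch[0] squeeze inside get_test_predictions).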
+ + params = {'batch_size': 1 if args.tta else args.batch_size, + 'shuffle': False, + 'num_workers': args.num_workers} + + test_set = XrayDataset(imgfiles=test_images, + dicom=False, + labels=[0]*len(test_images), + preprocess=ppi, + pad=pad_func, + resize=resize_me, + test_mode=True) + test_gen = DataLoader(test_set, **params) + + # Test + def get_test_predictions(mod): + with torch.no_grad(): + list_of_pred_dicts = [] + for data in tqdm(test_gen, total=len(test_gen)): + pred_dict = {} + if args.tta: + # should be batch size = 1 + batch, _ = data + batch = batch[0] + output = mod(batch.cuda()) + pred_dict['pred_mask'] = torch.softmax(output, dim=1).cpu().numpy()[:,1] + else: + batch, _ = data + output = mod(batch.cuda()) + output_flipped = mod(torch.flip(batch, dims=(-1,)).cuda()) + output_flipped = torch.flip(output_flipped, dims=(-1,)) + pred_dict['pred_mask'] = (torch.softmax(output, dim=1).cpu().numpy()[:,1] + torch.softmax(output_flipped, dim=1).cpu().numpy()[:,1]) / 2. + list_of_pred_dicts.append(pred_dict) + return list_of_pred_dicts + + y_pred_list = [] + for model in tqdm(model_list, total=len(model_list)): + tmp_y_pred = get_test_predictions(model) + y_pred_list.append(tmp_y_pred) + + # Need to average predictions across models + for each_indiv_pred in range(len(y_pred_list[0])): + indiv_pred = np.zeros_like(y_pred_list[0][each_indiv_pred]['pred_mask']) + for each_model_pred in range(len(y_pred_list)): + indiv_pred += snapshot_weights[each_model_pred]*y_pred_list[each_model_pred][each_indiv_pred]['pred_mask'] + indiv_pred /= float(np.sum(snapshot_weights)) + assert np.min(indiv_pred) >= 0 and np.max(indiv_pred) <= 1 + y_pred_list[0][each_indiv_pred]['pred_mask'] = (indiv_pred * 100).astype('uint8') + + def get_top_X(segmentation, tops=[0,0.5,1.0,2.5,5.0]): + # Assumes segmentation.shape is (1, H, W) + assert segmentation.shape[0] == 1 + scores = [] + segmentation = segmentation.reshape(segmentation.shape[0], -1).astype('int8') + segmentation = -np.sort(-segmentation, axis=1) + for t in tops: + size = int(t / 100. * np.prod(segmentation.shape)) if t > 0 else 1 + scores.append(np.mean(segmentation[:,:size]) / 100.) 
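+            # Each score is the mean of the top t% most confident pixels (masks are stored as uint8
+            # in [0, 100], hence the /100. rescale); t=0 degenerates to the single most confident pixel.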
+ return scores + + SAVE_FILE = args.save_file + if args.group >= 0: + SAVE_FILE = '{}{}'.format(SAVE_FILE, args.group) + + if args.class_mode: + # Turn segmentation output into class scores + tops = [0,0.5,1.0,2.5,5.0] + class_scores = [] + for i in range(len(y_pred_list[0])): + class_scores.append(get_top_X(y_pred_list[0][i]['pred_mask'], tops)) + # Make a DataFrame + class_scores = np.vstack(class_scores) + class_scores = pd.DataFrame(class_scores) + class_scores.columns = ['Top{}'.format(t) for t in tops] + class_scores['sop'] = test_sops + class_scores.to_csv(SAVE_FILE, index=False) + else: + y_pred_to_pickle = y_pred_list[0] + y_pred_to_pickle = {test_sops[_] : y_pred_to_pickle[_] for _ in range(len(test_sops))} + + with open(SAVE_FILE, 'wb') as f: + pickle.dump(y_pred_to_pickle, f) + +if __name__ == '__main__': + main() + + + diff --git a/segment/scripts/TEST_DEEPLABXY.sh b/segment/scripts/TEST_DEEPLABXY.sh new file mode 100644 index 0000000..173d6eb --- /dev/null +++ b/segment/scripts/TEST_DEEPLABXY.sh @@ -0,0 +1,40 @@ +python TestDeepLabSnapshotV3.py resnext101_gn_ws \ + ../checkpoints/TRAIN_DEEPLABXY/i0o0/resnext101 \ + ../../data/pngs/train \ + ../../data/masks/train \ + ../local-cv-predictions/TRAIN_DEEPLABXYFlip/o0/i0_resnext101.pkl \ + --gn \ + --outer-fold 0 --outer-only \ + --batch-size 1 --imsize-x 1024 --imsize-y 1024 \ + --gpu 0 + +python TestDeepLabSnapshotV3.py resnext101_gn_ws \ + ../checkpoints/TRAIN_DEEPLABXY/i1o0/resnext101 \ + ../../data/pngs/train \ + ../../data/masks/train \ + ../local-cv-predictions/TRAIN_DEEPLABXYFlip/o0/i1_resnext101.pkl \ + --gn \ + --outer-fold 0 --outer-only \ + --batch-size 1 --imsize-x 1024 --imsize-y 1024 \ + --gpu 2 + +python TestDeepLabSnapshotV3.py resnext101_gn_ws \ + ../checkpoints/TRAIN_DEEPLABXY/i2o0/resnext101 \ + ../../data/pngs/train \ + ../../data/masks/train \ + ../local-cv-predictions/TRAIN_DEEPLABXYFlip/o0/i2_resnext101.pkl \ + --gn \ + --outer-fold 0 --outer-only \ + --batch-size 1 --imsize-x 1024 --imsize-y 1024 \ + --gpu 2 + +python TestDeepLabSnapshotV3.py resnext101_gn_ws \ + ../checkpoints/TRAIN_DEEPLABXY/i3o0/resnext101 \ + ../../data/pngs/train \ + ../../data/masks/train \ + ../local-cv-predictions/TRAIN_DEEPLABXYFlip/o0/i3_resnext101.pkl \ + --gn \ + --outer-fold 0 --outer-only \ + --batch-size 1 --imsize-x 1024 --imsize-y 1024 \ + --gpu 2 + diff --git a/segment/scripts/TEST_SEGMENT.sh b/segment/scripts/TEST_SEGMENT.sh new file mode 100644 index 0000000..8511982 --- /dev/null +++ b/segment/scripts/TEST_SEGMENT.sh @@ -0,0 +1,39 @@ +python TestDeepLabSnapshot.py resnet50_gn_ws \ + ../checkpoints/TRAIN_SEGMENT/i5o0/resnet50 \ + ../../data/pngs/train \ + ../../data/masks/train \ + ../local-cv-predictions/TRAIN_SEGMENTFlip/o0/i5_resnet50.pkl \ + --gn \ + --outer-fold 0 --outer-only \ + --batch-size 1 --imsize-x 1280 --imsize-y 1280 \ + --gpu 2 + +python TestDeepLabSnapshot.py resnet101_gn_ws \ + ../checkpoints/TRAIN_SEGMENT/i6o0/resnet101 \ + ../../data/pngs/train \ + ../../data/masks/train \ + ../local-cv-predictions/TRAIN_SEGMENTFlip/o0/i6_resnet101.pkl \ + --gn \ + --outer-fold 0 --outer-only \ + --batch-size 1 --imsize-x 1280 --imsize-y 1280 \ + --gpu 2 + +python TestDeepLabSnapshot.py resnext50_gn_ws \ + ../checkpoints/TRAIN_SEGMENT/i7o0/resnext50 \ + ../../data/pngs/train \ + ../../data/masks/train \ + ../local-cv-predictions/TRAIN_SEGMENTFlip/o0/i7_resnext50.pkl \ + --gn \ + --outer-fold 0 --outer-only \ + --batch-size 1 --imsize-x 1280 --imsize-y 1280 \ + --gpu 2 + +python TestDeepLabSnapshot.py 
resnext101_gn_ws \
+    ../checkpoints/TRAIN_SEGMENT/i8o0/resnext101 \
+    ../../data/pngs/train \
+    ../../data/masks/train \
+    ../local-cv-predictions/TRAIN_SEGMENTFlip/o0/i8_resnext101.pkl \
+    --gn \
+    --outer-fold 0 --outer-only \
+    --batch-size 1 --imsize-x 1024 --imsize-y 1024 \
+    --gpu 2
\ No newline at end of file
diff --git a/segment/scripts/TEST_V100.sh b/segment/scripts/TEST_V100.sh
new file mode 100644
index 0000000..f3a36a4
--- /dev/null
+++ b/segment/scripts/TEST_V100.sh
@@ -0,0 +1,43 @@
+python TestDeepLabSnapshot.py resnet50_gn_ws \
+    ../checkpoints/TRAIN_V100/i0o0/resnet50 \
+    ../../data/pngs/train \
+    ../../data/masks/train \
+    ../local-cv-predictions/TRAIN_V100Flip/o0/i0_resnet50.csv \
+    --gn \
+    --class-mode \
+    --outer-fold 0 --outer-only \
+    --batch-size 1 --imsize-x 1024 --imsize-y 1024 \
+    --gpu 3
+
+python TestDeepLabSnapshot.py resnet101_gn_ws \
+    ../checkpoints/TRAIN_V100/i1o0/resnet101 \
+    ../../data/pngs/train \
+    ../../data/masks/train \
+    ../local-cv-predictions/TRAIN_V100Flip/o0/i1_resnet101.csv \
+    --gn \
+    --class-mode \
+    --outer-fold 0 --outer-only \
+    --batch-size 1 --imsize-x 1024 --imsize-y 1024 \
+    --gpu 3
+
+python TestDeepLabSnapshot.py resnext50_gn_ws \
+    ../checkpoints/TRAIN_V100/i2o0/resnext50 \
+    ../../data/pngs/train \
+    ../../data/masks/train \
+    ../local-cv-predictions/TRAIN_V100Flip/o0/i2_resnext50.csv \
+    --gn \
+    --class-mode \
+    --outer-fold 0 --outer-only \
+    --batch-size 1 --imsize-x 1024 --imsize-y 1024 \
+    --gpu 3
+
+python TestDeepLabSnapshot.py resnext101_gn_ws \
+    ../checkpoints/TRAIN_V100/i3o0/resnext101 \
+    ../../data/pngs/train \
+    ../../data/masks/train \
+    ../local-cv-predictions/TRAIN_V100Flip/o0/i3_resnext101.csv \
+    --gn \
+    --class-mode \
+    --outer-fold 0 --outer-only \
+    --batch-size 1 --imsize-x 960 --imsize-y 960 \
+    --gpu 3
\ No newline at end of file
diff --git a/segment/scripts/TRAIN_DEEPLABXY.sh b/segment/scripts/TRAIN_DEEPLABXY.sh
new file mode 100644
index 0000000..ae93d9f
--- /dev/null
+++ b/segment/scripts/TRAIN_DEEPLABXY.sh
@@ -0,0 +1,51 @@
+python TrainDeepLabV2.py resnext101_gn_ws \
+    ../checkpoints/TRAIN_DEEPLABXY/i0o0/resnext101 \
+    ../../data/pngs/train \
+    ../../data/masks/train \
+    --loss weighted_bce --pos-frac 0.1 --neg-frac 7.9 --gn \
+    --inner-fold 0 --outer-fold 0 \
+    --thresholds 0.05,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.95 \
+    --grad-accum 4 \
+    --batch-size 4 --imsize-x 1024 --imsize-y 1024 \
+    --cosine-anneal --total-epochs 80 --num-snapshots 4 \
+    --optimizer sgd --initial-lr 1e-2 --momentum 0.9 --eta-min 1e-4 \
+    --save_best --gpu 0 --seed 86 --verbosity 20
+
+python TrainDeepLabV2.py resnext101_gn_ws \
+    ../checkpoints/TRAIN_DEEPLABXY/i1o0/resnext101 \
+    ../../data/pngs/train \
+    ../../data/masks/train \
+    --loss weighted_bce --pos-frac 0.1 --neg-frac 7.9 --gn \
+    --inner-fold 1 --outer-fold 0 \
+    --thresholds 0.05,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.95 \
+    --grad-accum 4 \
+    --batch-size 4 --imsize-x 1024 --imsize-y 1024 \
+    --cosine-anneal --total-epochs 80 --num-snapshots 4 \
+    --optimizer sgd --initial-lr 1e-2 --momentum 0.9 --eta-min 1e-4 \
+    --save_best --gpu 0 --seed 87
+
+python TrainDeepLabV2.py resnext101_gn_ws \
+    ../checkpoints/TRAIN_DEEPLABXY/i2o0/resnext101 \
+    ../../data/pngs/train \
+    ../../data/masks/train \
+    --loss weighted_bce --pos-frac 0.1 --neg-frac 7.9 --gn \
+    --inner-fold 2 --outer-fold 0 \
+    --thresholds 0.05,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.95 \
+    --grad-accum 4 \
+    --batch-size 4 --imsize-x 1024 --imsize-y 1024 \
+    --cosine-anneal --total-epochs 80
--num-snapshots 4 \ + --optimizer sgd --initial-lr 1e-2 --momentum 0.9 --eta-min 1e-4 \ + --save_best --gpu 0 --seed 88 + +python TrainDeepLabV2.py resnext101_gn_ws \ + ../checkpoints/TRAIN_DEEPLABXY/i3o0/resnext101 \ + ../../data/pngs/train \ + ../../data/masks/train \ + --loss weighted_bce --pos-frac 0.1 --neg-frac 7.9 --gn \ + --inner-fold 3 --outer-fold 0 \ + --thresholds 0.05,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.95 \ + --grad-accum 4 \ + --batch-size 4 --imsize-x 1024 --imsize-y 1024 \ + --cosine-anneal --total-epochs 80 --num-snapshots 4 \ + --optimizer sgd --initial-lr 1e-2 --momentum 0.9 --eta-min 1e-4 \ + --save_best --gpu 0 --seed 89 \ No newline at end of file diff --git a/segment/scripts/TRAIN_SEGMENT.sh b/segment/scripts/TRAIN_SEGMENT.sh new file mode 100644 index 0000000..38d8e9b --- /dev/null +++ b/segment/scripts/TRAIN_SEGMENT.sh @@ -0,0 +1,51 @@ +python TrainDeepLab.py resnet50_gn_ws \ + ../checkpoints/TRAIN_SEGMENT/i5o0/resnet50 \ + ../../data/pngs/train \ + ../../data/masks/train \ + --pos-only --loss soft_dice --gn \ + --inner-fold 5 --outer-fold 0 \ + --thresholds 0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.95,0.99 \ + --grad-accum 16 \ + --batch-size 1 --imsize-x 1280 --imsize-y 1280 \ + --cosine-anneal --total-epochs 100 --num-snapshots 5 \ + --optimizer sgd --initial-lr 1e-2 --momentum 0.9 --eta-min 1e-4 \ + --save_best --gpu 0 --verbosity 50 + +python TrainDeepLab.py resnet101_gn_ws \ + ../checkpoints/TRAIN_SEGMENT/i6o0/resnet101 \ + ../../data/pngs/train \ + ../../data/masks/train \ + --pos-only --loss soft_dice --gn \ + --inner-fold 1 --outer-fold 0 \ + --thresholds 0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.95,0.99 \ + --grad-accum 16 \ + --batch-size 1 --imsize-x 1280 --imsize-y 1280 \ + --cosine-anneal --total-epochs 100 --num-snapshots 5 \ + --optimizer sgd --initial-lr 1e-2 --momentum 0.9 --eta-min 1e-4 \ + --save_best --gpu 1 --verbosity 50 + +python TrainDeepLab.py resnext50_gn_ws \ + ../checkpoints/TRAIN_SEGMENT/i7o0/resnext50 \ + ../../data/pngs/train \ + ../../data/masks/train \ + --pos-only --loss soft_dice --gn \ + --inner-fold 7 --outer-fold 0 \ + --thresholds 0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.95,0.99 \ + --grad-accum 16 \ + --batch-size 1 --imsize-x 1280 --imsize-y 1280 \ + --cosine-anneal --total-epochs 100 --num-snapshots 5 \ + --optimizer sgd --initial-lr 1e-2 --momentum 0.9 --eta-min 1e-4 \ + --save_best --gpu 2 --verbosity 50 + +python TrainDeepLab.py resnext101_gn_ws \ + ../checkpoints/TRAIN_SEGMENT/i8o0/resnext101 \ + ../../data/pngs/train \ + ../../data/masks/train \ + --pos-only --loss soft_dice --gn \ + --inner-fold 8 --outer-fold 0 \ + --thresholds 0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.95,0.99 \ + --grad-accum 16 \ + --batch-size 1 --imsize-x 1024 --imsize-y 1024 \ + --cosine-anneal --total-epochs 100 --num-snapshots 5 \ + --optimizer sgd --initial-lr 1e-2 --momentum 0.9 --eta-min 1e-4 \ + --save_best --gpu 3 --verbosity 50 \ No newline at end of file diff --git a/segment/scripts/TRAIN_V100.sh b/segment/scripts/TRAIN_V100.sh new file mode 100644 index 0000000..6b56dfb --- /dev/null +++ b/segment/scripts/TRAIN_V100.sh @@ -0,0 +1,51 @@ +python TrainDeepLab.py resnet50_gn_ws \ + ../checkpoints/TRAIN_V100/i0o0/resnet50 \ + ../../data/pngs/train \ + ../../data/masks/train \ + --loss weighted_bce --pos-frac 0.1 --neg-frac 4.9 --gn \ + --inner-fold 0 --outer-fold 0 \ + --thresholds 0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.95,0.99 \ + --grad-accum 4 \ + --batch-size 4 --imsize-x 1024 --imsize-y 1024 \ + --cosine-anneal --total-epochs 100 --num-snapshots 5 
\
+    --optimizer sgd --initial-lr 1e-2 --momentum 0.9 --eta-min 1e-4 \
+    --save_best --gpu 0
+
+python TrainDeepLab.py resnet101_gn_ws \
+    ../checkpoints/TRAIN_V100/i1o0/resnet101 \
+    ../../data/pngs/train \
+    ../../data/masks/train \
+    --loss weighted_bce --pos-frac 0.1 --neg-frac 4.9 --gn \
+    --inner-fold 1 --outer-fold 0 \
+    --thresholds 0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.95,0.99 \
+    --grad-accum 4 \
+    --batch-size 4 --imsize-x 1024 --imsize-y 1024 \
+    --cosine-anneal --total-epochs 100 --num-snapshots 5 \
+    --optimizer sgd --initial-lr 1e-2 --momentum 0.9 --eta-min 1e-4 \
+    --save_best --gpu 1
+
+python TrainDeepLab.py resnext50_gn_ws \
+    ../checkpoints/TRAIN_V100/i2o0/resnext50 \
+    ../../data/pngs/train \
+    ../../data/masks/train \
+    --loss weighted_bce --pos-frac 0.1 --neg-frac 4.9 --gn \
+    --inner-fold 2 --outer-fold 0 \
+    --thresholds 0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.95,0.99 \
+    --grad-accum 4 \
+    --batch-size 4 --imsize-x 1024 --imsize-y 1024 \
+    --cosine-anneal --total-epochs 100 --num-snapshots 5 \
+    --optimizer sgd --initial-lr 1e-2 --momentum 0.9 --eta-min 1e-4 \
+    --save_best --gpu 2
+
+python TrainDeepLab.py resnext101_gn_ws \
+    ../checkpoints/TRAIN_V100/i3o0/resnext101 \
+    ../../data/pngs/train \
+    ../../data/masks/train \
+    --loss weighted_bce --pos-frac 0.1 --neg-frac 4.9 --gn \
+    --inner-fold 3 --outer-fold 0 \
+    --thresholds 0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.95,0.99 \
+    --grad-accum 4 \
+    --batch-size 4 --imsize-x 960 --imsize-y 960 \
+    --cosine-anneal --total-epochs 100 --num-snapshots 5 \
+    --optimizer sgd --initial-lr 1e-2 --momentum 0.9 --eta-min 1e-4 \
+    --save_best --gpu 3
\ No newline at end of file
diff --git a/segment/scripts/TestDeepLabSnapshot.py b/segment/scripts/TestDeepLabSnapshot.py
new file mode 100644
index 0000000..c93a04d
--- /dev/null
+++ b/segment/scripts/TestDeepLabSnapshot.py
@@ -0,0 +1,228 @@
+import sys ; sys.path.insert(0, '..') ; sys.path.insert(0, '../..')
+
+from model.deeplab import DeepLab
+
+from reproducibility import set_reproducibility
+
+from data.loader import XrayMaskDataset
+import pickle
+
+from tqdm import tqdm
+import torch
+from torch import optim
+from torch import nn
+import adabound
+
+import argparse
+import pandas as pd
+import numpy as np
+import glob, os
+
+from utils.aug import simple_aug, resize_aug, pad_image
+from utils.helper import LossTracker, preprocess_input
+
+from torch.utils.data import DataLoader
+from functools import partial
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument('model', type=str)
+    parser.add_argument('model_folder', type=str, help="Path to folder containing snapshot ensemble models.")
+    parser.add_argument('data_dir', type=str, help="Directory to load image data from.")
+    parser.add_argument('mask_dir', type=str, help="Directory to load mask data from.")
+    parser.add_argument('save_file', type=str)
+    parser.add_argument('--class-mode', action='store_true')
+    parser.add_argument('--inner-fold', type=int, default=0)
+    parser.add_argument('--outer-fold', type=int, default=0)
+    parser.add_argument('--outer-only', action='store_true')
+    parser.add_argument('--gpu', type=int, default=0)
+    parser.add_argument('--labels-df', type=str, default='../../data/train_labels_with_splits.csv')
+    parser.add_argument('--imsize-x', type=int, default=384)
+    parser.add_argument('--imsize-y', type=int, default=384)
+    parser.add_argument('--imratio', type=float, default=1)
+    parser.add_argument('--batch-size', type=int, default=16)
+    parser.add_argument('--tta',
action='store_true', help='Enable test-time augmentation') + parser.add_argument('--dropout-p', type=float, default=0.2) + parser.add_argument('--gn', action='store_true') + parser.add_argument('--output-stride', type=int, default=16) + parser.add_argument('--verbosity', type=int, default=100) + parser.add_argument('--num-workers', type=int, default=1) + parser.add_argument('--seed', type=int, default=88) + + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + + set_reproducibility(args.seed) + + resize_me = resize_aug(imsize_x=args.imsize_x, imsize_y=args.imsize_y) + pad_func = partial(pad_image, ratio=args.imratio) + + print ("Testing the PNEUMOTHORAX SEGMENTATION model...") + + torch.cuda.set_device(args.gpu) ; torch.backends.cudnn.benchmark = True + + if not os.path.exists(os.path.dirname(args.save_file)): + os.makedirs(os.path.dirname(args.save_file)) + + print("Reading labels from {}".format(args.labels_df)) + + df = pd.read_csv(args.labels_df) + + if args.outer_only: + # Test on outer fold + test_df = df[df['outer'] == args.outer_fold] + else: + # Get rid of outer fold test set + df = df[df['outer'] != args.outer_fold] + assert np.sum(df['inner{}'.format(args.outer_fold)] == 888) == 0 + test_df = df[df['inner{}'.format(args.outer_fold)] == args.inner_fold] + + print ('TEST: n={}'.format(len(test_df))) + + print("Reading images from directory {}".format(args.data_dir)) + test_images = [os.path.join(args.data_dir, '{}.png'.format(_)) for _ in test_df['sop']] + test_masks = [os.path.join(args.mask_dir, '{}.png'.format(_)) for _ in test_df['sop']] + test_labels = list(test_df['ptx_binary']) + test_sops = list(test_df['sop']) + num_classes = 2 + + # Get models in snapshot ensemble + snapshots = glob.glob(os.path.join(args.model_folder, '*.pth')) + + num_snapshots = 3 + weights = np.asarray([3.,1.,1.]) + weights = weights / np.sum(weights) + # Pick best 3 models, then weight based on Kaggle metric: 3, 1, 1 + # This assumes a certain formatting of the checkpoint file name + # in order to extract the Kaggle metric + if args.class_mode: + def extract_kag(ckpt): + ckpt = ckpt.split('/')[-1] + _kag = ckpt.split('_')[4] + _kag = _kag.split('-')[-1] + return float(_kag) + else: + def extract_kag(ckpt): + ckpt = ckpt.split('/')[-1] + _kag = ckpt.split('_')[2] + _kag = _kag.split('-')[-1] + return float(_kag) + + snapshot_kags = [extract_kag(_) for _ in snapshots] + kag_order = np.argsort(snapshot_kags)[::-1][:num_snapshots] + snapshots = list(np.asarray(snapshots)[kag_order]) + + def load_model(ckpt): + model = DeepLab(args.model, args.output_stride, args.gn, classifier=False) + model.load_state_dict(torch.load(ckpt)) + model = model.cuda() + model.eval() + return model + + # Get models + print ('Loading checkpoints ...') + model_list = [] + for ss in snapshots: + model_list.append(load_model(ss)) + + # Set up preprocessing function with model + ppi = partial(preprocess_input, model=model_list[0]) + + print ('Setting up data loaders ...') + + params = {'batch_size': 1 if args.tta else args.batch_size, + 'shuffle': False, + 'num_workers': args.num_workers} + + test_set = XrayMaskDataset(imgfiles=test_images, + maskfiles=test_masks, + dicom=False, + labels=test_labels, + preprocess=ppi, + pad=pad_func, + crop=None, + resize=resize_me, + test_mode=True) + test_gen = DataLoader(test_set, **params) + + # Test + def get_test_predictions(mod): + with torch.no_grad(): + list_of_pred_dicts = [] + for data in tqdm(test_gen, total=len(test_gen)): + pred_dict = {} + if args.tta: 
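+                # Note: even without --tta the else branch below still applies horizontal-flip TTA
+                # (mirror, predict, un-mirror, average); --tta instead feeds pre-augmented stacks at batch size 1.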
+ # should be batch size = 1 + batch, masks, classes = data + batch = batch[0] + output = mod(batch.cuda()) + pred_dict['pred_mask'] = torch.softmax(output, dim=1).cpu().numpy()[:,1] + pred_dict['gt_mask'] = masks.cpu().numpy().astype('uint8') + pred_dict['y_true'] = classes.cpu().numpy() + else: + batch, masks, classes = data + output = mod(batch.cuda()) + output_flipped = mod(torch.flip(batch, dims=(-1,)).cuda()) + output_flipped = torch.flip(output_flipped, dims=(-1,)) + pred_dict['pred_mask'] = (torch.softmax(output, dim=1).cpu().numpy()[:,1] + torch.softmax(output_flipped, dim=1).cpu().numpy()[:,1]) / 2. + pred_dict['gt_mask'] = masks.cpu().numpy().astype('uint8') + pred_dict['y_true'] = classes.cpu().numpy() + list_of_pred_dicts.append(pred_dict) + return list_of_pred_dicts + + y_pred_list = [] + for model in tqdm(model_list, total=len(model_list)): + tmp_y_pred = get_test_predictions(model) + y_pred_list.append(tmp_y_pred) + + # Need to average predictions across models + for each_indiv_pred in range(len(y_pred_list[0])): + indiv_pred = np.zeros_like(y_pred_list[0][each_indiv_pred]['pred_mask']) + for each_model_pred in range(len(y_pred_list)): + indiv_pred += weights[each_model_pred]*y_pred_list[each_model_pred][each_indiv_pred]['pred_mask'] + #indiv_pred /= float(len(y_pred_list)) + assert np.min(indiv_pred) >= 0 and np.max(indiv_pred) <= 1 + y_pred_list[0][each_indiv_pred]['pred_mask'] = (indiv_pred * 100).astype('uint8') + + def get_top_X(segmentation, tops=[0,0.5,1.0,2.5,5.0]): + # Assumes segmentation.shape is (1, H, W) + assert segmentation.shape[0] == 1 + scores = [] + segmentation = segmentation.reshape(segmentation.shape[0], -1).astype('int8') + segmentation = -np.sort(-segmentation, axis=1) + for t in tops: + size = int(t / 100. * np.prod(segmentation.shape)) if t > 0 else 1 + scores.append(np.mean(segmentation[:,:size]) / 100.) 
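+            # -np.sort(-x) sorts each row in descending order; the int8 cast is lossless here
+            # because the stored probabilities max out at 100 (< 127).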
+        return scores
+
+    if args.class_mode:
+        # Turn segmentation output into class scores
+        tops = [0,0.5,1.0,2.5,5.0]
+        class_scores = []
+        for i in range(len(y_pred_list[0])):
+            class_scores.append(get_top_X(y_pred_list[0][i]['pred_mask'], tops))
+        # Make a DataFrame
+        class_scores = np.vstack(class_scores)
+        class_scores = pd.DataFrame(class_scores)
+        class_scores.columns = ['Top{}'.format(t) for t in tops]
+        class_scores['y_true'] = [_['y_true'][0] for _ in y_pred_list[0]]
+        class_scores['sop'] = test_sops
+        class_scores.to_csv(args.save_file, index=False)
+    else:
+        y_pred_to_pickle = y_pred_list[0]
+        y_pred_to_pickle = {test_sops[_] : y_pred_to_pickle[_] for _ in range(len(test_sops))}
+
+        with open(args.save_file, 'wb') as f:
+            pickle.dump(y_pred_to_pickle, f)
+
+if __name__ == '__main__':
+    main()
+
+
+
diff --git a/segment/scripts/TestDeepLabSnapshotV3.py b/segment/scripts/TestDeepLabSnapshotV3.py
new file mode 100644
index 0000000..741cca8
--- /dev/null
+++ b/segment/scripts/TestDeepLabSnapshotV3.py
@@ -0,0 +1,242 @@
+import sys ; sys.path.insert(0, '..') ; sys.path.insert(0, '../..')
+
+from model.deeplab_jpu import DeepLab
+
+from reproducibility import set_reproducibility
+
+from data.loader import XrayMaskDataset
+#import loss.lovasz_losses as LL
+from loss.other_losses import *
+import pickle
+
+from tqdm import tqdm
+import torch
+from torch import optim
+from torch import nn
+import adabound
+
+from model.train import EqualTrainerV2
+
+import argparse
+import pandas as pd
+import numpy as np
+import glob, os
+
+from utils.aug import simple_aug, resize_aug, pad_image
+from utils.helper import LossTracker, preprocess_input
+
+from torch.utils.data import DataLoader
+from functools import partial
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument('model', type=str)
+    parser.add_argument('model_folder', type=str, help="Path to folder containing snapshot ensemble models.")
+    parser.add_argument('data_dir', type=str, help="Directory to load image data from.")
+    parser.add_argument('mask_dir', type=str, help="Directory to load mask data from.")
+    parser.add_argument('save_file', type=str)
+    parser.add_argument('--class-mode', action='store_true')
+    parser.add_argument('--pos-only', action='store_true')
+    parser.add_argument('--num-snapshots', type=int, default=3)
+    parser.add_argument('--ss-weights', type=lambda s: [float(_) for _ in s.split(',')], default=[3.,1.,1.])
+    parser.add_argument('--inner-fold', type=int, default=0)
+    parser.add_argument('--outer-fold', type=int, default=0)
+    parser.add_argument('--outer-only', action='store_true')
+    parser.add_argument('--no-maxpool', action='store_true')
+    parser.add_argument('--center', type=str, default='aspp')
+    parser.add_argument('--jpu', action='store_true')
+    parser.add_argument('--gpu', type=int, default=0)
+    parser.add_argument('--labels-df', type=str, default='../../data/train_labels_with_splits.csv')
+    parser.add_argument('--imsize-x', type=int, default=384)
+    parser.add_argument('--imsize-y', type=int, default=384)
+    parser.add_argument('--imratio', type=float, default=1)
+    parser.add_argument('--batch-size', type=int, default=16)
+    parser.add_argument('--tta', action='store_true', help='Enable test-time augmentation')
+    parser.add_argument('--dropout-p', type=float, default=0.2)
+    parser.add_argument('--gn', action='store_true')
+    parser.add_argument('--output-stride', type=int, default=16)
+    parser.add_argument('--verbosity', type=int, default=100)
+    parser.add_argument('--num-workers',
type=int, default=1) + parser.add_argument('--seed', type=int, default=88) + + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + + set_reproducibility(args.seed) + + resize_me = resize_aug(imsize_x=args.imsize_x, imsize_y=args.imsize_y) + pad_func = partial(pad_image, ratio=args.imratio) + + print ("Testing the PNEUMOTHORAX SEGMENTATION model...") + + torch.cuda.set_device(args.gpu) ; torch.backends.cudnn.benchmark = True + + if not os.path.exists(os.path.dirname(args.save_file)): + os.makedirs(os.path.dirname(args.save_file)) + + print("Reading labels from {}".format(args.labels_df)) + + df = pd.read_csv(args.labels_df) + + if args.outer_only: + # Test on outer fold + test_df = df[df['outer'] == args.outer_fold] + else: + # Get rid of outer fold test set + df = df[df['outer'] != args.outer_fold] + assert np.sum(df['inner{}'.format(args.outer_fold)] == 888) == 0 + test_df = df[df['inner{}'.format(args.outer_fold)] == args.inner_fold] + + print ('TEST: n={}'.format(len(test_df))) + + print("Reading images from directory {}".format(args.data_dir)) + test_images = [os.path.join(args.data_dir, '{}.png'.format(_)) for _ in test_df['sop']] + test_masks = [os.path.join(args.mask_dir, '{}.png'.format(_)) for _ in test_df['sop']] + test_labels = list(test_df['ptx_binary']) + test_sops = list(test_df['sop']) + num_classes = 2 + + # Get models in snapshot ensemble + snapshots = glob.glob(os.path.join(args.model_folder, '*.pth')) + + num_snapshots = args.num_snapshots + snapshot_weights = args.ss_weights + # Pick best 3 models, then weight based on Kaggle metric: 3, 1, 1 + # This assumes a certain formatting of the checkpoint file name + # in order to extract the Kaggle metric + if args.class_mode: + def extract_kag(ckpt): + ckpt = ckpt.split('/')[-1] + _kag = ckpt.split('_')[4] + _kag = _kag.split('-')[-1] + return float(_kag) + elif args.pos_only: + def extract_kag(ckpt): + ckpt = ckpt.split('/')[-1] + _kag = ckpt.split('_')[2] + _kag = _kag.split('-')[-1] + return float(_kag) + else: + def extract_kag(ckpt): + ckpt = ckpt.split('/')[-1] + _kag = ckpt.split('_')[6] + _kag = _kag.split('-')[-1] + return float(_kag) + + snapshot_kags = [extract_kag(_) for _ in snapshots] + kag_order = np.argsort(snapshot_kags)[::-1][:num_snapshots] + snapshots = list(np.asarray(snapshots)[kag_order]) + + def load_model(ckpt): + model = DeepLab(args.model, args.output_stride, args.gn, center=args.center, jpu=args.jpu, use_maxpool=not args.no_maxpool) + model.load_state_dict(torch.load(ckpt)) + model = model.cuda() + model.eval() + return model + + # Get models + print ('Loading checkpoints ...') + model_list = [] + for ss in snapshots: + model_list.append(load_model(ss)) + + # Set up preprocessing function with model + ppi = partial(preprocess_input, model=model_list[0]) + + print ('Setting up data loaders ...') + + params = {'batch_size': 1 if args.tta else args.batch_size, + 'shuffle': False, + 'num_workers': args.num_workers} + + test_set = XrayMaskDataset(imgfiles=test_images, + maskfiles=test_masks, + dicom=False, + labels=test_labels, + preprocess=ppi, + pad=pad_func, + crop=None, + resize=resize_me, + test_mode=True) + test_gen = DataLoader(test_set, **params) + + # Test + def get_test_predictions(mod): + with torch.no_grad(): + list_of_pred_dicts = [] + for data in tqdm(test_gen, total=len(test_gen)): + pred_dict = {} + if args.tta: + # should be batch size = 1 + batch, masks, classes = data + batch = batch[0] + output = mod(batch.cuda()) + pred_dict['pred_mask'] = 
torch.softmax(output, dim=1).cpu().numpy()[:,1]
+                    pred_dict['gt_mask'] = masks.cpu().numpy().astype('uint8')
+                    pred_dict['y_true'] = classes.cpu().numpy()
+                else:
+                    batch, masks, classes = data
+                    output = mod(batch.cuda())
+                    output_flipped = mod(torch.flip(batch, dims=(-1,)).cuda())
+                    output_flipped = torch.flip(output_flipped, dims=(-1,))
+                    pred_dict['pred_mask'] = (torch.softmax(output, dim=1).cpu().numpy()[:,1] + torch.softmax(output_flipped, dim=1).cpu().numpy()[:,1]) / 2.
+                    pred_dict['gt_mask'] = masks.cpu().numpy().astype('uint8')
+                    pred_dict['y_true'] = classes.cpu().numpy()
+                list_of_pred_dicts.append(pred_dict)
+        return list_of_pred_dicts
+
+    y_pred_list = []
+    for model in tqdm(model_list, total=len(model_list)):
+        tmp_y_pred = get_test_predictions(model)
+        y_pred_list.append(tmp_y_pred)
+
+    # Average predictions across models, then normalize by the weight sum
+    for each_indiv_pred in range(len(y_pred_list[0])):
+        indiv_pred = np.zeros_like(y_pred_list[0][each_indiv_pred]['pred_mask'])
+        for each_model_pred in range(len(y_pred_list)):
+            indiv_pred += snapshot_weights[each_model_pred]*y_pred_list[each_model_pred][each_indiv_pred]['pred_mask']
+        indiv_pred /= float(np.sum(snapshot_weights))
+        assert np.min(indiv_pred) >= 0 and np.max(indiv_pred) <= 1
+        y_pred_list[0][each_indiv_pred]['pred_mask'] = (indiv_pred * 100).astype('uint8')
+
+    def get_top_X(segmentation, tops=[0,0.5,1.0,2.5,5.0]):
+        # Assumes segmentation.shape is (1, H, W)
+        assert segmentation.shape[0] == 1
+        scores = []
+        segmentation = segmentation.reshape(segmentation.shape[0], -1).astype('int8')
+        segmentation = -np.sort(-segmentation, axis=1)
+        for t in tops:
+            size = int(t / 100. * np.prod(segmentation.shape)) if t > 0 else 1
+            scores.append(np.mean(segmentation[:,:size]) / 100.)
+        return scores
+
+    if args.class_mode:
+        # Turn segmentation output into class scores
+        tops = [0,0.5,1.0,2.5,5.0]
+        class_scores = []
+        for i in range(len(y_pred_list[0])):
+            class_scores.append(get_top_X(y_pred_list[0][i]['pred_mask'], tops))
+        # Make a DataFrame
+        class_scores = np.vstack(class_scores)
+        class_scores = pd.DataFrame(class_scores)
+        class_scores.columns = ['Top{}'.format(t) for t in tops]
+        class_scores['y_true'] = [_['y_true'][0] for _ in y_pred_list[0]]
+        class_scores['sop'] = test_sops
+        class_scores.to_csv(args.save_file, index=False)
+    else:
+        y_pred_to_pickle = y_pred_list[0]
+        y_pred_to_pickle = {test_sops[_] : y_pred_to_pickle[_] for _ in range(len(test_sops))}
+
+        with open(args.save_file, 'wb') as f:
+            pickle.dump(y_pred_to_pickle, f)
+
+if __name__ == '__main__':
+    main()
+
+
+
diff --git a/segment/scripts/TrainDeepLab.py b/segment/scripts/TrainDeepLab.py
new file mode 100644
index 0000000..6211cab
--- /dev/null
+++ b/segment/scripts/TrainDeepLab.py
@@ -0,0 +1,332 @@
+import sys ; sys.path.insert(0, '..') ; sys.path.insert(0, '../..') ; sys.path.insert(0, '../../apex/')
+
+from model.deeplab import DeepLab
+
+from reproducibility import set_reproducibility
+
+from data.loader import XrayMaskDataset, XrayEqualMaskDataset, RatioSampler
+import loss.lovasz_losses as LL
+from loss.other_losses import *
+
+import torch
+from torch import optim
+from torch.optim import Optimizer
+from torch import nn
+import adabound
+
+from model.train import Trainer, AllTrainer, BalancedTrainer
+
+import argparse
+import pandas as pd
+import numpy as np
+import os
+
+try:
+    from apex import amp
+    APEX_AVAILABLE = True
+except ModuleNotFoundError:
+    APEX_AVAILABLE = False
+
+from utils.aug import simple_aug, resize_aug, pad_image
+from utils.helper import LossTracker,
preprocess_input + +from torch.utils.data import DataLoader +from functools import partial + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument('model', type=str) + parser.add_argument('save_dir', type=str, help="Directory to save the trained model.") + parser.add_argument('data_dir', type=str, help="Directory to load image data from.") + parser.add_argument('mask_dir', type=str, help="Directory to load mask data from.") + parser.add_argument('--load-model', type=str, default='') + parser.add_argument('--inner-fold', type=int, default=0) + parser.add_argument('--outer-fold', type=int, default=0) + parser.add_argument('--outer-only', action='store_true') + parser.add_argument('--pos-only', action='store_true') + parser.add_argument('--pos-neg-ratio', type=int, default=0) + parser.add_argument('--num-samples', type=int, default=2000) + parser.add_argument('--no-maxpool', action='store_true') + parser.add_argument('--balanced', action='store_true') + parser.add_argument('--invert', action='store_true') + parser.add_argument('--mixed', action='store_true') + parser.add_argument('--center', type=str, default='aspp') + parser.add_argument('--jpu', action='store_true') + parser.add_argument('--gpu', type=int, default=0) + parser.add_argument('--loss', type=str, default='lovasz_softmax') + # for WeightedBCE + parser.add_argument('--pos-frac', type=float, default=0.25) + parser.add_argument('--neg-frac', type=float, default=0.75) + parser.add_argument('--grad-accum', type=float, default=0) + parser.add_argument('--log-dampened', action='store_true') + parser.add_argument('--labels-df', type=str, default='../../data/train_labels_with_splits.csv') + parser.add_argument('--imsize-x', type=int, default=512) + parser.add_argument('--imsize-y', type=int, default=512) + parser.add_argument('--imratio', type=float, default=1) + parser.add_argument('--batch-size', type=int, default=4) + parser.add_argument('--augment-p', type=float, default=0.5) + parser.add_argument('--dropout-p', type=float, default=0.2) + parser.add_argument('--steps-per-epoch', type=int, default=0) + parser.add_argument('--train-head', type=int, default=0) + parser.add_argument('--gn', action='store_true') + parser.add_argument('--output-stride', type=int, default=16) + parser.add_argument('--thresholds', type=lambda s: [float(_) for _ in s.split(',')], default=[0.1], help='Thresholds to evaluate during validation for Dice score') + # CosineAnnealingWarmRestarts + parser.add_argument('--cosine-anneal', action='store_true') + parser.add_argument('--total-epochs', type=int, default=50) + parser.add_argument('--num-snapshots', type=int, default=5) + parser.add_argument('--eta-min', type=float, default=1e-8) + # Optimization + parser.add_argument('--optimizer', type=str, default='adam') + parser.add_argument('--initial-lr', type=float, default=1e-4) + parser.add_argument('--weight-decay', type=float, default=1e-5) + # For SGD + parser.add_argument('--momentum', type=float, default=0.9) + parser.add_argument('--nesterov', action='store_true') + # For AdaBound + parser.add_argument('--final-lr-scale', type=float, default=100.) 
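+    # AdaBound anneals from an Adam-like update toward an SGD-like bound at final_lr = initial-lr * final-lr-scale
+    # (1e-4 * 100 = 1e-2 with these defaults); gamma sets how quickly the bounds tighten.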
+ parser.add_argument('--gamma', type=float, default=1e-3) + + parser.add_argument('--lr-patience', type=int, default=2) + parser.add_argument('--stop-patience', type=int, default=10) + parser.add_argument('--annealing-factor', type=float, default=0.5) + parser.add_argument('--min-delta', type=float, default=1e-3) + # + parser.add_argument('--verbosity', type=int, default=100) + parser.add_argument('--num-workers', type=int, default=1) + parser.add_argument('--seed', type=int, default=88) + parser.add_argument('--save_best', action='store_true', help='Only store the best model.') + + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + + set_reproducibility(args.seed) + + train_aug = simple_aug(p=args.augment_p) + resize_me = resize_aug(imsize_x=args.imsize_x, imsize_y=args.imsize_y) + pad_func = partial(pad_image, ratio=args.imratio) + + print ("Training the PNEUMOTHORAX SEGMENTATION model...") + + torch.cuda.set_device(args.gpu) ; torch.backends.cudnn.benchmark = True + + if not os.path.exists(args.save_dir): + os.makedirs(args.save_dir) + + print("Saving model to {}".format(args.save_dir)) + print("Reading labels from {}".format(args.labels_df)) + + df = pd.read_csv(args.labels_df) + if args.pos_only: + df = df[df['ptx_binary'] == 1] + + if args.outer_only: + # We may want to only use outer splits + train_df = df[df['outer'] != args.outer_fold] + valid_df = df[df['outer'] == args.outer_fold] + else: + # Get rid of outer fold test set + df = df[df['outer'] != args.outer_fold] + assert np.sum(df['inner{}'.format(args.outer_fold)] == 888) == 0 + train_df = df[df['inner{}'.format(args.outer_fold)] != args.inner_fold] + valid_df = df[df['inner{}'.format(args.outer_fold)] == args.inner_fold] + + print ('TRAIN: n={}'.format(len(train_df))) + print ('% PTX: {:.1f}'.format(np.mean(train_df['ptx_binary'])*100)) + print ('VALID: n={}'.format(len(valid_df))) + print ('% PTX: {:.1f}'.format(np.mean(valid_df['ptx_binary'])*100)) + + print("Reading images from directory {}".format(args.data_dir)) + train_images = [os.path.join(args.data_dir, '{}.png'.format(_)) for i, _ in enumerate(train_df['sop'])] + pos_train_images = [os.path.join(args.data_dir, '{}.png'.format(_)) for i, _ in enumerate(train_df['sop']) if train_df['ptx_binary'].iloc[i] == 1] + neg_train_images = [os.path.join(args.data_dir, '{}.png'.format(_)) for i, _ in enumerate(train_df['sop']) if train_df['ptx_binary'].iloc[i] == 0] + train_labels = list(train_df['ptx_binary']) + + valid_images = [os.path.join(args.data_dir, '{}.png'.format(_)) for _ in valid_df['sop']] + valid_labels = list(valid_df['ptx_binary']) + + print("Reading masks from directory {}".format(args.mask_dir)) + train_masks = [os.path.join(args.mask_dir, '{}.png'.format(_)) for i, _ in enumerate(train_df['sop'])] + pos_train_masks = [os.path.join(args.mask_dir, '{}.png'.format(_)) for i, _ in enumerate(train_df['sop']) if train_df['ptx_binary'].iloc[i] == 1] + valid_masks = [os.path.join(args.mask_dir, '{}.png'.format(_)) for _ in valid_df['sop']] + + model = DeepLab(args.model, args.output_stride, args.gn, classifier=False) + if args.load_model != '': + print('Loading trained model {} ...'.format(args.load_model)) + model.load_state_dict(torch.load(args.load_model)) + model = model.cuda() + model.train() + + if args.loss == 'lovasz_softmax': + criterion = LL.LovaszSoftmax().cuda() + elif args.loss == 'soft_dice': + criterion = SoftDiceLoss().cuda() + elif args.loss == 'soft_dicev2': + criterion = SoftDiceLossV2().cuda() + elif args.loss == 
'dice_bce':
+        criterion = DiceBCELoss().cuda()
+    elif args.loss == 'lovasz_hinge':
+        criterion = LL.LovaszHinge().cuda()
+    elif args.loss == 'weighted_bce':
+        criterion = WeightedBCE(pos_frac=args.pos_frac, neg_frac=args.neg_frac).cuda()
+    elif args.loss == 'weighted_bce_v2':
+        criterion = WeightedBCEv2().cuda()
+    elif args.loss == 'focal_loss':
+        criterion = FocalLoss().cuda()
+
+    train_params = model.parameters()
+
+    if args.optimizer.lower() == 'adam':
+        optimizer = optim.Adam(train_params,
+                               lr=args.initial_lr,
+                               weight_decay=args.weight_decay)
+    elif args.optimizer.lower() == 'sgd':
+        optimizer = optim.SGD(train_params,
+                              lr=args.initial_lr,
+                              weight_decay=args.weight_decay,
+                              momentum=args.momentum,
+                              nesterov=args.nesterov)
+    elif args.optimizer.lower() == 'adabound':
+        optimizer = adabound.AdaBound(train_params,
+                                      lr=args.initial_lr,
+                                      final_lr=args.initial_lr * args.final_lr_scale,
+                                      weight_decay=args.weight_decay,
+                                      gamma=args.gamma)
+    else:
+        raise ValueError('`{}` is not a valid optimizer.'.format(args.optimizer))
+
+    if APEX_AVAILABLE and args.mixed:
+        print('Using NVIDIA Apex for mixed precision training ...')
+        model, optimizer = amp.initialize(
+            model, optimizer, opt_level="O2",
+            keep_batchnorm_fp32=True, loss_scale="dynamic"
+        )
+
+    if not isinstance(optimizer, Optimizer):
+        flag = False
+        try:
+            from apex.fp16_utils.fp16_optimizer import FP16_Optimizer
+            if isinstance(optimizer, FP16_Optimizer):
+                flag = True
+        except ModuleNotFoundError:
+            pass
+        if not flag:
+            raise TypeError('{} is not an Optimizer'.format(
+                type(optimizer).__name__))
+
+    if args.cosine_anneal:
+        scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer,
+                                                                   T_0=int(args.total_epochs / args.num_snapshots),
+                                                                   eta_min=args.eta_min)
+        scheduler.T_cur = 0.
+        scheduler.mode = 'max'
+        scheduler.threshold = args.min_delta
+    else:
+        scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'max',
+                                                         factor=args.annealing_factor,
+                                                         patience=args.lr_patience,
+                                                         threshold=args.min_delta,
+                                                         threshold_mode='abs',
+                                                         verbose=True)
+
+    # Set up preprocessing function with model
+    ppi = partial(preprocess_input, model=model)
+
+    print ('Setting up data loaders ...')
+
+    params = {'batch_size': args.batch_size,
+              'shuffle': True,
+              'num_workers': args.num_workers,
+              'drop_last': True}
+
+    valid_params = {'batch_size': args.batch_size,
+                    'shuffle': False,
+                    'num_workers': args.num_workers}
+
+    if args.balanced:
+        train_set = XrayEqualMaskDataset(posfiles=pos_train_images,
+                                         negfiles=neg_train_images,
+                                         maskfiles=pos_train_masks,
+                                         dicom=False,
+                                         labels=None,
+                                         preprocess=ppi,
+                                         transform=train_aug,
+                                         pad=pad_func,
+                                         resize=resize_me,
+                                         inversion=args.invert)
+    else:
+        train_set = XrayMaskDataset(imgfiles=train_images,
+                                    maskfiles=train_masks,
+                                    dicom=False,
+                                    labels=train_labels,
+                                    preprocess=ppi,
+                                    transform=train_aug,
+                                    pad=pad_func,
+                                    resize=resize_me,
+                                    inversion=args.invert)
+
+    if args.pos_neg_ratio > 0:
+        params['shuffle'] = False
+        params['sampler'] = RatioSampler(train_set, args.num_samples, args.pos_neg_ratio)
+
+    train_gen = DataLoader(train_set, **params)
+
+    valid_set = XrayMaskDataset(imgfiles=valid_images,
+                                maskfiles=valid_masks,
+                                dicom=False,
+                                labels=valid_labels,
+                                preprocess=ppi,
+                                pad=pad_func,
+                                resize=resize_me,
+                                test_mode=True,
+                                inversion=args.invert)
+    valid_gen = DataLoader(valid_set, **valid_params)
+
+    loss_tracker = LossTracker()
+
+    steps_per_epoch = args.steps_per_epoch
+    if steps_per_epoch == 0:
+        if args.grad_accum == 0:
+            effective_batch_size = args.batch_size
+        elif args.grad_accum > 0:
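+            # Gradient accumulation: the effective batch size is batch-size * grad-accum, so the
+            # steps_per_epoch computed below counts optimizer updates rather than forward passes.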
+            effective_batch_size = args.batch_size * args.grad_accum
+        else:
+            raise Exception('`grad-accum` cannot be negative')
+        if args.balanced:
+            effective_batch_size *= 2
+            # Hack for steps_per_epoch calculation
+            train_set.imgfiles = train_set.negfiles
+        steps_per_epoch = int(np.ceil(len(train_set.imgfiles) / effective_batch_size))
+        if args.pos_neg_ratio > 0:
+            steps_per_epoch = int(np.ceil(args.num_samples / effective_batch_size))
+
+    if args.pos_only and args.balanced:
+        raise Exception('`pos-only` and `balanced` cannot both be specified')
+
+    trainer_class = Trainer if args.pos_only else AllTrainer
+    if args.balanced:
+        trainer_class = BalancedTrainer
+    trainer = trainer_class(model, 'DeepLab', optimizer, criterion, loss_tracker, args.save_dir, args.save_best, multiclass=train_set.multiclass)
+    #if args.pos_neg_ratio > 0:
+    #    trainer.track_valid_metric = 'pos_dsc'
+    trainer.grad_accum = args.grad_accum
+    if APEX_AVAILABLE and args.mixed:
+        trainer.use_amp = True
+    trainer.set_dataloaders(train_gen, valid_gen)
+    trainer.set_thresholds(args.thresholds)
+
+    # NOTE: `classifier` is referenced below but never defined in this script,
+    # so --train-head should be left at its default of 0
+    if args.train_head > 0:
+        trainer.train_head(optim.Adam(classifier.parameters()), steps_per_epoch, args.train_head)
+
+    trainer.train(args.total_epochs, steps_per_epoch, scheduler, args.stop_patience, verbosity=args.verbosity)
+
+if __name__ == '__main__':
+    main()
+
+
+
diff --git a/segment/scripts/TrainDeepLabV2.py b/segment/scripts/TrainDeepLabV2.py
new file mode 100644
index 0000000..8733556
--- /dev/null
+++ b/segment/scripts/TrainDeepLabV2.py
@@ -0,0 +1,332 @@
+import sys ; sys.path.insert(0, '..') ; sys.path.insert(0, '../..') ; sys.path.insert(0, '../../apex/')
+
+from model.deeplab_jpu import DeepLab
+
+from reproducibility import set_reproducibility
+
+from data.loader import XrayMaskDataset, XrayEqualMaskDataset, RatioSampler
+import loss.lovasz_losses as LL
+from loss.other_losses import *
+
+import torch
+from torch import optim
+from torch.optim import Optimizer
+from torch import nn
+import adabound
+
+from model.train import Trainer, AllTrainer, BalancedTrainer
+
+import argparse
+import pandas as pd
+import numpy as np
+import os
+
+try:
+    from apex import amp
+    APEX_AVAILABLE = True
+except ModuleNotFoundError:
+    APEX_AVAILABLE = False
+
+from utils.aug import simple_aug, resize_aug, pad_image
+from utils.helper import LossTracker, preprocess_input
+
+from torch.utils.data import DataLoader
+from functools import partial
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument('model', type=str)
+    parser.add_argument('save_dir', type=str, help="Directory to save the trained model.")
+    parser.add_argument('data_dir', type=str, help="Directory to load image data from.")
+    parser.add_argument('mask_dir', type=str, help="Directory to load mask data from.")
+    parser.add_argument('--load-model', type=str, default='')
+    parser.add_argument('--inner-fold', type=int, default=0)
+    parser.add_argument('--outer-fold', type=int, default=0)
+    parser.add_argument('--outer-only', action='store_true')
+    parser.add_argument('--pos-only', action='store_true')
+    parser.add_argument('--pos-neg-ratio', type=int, default=0)
+    parser.add_argument('--num-samples', type=int, default=2000)
+    parser.add_argument('--no-maxpool', action='store_true')
+    parser.add_argument('--balanced', action='store_true')
+    parser.add_argument('--invert', action='store_true')
+    parser.add_argument('--mixed', action='store_true')
+    parser.add_argument('--center', type=str, default='aspp')
+    parser.add_argument('--jpu', action='store_true')
+
parser.add_argument('--gpu', type=int, default=0) + parser.add_argument('--loss', type=str, default='lovasz_softmax') + # for WeightedBCE + parser.add_argument('--pos-frac', type=float, default=0.25) + parser.add_argument('--neg-frac', type=float, default=0.75) + parser.add_argument('--grad-accum', type=float, default=0) + parser.add_argument('--log-dampened', action='store_true') + parser.add_argument('--labels-df', type=str, default='../../data/train_labels_with_splits.csv') + parser.add_argument('--imsize-x', type=int, default=512) + parser.add_argument('--imsize-y', type=int, default=512) + parser.add_argument('--imratio', type=float, default=1) + parser.add_argument('--batch-size', type=int, default=4) + parser.add_argument('--augment-p', type=float, default=0.5) + parser.add_argument('--dropout-p', type=float, default=0.2) + parser.add_argument('--steps-per-epoch', type=int, default=0) + parser.add_argument('--train-head', type=int, default=0) + parser.add_argument('--gn', action='store_true') + parser.add_argument('--output-stride', type=int, default=16) + parser.add_argument('--thresholds', type=lambda s: [float(_) for _ in s.split(',')], default=[0.1], help='Thresholds to evaluate during validation for Dice score') + # CosineAnnealingWarmRestarts + parser.add_argument('--cosine-anneal', action='store_true') + parser.add_argument('--total-epochs', type=int, default=50) + parser.add_argument('--num-snapshots', type=int, default=5) + parser.add_argument('--eta-min', type=float, default=1e-8) + # Optimization + parser.add_argument('--optimizer', type=str, default='adam') + parser.add_argument('--initial-lr', type=float, default=1e-4) + parser.add_argument('--weight-decay', type=float, default=1e-5) + # For SGD + parser.add_argument('--momentum', type=float, default=0.9) + parser.add_argument('--nesterov', action='store_true') + # For AdaBound + parser.add_argument('--final-lr-scale', type=float, default=100.) 
+ parser.add_argument('--gamma', type=float, default=1e-3) + + parser.add_argument('--lr-patience', type=int, default=2) + parser.add_argument('--stop-patience', type=int, default=10) + parser.add_argument('--annealing-factor', type=float, default=0.5) + parser.add_argument('--min-delta', type=float, default=1e-3) + # + parser.add_argument('--verbosity', type=int, default=100) + parser.add_argument('--num-workers', type=int, default=1) + parser.add_argument('--seed', type=int, default=88) + parser.add_argument('--save_best', action='store_true', help='Only store the best model.') + + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + + set_reproducibility(args.seed) + + train_aug = simple_aug(p=args.augment_p) + resize_me = resize_aug(imsize_x=args.imsize_x, imsize_y=args.imsize_y) + pad_func = partial(pad_image, ratio=args.imratio) + + print ("Training the PNEUMOTHORAX SEGMENTATION model...") + + torch.cuda.set_device(args.gpu) ; torch.backends.cudnn.benchmark = True + + if not os.path.exists(args.save_dir): + os.makedirs(args.save_dir) + + print("Saving model to {}".format(args.save_dir)) + print("Reading labels from {}".format(args.labels_df)) + + df = pd.read_csv(args.labels_df) + if args.pos_only: + df = df[df['ptx_binary'] == 1] + + if args.outer_only: + # We may want to only use outer splits + train_df = df[df['outer'] != args.outer_fold] + valid_df = df[df['outer'] == args.outer_fold] + else: + # Get rid of outer fold test set + df = df[df['outer'] != args.outer_fold] + assert np.sum(df['inner{}'.format(args.outer_fold)] == 888) == 0 + train_df = df[df['inner{}'.format(args.outer_fold)] != args.inner_fold] + valid_df = df[df['inner{}'.format(args.outer_fold)] == args.inner_fold] + + print ('TRAIN: n={}'.format(len(train_df))) + print ('% PTX: {:.1f}'.format(np.mean(train_df['ptx_binary'])*100)) + print ('VALID: n={}'.format(len(valid_df))) + print ('% PTX: {:.1f}'.format(np.mean(valid_df['ptx_binary'])*100)) + + print("Reading images from directory {}".format(args.data_dir)) + train_images = [os.path.join(args.data_dir, '{}.png'.format(_)) for i, _ in enumerate(train_df['sop'])] + pos_train_images = [os.path.join(args.data_dir, '{}.png'.format(_)) for i, _ in enumerate(train_df['sop']) if train_df['ptx_binary'].iloc[i] == 1] + neg_train_images = [os.path.join(args.data_dir, '{}.png'.format(_)) for i, _ in enumerate(train_df['sop']) if train_df['ptx_binary'].iloc[i] == 0] + train_labels = list(train_df['ptx_binary']) + + valid_images = [os.path.join(args.data_dir, '{}.png'.format(_)) for _ in valid_df['sop']] + valid_labels = list(valid_df['ptx_binary']) + + print("Reading masks from directory {}".format(args.mask_dir)) + train_masks = [os.path.join(args.mask_dir, '{}.png'.format(_)) for i, _ in enumerate(train_df['sop'])] + pos_train_masks = [os.path.join(args.mask_dir, '{}.png'.format(_)) for i, _ in enumerate(train_df['sop']) if train_df['ptx_binary'].iloc[i] == 1] + valid_masks = [os.path.join(args.mask_dir, '{}.png'.format(_)) for _ in valid_df['sop']] + + model = DeepLab(args.model, args.output_stride, args.gn, center=args.center, jpu=args.jpu, use_maxpool=not args.no_maxpool) + if args.load_model != '': + print('Loading trained model {} ...'.format(args.load_model)) + model.load_state_dict(torch.load(args.load_model)) + model = model.cuda() + model.train() + + if args.loss == 'lovasz_softmax': + criterion = LL.LovaszSoftmax().cuda() + elif args.loss == 'soft_dice': + criterion = SoftDiceLoss().cuda() + elif args.loss == 'soft_dicev2': + 
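# Each --loss option selects a criterion from loss/other_losses.py or loss/lovasz_losses.py; for weighted_bce, --pos-frac / --neg-frac appear to set the positive/negative pixel weights. +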
criterion = SoftDiceLossV2().cuda()
+    elif args.loss == 'dice_bce':
+        criterion = DiceBCELoss().cuda()
+    elif args.loss == 'lovasz_hinge':
+        criterion = LL.LovaszHinge().cuda()
+    elif args.loss == 'weighted_bce':
+        criterion = WeightedBCE(pos_frac=args.pos_frac, neg_frac=args.neg_frac).cuda()
+    elif args.loss == 'weighted_bce_v2':
+        criterion = WeightedBCEv2().cuda()
+    elif args.loss == 'focal_loss':
+        criterion = FocalLoss().cuda()
+
+    train_params = model.parameters()
+
+    if args.optimizer.lower() == 'adam':
+        optimizer = optim.Adam(train_params,
+                               lr=args.initial_lr,
+                               weight_decay=args.weight_decay)
+    elif args.optimizer.lower() == 'sgd':
+        optimizer = optim.SGD(train_params,
+                              lr=args.initial_lr,
+                              weight_decay=args.weight_decay,
+                              momentum=args.momentum,
+                              nesterov=args.nesterov)
+    elif args.optimizer.lower() == 'adabound':
+        optimizer = adabound.AdaBound(train_params,
+                                      lr=args.initial_lr,
+                                      final_lr=args.initial_lr * args.final_lr_scale,
+                                      weight_decay=args.weight_decay,
+                                      gamma=args.gamma)
+    else:
+        raise ValueError('`{}` is not a valid optimizer.'.format(args.optimizer))
+
+    if APEX_AVAILABLE and args.mixed:
+        print('Using NVIDIA Apex for mixed precision training ...')
+        model, optimizer = amp.initialize(
+            model, optimizer, opt_level="O2",
+            keep_batchnorm_fp32=True, loss_scale="dynamic"
+        )
+
+    if not isinstance(optimizer, Optimizer):
+        flag = False
+        try:
+            from apex.fp16_utils.fp16_optimizer import FP16_Optimizer
+            if isinstance(optimizer, FP16_Optimizer):
+                flag = True
+        except ModuleNotFoundError:
+            pass
+        if not flag:
+            raise TypeError('{} is not an Optimizer'.format(
+                type(optimizer).__name__))
+
+    if args.cosine_anneal:
+        scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer,
+                                                                   T_0=int(args.total_epochs / args.num_snapshots),
+                                                                   eta_min=args.eta_min)
+        scheduler.T_cur = 0.
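+        # One cosine cycle per snapshot: T_0 = total-epochs / num-snapshots (e.g. 100 epochs with 5
+        # snapshots restarts every 20); mode/threshold below are presumably patched on so the trainer
+        # can poll this scheduler the same way it polls ReduceLROnPlateau.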
+
+    # Set up preprocessing function with model
+    ppi = partial(preprocess_input, model=model)
+
+    print ('Setting up data loaders ...')
+
+    params = {'batch_size': args.batch_size,
+              'shuffle': True,
+              'num_workers': args.num_workers,
+              'drop_last': True}
+
+    valid_params = {'batch_size': args.batch_size,
+                    'shuffle': False,
+                    'num_workers': args.num_workers}
+
+    if args.balanced:
+        train_set = XrayEqualMaskDataset(posfiles=pos_train_images,
+                                         negfiles=neg_train_images,
+                                         maskfiles=pos_train_masks,
+                                         dicom=False,
+                                         labels=None,
+                                         preprocess=ppi,
+                                         transform=train_aug,
+                                         pad=pad_func,
+                                         resize=resize_me,
+                                         inversion=args.invert)
+    else:
+        train_set = XrayMaskDataset(imgfiles=train_images,
+                                    maskfiles=train_masks,
+                                    dicom=False,
+                                    labels=train_labels,
+                                    preprocess=ppi,
+                                    transform=train_aug,
+                                    pad=pad_func,
+                                    resize=resize_me,
+                                    inversion=args.invert)
+
+    if args.pos_neg_ratio > 0:
+        params['shuffle'] = False
+        params['sampler'] = RatioSampler(train_set, args.num_samples, args.pos_neg_ratio)
+
+    train_gen = DataLoader(train_set, **params)
+
+    valid_set = XrayMaskDataset(imgfiles=valid_images,
+                                maskfiles=valid_masks,
+                                dicom=False,
+                                labels=valid_labels,
+                                preprocess=ppi,
+                                pad=pad_func,
+                                resize=resize_me,
+                                test_mode=True,
+                                inversion=args.invert)
+    valid_gen = DataLoader(valid_set, **valid_params)
+
+    loss_tracker = LossTracker()
+
+    steps_per_epoch = args.steps_per_epoch
+    if steps_per_epoch == 0:
+        # Effective batch size = batch_size * grad_accum when accumulating
+        # gradients; e.g. batch_size=2 with grad_accum=8 gives 16.
+        if args.grad_accum == 0:
+            effective_batch_size = args.batch_size
+        elif args.grad_accum > 0:
+            effective_batch_size = args.batch_size * args.grad_accum
+        else:
+            raise Exception('`grad-accum` cannot be negative')
+        if args.balanced:
+            effective_batch_size *= 2
+            # Hack for steps_per_epoch calculation
+            train_set.imgfiles = train_set.negfiles
+        steps_per_epoch = int(np.ceil(len(train_set.imgfiles) / effective_batch_size))
+        if args.pos_neg_ratio > 0:
+            steps_per_epoch = int(np.ceil(args.num_samples / effective_batch_size))
+
+    if args.pos_only and args.balanced:
+        raise Exception('`pos-only` and `balanced` cannot both be specified')
+
+    trainer_class = Trainer if args.pos_only else AllTrainer
+    if args.balanced:
+        trainer_class = BalancedTrainer
+    trainer = trainer_class(model, 'DeepLab', optimizer, criterion, loss_tracker, args.save_dir, args.save_best, multiclass=train_set.multiclass)
+    #if args.pos_neg_ratio > 0:
+    #    trainer.track_valid_metric = 'pos_dsc'
+    trainer.grad_accum = args.grad_accum
+    if APEX_AVAILABLE and args.mixed:
+        trainer.use_amp = True
+    trainer.set_dataloaders(train_gen, valid_gen)
+    trainer.set_thresholds(args.thresholds)
+
+    if args.train_head > 0:
+        trainer.train_head(optim.Adam(model.parameters()), steps_per_epoch, args.train_head)
+
+    trainer.train(args.total_epochs, steps_per_epoch, scheduler, args.stop_patience, verbosity=args.verbosity)
+
+if __name__ == '__main__':
+    main()
diff --git a/segment/scripts/__init__.py b/segment/scripts/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/segment/utils/__init__.py b/segment/utils/__init__.py
new file mode 100644
index 0000000..e69de29
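For reference, a minimal soft Dice loss of the kind selected with `--loss soft_dice` could look like the sketch below. This is an illustration only: the actual `SoftDiceLoss`, `DiceBCELoss`, and Lovasz losses used above live under `./segment/loss/` and may differ in smoothing, weighting, and multiclass handling.

```
import torch
import torch.nn as nn

class SoftDiceSketch(nn.Module):
    """Per-sample soft Dice on sigmoid probabilities, averaged over the batch."""

    def __init__(self, smooth=1.):
        super(SoftDiceSketch, self).__init__()
        self.smooth = smooth

    def forward(self, logits, targets):
        probs = torch.sigmoid(logits).view(logits.size(0), -1)
        targets = targets.view(targets.size(0), -1).float()
        intersection = (probs * targets).sum(dim=1)
        dice = (2. * intersection + self.smooth) / (
            probs.sum(dim=1) + targets.sum(dim=1) + self.smooth)
        return 1. - dice.mean()
```

Minimizing 1 - Dice directly optimizes overlap, which is less sensitive to the extreme foreground/background imbalance of pneumothorax masks than plain BCE.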
diff --git a/segment/utils/aug.py b/segment/utils/aug.py
new file mode 100644
index 0000000..001cbb7
--- /dev/null
+++ b/segment/utils/aug.py
@@ -0,0 +1,59 @@
+"""
+Utilities for data augmentation.
+"""
+import cv2
+import numpy as np
+
+from albumentations import (
+    Compose, OneOf, HorizontalFlip, ShiftScaleRotate, JpegCompression, Blur, CLAHE, RandomGamma, RandomContrast, RandomBrightness, Resize, PadIfNeeded, RandomCrop
+)
+
+
+def simple_aug(p=0.5):
+    """Light geometric and photometric augmentations (horizontal flip left disabled)."""
+    return Compose([
+        #HorizontalFlip(p=0.5),
+        ShiftScaleRotate(rotate_limit=10, scale_limit=0.15, p=0.5, border_mode=cv2.BORDER_CONSTANT, value=[0,0,0]),
+        OneOf([
+            JpegCompression(quality_lower=80),
+            Blur(),
+        ], p=0.5),
+        OneOf([
+            CLAHE(),
+            RandomGamma(),
+            RandomContrast(),
+            RandomBrightness(),
+        ], p=0.5)
+    ], p=p)
+
+def resize_aug(imsize_x, imsize_y=None):
+    """Resize to imsize_x by imsize_y (square if imsize_y is omitted) with bicubic interpolation."""
+    if imsize_y is None: imsize_y = imsize_x
+    return Compose([
+        Resize(imsize_x, imsize_y, always_apply=True, interpolation=cv2.INTER_CUBIC, p=1)
+    ], p=1)
+
+def crop_image(imsize_x, imsize_y=None):
+    """Random crop to the given size."""
+    if imsize_y is None: imsize_y = imsize_x
+    return Compose([
+        RandomCrop(imsize_x, imsize_y, always_apply=True, p=1)
+    ], p=1)
+
+def pad_image(img, ratio=1.):
+    """Pad with the image minimum so that height / width == ratio."""
+    # Default is ratio=1 aka pad to create square image
+    ratio = float(ratio)
+    # Given ratio, what should the height be given the width?
+    h, w = img.shape[:2]
+    desired_h = int(w * ratio)
+    # If the height should be greater than it is, then pad height
+    if desired_h > h:
+        hdiff = int(desired_h - h) ; hdiff = int(hdiff / 2)
+        pad_list = [(hdiff, desired_h-h-hdiff), (0,0), (0,0)]
+    # If height should be smaller than it is, then pad width
+    elif desired_h < h:
+        desired_w = int(h / ratio)
+        wdiff = int(desired_w - w) ; wdiff = int(wdiff / 2)
+        pad_list = [(0,0), (wdiff, desired_w-w-wdiff), (0,0)]
+    elif desired_h == h:
+        return img
+    return np.pad(img, pad_list, 'constant', constant_values=np.min(img))
diff --git a/segment/utils/helper.py b/segment/utils/helper.py
new file mode 100644
index 0000000..296d22a
--- /dev/null
+++ b/segment/utils/helper.py
@@ -0,0 +1,252 @@
+"""
+Helper functions.
+"""
+import pydicom
+import numpy as np
+
+import types
+import math
+from torch._six import inf
+from collections import Counter
+from functools import partial
+
+from torch.optim.optimizer import Optimizer
+
+def channels_last_to_first(img):
+    """ Move the channels to the first dimension."""
+    img = np.swapaxes(img, 0,2)
+    img = np.swapaxes(img, 1,2)
+    return img
+
+def preprocess_input(img, model):
+    """ Preprocess an input image. """
+    # reverse channel order (cv2 loads BGR; pretrained models expect RGB)
+    img = img[..., ::-1].astype('float32')
+    # rescale to the model's expected input range, then normalize per channel
+    model_min = model.input_range[0] ; model_max = model.input_range[1]
+    img_min = float(np.min(img)) ; img_max = float(np.max(img))
+    img_range = img_max - img_min
+    model_range = model_max - model_min
+    if img_range == 0: img_range = 1.
+    img = (((img - img_min) * model_range) / img_range) + model_min
+    img[..., 0] -= model.mean[0]
+    img[..., 1] -= model.mean[1]
+    img[..., 2] -= model.mean[2]
+    img[..., 0] /= model.std[0]
+    img[..., 1] /= model.std[1]
+    img[..., 2] /= model.std[2]
+    return img
+
+def preprocess_deeplab(img, pp_cfg):
+    """ Preprocess an input image. """
+    # assume image is RGB
+    # img = img[..., ::-1].astype('float32')
+    img = img.astype('float32')
+    img[..., 0] -= pp_cfg['mean'][0]
+    img[..., 1] -= pp_cfg['mean'][1]
+    img[..., 2] -= pp_cfg['mean'][2]
+    img[..., 0] /= pp_cfg['std'][0]
+    img[..., 1] /= pp_cfg['std'][1]
+    img[..., 2] /= pp_cfg['std'][2]
+    return img
+
+def preprocess_tf(img):
+    """ Preprocess an input image. """
+    img = img.astype('float32')
+    # map [0, 255] -> [-1, 1] (TF/Inception-style input range)
+    img /= 255.
+    img -= 0.5
+    img *= 2.
+    return img
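+
+# Example: preprocess_tf maps uint8 pixels into [-1, 1]:
+#   preprocess_tf(np.uint8([[0, 128, 255]])) -> approx. [[-1., 0.004, 1.]]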
+
+
+def to_categorical(y, num_classes=None):
+    """
+    Converts a class vector (integers) to binary class matrix.
+    E.g. for use with categorical_crossentropy.
+    Arguments:
+        y: class vector to be converted into a matrix
+            (integers from 0 to num_classes).
+        num_classes: total number of classes.
+    Returns:
+        A binary matrix representation of the input. The classes axis is placed
+        last.
+    """
+    y = np.array(y, dtype='int')
+    input_shape = y.shape
+    if input_shape and input_shape[-1] == 1 and len(input_shape) > 1:
+        input_shape = tuple(input_shape[:-1])
+    y = y.ravel()
+    if not num_classes:
+        num_classes = np.max(y) + 1
+    n = y.shape[0]
+    categorical = np.zeros((n, num_classes), dtype=np.float32)
+    categorical[np.arange(n), y] = 1
+    output_shape = input_shape + (num_classes,)
+    categorical = np.reshape(categorical, output_shape)
+    return categorical
+
+def get_image_from_dicom(dicom_file):
+    """
+    Extract the image as an array from a DICOM file.
+    """
+    dcm = pydicom.read_file(dicom_file)
+    array = dcm.pixel_array
+    try:
+        array *= int(dcm.RescaleSlope)
+        array += int(dcm.RescaleIntercept)
+    except (AttributeError, TypeError):
+        # RescaleSlope/RescaleIntercept are optional DICOM attributes
+        pass
+    if dcm.PhotometricInterpretation == "MONOCHROME1":
+        array = np.invert(array.astype("uint16"))
+    # min-max normalize to [0, 255]
+    array = array.astype("float32")
+    array -= np.min(array)
+    array /= np.max(array)
+    array *= 255.
+    return array.astype('uint8')
+
+class LossTracker():
+    # Tracks a moving average of the minibatch loss.
+    def __init__(self, num_moving_average=1000):
+        self.losses = []
+        self.loss_history = []
+        self.num_moving_average = num_moving_average
+    #
+    def update_loss(self, minibatch_loss):
+        self.losses.append(minibatch_loss)
+    #
+    def get_avg_loss(self):
+        self.loss_history.append(np.mean(self.losses[-self.num_moving_average:]))
+        return self.loss_history[-1]
+    #
+    def reset_loss(self):
+        self.losses = []
+    #
+    def get_loss_history(self):
+        return self.loss_history
+
+class ReduceLROnPlateau(object):
+    """Like torch.optim.lr_scheduler.ReduceLROnPlateau, but also snapshots the
+    best model/classifier weights and restores them whenever the LR is reduced.
+    """
+
+    def __init__(self, optimizer, model, classifier, mode='min', factor=0.1, patience=10,
+                 verbose=False, threshold=1e-4, threshold_mode='rel',
+                 cooldown=0, min_lr=0, eps=1e-8):
+
+        if factor >= 1.0:
+            raise ValueError('Factor should be < 1.0.')
+        self.factor = factor
+
+        if not isinstance(optimizer, Optimizer):
+            raise TypeError('{} is not an Optimizer'.format(
+                type(optimizer).__name__))
+        self.optimizer = optimizer
+        self.model = model
+        self.classifier = classifier
+        self.best_weights = model.state_dict()
+        self.best_classifier = classifier.state_dict()
+
+        if isinstance(min_lr, list) or isinstance(min_lr, tuple):
+            if len(min_lr) != len(optimizer.param_groups):
+                raise ValueError("expected {} min_lrs, got {}".format(
+                    len(optimizer.param_groups), len(min_lr)))
+            self.min_lrs = list(min_lr)
+        else:
+            self.min_lrs = [min_lr] * len(optimizer.param_groups)
+
+        self.patience = patience
+        self.verbose = verbose
+        self.cooldown = cooldown
+        self.cooldown_counter = 0
+        self.mode = mode
+        self.threshold = threshold
+        self.threshold_mode = threshold_mode
+        self.best = None
+        self.num_bad_epochs = None
+        self.mode_worse = None  # the worse value for the chosen mode
+        self.is_better = None
+        self.eps = eps
+        self.last_epoch = -1
+        self._init_is_better(mode=mode, threshold=threshold,
+                             threshold_mode=threshold_mode)
+        self._reset()
+
+    def _reset(self):
+        """Resets num_bad_epochs counter and cooldown counter."""
+        self.best = self.mode_worse
+        self.cooldown_counter = 0
+        self.num_bad_epochs = 0
+
+    def step(self, metrics, epoch=None):
+        # convert `metrics` to float, in case it's a zero-dim Tensor
+        current = float(metrics)
+        if epoch is None:
+            epoch = self.last_epoch = self.last_epoch + 1
+        self.last_epoch = epoch
+
+        if self.is_better(current, self.best):
+            self.best = current
+            # keep a copy of the best weights so they can be restored later
+            self.best_weights = self.model.state_dict()
+            self.best_classifier = self.classifier.state_dict()
+            self.num_bad_epochs = 0
+        else:
+            self.num_bad_epochs += 1
+
+        if self.in_cooldown:
+            self.cooldown_counter -= 1
+            self.num_bad_epochs = 0  # ignore any bad epochs in cooldown
+
+        if self.num_bad_epochs > self.patience:
+            self._reduce_lr(epoch)
+            print ('Restoring best weights ...')
+            self.model.load_state_dict(self.best_weights)
+            self.classifier.load_state_dict(self.best_classifier)
+            self.cooldown_counter = self.cooldown
+            self.num_bad_epochs = 0
+
+    def _reduce_lr(self, epoch):
+        for i, param_group in enumerate(self.optimizer.param_groups):
+            old_lr = float(param_group['lr'])
+            new_lr = max(old_lr * self.factor, self.min_lrs[i])
+            if old_lr - new_lr > self.eps:
+                param_group['lr'] = new_lr
+                if self.verbose:
+                    print('Epoch {:5d}: reducing learning rate'
+                          ' of group {} to {:.4e}.'.format(epoch, i, new_lr))
+
+    @property
+    def in_cooldown(self):
+        return self.cooldown_counter > 0
+
+    def _cmp(self, mode, threshold_mode, threshold, a, best):
+        if mode == 'min' and threshold_mode == 'rel':
+            rel_epsilon = 1. - threshold
+            return a < best * rel_epsilon
+
+        elif mode == 'min' and threshold_mode == 'abs':
+            return a < best - threshold
+
+        elif mode == 'max' and threshold_mode == 'rel':
+            rel_epsilon = threshold + 1.
+            return a > best * rel_epsilon
+
+        else:  # mode == 'max' and threshold_mode == 'abs':
+            return a > best + threshold
+
+    def _init_is_better(self, mode, threshold, threshold_mode):
+        if mode not in {'min', 'max'}:
+            raise ValueError('mode ' + mode + ' is unknown!')
+        if threshold_mode not in {'rel', 'abs'}:
+            raise ValueError('threshold mode ' + threshold_mode + ' is unknown!')
+
+        if mode == 'min':
+            self.mode_worse = inf
+        else:  # mode == 'max':
+            self.mode_worse = -inf
+
+        self.is_better = partial(self._cmp, mode, threshold_mode, threshold)
+
+    def state_dict(self):
+        return {key: value for key, value in self.__dict__.items() if key not in {'optimizer', 'is_better'}}
+
+    def load_state_dict(self, state_dict):
+        self.__dict__.update(state_dict)
+        self._init_is_better(mode=self.mode, threshold=self.threshold, threshold_mode=self.threshold_mode)
diff --git a/setup.sh b/setup.sh
new file mode 100644
index 0000000..5b979a4
--- /dev/null
+++ b/setup.sh
@@ -0,0 +1,15 @@
+conda create -n siim-ptx python=3.7 pip
+source activate siim-ptx
+
+conda install pytorch=1.1 torchvision cudatoolkit=10.0 -c pytorch
+
+# Install mmdetection (its setup.py lives inside the cloned repo)
+git clone https://github.com/open-mmlab/mmdetection/
+pip install Cython
+cd mmdetection
+python setup.py develop
+# pip install -v -e .
+cd ..
+ +conda install pandas scikit-learn scikit-image +pip install albumentations pretrainedmodels pydicom adabound \ No newline at end of file diff --git a/stage1test.txt b/stage1test.txt new file mode 100644 index 0000000..260dddf --- /dev/null +++ b/stage1test.txt @@ -0,0 +1,1377 @@ +1.2.276.0.7230010.3.1.4.8323329.5797.1517875190.762694 +1.2.276.0.7230010.3.1.4.8323329.5798.1517875190.796716 +1.2.276.0.7230010.3.1.4.8323329.5799.1517875190.806810 +1.2.276.0.7230010.3.1.4.8323329.5800.1517875190.832691 +1.2.276.0.7230010.3.1.4.8323329.5801.1517875190.850480 +1.2.276.0.7230010.3.1.4.8323329.580.1517875163.537053 +1.2.276.0.7230010.3.1.4.8323329.5802.1517875190.919441 +1.2.276.0.7230010.3.1.4.8323329.5803.1517875190.948211 +1.2.276.0.7230010.3.1.4.8323329.5804.1517875190.965203 +1.2.276.0.7230010.3.1.4.8323329.5805.1517875190.971203 +1.2.276.0.7230010.3.1.4.8323329.5806.1517875190.997408 +1.2.276.0.7230010.3.1.4.8323329.5807.1517875191.46837 +1.2.276.0.7230010.3.1.4.8323329.5808.1517875191.103610 +1.2.276.0.7230010.3.1.4.8323329.5809.1517875191.181226 +1.2.276.0.7230010.3.1.4.8323329.5810.1517875191.161980 +1.2.276.0.7230010.3.1.4.8323329.5811.1517875191.159688 +1.2.276.0.7230010.3.1.4.8323329.581.1517875163.538760 +1.2.276.0.7230010.3.1.4.8323329.5812.1517875191.165822 +1.2.276.0.7230010.3.1.4.8323329.5813.1517875191.178523 +1.2.276.0.7230010.3.1.4.8323329.5814.1517875191.199063 +1.2.276.0.7230010.3.1.4.8323329.5815.1517875191.185989 +1.2.276.0.7230010.3.1.4.8323329.5816.1517875191.207380 +1.2.276.0.7230010.3.1.4.8323329.5817.1517875191.222523 +1.2.276.0.7230010.3.1.4.8323329.5818.1517875191.228622 +1.2.276.0.7230010.3.1.4.8323329.5819.1517875191.247064 +1.2.276.0.7230010.3.1.4.8323329.5820.1517875191.249325 +1.2.276.0.7230010.3.1.4.8323329.5821.1517875191.246350 +1.2.276.0.7230010.3.1.4.8323329.582.1517875163.546454 +1.2.276.0.7230010.3.1.4.8323329.5822.1517875191.292105 +1.2.276.0.7230010.3.1.4.8323329.5823.1517875191.296784 +1.2.276.0.7230010.3.1.4.8323329.5824.1517875191.323606 +1.2.276.0.7230010.3.1.4.8323329.5825.1517875191.342287 +1.2.276.0.7230010.3.1.4.8323329.5826.1517875191.351683 +1.2.276.0.7230010.3.1.4.8323329.5827.1517875191.350766 +1.2.276.0.7230010.3.1.4.8323329.5828.1517875191.341519 +1.2.276.0.7230010.3.1.4.8323329.5829.1517875191.395249 +1.2.276.0.7230010.3.1.4.8323329.5830.1517875191.406486 +1.2.276.0.7230010.3.1.4.8323329.5831.1517875191.436811 +1.2.276.0.7230010.3.1.4.8323329.583.1517875163.545784 +1.2.276.0.7230010.3.1.4.8323329.5832.1517875191.454262 +1.2.276.0.7230010.3.1.4.8323329.5833.1517875191.476556 +1.2.276.0.7230010.3.1.4.8323329.5834.1517875191.515390 +1.2.276.0.7230010.3.1.4.8323329.5835.1517875191.548945 +1.2.276.0.7230010.3.1.4.8323329.5836.1517875191.548726 +1.2.276.0.7230010.3.1.4.8323329.5837.1517875191.579808 +1.2.276.0.7230010.3.1.4.8323329.5838.1517875191.587919 +1.2.276.0.7230010.3.1.4.8323329.5839.1517875191.654694 +1.2.276.0.7230010.3.1.4.8323329.5840.1517875191.649829 +1.2.276.0.7230010.3.1.4.8323329.5841.1517875191.659472 +1.2.276.0.7230010.3.1.4.8323329.584.1517875163.552929 +1.2.276.0.7230010.3.1.4.8323329.5842.1517875191.664868 +1.2.276.0.7230010.3.1.4.8323329.5843.1517875191.673586 +1.2.276.0.7230010.3.1.4.8323329.5844.1517875191.690379 +1.2.276.0.7230010.3.1.4.8323329.5845.1517875191.702488 +1.2.276.0.7230010.3.1.4.8323329.5846.1517875191.710559 +1.2.276.0.7230010.3.1.4.8323329.5847.1517875191.698196 +1.2.276.0.7230010.3.1.4.8323329.5848.1517875191.704616 +1.2.276.0.7230010.3.1.4.8323329.5849.1517875191.722828 
+1.2.276.0.7230010.3.1.4.8323329.5850.1517875191.735242 +1.2.276.0.7230010.3.1.4.8323329.5851.1517875191.729256 +1.2.276.0.7230010.3.1.4.8323329.585.1517875163.560240 +1.2.276.0.7230010.3.1.4.8323329.5852.1517875191.735562 +1.2.276.0.7230010.3.1.4.8323329.5853.1517875191.769180 +1.2.276.0.7230010.3.1.4.8323329.5854.1517875191.762250 +1.2.276.0.7230010.3.1.4.8323329.5855.1517875191.791791 +1.2.276.0.7230010.3.1.4.8323329.5856.1517875191.785271 +1.2.276.0.7230010.3.1.4.8323329.5857.1517875191.816283 +1.2.276.0.7230010.3.1.4.8323329.5858.1517875191.818159 +1.2.276.0.7230010.3.1.4.8323329.5859.1517875191.831031 +1.2.276.0.7230010.3.1.4.8323329.5860.1517875191.872579 +1.2.276.0.7230010.3.1.4.8323329.5861.1517875191.853578 +1.2.276.0.7230010.3.1.4.8323329.586.1517875163.569244 +1.2.276.0.7230010.3.1.4.8323329.5862.1517875191.859607 +1.2.276.0.7230010.3.1.4.8323329.5863.1517875191.863727 +1.2.276.0.7230010.3.1.4.8323329.5864.1517875191.874675 +1.2.276.0.7230010.3.1.4.8323329.5865.1517875191.889948 +1.2.276.0.7230010.3.1.4.8323329.5866.1517875191.879932 +1.2.276.0.7230010.3.1.4.8323329.5867.1517875191.895143 +1.2.276.0.7230010.3.1.4.8323329.5868.1517875191.908583 +1.2.276.0.7230010.3.1.4.8323329.5869.1517875191.937926 +1.2.276.0.7230010.3.1.4.8323329.5870.1517875191.961685 +1.2.276.0.7230010.3.1.4.8323329.5871.1517875191.986435 +1.2.276.0.7230010.3.1.4.8323329.587.1517875163.572858 +1.2.276.0.7230010.3.1.4.8323329.5872.1517875192.6373 +1.2.276.0.7230010.3.1.4.8323329.5873.1517875192.23873 +1.2.276.0.7230010.3.1.4.8323329.5874.1517875192.21238 +1.2.276.0.7230010.3.1.4.8323329.5875.1517875192.15497 +1.2.276.0.7230010.3.1.4.8323329.5876.1517875192.46265 +1.2.276.0.7230010.3.1.4.8323329.5877.1517875192.85942 +1.2.276.0.7230010.3.1.4.8323329.5878.1517875192.94158 +1.2.276.0.7230010.3.1.4.8323329.5879.1517875192.98770 +1.2.276.0.7230010.3.1.4.8323329.5880.1517875192.117431 +1.2.276.0.7230010.3.1.4.8323329.5881.1517875192.113345 +1.2.276.0.7230010.3.1.4.8323329.588.1517875163.573523 +1.2.276.0.7230010.3.1.4.8323329.5882.1517875192.114854 +1.2.276.0.7230010.3.1.4.8323329.5883.1517875192.140852 +1.2.276.0.7230010.3.1.4.8323329.5884.1517875192.132750 +1.2.276.0.7230010.3.1.4.8323329.5885.1517875192.139968 +1.2.276.0.7230010.3.1.4.8323329.5886.1517875192.167421 +1.2.276.0.7230010.3.1.4.8323329.5887.1517875192.188287 +1.2.276.0.7230010.3.1.4.8323329.5888.1517875192.218999 +1.2.276.0.7230010.3.1.4.8323329.5889.1517875192.229258 +1.2.276.0.7230010.3.1.4.8323329.5890.1517875192.254535 +1.2.276.0.7230010.3.1.4.8323329.5891.1517875192.271624 +1.2.276.0.7230010.3.1.4.8323329.589.1517875163.596264 +1.2.276.0.7230010.3.1.4.8323329.5892.1517875192.282733 +1.2.276.0.7230010.3.1.4.8323329.5893.1517875192.289767 +1.2.276.0.7230010.3.1.4.8323329.5894.1517875192.302761 +1.2.276.0.7230010.3.1.4.8323329.5895.1517875192.311883 +1.2.276.0.7230010.3.1.4.8323329.5896.1517875192.324656 +1.2.276.0.7230010.3.1.4.8323329.5897.1517875192.347294 +1.2.276.0.7230010.3.1.4.8323329.5898.1517875192.360759 +1.2.276.0.7230010.3.1.4.8323329.5899.1517875192.361136 +1.2.276.0.7230010.3.1.4.8323329.5900.1517875192.358098 +1.2.276.0.7230010.3.1.4.8323329.5901.1517875192.375124 +1.2.276.0.7230010.3.1.4.8323329.5902.1517875192.362851 +1.2.276.0.7230010.3.1.4.8323329.5903.1517875192.393499 +1.2.276.0.7230010.3.1.4.8323329.5904.1517875192.393587 +1.2.276.0.7230010.3.1.4.8323329.5905.1517875192.398201 +1.2.276.0.7230010.3.1.4.8323329.5906.1517875192.410470 +1.2.276.0.7230010.3.1.4.8323329.5907.1517875192.421269 
+1.2.276.0.7230010.3.1.4.8323329.5908.1517875192.433407 +1.2.276.0.7230010.3.1.4.8323329.5909.1517875192.458312 +1.2.276.0.7230010.3.1.4.8323329.5910.1517875192.469004 +1.2.276.0.7230010.3.1.4.8323329.5911.1517875192.469371 +1.2.276.0.7230010.3.1.4.8323329.591.1517875163.597744 +1.2.276.0.7230010.3.1.4.8323329.5912.1517875192.475170 +1.2.276.0.7230010.3.1.4.8323329.5913.1517875192.491327 +1.2.276.0.7230010.3.1.4.8323329.5914.1517875192.501641 +1.2.276.0.7230010.3.1.4.8323329.5915.1517875192.499989 +1.2.276.0.7230010.3.1.4.8323329.5916.1517875192.522778 +1.2.276.0.7230010.3.1.4.8323329.5917.1517875192.538017 +1.2.276.0.7230010.3.1.4.8323329.5918.1517875192.589345 +1.2.276.0.7230010.3.1.4.8323329.5919.1517875192.629867 +1.2.276.0.7230010.3.1.4.8323329.5920.1517875192.641146 +1.2.276.0.7230010.3.1.4.8323329.5921.1517875192.659936 +1.2.276.0.7230010.3.1.4.8323329.592.1517875163.596782 +1.2.276.0.7230010.3.1.4.8323329.5922.1517875192.678314 +1.2.276.0.7230010.3.1.4.8323329.5923.1517875192.680366 +1.2.276.0.7230010.3.1.4.8323329.5924.1517875192.687737 +1.2.276.0.7230010.3.1.4.8323329.5925.1517875192.696960 +1.2.276.0.7230010.3.1.4.8323329.5926.1517875192.690224 +1.2.276.0.7230010.3.1.4.8323329.5927.1517875192.727889 +1.2.276.0.7230010.3.1.4.8323329.5928.1517875192.727858 +1.2.276.0.7230010.3.1.4.8323329.5929.1517875192.745084 +1.2.276.0.7230010.3.1.4.8323329.5930.1517875192.760890 +1.2.276.0.7230010.3.1.4.8323329.5931.1517875192.893355 +1.2.276.0.7230010.3.1.4.8323329.593.1517875163.595805 +1.2.276.0.7230010.3.1.4.8323329.5932.1517875192.889920 +1.2.276.0.7230010.3.1.4.8323329.5933.1517875192.918374 +1.2.276.0.7230010.3.1.4.8323329.5934.1517875192.944204 +1.2.276.0.7230010.3.1.4.8323329.5935.1517875192.947028 +1.2.276.0.7230010.3.1.4.8323329.5936.1517875192.957072 +1.2.276.0.7230010.3.1.4.8323329.5937.1517875192.957160 +1.2.276.0.7230010.3.1.4.8323329.5938.1517875192.958188 +1.2.276.0.7230010.3.1.4.8323329.5939.1517875192.982448 +1.2.276.0.7230010.3.1.4.8323329.5940.1517875192.979975 +1.2.276.0.7230010.3.1.4.8323329.5941.1517875192.977222 +1.2.276.0.7230010.3.1.4.8323329.594.1517875163.596798 +1.2.276.0.7230010.3.1.4.8323329.5942.1517875192.994523 +1.2.276.0.7230010.3.1.4.8323329.5943.1517875192.988544 +1.2.276.0.7230010.3.1.4.8323329.5944.1517875192.999950 +1.2.276.0.7230010.3.1.4.8323329.5945.1517875193.3679 +1.2.276.0.7230010.3.1.4.8323329.5946.1517875192.992175 +1.2.276.0.7230010.3.1.4.8323329.5947.1517875193.11025 +1.2.276.0.7230010.3.1.4.8323329.5948.1517875193.25727 +1.2.276.0.7230010.3.1.4.8323329.5949.1517875193.24030 +1.2.276.0.7230010.3.1.4.8323329.5950.1517875193.19989 +1.2.276.0.7230010.3.1.4.8323329.5951.1517875193.26359 +1.2.276.0.7230010.3.1.4.8323329.595.1517875163.607376 +1.2.276.0.7230010.3.1.4.8323329.5952.1517875193.48091 +1.2.276.0.7230010.3.1.4.8323329.5953.1517875193.45019 +1.2.276.0.7230010.3.1.4.8323329.5954.1517875193.59697 +1.2.276.0.7230010.3.1.4.8323329.5955.1517875193.49512 +1.2.276.0.7230010.3.1.4.8323329.5956.1517875193.53053 +1.2.276.0.7230010.3.1.4.8323329.5957.1517875193.69736 +1.2.276.0.7230010.3.1.4.8323329.5958.1517875193.76650 +1.2.276.0.7230010.3.1.4.8323329.5959.1517875193.92258 +1.2.276.0.7230010.3.1.4.8323329.5960.1517875193.95401 +1.2.276.0.7230010.3.1.4.8323329.5961.1517875193.99881 +1.2.276.0.7230010.3.1.4.8323329.596.1517875163.613318 +1.2.276.0.7230010.3.1.4.8323329.5962.1517875193.122750 +1.2.276.0.7230010.3.1.4.8323329.5963.1517875193.120942 +1.2.276.0.7230010.3.1.4.8323329.5964.1517875193.142753 
+1.2.276.0.7230010.3.1.4.8323329.5965.1517875193.154889 +1.2.276.0.7230010.3.1.4.8323329.5966.1517875193.154952 +1.2.276.0.7230010.3.1.4.8323329.5967.1517875193.158996 +1.2.276.0.7230010.3.1.4.8323329.5968.1517875193.160532 +1.2.276.0.7230010.3.1.4.8323329.5969.1517875193.190300 +1.2.276.0.7230010.3.1.4.8323329.5970.1517875193.293359 +1.2.276.0.7230010.3.1.4.8323329.5971.1517875193.313021 +1.2.276.0.7230010.3.1.4.8323329.597.1517875163.616565 +1.2.276.0.7230010.3.1.4.8323329.5972.1517875193.343156 +1.2.276.0.7230010.3.1.4.8323329.5973.1517875193.348634 +1.2.276.0.7230010.3.1.4.8323329.5974.1517875193.372772 +1.2.276.0.7230010.3.1.4.8323329.5975.1517875193.372533 +1.2.276.0.7230010.3.1.4.8323329.5976.1517875193.380698 +1.2.276.0.7230010.3.1.4.8323329.5977.1517875193.416169 +1.2.276.0.7230010.3.1.4.8323329.5978.1517875193.397387 +1.2.276.0.7230010.3.1.4.8323329.5979.1517875193.417486 +1.2.276.0.7230010.3.1.4.8323329.5980.1517875193.460449 +1.2.276.0.7230010.3.1.4.8323329.5981.1517875193.442637 +1.2.276.0.7230010.3.1.4.8323329.598.1517875163.618268 +1.2.276.0.7230010.3.1.4.8323329.5982.1517875193.473187 +1.2.276.0.7230010.3.1.4.8323329.5983.1517875193.477675 +1.2.276.0.7230010.3.1.4.8323329.5984.1517875193.471141 +1.2.276.0.7230010.3.1.4.8323329.5985.1517875193.508182 +1.2.276.0.7230010.3.1.4.8323329.5986.1517875193.521679 +1.2.276.0.7230010.3.1.4.8323329.5987.1517875193.536736 +1.2.276.0.7230010.3.1.4.8323329.5988.1517875193.557148 +1.2.276.0.7230010.3.1.4.8323329.5989.1517875193.552210 +1.2.276.0.7230010.3.1.4.8323329.5990.1517875193.572930 +1.2.276.0.7230010.3.1.4.8323329.5991.1517875193.595277 +1.2.276.0.7230010.3.1.4.8323329.599.1517875163.619318 +1.2.276.0.7230010.3.1.4.8323329.5992.1517875193.622078 +1.2.276.0.7230010.3.1.4.8323329.5993.1517875193.634970 +1.2.276.0.7230010.3.1.4.8323329.5994.1517875193.658844 +1.2.276.0.7230010.3.1.4.8323329.5995.1517875193.697651 +1.2.276.0.7230010.3.1.4.8323329.5996.1517875193.695716 +1.2.276.0.7230010.3.1.4.8323329.5997.1517875193.700184 +1.2.276.0.7230010.3.1.4.8323329.5998.1517875193.706128 +1.2.276.0.7230010.3.1.4.8323329.5999.1517875193.743454 +1.2.276.0.7230010.3.1.4.8323329.6000.1517875193.775354 +1.2.276.0.7230010.3.1.4.8323329.6001.1517875193.804351 +1.2.276.0.7230010.3.1.4.8323329.600.1517875163.632969 +1.2.276.0.7230010.3.1.4.8323329.6002.1517875193.795315 +1.2.276.0.7230010.3.1.4.8323329.6003.1517875193.788198 +1.2.276.0.7230010.3.1.4.8323329.6004.1517875193.802651 +1.2.276.0.7230010.3.1.4.8323329.6005.1517875193.803666 +1.2.276.0.7230010.3.1.4.8323329.6006.1517875193.812881 +1.2.276.0.7230010.3.1.4.8323329.6007.1517875193.809054 +1.2.276.0.7230010.3.1.4.8323329.6008.1517875193.828798 +1.2.276.0.7230010.3.1.4.8323329.6009.1517875193.839979 +1.2.276.0.7230010.3.1.4.8323329.6010.1517875193.838877 +1.2.276.0.7230010.3.1.4.8323329.6011.1517875193.677 +1.2.276.0.7230010.3.1.4.8323329.601.1517875163.632477 +1.2.276.0.7230010.3.1.4.8323329.6012.1517875193.983866 +1.2.276.0.7230010.3.1.4.8323329.6013.1517875193.976828 +1.2.276.0.7230010.3.1.4.8323329.6014.1517875193.760 +1.2.276.0.7230010.3.1.4.8323329.6015.1517875194.6201 +1.2.276.0.7230010.3.1.4.8323329.6016.1517875194.37919 +1.2.276.0.7230010.3.1.4.8323329.6017.1517875194.5243 +1.2.276.0.7230010.3.1.4.8323329.6018.1517875194.45959 +1.2.276.0.7230010.3.1.4.8323329.6019.1517875194.19491 +1.2.276.0.7230010.3.1.4.8323329.6020.1517875194.26435 +1.2.276.0.7230010.3.1.4.8323329.6021.1517875194.33002 +1.2.276.0.7230010.3.1.4.8323329.602.1517875163.637058 
+1.2.276.0.7230010.3.1.4.8323329.6022.1517875194.37752 +1.2.276.0.7230010.3.1.4.8323329.6023.1517875194.29811 +1.2.276.0.7230010.3.1.4.8323329.6024.1517875194.37124 +1.2.276.0.7230010.3.1.4.8323329.6025.1517875194.67340 +1.2.276.0.7230010.3.1.4.8323329.6026.1517875194.75724 +1.2.276.0.7230010.3.1.4.8323329.6027.1517875194.75647 +1.2.276.0.7230010.3.1.4.8323329.6028.1517875194.74465 +1.2.276.0.7230010.3.1.4.8323329.6029.1517875194.97842 +1.2.276.0.7230010.3.1.4.8323329.6031.1517875194.107594 +1.2.276.0.7230010.3.1.4.8323329.603.1517875163.641240 +1.2.276.0.7230010.3.1.4.8323329.6032.1517875194.115011 +1.2.276.0.7230010.3.1.4.8323329.6033.1517875194.121343 +1.2.276.0.7230010.3.1.4.8323329.6034.1517875194.144763 +1.2.276.0.7230010.3.1.4.8323329.6035.1517875194.148457 +1.2.276.0.7230010.3.1.4.8323329.6036.1517875194.138959 +1.2.276.0.7230010.3.1.4.8323329.6037.1517875194.177032 +1.2.276.0.7230010.3.1.4.8323329.6038.1517875194.162530 +1.2.276.0.7230010.3.1.4.8323329.6039.1517875194.175065 +1.2.276.0.7230010.3.1.4.8323329.6040.1517875194.177632 +1.2.276.0.7230010.3.1.4.8323329.6041.1517875194.194971 +1.2.276.0.7230010.3.1.4.8323329.604.1517875163.641978 +1.2.276.0.7230010.3.1.4.8323329.6042.1517875194.200758 +1.2.276.0.7230010.3.1.4.8323329.6043.1517875194.208786 +1.2.276.0.7230010.3.1.4.8323329.6044.1517875194.208165 +1.2.276.0.7230010.3.1.4.8323329.6045.1517875194.227183 +1.2.276.0.7230010.3.1.4.8323329.6046.1517875194.238374 +1.2.276.0.7230010.3.1.4.8323329.6047.1517875194.258571 +1.2.276.0.7230010.3.1.4.8323329.6048.1517875194.294575 +1.2.276.0.7230010.3.1.4.8323329.6049.1517875194.330299 +1.2.276.0.7230010.3.1.4.8323329.6050.1517875194.368208 +1.2.276.0.7230010.3.1.4.8323329.6051.1517875194.389336 +1.2.276.0.7230010.3.1.4.8323329.605.1517875163.658162 +1.2.276.0.7230010.3.1.4.8323329.6052.1517875195.374792 +1.2.276.0.7230010.3.1.4.8323329.6053.1517875195.381257 +1.2.276.0.7230010.3.1.4.8323329.6054.1517875195.426375 +1.2.276.0.7230010.3.1.4.8323329.6055.1517875195.431985 +1.2.276.0.7230010.3.1.4.8323329.6056.1517875195.468079 +1.2.276.0.7230010.3.1.4.8323329.6057.1517875195.474178 +1.2.276.0.7230010.3.1.4.8323329.6058.1517875195.508522 +1.2.276.0.7230010.3.1.4.8323329.6059.1517875195.666364 +1.2.276.0.7230010.3.1.4.8323329.6060.1517875195.681779 +1.2.276.0.7230010.3.1.4.8323329.6061.1517875195.712169 +1.2.276.0.7230010.3.1.4.8323329.606.1517875163.664448 +1.2.276.0.7230010.3.1.4.8323329.6062.1517875195.751534 +1.2.276.0.7230010.3.1.4.8323329.6063.1517875195.897680 +1.2.276.0.7230010.3.1.4.8323329.6064.1517875195.903355 +1.2.276.0.7230010.3.1.4.8323329.6065.1517875195.904347 +1.2.276.0.7230010.3.1.4.8323329.6066.1517875195.913340 +1.2.276.0.7230010.3.1.4.8323329.6067.1517875195.915618 +1.2.276.0.7230010.3.1.4.8323329.6068.1517875195.901526 +1.2.276.0.7230010.3.1.4.8323329.6069.1517875195.931485 +1.2.276.0.7230010.3.1.4.8323329.6070.1517875195.938695 +1.2.276.0.7230010.3.1.4.8323329.6071.1517875195.989638 +1.2.276.0.7230010.3.1.4.8323329.607.1517875163.665814 +1.2.276.0.7230010.3.1.4.8323329.6072.1517875196.35305 +1.2.276.0.7230010.3.1.4.8323329.6073.1517875196.43966 +1.2.276.0.7230010.3.1.4.8323329.6074.1517875196.90815 +1.2.276.0.7230010.3.1.4.8323329.6075.1517875196.140940 +1.2.276.0.7230010.3.1.4.8323329.6076.1517875196.191804 +1.2.276.0.7230010.3.1.4.8323329.6077.1517875196.219087 +1.2.276.0.7230010.3.1.4.8323329.6078.1517875196.221892 +1.2.276.0.7230010.3.1.4.8323329.6079.1517875196.264177 +1.2.276.0.7230010.3.1.4.8323329.6080.1517875196.313091 
+1.2.276.0.7230010.3.1.4.8323329.6081.1517875196.408460 +1.2.276.0.7230010.3.1.4.8323329.608.1517875163.669595 +1.2.276.0.7230010.3.1.4.8323329.6082.1517875196.407031 +1.2.276.0.7230010.3.1.4.8323329.6083.1517875196.423941 +1.2.276.0.7230010.3.1.4.8323329.6084.1517875196.445843 +1.2.276.0.7230010.3.1.4.8323329.6085.1517875196.435602 +1.2.276.0.7230010.3.1.4.8323329.6086.1517875196.453752 +1.2.276.0.7230010.3.1.4.8323329.6087.1517875196.470878 +1.2.276.0.7230010.3.1.4.8323329.6088.1517875196.476825 +1.2.276.0.7230010.3.1.4.8323329.6089.1517875196.493087 +1.2.276.0.7230010.3.1.4.8323329.6090.1517875196.494557 +1.2.276.0.7230010.3.1.4.8323329.6091.1517875196.508212 +1.2.276.0.7230010.3.1.4.8323329.609.1517875163.680634 +1.2.276.0.7230010.3.1.4.8323329.6092.1517875196.508731 +1.2.276.0.7230010.3.1.4.8323329.6093.1517875196.519322 +1.2.276.0.7230010.3.1.4.8323329.6094.1517875196.518215 +1.2.276.0.7230010.3.1.4.8323329.6095.1517875196.520107 +1.2.276.0.7230010.3.1.4.8323329.6096.1517875196.514992 +1.2.276.0.7230010.3.1.4.8323329.6097.1517875196.527134 +1.2.276.0.7230010.3.1.4.8323329.6098.1517875196.532890 +1.2.276.0.7230010.3.1.4.8323329.6099.1517875196.552935 +1.2.276.0.7230010.3.1.4.8323329.6100.1517875196.546915 +1.2.276.0.7230010.3.1.4.8323329.6101.1517875196.542711 +1.2.276.0.7230010.3.1.4.8323329.610.1517875163.682763 +1.2.276.0.7230010.3.1.4.8323329.6102.1517875196.552550 +1.2.276.0.7230010.3.1.4.8323329.6103.1517875196.550241 +1.2.276.0.7230010.3.1.4.8323329.6104.1517875196.561468 +1.2.276.0.7230010.3.1.4.8323329.6105.1517875196.572273 +1.2.276.0.7230010.3.1.4.8323329.6106.1517875196.567499 +1.2.276.0.7230010.3.1.4.8323329.6107.1517875196.576812 +1.2.276.0.7230010.3.1.4.8323329.6108.1517875196.573358 +1.2.276.0.7230010.3.1.4.8323329.6109.1517875196.574624 +1.2.276.0.7230010.3.1.4.8323329.6110.1517875196.589278 +1.2.276.0.7230010.3.1.4.8323329.6111.1517875196.592604 +1.2.276.0.7230010.3.1.4.8323329.611.1517875163.682790 +1.2.276.0.7230010.3.1.4.8323329.6112.1517875196.590742 +1.2.276.0.7230010.3.1.4.8323329.6113.1517875196.608064 +1.2.276.0.7230010.3.1.4.8323329.6114.1517875196.612017 +1.2.276.0.7230010.3.1.4.8323329.6115.1517875196.616758 +1.2.276.0.7230010.3.1.4.8323329.6116.1517875196.610774 +1.2.276.0.7230010.3.1.4.8323329.6117.1517875196.621187 +1.2.276.0.7230010.3.1.4.8323329.6118.1517875196.627498 +1.2.276.0.7230010.3.1.4.8323329.6119.1517875196.635530 +1.2.276.0.7230010.3.1.4.8323329.6120.1517875196.634319 +1.2.276.0.7230010.3.1.4.8323329.6121.1517875196.630608 +1.2.276.0.7230010.3.1.4.8323329.612.1517875163.694286 +1.2.276.0.7230010.3.1.4.8323329.6122.1517875196.627459 +1.2.276.0.7230010.3.1.4.8323329.6123.1517875196.651624 +1.2.276.0.7230010.3.1.4.8323329.6124.1517875196.640341 +1.2.276.0.7230010.3.1.4.8323329.6125.1517875196.647899 +1.2.276.0.7230010.3.1.4.8323329.6126.1517875196.661711 +1.2.276.0.7230010.3.1.4.8323329.6127.1517875196.644318 +1.2.276.0.7230010.3.1.4.8323329.6128.1517875196.661639 +1.2.276.0.7230010.3.1.4.8323329.6129.1517875196.661887 +1.2.276.0.7230010.3.1.4.8323329.6130.1517875196.663062 +1.2.276.0.7230010.3.1.4.8323329.6131.1517875196.657575 +1.2.276.0.7230010.3.1.4.8323329.613.1517875163.694257 +1.2.276.0.7230010.3.1.4.8323329.6132.1517875196.663434 +1.2.276.0.7230010.3.1.4.8323329.6133.1517875196.673415 +1.2.276.0.7230010.3.1.4.8323329.6134.1517875196.681385 +1.2.276.0.7230010.3.1.4.8323329.6135.1517875196.685123 +1.2.276.0.7230010.3.1.4.8323329.6136.1517875196.688265 +1.2.276.0.7230010.3.1.4.8323329.6137.1517875196.689784 
+1.2.276.0.7230010.3.1.4.8323329.6138.1517875196.696592 +1.2.276.0.7230010.3.1.4.8323329.6139.1517875196.698882 +1.2.276.0.7230010.3.1.4.8323329.6140.1517875196.697884 +1.2.276.0.7230010.3.1.4.8323329.6141.1517875196.714183 +1.2.276.0.7230010.3.1.4.8323329.614.1517875163.702453 +1.2.276.0.7230010.3.1.4.8323329.6142.1517875196.718800 +1.2.276.0.7230010.3.1.4.8323329.6143.1517875196.719375 +1.2.276.0.7230010.3.1.4.8323329.6144.1517875196.721781 +1.2.276.0.7230010.3.1.4.8323329.6145.1517875196.725300 +1.2.276.0.7230010.3.1.4.8323329.6146.1517875196.740093 +1.2.276.0.7230010.3.1.4.8323329.6147.1517875196.739897 +1.2.276.0.7230010.3.1.4.8323329.6148.1517875196.740512 +1.2.276.0.7230010.3.1.4.8323329.6149.1517875196.742562 +1.2.276.0.7230010.3.1.4.8323329.6150.1517875196.762551 +1.2.276.0.7230010.3.1.4.8323329.6151.1517875196.762570 +1.2.276.0.7230010.3.1.4.8323329.615.1517875163.703375 +1.2.276.0.7230010.3.1.4.8323329.6152.1517875196.760058 +1.2.276.0.7230010.3.1.4.8323329.6153.1517875196.761180 +1.2.276.0.7230010.3.1.4.8323329.6154.1517875196.761169 +1.2.276.0.7230010.3.1.4.8323329.6155.1517875196.770487 +1.2.276.0.7230010.3.1.4.8323329.6156.1517875196.780017 +1.2.276.0.7230010.3.1.4.8323329.6157.1517875196.782799 +1.2.276.0.7230010.3.1.4.8323329.6158.1517875196.784357 +1.2.276.0.7230010.3.1.4.8323329.6159.1517875196.803533 +1.2.276.0.7230010.3.1.4.8323329.6160.1517875196.806852 +1.2.276.0.7230010.3.1.4.8323329.6161.1517875196.804226 +1.2.276.0.7230010.3.1.4.8323329.616.1517875163.721499 +1.2.276.0.7230010.3.1.4.8323329.6162.1517875196.811008 +1.2.276.0.7230010.3.1.4.8323329.6163.1517875196.815187 +1.2.276.0.7230010.3.1.4.8323329.6164.1517875196.817931 +1.2.276.0.7230010.3.1.4.8323329.6165.1517875196.822622 +1.2.276.0.7230010.3.1.4.8323329.6166.1517875196.829660 +1.2.276.0.7230010.3.1.4.8323329.6167.1517875196.831069 +1.2.276.0.7230010.3.1.4.8323329.6168.1517875196.836589 +1.2.276.0.7230010.3.1.4.8323329.6169.1517875196.836614 +1.2.276.0.7230010.3.1.4.8323329.6170.1517875196.850168 +1.2.276.0.7230010.3.1.4.8323329.6171.1517875196.859298 +1.2.276.0.7230010.3.1.4.8323329.617.1517875163.727808 +1.2.276.0.7230010.3.1.4.8323329.6172.1517875196.862409 +1.2.276.0.7230010.3.1.4.8323329.6173.1517875196.872987 +1.2.276.0.7230010.3.1.4.8323329.6174.1517875196.874606 +1.2.276.0.7230010.3.1.4.8323329.6175.1517875196.877372 +1.2.276.0.7230010.3.1.4.8323329.6176.1517875196.874889 +1.2.276.0.7230010.3.1.4.8323329.6177.1517875196.880733 +1.2.276.0.7230010.3.1.4.8323329.6178.1517875196.885349 +1.2.276.0.7230010.3.1.4.8323329.6179.1517875196.887828 +1.2.276.0.7230010.3.1.4.8323329.6180.1517875196.895756 +1.2.276.0.7230010.3.1.4.8323329.6181.1517875196.899823 +1.2.276.0.7230010.3.1.4.8323329.618.1517875163.721499 +1.2.276.0.7230010.3.1.4.8323329.6182.1517875196.903727 +1.2.276.0.7230010.3.1.4.8323329.6183.1517875196.901521 +1.2.276.0.7230010.3.1.4.8323329.6184.1517875196.919704 +1.2.276.0.7230010.3.1.4.8323329.6185.1517875196.925066 +1.2.276.0.7230010.3.1.4.8323329.6186.1517875196.926038 +1.2.276.0.7230010.3.1.4.8323329.6187.1517875196.934561 +1.2.276.0.7230010.3.1.4.8323329.6188.1517875196.937263 +1.2.276.0.7230010.3.1.4.8323329.6189.1517875196.955723 +1.2.276.0.7230010.3.1.4.8323329.6190.1517875196.947077 +1.2.276.0.7230010.3.1.4.8323329.6191.1517875196.965463 +1.2.276.0.7230010.3.1.4.8323329.619.1517875163.724366 +1.2.276.0.7230010.3.1.4.8323329.6192.1517875196.961139 +1.2.276.0.7230010.3.1.4.8323329.6193.1517875196.959059 +1.2.276.0.7230010.3.1.4.8323329.6194.1517875196.965877 
+1.2.276.0.7230010.3.1.4.8323329.6195.1517875196.972095 +1.2.276.0.7230010.3.1.4.8323329.6196.1517875196.975870 +1.2.276.0.7230010.3.1.4.8323329.6197.1517875196.977024 +1.2.276.0.7230010.3.1.4.8323329.6198.1517875196.983985 +1.2.276.0.7230010.3.1.4.8323329.6199.1517875196.982608 +1.2.276.0.7230010.3.1.4.8323329.6200.1517875196.988814 +1.2.276.0.7230010.3.1.4.8323329.6201.1517875196.992985 +1.2.276.0.7230010.3.1.4.8323329.620.1517875163.724446 +1.2.276.0.7230010.3.1.4.8323329.6202.1517875196.998681 +1.2.276.0.7230010.3.1.4.8323329.6203.1517875197.7280 +1.2.276.0.7230010.3.1.4.8323329.6204.1517875197.17953 +1.2.276.0.7230010.3.1.4.8323329.6205.1517875197.46907 +1.2.276.0.7230010.3.1.4.8323329.6206.1517875197.39980 +1.2.276.0.7230010.3.1.4.8323329.6207.1517875197.39635 +1.2.276.0.7230010.3.1.4.8323329.6208.1517875197.51423 +1.2.276.0.7230010.3.1.4.8323329.6209.1517875197.56795 +1.2.276.0.7230010.3.1.4.8323329.6210.1517875197.63606 +1.2.276.0.7230010.3.1.4.8323329.6211.1517875197.74627 +1.2.276.0.7230010.3.1.4.8323329.6212.1517875197.83090 +1.2.276.0.7230010.3.1.4.8323329.6213.1517875197.84267 +1.2.276.0.7230010.3.1.4.8323329.6214.1517875197.83920 +1.2.276.0.7230010.3.1.4.8323329.6215.1517875197.106720 +1.2.276.0.7230010.3.1.4.8323329.6216.1517875197.104164 +1.2.276.0.7230010.3.1.4.8323329.6217.1517875197.104164 +1.2.276.0.7230010.3.1.4.8323329.6218.1517875197.129025 +1.2.276.0.7230010.3.1.4.8323329.6219.1517875197.125573 +1.2.276.0.7230010.3.1.4.8323329.6220.1517875197.172313 +1.2.276.0.7230010.3.1.4.8323329.6221.1517875197.172420 +1.2.276.0.7230010.3.1.4.8323329.622.1517875163.738840 +1.2.276.0.7230010.3.1.4.8323329.6222.1517875197.135575 +1.2.276.0.7230010.3.1.4.8323329.6223.1517875197.147751 +1.2.276.0.7230010.3.1.4.8323329.6224.1517875197.167581 +1.2.276.0.7230010.3.1.4.8323329.6225.1517875197.149304 +1.2.276.0.7230010.3.1.4.8323329.6226.1517875197.151260 +1.2.276.0.7230010.3.1.4.8323329.6227.1517875197.171264 +1.2.276.0.7230010.3.1.4.8323329.6228.1517875197.174249 +1.2.276.0.7230010.3.1.4.8323329.6229.1517875197.172143 +1.2.276.0.7230010.3.1.4.8323329.6230.1517875197.189115 +1.2.276.0.7230010.3.1.4.8323329.6231.1517875197.195718 +1.2.276.0.7230010.3.1.4.8323329.623.1517875163.752341 +1.2.276.0.7230010.3.1.4.8323329.6232.1517875197.200793 +1.2.276.0.7230010.3.1.4.8323329.6233.1517875197.206791 +1.2.276.0.7230010.3.1.4.8323329.6234.1517875197.196765 +1.2.276.0.7230010.3.1.4.8323329.6235.1517875197.217889 +1.2.276.0.7230010.3.1.4.8323329.6236.1517875197.214350 +1.2.276.0.7230010.3.1.4.8323329.6237.1517875197.216838 +1.2.276.0.7230010.3.1.4.8323329.6238.1517875197.222388 +1.2.276.0.7230010.3.1.4.8323329.6239.1517875197.229217 +1.2.276.0.7230010.3.1.4.8323329.6240.1517875197.243744 +1.2.276.0.7230010.3.1.4.8323329.6241.1517875197.241114 +1.2.276.0.7230010.3.1.4.8323329.624.1517875163.750709 +1.2.276.0.7230010.3.1.4.8323329.6242.1517875197.255329 +1.2.276.0.7230010.3.1.4.8323329.6243.1517875197.259664 +1.2.276.0.7230010.3.1.4.8323329.6244.1517875197.271181 +1.2.276.0.7230010.3.1.4.8323329.6245.1517875197.270411 +1.2.276.0.7230010.3.1.4.8323329.6246.1517875197.280566 +1.2.276.0.7230010.3.1.4.8323329.6247.1517875197.281078 +1.2.276.0.7230010.3.1.4.8323329.6248.1517875197.287087 +1.2.276.0.7230010.3.1.4.8323329.6249.1517875197.290574 +1.2.276.0.7230010.3.1.4.8323329.6250.1517875197.302746 +1.2.276.0.7230010.3.1.4.8323329.6251.1517875197.309237 +1.2.276.0.7230010.3.1.4.8323329.625.1517875163.758884 +1.2.276.0.7230010.3.1.4.8323329.6252.1517875197.308139 
+1.2.276.0.7230010.3.1.4.8323329.6253.1517875197.315372 +1.2.276.0.7230010.3.1.4.8323329.6254.1517875197.323726 +1.2.276.0.7230010.3.1.4.8323329.6255.1517875197.328963 +1.2.276.0.7230010.3.1.4.8323329.6256.1517875197.336110 +1.2.276.0.7230010.3.1.4.8323329.6257.1517875197.339524 +1.2.276.0.7230010.3.1.4.8323329.6258.1517875197.334046 +1.2.276.0.7230010.3.1.4.8323329.6259.1517875197.346009 +1.2.276.0.7230010.3.1.4.8323329.6260.1517875197.348051 +1.2.276.0.7230010.3.1.4.8323329.6261.1517875197.346975 +1.2.276.0.7230010.3.1.4.8323329.626.1517875163.778371 +1.2.276.0.7230010.3.1.4.8323329.6262.1517875197.366218 +1.2.276.0.7230010.3.1.4.8323329.6263.1517875197.363527 +1.2.276.0.7230010.3.1.4.8323329.6264.1517875197.361691 +1.2.276.0.7230010.3.1.4.8323329.6265.1517875197.370012 +1.2.276.0.7230010.3.1.4.8323329.6266.1517875197.375980 +1.2.276.0.7230010.3.1.4.8323329.6267.1517875197.382679 +1.2.276.0.7230010.3.1.4.8323329.6268.1517875197.376736 +1.2.276.0.7230010.3.1.4.8323329.6269.1517875197.383445 +1.2.276.0.7230010.3.1.4.8323329.6270.1517875197.383864 +1.2.276.0.7230010.3.1.4.8323329.6271.1517875197.378304 +1.2.276.0.7230010.3.1.4.8323329.627.1517875163.778449 +1.2.276.0.7230010.3.1.4.8323329.6272.1517875197.397550 +1.2.276.0.7230010.3.1.4.8323329.6273.1517875197.395233 +1.2.276.0.7230010.3.1.4.8323329.6274.1517875197.410023 +1.2.276.0.7230010.3.1.4.8323329.6275.1517875197.411005 +1.2.276.0.7230010.3.1.4.8323329.6276.1517875197.414551 +1.2.276.0.7230010.3.1.4.8323329.6277.1517875197.415485 +1.2.276.0.7230010.3.1.4.8323329.6278.1517875197.409310 +1.2.276.0.7230010.3.1.4.8323329.6279.1517875197.429307 +1.2.276.0.7230010.3.1.4.8323329.6280.1517875197.424048 +1.2.276.0.7230010.3.1.4.8323329.6281.1517875197.432941 +1.2.276.0.7230010.3.1.4.8323329.628.1517875163.776070 +1.2.276.0.7230010.3.1.4.8323329.6282.1517875197.431265 +1.2.276.0.7230010.3.1.4.8323329.6283.1517875197.438469 +1.2.276.0.7230010.3.1.4.8323329.6284.1517875197.440190 +1.2.276.0.7230010.3.1.4.8323329.6285.1517875197.447481 +1.2.276.0.7230010.3.1.4.8323329.6286.1517875197.448114 +1.2.276.0.7230010.3.1.4.8323329.6287.1517875197.447990 +1.2.276.0.7230010.3.1.4.8323329.6288.1517875197.459474 +1.2.276.0.7230010.3.1.4.8323329.6289.1517875197.459157 +1.2.276.0.7230010.3.1.4.8323329.6290.1517875197.455787 +1.2.276.0.7230010.3.1.4.8323329.6291.1517875197.459402 +1.2.276.0.7230010.3.1.4.8323329.629.1517875163.794818 +1.2.276.0.7230010.3.1.4.8323329.6292.1517875197.471780 +1.2.276.0.7230010.3.1.4.8323329.6293.1517875197.472824 +1.2.276.0.7230010.3.1.4.8323329.6294.1517875197.473137 +1.2.276.0.7230010.3.1.4.8323329.6295.1517875197.470575 +1.2.276.0.7230010.3.1.4.8323329.6296.1517875197.477543 +1.2.276.0.7230010.3.1.4.8323329.6297.1517875197.492232 +1.2.276.0.7230010.3.1.4.8323329.6298.1517875197.486753 +1.2.276.0.7230010.3.1.4.8323329.6299.1517875197.492203 +1.2.276.0.7230010.3.1.4.8323329.6300.1517875197.498602 +1.2.276.0.7230010.3.1.4.8323329.6301.1517875197.493580 +1.2.276.0.7230010.3.1.4.8323329.630.1517875163.792432 +1.2.276.0.7230010.3.1.4.8323329.6302.1517875197.494746 +1.2.276.0.7230010.3.1.4.8323329.6303.1517875197.498147 +1.2.276.0.7230010.3.1.4.8323329.6304.1517875197.506126 +1.2.276.0.7230010.3.1.4.8323329.6305.1517875197.518098 +1.2.276.0.7230010.3.1.4.8323329.6306.1517875197.520463 +1.2.276.0.7230010.3.1.4.8323329.6307.1517875197.531851 +1.2.276.0.7230010.3.1.4.8323329.6308.1517875197.534859 +1.2.276.0.7230010.3.1.4.8323329.6309.1517875197.551716 +1.2.276.0.7230010.3.1.4.8323329.6310.1517875197.547271 
+1.2.276.0.7230010.3.1.4.8323329.6311.1517875197.551144 +1.2.276.0.7230010.3.1.4.8323329.631.1517875163.801528 +1.2.276.0.7230010.3.1.4.8323329.6312.1517875197.557649 +1.2.276.0.7230010.3.1.4.8323329.6313.1517875197.567291 +1.2.276.0.7230010.3.1.4.8323329.6314.1517875197.568407 +1.2.276.0.7230010.3.1.4.8323329.6315.1517875197.565267 +1.2.276.0.7230010.3.1.4.8323329.6316.1517875197.577463 +1.2.276.0.7230010.3.1.4.8323329.6317.1517875197.593899 +1.2.276.0.7230010.3.1.4.8323329.6318.1517875197.594707 +1.2.276.0.7230010.3.1.4.8323329.6319.1517875197.587372 +1.2.276.0.7230010.3.1.4.8323329.6320.1517875197.596561 +1.2.276.0.7230010.3.1.4.8323329.6321.1517875197.599516 +1.2.276.0.7230010.3.1.4.8323329.632.1517875163.806564 +1.2.276.0.7230010.3.1.4.8323329.6322.1517875197.599558 +1.2.276.0.7230010.3.1.4.8323329.6323.1517875197.605695 +1.2.276.0.7230010.3.1.4.8323329.6324.1517875197.615634 +1.2.276.0.7230010.3.1.4.8323329.6325.1517875197.622786 +1.2.276.0.7230010.3.1.4.8323329.6326.1517875197.630277 +1.2.276.0.7230010.3.1.4.8323329.6327.1517875197.636226 +1.2.276.0.7230010.3.1.4.8323329.6328.1517875197.648207 +1.2.276.0.7230010.3.1.4.8323329.6329.1517875197.653059 +1.2.276.0.7230010.3.1.4.8323329.6330.1517875197.657242 +1.2.276.0.7230010.3.1.4.8323329.6331.1517875197.658596 +1.2.276.0.7230010.3.1.4.8323329.633.1517875163.803794 +1.2.276.0.7230010.3.1.4.8323329.6332.1517875197.672027 +1.2.276.0.7230010.3.1.4.8323329.6333.1517875197.672027 +1.2.276.0.7230010.3.1.4.8323329.6334.1517875197.669105 +1.2.276.0.7230010.3.1.4.8323329.6335.1517875197.680780 +1.2.276.0.7230010.3.1.4.8323329.6336.1517875197.684382 +1.2.276.0.7230010.3.1.4.8323329.6337.1517875197.692126 +1.2.276.0.7230010.3.1.4.8323329.6338.1517875197.693438 +1.2.276.0.7230010.3.1.4.8323329.6339.1517875197.694880 +1.2.276.0.7230010.3.1.4.8323329.6340.1517875197.696624 +1.2.276.0.7230010.3.1.4.8323329.6341.1517875197.710656 +1.2.276.0.7230010.3.1.4.8323329.634.1517875163.807796 +1.2.276.0.7230010.3.1.4.8323329.6342.1517875197.715974 +1.2.276.0.7230010.3.1.4.8323329.6343.1517875197.715116 +1.2.276.0.7230010.3.1.4.8323329.6344.1517875197.715613 +1.2.276.0.7230010.3.1.4.8323329.6345.1517875197.718900 +1.2.276.0.7230010.3.1.4.8323329.6346.1517875197.726308 +1.2.276.0.7230010.3.1.4.8323329.6347.1517875197.737091 +1.2.276.0.7230010.3.1.4.8323329.6348.1517875197.741356 +1.2.276.0.7230010.3.1.4.8323329.6349.1517875197.745174 +1.2.276.0.7230010.3.1.4.8323329.6350.1517875197.744359 +1.2.276.0.7230010.3.1.4.8323329.6351.1517875197.762902 +1.2.276.0.7230010.3.1.4.8323329.635.1517875163.816631 +1.2.276.0.7230010.3.1.4.8323329.6352.1517875197.763294 +1.2.276.0.7230010.3.1.4.8323329.6353.1517875197.766354 +1.2.276.0.7230010.3.1.4.8323329.6354.1517875197.767045 +1.2.276.0.7230010.3.1.4.8323329.6355.1517875197.778464 +1.2.276.0.7230010.3.1.4.8323329.6356.1517875197.786674 +1.2.276.0.7230010.3.1.4.8323329.6357.1517875197.797271 +1.2.276.0.7230010.3.1.4.8323329.6358.1517875197.790842 +1.2.276.0.7230010.3.1.4.8323329.6359.1517875197.803699 +1.2.276.0.7230010.3.1.4.8323329.6360.1517875197.797566 +1.2.276.0.7230010.3.1.4.8323329.6361.1517875197.804479 +1.2.276.0.7230010.3.1.4.8323329.636.1517875163.821345 +1.2.276.0.7230010.3.1.4.8323329.6362.1517875197.799630 +1.2.276.0.7230010.3.1.4.8323329.6363.1517875197.804690 +1.2.276.0.7230010.3.1.4.8323329.6364.1517875197.813376 +1.2.276.0.7230010.3.1.4.8323329.6365.1517875197.815280 +1.2.276.0.7230010.3.1.4.8323329.6366.1517875197.821386 +1.2.276.0.7230010.3.1.4.8323329.6367.1517875197.839026 
+1.2.276.0.7230010.3.1.4.8323329.6368.1517875197.827723 +1.2.276.0.7230010.3.1.4.8323329.6369.1517875197.827983 +1.2.276.0.7230010.3.1.4.8323329.6370.1517875197.841736 +1.2.276.0.7230010.3.1.4.8323329.6371.1517875197.833904 +1.2.276.0.7230010.3.1.4.8323329.637.1517875163.824884 +1.2.276.0.7230010.3.1.4.8323329.6372.1517875197.842374 +1.2.276.0.7230010.3.1.4.8323329.6373.1517875197.849069 +1.2.276.0.7230010.3.1.4.8323329.6374.1517875197.849125 +1.2.276.0.7230010.3.1.4.8323329.6375.1517875197.861521 +1.2.276.0.7230010.3.1.4.8323329.6376.1517875197.859508 +1.2.276.0.7230010.3.1.4.8323329.6377.1517875197.864983 +1.2.276.0.7230010.3.1.4.8323329.6378.1517875197.873867 +1.2.276.0.7230010.3.1.4.8323329.6379.1517875197.888352 +1.2.276.0.7230010.3.1.4.8323329.6380.1517875197.881893 +1.2.276.0.7230010.3.1.4.8323329.6381.1517875197.886073 +1.2.276.0.7230010.3.1.4.8323329.638.1517875163.828825 +1.2.276.0.7230010.3.1.4.8323329.6382.1517875197.902640 +1.2.276.0.7230010.3.1.4.8323329.6383.1517875197.906377 +1.2.276.0.7230010.3.1.4.8323329.6384.1517875197.905964 +1.2.276.0.7230010.3.1.4.8323329.6385.1517875197.916433 +1.2.276.0.7230010.3.1.4.8323329.6386.1517875197.917450 +1.2.276.0.7230010.3.1.4.8323329.6387.1517875197.911585 +1.2.276.0.7230010.3.1.4.8323329.6388.1517875197.926387 +1.2.276.0.7230010.3.1.4.8323329.6389.1517875197.938876 +1.2.276.0.7230010.3.1.4.8323329.6390.1517875197.939405 +1.2.276.0.7230010.3.1.4.8323329.6391.1517875197.934222 +1.2.276.0.7230010.3.1.4.8323329.639.1517875163.837692 +1.2.276.0.7230010.3.1.4.8323329.6392.1517875197.939270 +1.2.276.0.7230010.3.1.4.8323329.6393.1517875197.939299 +1.2.276.0.7230010.3.1.4.8323329.6394.1517875197.948071 +1.2.276.0.7230010.3.1.4.8323329.6395.1517875197.962863 +1.2.276.0.7230010.3.1.4.8323329.6396.1517875197.964534 +1.2.276.0.7230010.3.1.4.8323329.6397.1517875197.966066 +1.2.276.0.7230010.3.1.4.8323329.6398.1517875197.967337 +1.2.276.0.7230010.3.1.4.8323329.6399.1517875197.973142 +1.2.276.0.7230010.3.1.4.8323329.6400.1517875197.975358 +1.2.276.0.7230010.3.1.4.8323329.6401.1517875197.998207 +1.2.276.0.7230010.3.1.4.8323329.6402.1517875197.991927 +1.2.276.0.7230010.3.1.4.8323329.6403.1517875197.998353 +1.2.276.0.7230010.3.1.4.8323329.6404.1517875197.996598 +1.2.276.0.7230010.3.1.4.8323329.6405.1517875198.3857 +1.2.276.0.7230010.3.1.4.8323329.6406.1517875198.9054 +1.2.276.0.7230010.3.1.4.8323329.6407.1517875198.20897 +1.2.276.0.7230010.3.1.4.8323329.6408.1517875198.25100 +1.2.276.0.7230010.3.1.4.8323329.6409.1517875198.38966 +1.2.276.0.7230010.3.1.4.8323329.6410.1517875198.41544 +1.2.276.0.7230010.3.1.4.8323329.6411.1517875198.46765 +1.2.276.0.7230010.3.1.4.8323329.6412.1517875198.46707 +1.2.276.0.7230010.3.1.4.8323329.6413.1517875198.52725 +1.2.276.0.7230010.3.1.4.8323329.6414.1517875198.57641 +1.2.276.0.7230010.3.1.4.8323329.6415.1517875198.59061 +1.2.276.0.7230010.3.1.4.8323329.6416.1517875198.77714 +1.2.276.0.7230010.3.1.4.8323329.6417.1517875198.78623 +1.2.276.0.7230010.3.1.4.8323329.6418.1517875198.79240 +1.2.276.0.7230010.3.1.4.8323329.6419.1517875198.82032 +1.2.276.0.7230010.3.1.4.8323329.6420.1517875198.96742 +1.2.276.0.7230010.3.1.4.8323329.6421.1517875198.98821 +1.2.276.0.7230010.3.1.4.8323329.642.1517875163.841478 +1.2.276.0.7230010.3.1.4.8323329.6422.1517875198.101240 +1.2.276.0.7230010.3.1.4.8323329.6423.1517875198.98804 +1.2.276.0.7230010.3.1.4.8323329.6424.1517875198.109252 +1.2.276.0.7230010.3.1.4.8323329.6425.1517875198.118715 +1.2.276.0.7230010.3.1.4.8323329.6426.1517875198.121087 
+1.2.276.0.7230010.3.1.4.8323329.6427.1517875198.121527 +1.2.276.0.7230010.3.1.4.8323329.6428.1517875198.134029 +1.2.276.0.7230010.3.1.4.8323329.6429.1517875198.135009 +1.2.276.0.7230010.3.1.4.8323329.6430.1517875198.140985 +1.2.276.0.7230010.3.1.4.8323329.6431.1517875198.145400 +1.2.276.0.7230010.3.1.4.8323329.643.1517875163.850678 +1.2.276.0.7230010.3.1.4.8323329.6432.1517875198.147220 +1.2.276.0.7230010.3.1.4.8323329.6433.1517875198.164658 +1.2.276.0.7230010.3.1.4.8323329.6434.1517875198.159220 +1.2.276.0.7230010.3.1.4.8323329.6435.1517875198.167071 +1.2.276.0.7230010.3.1.4.8323329.6436.1517875198.188359 +1.2.276.0.7230010.3.1.4.8323329.6437.1517875198.210070 +1.2.276.0.7230010.3.1.4.8323329.6438.1517875198.224488 +1.2.276.0.7230010.3.1.4.8323329.6439.1517875198.234221 +1.2.276.0.7230010.3.1.4.8323329.6440.1517875198.234389 +1.2.276.0.7230010.3.1.4.8323329.6441.1517875198.234488 +1.2.276.0.7230010.3.1.4.8323329.644.1517875163.850974 +1.2.276.0.7230010.3.1.4.8323329.6442.1517875198.245282 +1.2.276.0.7230010.3.1.4.8323329.6443.1517875198.253828 +1.2.276.0.7230010.3.1.4.8323329.6444.1517875198.259517 +1.2.276.0.7230010.3.1.4.8323329.6445.1517875198.270925 +1.2.276.0.7230010.3.1.4.8323329.6446.1517875198.273534 +1.2.276.0.7230010.3.1.4.8323329.6447.1517875198.281655 +1.2.276.0.7230010.3.1.4.8323329.6448.1517875198.286676 +1.2.276.0.7230010.3.1.4.8323329.6449.1517875198.293152 +1.2.276.0.7230010.3.1.4.8323329.6450.1517875198.300471 +1.2.276.0.7230010.3.1.4.8323329.6451.1517875198.303528 +1.2.276.0.7230010.3.1.4.8323329.645.1517875163.863853 +1.2.276.0.7230010.3.1.4.8323329.6452.1517875198.299463 +1.2.276.0.7230010.3.1.4.8323329.6453.1517875198.315883 +1.2.276.0.7230010.3.1.4.8323329.6454.1517875198.319349 +1.2.276.0.7230010.3.1.4.8323329.6455.1517875198.323945 +1.2.276.0.7230010.3.1.4.8323329.6456.1517875198.328310 +1.2.276.0.7230010.3.1.4.8323329.6457.1517875198.343515 +1.2.276.0.7230010.3.1.4.8323329.6458.1517875198.350986 +1.2.276.0.7230010.3.1.4.8323329.6459.1517875198.354778 +1.2.276.0.7230010.3.1.4.8323329.6460.1517875198.378416 +1.2.276.0.7230010.3.1.4.8323329.6461.1517875198.378637 +1.2.276.0.7230010.3.1.4.8323329.646.1517875163.863397 +1.2.276.0.7230010.3.1.4.8323329.6462.1517875198.381591 +1.2.276.0.7230010.3.1.4.8323329.6463.1517875198.399348 +1.2.276.0.7230010.3.1.4.8323329.6464.1517875198.406892 +1.2.276.0.7230010.3.1.4.8323329.6465.1517875198.397596 +1.2.276.0.7230010.3.1.4.8323329.6466.1517875198.409617 +1.2.276.0.7230010.3.1.4.8323329.6467.1517875198.409616 +1.2.276.0.7230010.3.1.4.8323329.6468.1517875198.422051 +1.2.276.0.7230010.3.1.4.8323329.6469.1517875198.440807 +1.2.276.0.7230010.3.1.4.8323329.6470.1517875198.446351 +1.2.276.0.7230010.3.1.4.8323329.6471.1517875198.446141 +1.2.276.0.7230010.3.1.4.8323329.647.1517875163.875287 +1.2.276.0.7230010.3.1.4.8323329.6472.1517875198.457782 +1.2.276.0.7230010.3.1.4.8323329.6473.1517875198.462294 +1.2.276.0.7230010.3.1.4.8323329.6474.1517875198.470553 +1.2.276.0.7230010.3.1.4.8323329.6475.1517875198.485703 +1.2.276.0.7230010.3.1.4.8323329.6476.1517875198.485034 +1.2.276.0.7230010.3.1.4.8323329.6477.1517875198.495604 +1.2.276.0.7230010.3.1.4.8323329.6478.1517875198.493740 +1.2.276.0.7230010.3.1.4.8323329.6479.1517875198.503451 +1.2.276.0.7230010.3.1.4.8323329.6480.1517875198.516697 +1.2.276.0.7230010.3.1.4.8323329.6481.1517875198.540860 +1.2.276.0.7230010.3.1.4.8323329.648.1517875163.875119 +1.2.276.0.7230010.3.1.4.8323329.6482.1517875198.513497 +1.2.276.0.7230010.3.1.4.8323329.6483.1517875198.520406 
+1.2.276.0.7230010.3.1.4.8323329.6484.1517875198.525862 +1.2.276.0.7230010.3.1.4.8323329.6485.1517875198.529405 +1.2.276.0.7230010.3.1.4.8323329.6486.1517875198.537781 +1.2.276.0.7230010.3.1.4.8323329.6487.1517875198.537888 +1.2.276.0.7230010.3.1.4.8323329.6488.1517875198.544466 +1.2.276.0.7230010.3.1.4.8323329.6489.1517875198.546876 +1.2.276.0.7230010.3.1.4.8323329.6490.1517875198.551179 +1.2.276.0.7230010.3.1.4.8323329.6491.1517875198.577052 +1.2.276.0.7230010.3.1.4.8323329.649.1517875163.885178 +1.2.276.0.7230010.3.1.4.8323329.6492.1517875198.571281 +1.2.276.0.7230010.3.1.4.8323329.6493.1517875198.578203 +1.2.276.0.7230010.3.1.4.8323329.6494.1517875198.583424 +1.2.276.0.7230010.3.1.4.8323329.6495.1517875198.607654 +1.2.276.0.7230010.3.1.4.8323329.6496.1517875198.605039 +1.2.276.0.7230010.3.1.4.8323329.6497.1517875198.600389 +1.2.276.0.7230010.3.1.4.8323329.6498.1517875198.604149 +1.2.276.0.7230010.3.1.4.8323329.6499.1517875198.607755 +1.2.276.0.7230010.3.1.4.8323329.6500.1517875198.625322 +1.2.276.0.7230010.3.1.4.8323329.6501.1517875198.625338 +1.2.276.0.7230010.3.1.4.8323329.650.1517875163.887659 +1.2.276.0.7230010.3.1.4.8323329.6502.1517875198.634464 +1.2.276.0.7230010.3.1.4.8323329.6503.1517875198.635973 +1.2.276.0.7230010.3.1.4.8323329.6504.1517875198.644193 +1.2.276.0.7230010.3.1.4.8323329.6505.1517875198.644193 +1.2.276.0.7230010.3.1.4.8323329.6506.1517875198.651132 +1.2.276.0.7230010.3.1.4.8323329.6507.1517875198.652322 +1.2.276.0.7230010.3.1.4.8323329.6508.1517875198.651020 +1.2.276.0.7230010.3.1.4.8323329.6509.1517875198.658543 +1.2.276.0.7230010.3.1.4.8323329.6510.1517875198.659480 +1.2.276.0.7230010.3.1.4.8323329.6511.1517875198.669726 +1.2.276.0.7230010.3.1.4.8323329.651.1517875163.893617 +1.2.276.0.7230010.3.1.4.8323329.6512.1517875198.672709 +1.2.276.0.7230010.3.1.4.8323329.6513.1517875198.679142 +1.2.276.0.7230010.3.1.4.8323329.6514.1517875198.681192 +1.2.276.0.7230010.3.1.4.8323329.6515.1517875198.680967 +1.2.276.0.7230010.3.1.4.8323329.6516.1517875198.689552 +1.2.276.0.7230010.3.1.4.8323329.6517.1517875198.689322 +1.2.276.0.7230010.3.1.4.8323329.6518.1517875198.700842 +1.2.276.0.7230010.3.1.4.8323329.6519.1517875198.710393 +1.2.276.0.7230010.3.1.4.8323329.6520.1517875198.721077 +1.2.276.0.7230010.3.1.4.8323329.6521.1517875198.729391 +1.2.276.0.7230010.3.1.4.8323329.652.1517875163.896438 +1.2.276.0.7230010.3.1.4.8323329.6522.1517875198.731522 +1.2.276.0.7230010.3.1.4.8323329.6523.1517875198.734816 +1.2.276.0.7230010.3.1.4.8323329.6524.1517875198.737947 +1.2.276.0.7230010.3.1.4.8323329.6525.1517875198.746420 +1.2.276.0.7230010.3.1.4.8323329.6526.1517875198.755643 +1.2.276.0.7230010.3.1.4.8323329.6527.1517875198.754879 +1.2.276.0.7230010.3.1.4.8323329.6528.1517875198.775711 +1.2.276.0.7230010.3.1.4.8323329.6529.1517875198.776044 +1.2.276.0.7230010.3.1.4.8323329.6530.1517875198.769967 +1.2.276.0.7230010.3.1.4.8323329.6531.1517875198.771655 +1.2.276.0.7230010.3.1.4.8323329.653.1517875163.898610 +1.2.276.0.7230010.3.1.4.8323329.6532.1517875198.780552 +1.2.276.0.7230010.3.1.4.8323329.6533.1517875198.781947 +1.2.276.0.7230010.3.1.4.8323329.6534.1517875198.790948 +1.2.276.0.7230010.3.1.4.8323329.6535.1517875198.794635 +1.2.276.0.7230010.3.1.4.8323329.6536.1517875198.802172 +1.2.276.0.7230010.3.1.4.8323329.6537.1517875198.795847 +1.2.276.0.7230010.3.1.4.8323329.6538.1517875198.814839 +1.2.276.0.7230010.3.1.4.8323329.6539.1517875198.805503 +1.2.276.0.7230010.3.1.4.8323329.6540.1517875198.820092 +1.2.276.0.7230010.3.1.4.8323329.6541.1517875198.810775 
+1.2.276.0.7230010.3.1.4.8323329.654.1517875163.913751 +1.2.276.0.7230010.3.1.4.8323329.6542.1517875198.826821 +1.2.276.0.7230010.3.1.4.8323329.6543.1517875198.827634 +1.2.276.0.7230010.3.1.4.8323329.6544.1517875198.824207 +1.2.276.0.7230010.3.1.4.8323329.6545.1517875198.830763 +1.2.276.0.7230010.3.1.4.8323329.6546.1517875198.835779 +1.2.276.0.7230010.3.1.4.8323329.6547.1517875198.843299 +1.2.276.0.7230010.3.1.4.8323329.6548.1517875198.840825 +1.2.276.0.7230010.3.1.4.8323329.6549.1517875198.851283 +1.2.276.0.7230010.3.1.4.8323329.6550.1517875198.864134 +1.2.276.0.7230010.3.1.4.8323329.6551.1517875198.862018 +1.2.276.0.7230010.3.1.4.8323329.655.1517875163.918966 +1.2.276.0.7230010.3.1.4.8323329.6552.1517875198.864344 +1.2.276.0.7230010.3.1.4.8323329.6553.1517875198.873811 +1.2.276.0.7230010.3.1.4.8323329.6554.1517875198.877881 +1.2.276.0.7230010.3.1.4.8323329.6555.1517875198.878767 +1.2.276.0.7230010.3.1.4.8323329.6556.1517875198.879261 +1.2.276.0.7230010.3.1.4.8323329.6557.1517875198.892166 +1.2.276.0.7230010.3.1.4.8323329.6558.1517875198.904351 +1.2.276.0.7230010.3.1.4.8323329.6559.1517875198.898497 +1.2.276.0.7230010.3.1.4.8323329.6560.1517875198.898497 +1.2.276.0.7230010.3.1.4.8323329.6561.1517875198.912095 +1.2.276.0.7230010.3.1.4.8323329.656.1517875163.919215 +1.2.276.0.7230010.3.1.4.8323329.6562.1517875198.915383 +1.2.276.0.7230010.3.1.4.8323329.6563.1517875198.922529 +1.2.276.0.7230010.3.1.4.8323329.6564.1517875198.919717 +1.2.276.0.7230010.3.1.4.8323329.6565.1517875198.932954 +1.2.276.0.7230010.3.1.4.8323329.6566.1517875198.928786 +1.2.276.0.7230010.3.1.4.8323329.6567.1517875198.951391 +1.2.276.0.7230010.3.1.4.8323329.6568.1517875198.954591 +1.2.276.0.7230010.3.1.4.8323329.6569.1517875198.939534 +1.2.276.0.7230010.3.1.4.8323329.6570.1517875198.942747 +1.2.276.0.7230010.3.1.4.8323329.6571.1517875198.965246 +1.2.276.0.7230010.3.1.4.8323329.657.1517875163.919754 +1.2.276.0.7230010.3.1.4.8323329.6572.1517875198.979958 +1.2.276.0.7230010.3.1.4.8323329.6573.1517875198.975523 +1.2.276.0.7230010.3.1.4.8323329.6574.1517875198.989135 +1.2.276.0.7230010.3.1.4.8323329.6575.1517875199.7977 +1.2.276.0.7230010.3.1.4.8323329.6576.1517875198.998708 +1.2.276.0.7230010.3.1.4.8323329.6577.1517875199.14508 +1.2.276.0.7230010.3.1.4.8323329.6578.1517875199.10533 +1.2.276.0.7230010.3.1.4.8323329.6579.1517875199.15010 +1.2.276.0.7230010.3.1.4.8323329.6580.1517875199.18395 +1.2.276.0.7230010.3.1.4.8323329.6581.1517875199.21716 +1.2.276.0.7230010.3.1.4.8323329.658.1517875163.927055 +1.2.276.0.7230010.3.1.4.8323329.6582.1517875199.31820 +1.2.276.0.7230010.3.1.4.8323329.6583.1517875199.40292 +1.2.276.0.7230010.3.1.4.8323329.6584.1517875199.43754 +1.2.276.0.7230010.3.1.4.8323329.6585.1517875199.37328 +1.2.276.0.7230010.3.1.4.8323329.6586.1517875199.48083 +1.2.276.0.7230010.3.1.4.8323329.6587.1517875199.61902 +1.2.276.0.7230010.3.1.4.8323329.6588.1517875199.69177 +1.2.276.0.7230010.3.1.4.8323329.6589.1517875199.80517 +1.2.276.0.7230010.3.1.4.8323329.6590.1517875199.84443 +1.2.276.0.7230010.3.1.4.8323329.6591.1517875199.84412 +1.2.276.0.7230010.3.1.4.8323329.659.1517875163.931674 +1.2.276.0.7230010.3.1.4.8323329.6592.1517875199.97433 +1.2.276.0.7230010.3.1.4.8323329.6593.1517875199.110218 +1.2.276.0.7230010.3.1.4.8323329.6594.1517875199.106697 +1.2.276.0.7230010.3.1.4.8323329.6595.1517875199.111556 +1.2.276.0.7230010.3.1.4.8323329.6596.1517875199.124056 +1.2.276.0.7230010.3.1.4.8323329.6597.1517875199.131818 +1.2.276.0.7230010.3.1.4.8323329.6598.1517875199.133928 
+1.2.276.0.7230010.3.1.4.8323329.6599.1517875199.141732 +1.2.276.0.7230010.3.1.4.8323329.6600.1517875199.146110 +1.2.276.0.7230010.3.1.4.8323329.6601.1517875199.146091 +1.2.276.0.7230010.3.1.4.8323329.660.1517875163.935037 +1.2.276.0.7230010.3.1.4.8323329.6602.1517875199.139989 +1.2.276.0.7230010.3.1.4.8323329.6603.1517875199.140279 +1.2.276.0.7230010.3.1.4.8323329.6604.1517875199.148127 +1.2.276.0.7230010.3.1.4.8323329.6605.1517875199.156822 +1.2.276.0.7230010.3.1.4.8323329.6606.1517875199.159378 +1.2.276.0.7230010.3.1.4.8323329.6607.1517875199.164717 +1.2.276.0.7230010.3.1.4.8323329.6608.1517875199.177191 +1.2.276.0.7230010.3.1.4.8323329.6609.1517875199.177654 +1.2.276.0.7230010.3.1.4.8323329.6610.1517875199.182154 +1.2.276.0.7230010.3.1.4.8323329.6611.1517875199.190860 +1.2.276.0.7230010.3.1.4.8323329.661.1517875163.938399 +1.2.276.0.7230010.3.1.4.8323329.6612.1517875199.190395 +1.2.276.0.7230010.3.1.4.8323329.6613.1517875199.205018 +1.2.276.0.7230010.3.1.4.8323329.6614.1517875199.211710 +1.2.276.0.7230010.3.1.4.8323329.6615.1517875199.230184 +1.2.276.0.7230010.3.1.4.8323329.6616.1517875199.238776 +1.2.276.0.7230010.3.1.4.8323329.6617.1517875199.244695 +1.2.276.0.7230010.3.1.4.8323329.6618.1517875199.260678 +1.2.276.0.7230010.3.1.4.8323329.6619.1517875199.251839 +1.2.276.0.7230010.3.1.4.8323329.6620.1517875199.271573 +1.2.276.0.7230010.3.1.4.8323329.6621.1517875199.272701 +1.2.276.0.7230010.3.1.4.8323329.662.1517875163.939645 +1.2.276.0.7230010.3.1.4.8323329.6622.1517875199.263687 +1.2.276.0.7230010.3.1.4.8323329.6623.1517875199.282547 +1.2.276.0.7230010.3.1.4.8323329.6624.1517875199.270628 +1.2.276.0.7230010.3.1.4.8323329.6625.1517875199.282656 +1.2.276.0.7230010.3.1.4.8323329.6626.1517875199.286412 +1.2.276.0.7230010.3.1.4.8323329.6627.1517875199.290494 +1.2.276.0.7230010.3.1.4.8323329.6628.1517875199.285199 +1.2.276.0.7230010.3.1.4.8323329.6629.1517875199.290940 +1.2.276.0.7230010.3.1.4.8323329.6630.1517875199.297816 +1.2.276.0.7230010.3.1.4.8323329.6631.1517875199.309375 +1.2.276.0.7230010.3.1.4.8323329.663.1517875163.940834 +1.2.276.0.7230010.3.1.4.8323329.6632.1517875199.314301 +1.2.276.0.7230010.3.1.4.8323329.6633.1517875199.334238 +1.2.276.0.7230010.3.1.4.8323329.6634.1517875199.323566 +1.2.276.0.7230010.3.1.4.8323329.6635.1517875199.333236 +1.2.276.0.7230010.3.1.4.8323329.6636.1517875199.340100 +1.2.276.0.7230010.3.1.4.8323329.6637.1517875199.342800 +1.2.276.0.7230010.3.1.4.8323329.6638.1517875199.350879 +1.2.276.0.7230010.3.1.4.8323329.6639.1517875199.350179 +1.2.276.0.7230010.3.1.4.8323329.6640.1517875199.360487 +1.2.276.0.7230010.3.1.4.8323329.6641.1517875199.362072 +1.2.276.0.7230010.3.1.4.8323329.664.1517875163.945572 +1.2.276.0.7230010.3.1.4.8323329.6642.1517875199.372556 +1.2.276.0.7230010.3.1.4.8323329.6643.1517875199.367856 +1.2.276.0.7230010.3.1.4.8323329.6644.1517875199.380151 +1.2.276.0.7230010.3.1.4.8323329.6645.1517875199.374468 +1.2.276.0.7230010.3.1.4.8323329.6646.1517875199.380123 +1.2.276.0.7230010.3.1.4.8323329.6647.1517875199.386037 +1.2.276.0.7230010.3.1.4.8323329.6648.1517875199.391102 +1.2.276.0.7230010.3.1.4.8323329.6649.1517875199.393596 +1.2.276.0.7230010.3.1.4.8323329.6650.1517875199.397208 +1.2.276.0.7230010.3.1.4.8323329.6651.1517875199.406393 +1.2.276.0.7230010.3.1.4.8323329.665.1517875163.960792 +1.2.276.0.7230010.3.1.4.8323329.6652.1517875199.400704 +1.2.276.0.7230010.3.1.4.8323329.6653.1517875199.410266 +1.2.276.0.7230010.3.1.4.8323329.6654.1517875199.406947 +1.2.276.0.7230010.3.1.4.8323329.6655.1517875199.413686 
+1.2.276.0.7230010.3.1.4.8323329.6656.1517875199.420710 +1.2.276.0.7230010.3.1.4.8323329.6657.1517875199.426140 +1.2.276.0.7230010.3.1.4.8323329.6658.1517875199.424204 +1.2.276.0.7230010.3.1.4.8323329.6659.1517875199.436666 +1.2.276.0.7230010.3.1.4.8323329.6660.1517875199.429191 +1.2.276.0.7230010.3.1.4.8323329.6661.1517875199.435059 +1.2.276.0.7230010.3.1.4.8323329.666.1517875163.960792 +1.2.276.0.7230010.3.1.4.8323329.6662.1517875199.437895 +1.2.276.0.7230010.3.1.4.8323329.6663.1517875199.450117 +1.2.276.0.7230010.3.1.4.8323329.6664.1517875199.449771 +1.2.276.0.7230010.3.1.4.8323329.6665.1517875199.457200 +1.2.276.0.7230010.3.1.4.8323329.6666.1517875199.461939 +1.2.276.0.7230010.3.1.4.8323329.6667.1517875199.456814 +1.2.276.0.7230010.3.1.4.8323329.6668.1517875199.469312 +1.2.276.0.7230010.3.1.4.8323329.6669.1517875199.470710 +1.2.276.0.7230010.3.1.4.8323329.6670.1517875199.466328 +1.2.276.0.7230010.3.1.4.8323329.6671.1517875199.463444 +1.2.276.0.7230010.3.1.4.8323329.667.1517875163.961533 +1.2.276.0.7230010.3.1.4.8323329.6672.1517875199.474905 +1.2.276.0.7230010.3.1.4.8323329.6673.1517875199.490794 +1.2.276.0.7230010.3.1.4.8323329.6674.1517875199.491611 +1.2.276.0.7230010.3.1.4.8323329.6675.1517875199.487284 +1.2.276.0.7230010.3.1.4.8323329.6676.1517875199.491496 +1.2.276.0.7230010.3.1.4.8323329.6677.1517875199.495537 +1.2.276.0.7230010.3.1.4.8323329.6678.1517875199.500272 +1.2.276.0.7230010.3.1.4.8323329.6679.1517875199.500120 +1.2.276.0.7230010.3.1.4.8323329.6680.1517875199.515191 +1.2.276.0.7230010.3.1.4.8323329.6681.1517875199.520870 +1.2.276.0.7230010.3.1.4.8323329.668.1517875163.968012 +1.2.276.0.7230010.3.1.4.8323329.6682.1517875199.532073 +1.2.276.0.7230010.3.1.4.8323329.6683.1517875199.532856 +1.2.276.0.7230010.3.1.4.8323329.6684.1517875199.529930 +1.2.276.0.7230010.3.1.4.8323329.6685.1517875199.541872 +1.2.276.0.7230010.3.1.4.8323329.6686.1517875199.541895 +1.2.276.0.7230010.3.1.4.8323329.6687.1517875199.571503 +1.2.276.0.7230010.3.1.4.8323329.6688.1517875199.558555 +1.2.276.0.7230010.3.1.4.8323329.6689.1517875199.555128 +1.2.276.0.7230010.3.1.4.8323329.6690.1517875199.573338 +1.2.276.0.7230010.3.1.4.8323329.6691.1517875199.563286 +1.2.276.0.7230010.3.1.4.8323329.669.1517875163.986899 +1.2.276.0.7230010.3.1.4.8323329.6692.1517875199.564955 +1.2.276.0.7230010.3.1.4.8323329.6693.1517875199.563983 +1.2.276.0.7230010.3.1.4.8323329.6694.1517875199.581265 +1.2.276.0.7230010.3.1.4.8323329.6695.1517875199.588749 +1.2.276.0.7230010.3.1.4.8323329.6696.1517875199.589969 +1.2.276.0.7230010.3.1.4.8323329.6697.1517875199.592582 +1.2.276.0.7230010.3.1.4.8323329.6698.1517875199.595479 +1.2.276.0.7230010.3.1.4.8323329.6699.1517875199.598578 +1.2.276.0.7230010.3.1.4.8323329.6700.1517875199.611132 +1.2.276.0.7230010.3.1.4.8323329.6701.1517875199.611398 +1.2.276.0.7230010.3.1.4.8323329.670.1517875163.985810 +1.2.276.0.7230010.3.1.4.8323329.6702.1517875199.609626 +1.2.276.0.7230010.3.1.4.8323329.6703.1517875199.611760 +1.2.276.0.7230010.3.1.4.8323329.6704.1517875199.614236 +1.2.276.0.7230010.3.1.4.8323329.6705.1517875199.622481 +1.2.276.0.7230010.3.1.4.8323329.6706.1517875199.622555 +1.2.276.0.7230010.3.1.4.8323329.6707.1517875199.622316 +1.2.276.0.7230010.3.1.4.8323329.6708.1517875199.644734 +1.2.276.0.7230010.3.1.4.8323329.6709.1517875199.634834 +1.2.276.0.7230010.3.1.4.8323329.6710.1517875199.646714 +1.2.276.0.7230010.3.1.4.8323329.6711.1517875199.641714 +1.2.276.0.7230010.3.1.4.8323329.671.1517875163.997213 +1.2.276.0.7230010.3.1.4.8323329.6712.1517875199.651590 
+1.2.276.0.7230010.3.1.4.8323329.6713.1517875199.656666 +1.2.276.0.7230010.3.1.4.8323329.6714.1517875199.656366 +1.2.276.0.7230010.3.1.4.8323329.6715.1517875199.659979 +1.2.276.0.7230010.3.1.4.8323329.6716.1517875199.767612 +1.2.276.0.7230010.3.1.4.8323329.6717.1517875199.779271 +1.2.276.0.7230010.3.1.4.8323329.6718.1517875199.803396 +1.2.276.0.7230010.3.1.4.8323329.6719.1517875199.823885 +1.2.276.0.7230010.3.1.4.8323329.6720.1517875199.820401 +1.2.276.0.7230010.3.1.4.8323329.6721.1517875199.820400 +1.2.276.0.7230010.3.1.4.8323329.672.1517875163.992193 +1.2.276.0.7230010.3.1.4.8323329.6722.1517875199.827171 +1.2.276.0.7230010.3.1.4.8323329.6723.1517875199.835099 +1.2.276.0.7230010.3.1.4.8323329.6724.1517875199.839734 +1.2.276.0.7230010.3.1.4.8323329.6725.1517875199.856222 +1.2.276.0.7230010.3.1.4.8323329.6726.1517875199.859748 +1.2.276.0.7230010.3.1.4.8323329.6727.1517875199.870360 +1.2.276.0.7230010.3.1.4.8323329.6728.1517875199.880934 +1.2.276.0.7230010.3.1.4.8323329.6729.1517875199.890014 +1.2.276.0.7230010.3.1.4.8323329.6730.1517875199.890014 +1.2.276.0.7230010.3.1.4.8323329.6731.1517875199.883403 +1.2.276.0.7230010.3.1.4.8323329.673.1517875164.12208 +1.2.276.0.7230010.3.1.4.8323329.6732.1517875199.896641 +1.2.276.0.7230010.3.1.4.8323329.6733.1517875199.896136 +1.2.276.0.7230010.3.1.4.8323329.6734.1517875199.892769 +1.2.276.0.7230010.3.1.4.8323329.6735.1517875199.903595 +1.2.276.0.7230010.3.1.4.8323329.6736.1517875199.920542 +1.2.276.0.7230010.3.1.4.8323329.6737.1517875199.916387 +1.2.276.0.7230010.3.1.4.8323329.6738.1517875199.906742 +1.2.276.0.7230010.3.1.4.8323329.6739.1517875199.911844 +1.2.276.0.7230010.3.1.4.8323329.6740.1517875199.920682 +1.2.276.0.7230010.3.1.4.8323329.6741.1517875199.920967 +1.2.276.0.7230010.3.1.4.8323329.674.1517875164.12184 +1.2.276.0.7230010.3.1.4.8323329.6742.1517875199.938420 +1.2.276.0.7230010.3.1.4.8323329.6743.1517875199.938420 +1.2.276.0.7230010.3.1.4.8323329.6744.1517875199.935332 +1.2.276.0.7230010.3.1.4.8323329.6745.1517875199.937448 +1.2.276.0.7230010.3.1.4.8323329.6746.1517875199.945146 +1.2.276.0.7230010.3.1.4.8323329.6747.1517875199.943884 +1.2.276.0.7230010.3.1.4.8323329.6748.1517875199.955109 +1.2.276.0.7230010.3.1.4.8323329.6749.1517875199.961009 +1.2.276.0.7230010.3.1.4.8323329.6750.1517875199.968613 +1.2.276.0.7230010.3.1.4.8323329.6751.1517875199.967291 +1.2.276.0.7230010.3.1.4.8323329.675.1517875164.10320 +1.2.276.0.7230010.3.1.4.8323329.6752.1517875199.981513 +1.2.276.0.7230010.3.1.4.8323329.6753.1517875199.978778 +1.2.276.0.7230010.3.1.4.8323329.6754.1517875199.970404 +1.2.276.0.7230010.3.1.4.8323329.6755.1517875199.968684 +1.2.276.0.7230010.3.1.4.8323329.6756.1517875199.983168 +1.2.276.0.7230010.3.1.4.8323329.6757.1517875199.983390 +1.2.276.0.7230010.3.1.4.8323329.6758.1517875199.990313 +1.2.276.0.7230010.3.1.4.8323329.6759.1517875199.993684 +1.2.276.0.7230010.3.1.4.8323329.6760.1517875199.998297 +1.2.276.0.7230010.3.1.4.8323329.6761.1517875199.608 +1.2.276.0.7230010.3.1.4.8323329.676.1517875164.13474 +1.2.276.0.7230010.3.1.4.8323329.6762.1517875200.14897 +1.2.276.0.7230010.3.1.4.8323329.6763.1517875200.19551 +1.2.276.0.7230010.3.1.4.8323329.6764.1517875200.26996 +1.2.276.0.7230010.3.1.4.8323329.6765.1517875200.31861 +1.2.276.0.7230010.3.1.4.8323329.6766.1517875200.47126 +1.2.276.0.7230010.3.1.4.8323329.6767.1517875200.55900 +1.2.276.0.7230010.3.1.4.8323329.6768.1517875200.55755 +1.2.276.0.7230010.3.1.4.8323329.6769.1517875200.68561 +1.2.276.0.7230010.3.1.4.8323329.6770.1517875200.69710 
+1.2.276.0.7230010.3.1.4.8323329.6771.1517875200.76102 +1.2.276.0.7230010.3.1.4.8323329.677.1517875164.30058 +1.2.276.0.7230010.3.1.4.8323329.6772.1517875200.80257 +1.2.276.0.7230010.3.1.4.8323329.6773.1517875200.92882 +1.2.276.0.7230010.3.1.4.8323329.6774.1517875200.164786 +1.2.276.0.7230010.3.1.4.8323329.6775.1517875201.60945 +1.2.276.0.7230010.3.1.4.8323329.6776.1517875201.61444 +1.2.276.0.7230010.3.1.4.8323329.6777.1517875201.77738 +1.2.276.0.7230010.3.1.4.8323329.6778.1517875201.70165 +1.2.276.0.7230010.3.1.4.8323329.6779.1517875201.87697 +1.2.276.0.7230010.3.1.4.8323329.6780.1517875201.100186 +1.2.276.0.7230010.3.1.4.8323329.6781.1517875201.130398 +1.2.276.0.7230010.3.1.4.8323329.678.1517875164.31438 +1.2.276.0.7230010.3.1.4.8323329.6782.1517875201.120620 +1.2.276.0.7230010.3.1.4.8323329.6783.1517875201.141546 +1.2.276.0.7230010.3.1.4.8323329.6784.1517875201.131468 +1.2.276.0.7230010.3.1.4.8323329.6785.1517875201.139181 +1.2.276.0.7230010.3.1.4.8323329.6786.1517875201.157870 +1.2.276.0.7230010.3.1.4.8323329.6787.1517875201.153497 +1.2.276.0.7230010.3.1.4.8323329.6788.1517875201.161955 +1.2.276.0.7230010.3.1.4.8323329.6789.1517875201.161293 +1.2.276.0.7230010.3.1.4.8323329.6790.1517875201.158382 +1.2.276.0.7230010.3.1.4.8323329.6791.1517875201.180186 +1.2.276.0.7230010.3.1.4.8323329.679.1517875164.41707 +1.2.276.0.7230010.3.1.4.8323329.6792.1517875201.188335 +1.2.276.0.7230010.3.1.4.8323329.6793.1517875201.186924 +1.2.276.0.7230010.3.1.4.8323329.6794.1517875201.199439 +1.2.276.0.7230010.3.1.4.8323329.6795.1517875201.180505 +1.2.276.0.7230010.3.1.4.8323329.6796.1517875201.207193 +1.2.276.0.7230010.3.1.4.8323329.6797.1517875201.200345 +1.2.276.0.7230010.3.1.4.8323329.6798.1517875201.210781 +1.2.276.0.7230010.3.1.4.8323329.6799.1517875201.196390 +1.2.276.0.7230010.3.1.4.8323329.6800.1517875201.205049 +1.2.276.0.7230010.3.1.4.8323329.6801.1517875201.217275 +1.2.276.0.7230010.3.1.4.8323329.680.1517875164.49743 +1.2.276.0.7230010.3.1.4.8323329.6802.1517875201.218423 +1.2.276.0.7230010.3.1.4.8323329.6803.1517875201.222978 +1.2.276.0.7230010.3.1.4.8323329.6804.1517875201.230988 +1.2.276.0.7230010.3.1.4.8323329.6805.1517875201.240159 +1.2.276.0.7230010.3.1.4.8323329.6806.1517875201.272877 +1.2.276.0.7230010.3.1.4.8323329.6807.1517875201.291209 +1.2.276.0.7230010.3.1.4.8323329.6808.1517875201.285397 +1.2.276.0.7230010.3.1.4.8323329.6809.1517875201.284225 +1.2.276.0.7230010.3.1.4.8323329.6810.1517875201.300234 +1.2.276.0.7230010.3.1.4.8323329.6811.1517875201.300941 +1.2.276.0.7230010.3.1.4.8323329.681.1517875164.55275 +1.2.276.0.7230010.3.1.4.8323329.6812.1517875201.324941 +1.2.276.0.7230010.3.1.4.8323329.6813.1517875201.320440 +1.2.276.0.7230010.3.1.4.8323329.6814.1517875201.351762 +1.2.276.0.7230010.3.1.4.8323329.6815.1517875201.351860 +1.2.276.0.7230010.3.1.4.8323329.6816.1517875201.337765 +1.2.276.0.7230010.3.1.4.8323329.6817.1517875201.367825 +1.2.276.0.7230010.3.1.4.8323329.6818.1517875201.360945 +1.2.276.0.7230010.3.1.4.8323329.6819.1517875201.383336 +1.2.276.0.7230010.3.1.4.8323329.6820.1517875201.375382 +1.2.276.0.7230010.3.1.4.8323329.6821.1517875201.402359 +1.2.276.0.7230010.3.1.4.8323329.682.1517875164.57531 +1.2.276.0.7230010.3.1.4.8323329.6822.1517875201.394232 +1.2.276.0.7230010.3.1.4.8323329.6823.1517875201.397687 +1.2.276.0.7230010.3.1.4.8323329.6824.1517875201.386690 +1.2.276.0.7230010.3.1.4.8323329.6825.1517875201.400087 +1.2.276.0.7230010.3.1.4.8323329.6826.1517875201.392575 +1.2.276.0.7230010.3.1.4.8323329.6827.1517875201.405754 
+1.2.276.0.7230010.3.1.4.8323329.6828.1517875201.426585 +1.2.276.0.7230010.3.1.4.8323329.6829.1517875201.439325 +1.2.276.0.7230010.3.1.4.8323329.6830.1517875201.424165 +1.2.276.0.7230010.3.1.4.8323329.6831.1517875201.431248 +1.2.276.0.7230010.3.1.4.8323329.683.1517875164.58425 +1.2.276.0.7230010.3.1.4.8323329.6832.1517875201.439581 +1.2.276.0.7230010.3.1.4.8323329.6833.1517875201.453153 +1.2.276.0.7230010.3.1.4.8323329.6834.1517875201.446022 +1.2.276.0.7230010.3.1.4.8323329.6835.1517875201.448547 +1.2.276.0.7230010.3.1.4.8323329.6836.1517875201.458630 +1.2.276.0.7230010.3.1.4.8323329.6837.1517875201.470973 +1.2.276.0.7230010.3.1.4.8323329.6838.1517875201.466106 +1.2.276.0.7230010.3.1.4.8323329.6839.1517875201.472166 +1.2.276.0.7230010.3.1.4.8323329.6840.1517875201.471080 +1.2.276.0.7230010.3.1.4.8323329.6841.1517875201.482233 +1.2.276.0.7230010.3.1.4.8323329.684.1517875164.54773 +1.2.276.0.7230010.3.1.4.8323329.6842.1517875201.485408 +1.2.276.0.7230010.3.1.4.8323329.6843.1517875201.486100 +1.2.276.0.7230010.3.1.4.8323329.6844.1517875201.487769 +1.2.276.0.7230010.3.1.4.8323329.6845.1517875201.507400 +1.2.276.0.7230010.3.1.4.8323329.6846.1517875201.507400 +1.2.276.0.7230010.3.1.4.8323329.6847.1517875201.507516 +1.2.276.0.7230010.3.1.4.8323329.6848.1517875201.515650 +1.2.276.0.7230010.3.1.4.8323329.6849.1517875201.513885 +1.2.276.0.7230010.3.1.4.8323329.6850.1517875201.518647 +1.2.276.0.7230010.3.1.4.8323329.6851.1517875201.524530 +1.2.276.0.7230010.3.1.4.8323329.685.1517875164.68060 +1.2.276.0.7230010.3.1.4.8323329.6852.1517875201.527299 +1.2.276.0.7230010.3.1.4.8323329.6853.1517875201.532183 +1.2.276.0.7230010.3.1.4.8323329.6854.1517875201.536084 +1.2.276.0.7230010.3.1.4.8323329.6855.1517875201.544939 +1.2.276.0.7230010.3.1.4.8323329.6856.1517875201.558218 +1.2.276.0.7230010.3.1.4.8323329.6857.1517875201.561042 +1.2.276.0.7230010.3.1.4.8323329.6858.1517875201.564963 +1.2.276.0.7230010.3.1.4.8323329.6859.1517875201.577556 +1.2.276.0.7230010.3.1.4.8323329.6860.1517875201.579869 +1.2.276.0.7230010.3.1.4.8323329.6861.1517875201.583436 +1.2.276.0.7230010.3.1.4.8323329.686.1517875164.69732 +1.2.276.0.7230010.3.1.4.8323329.6862.1517875201.605439 +1.2.276.0.7230010.3.1.4.8323329.6863.1517875201.600298 +1.2.276.0.7230010.3.1.4.8323329.6864.1517875201.606556 +1.2.276.0.7230010.3.1.4.8323329.6865.1517875201.605873 +1.2.276.0.7230010.3.1.4.8323329.6866.1517875201.609401 +1.2.276.0.7230010.3.1.4.8323329.6867.1517875201.621169 +1.2.276.0.7230010.3.1.4.8323329.6868.1517875201.625311 +1.2.276.0.7230010.3.1.4.8323329.6869.1517875201.629299 +1.2.276.0.7230010.3.1.4.8323329.6870.1517875201.638094 +1.2.276.0.7230010.3.1.4.8323329.6871.1517875201.637250 +1.2.276.0.7230010.3.1.4.8323329.687.1517875164.69886 +1.2.276.0.7230010.3.1.4.8323329.6872.1517875201.654092 +1.2.276.0.7230010.3.1.4.8323329.6873.1517875201.655705 +1.2.276.0.7230010.3.1.4.8323329.6874.1517875201.664174 +1.2.276.0.7230010.3.1.4.8323329.6875.1517875201.668021 +1.2.276.0.7230010.3.1.4.8323329.6876.1517875201.676171 +1.2.276.0.7230010.3.1.4.8323329.6877.1517875201.678042 +1.2.276.0.7230010.3.1.4.8323329.6878.1517875201.691288 +1.2.276.0.7230010.3.1.4.8323329.6879.1517875201.708100 +1.2.276.0.7230010.3.1.4.8323329.6880.1517875201.741038 +1.2.276.0.7230010.3.1.4.8323329.6881.1517875201.725302 +1.2.276.0.7230010.3.1.4.8323329.688.1517875164.73775 +1.2.276.0.7230010.3.1.4.8323329.6882.1517875201.737674 +1.2.276.0.7230010.3.1.4.8323329.6883.1517875201.748736 +1.2.276.0.7230010.3.1.4.8323329.6884.1517875201.771096 
+1.2.276.0.7230010.3.1.4.8323329.6885.1517875201.772061 +1.2.276.0.7230010.3.1.4.8323329.6886.1517875201.793358 +1.2.276.0.7230010.3.1.4.8323329.6887.1517875201.793301 +1.2.276.0.7230010.3.1.4.8323329.6888.1517875201.784045 +1.2.276.0.7230010.3.1.4.8323329.6889.1517875201.785330 +1.2.276.0.7230010.3.1.4.8323329.6890.1517875201.786808 +1.2.276.0.7230010.3.1.4.8323329.6891.1517875201.799279 +1.2.276.0.7230010.3.1.4.8323329.689.1517875164.84125 +1.2.276.0.7230010.3.1.4.8323329.6892.1517875201.804776 +1.2.276.0.7230010.3.1.4.8323329.6893.1517875201.808136 +1.2.276.0.7230010.3.1.4.8323329.6894.1517875201.804774 +1.2.276.0.7230010.3.1.4.8323329.6895.1517875201.821716 +1.2.276.0.7230010.3.1.4.8323329.6896.1517875201.824767 +1.2.276.0.7230010.3.1.4.8323329.6897.1517875201.824559 +1.2.276.0.7230010.3.1.4.8323329.6898.1517875201.832875 +1.2.276.0.7230010.3.1.4.8323329.6899.1517875201.838278 +1.2.276.0.7230010.3.1.4.8323329.6900.1517875201.836204 +1.2.276.0.7230010.3.1.4.8323329.6901.1517875201.856523 +1.2.276.0.7230010.3.1.4.8323329.690.1517875164.88816 +1.2.276.0.7230010.3.1.4.8323329.6902.1517875201.857897 +1.2.276.0.7230010.3.1.4.8323329.6903.1517875201.859612 +1.2.276.0.7230010.3.1.4.8323329.6904.1517875201.850819 +1.2.276.0.7230010.3.1.4.8323329.6905.1517875201.856594 +1.2.276.0.7230010.3.1.4.8323329.6906.1517875201.864098 +1.2.276.0.7230010.3.1.4.8323329.6907.1517875201.860185 +1.2.276.0.7230010.3.1.4.8323329.6908.1517875201.881093 +1.2.276.0.7230010.3.1.4.8323329.6909.1517875201.885061 +1.2.276.0.7230010.3.1.4.8323329.6910.1517875201.884835 +1.2.276.0.7230010.3.1.4.8323329.6911.1517875201.889332 +1.2.276.0.7230010.3.1.4.8323329.691.1517875164.100426 +1.2.276.0.7230010.3.1.4.8323329.6912.1517875201.884477 +1.2.276.0.7230010.3.1.4.8323329.6913.1517875201.894316 +1.2.276.0.7230010.3.1.4.8323329.6914.1517875201.894071 +1.2.276.0.7230010.3.1.4.8323329.6915.1517875201.910729 +1.2.276.0.7230010.3.1.4.8323329.6916.1517875201.913487 +1.2.276.0.7230010.3.1.4.8323329.6917.1517875201.904913 +1.2.276.0.7230010.3.1.4.8323329.6918.1517875201.907653 +1.2.276.0.7230010.3.1.4.8323329.6919.1517875201.924360 +1.2.276.0.7230010.3.1.4.8323329.6920.1517875201.925793 +1.2.276.0.7230010.3.1.4.8323329.6921.1517875201.928113 +1.2.276.0.7230010.3.1.4.8323329.692.1517875164.93182 +1.2.276.0.7230010.3.1.4.8323329.6922.1517875201.931479 +1.2.276.0.7230010.3.1.4.8323329.6923.1517875201.943058 +1.2.276.0.7230010.3.1.4.8323329.6924.1517875201.945689 +1.2.276.0.7230010.3.1.4.8323329.6925.1517875201.948185 +1.2.276.0.7230010.3.1.4.8323329.6926.1517875201.953896 +1.2.276.0.7230010.3.1.4.8323329.6927.1517875201.946669 +1.2.276.0.7230010.3.1.4.8323329.6928.1517875201.955660 +1.2.276.0.7230010.3.1.4.8323329.6929.1517875201.948747 +1.2.276.0.7230010.3.1.4.8323329.6930.1517875201.959681 +1.2.276.0.7230010.3.1.4.8323329.6931.1517875201.970596 +1.2.276.0.7230010.3.1.4.8323329.693.1517875164.108437 +1.2.276.0.7230010.3.1.4.8323329.6932.1517875201.970611 +1.2.276.0.7230010.3.1.4.8323329.6933.1517875201.1099 +1.2.276.0.7230010.3.1.4.8323329.6934.1517875202.4595 +1.2.276.0.7230010.3.1.4.8323329.6935.1517875202.12552 +1.2.276.0.7230010.3.1.4.8323329.6936.1517875202.13043 +1.2.276.0.7230010.3.1.4.8323329.6937.1517875202.19296 +1.2.276.0.7230010.3.1.4.8323329.6938.1517875202.22045 +1.2.276.0.7230010.3.1.4.8323329.6939.1517875202.28137 +1.2.276.0.7230010.3.1.4.8323329.6940.1517875202.32538 +1.2.276.0.7230010.3.1.4.8323329.6941.1517875202.36388 +1.2.276.0.7230010.3.1.4.8323329.694.1517875164.101008 
+1.2.276.0.7230010.3.1.4.8323329.6942.1517875202.44642 +1.2.276.0.7230010.3.1.4.8323329.6943.1517875202.40300 +1.2.276.0.7230010.3.1.4.8323329.6944.1517875202.49757 +1.2.276.0.7230010.3.1.4.8323329.6945.1517875202.54700 +1.2.276.0.7230010.3.1.4.8323329.6946.1517875202.66342 +1.2.276.0.7230010.3.1.4.8323329.6947.1517875202.67033 +1.2.276.0.7230010.3.1.4.8323329.6948.1517875202.70176 +1.2.276.0.7230010.3.1.4.8323329.6949.1517875202.68681 +1.2.276.0.7230010.3.1.4.8323329.6950.1517875202.75840 +1.2.276.0.7230010.3.1.4.8323329.6951.1517875202.79390 +1.2.276.0.7230010.3.1.4.8323329.695.1517875164.106587 +1.2.276.0.7230010.3.1.4.8323329.6952.1517875202.103917 +1.2.276.0.7230010.3.1.4.8323329.6953.1517875202.105566 +1.2.276.0.7230010.3.1.4.8323329.6954.1517875202.93995 +1.2.276.0.7230010.3.1.4.8323329.6955.1517875202.97481 +1.2.276.0.7230010.3.1.4.8323329.6956.1517875202.94094 +1.2.276.0.7230010.3.1.4.8323329.6957.1517875202.112518 +1.2.276.0.7230010.3.1.4.8323329.6958.1517875202.100374 +1.2.276.0.7230010.3.1.4.8323329.6959.1517875202.114093 +1.2.276.0.7230010.3.1.4.8323329.6960.1517875202.117220 +1.2.276.0.7230010.3.1.4.8323329.6961.1517875202.117862 +1.2.276.0.7230010.3.1.4.8323329.696.1517875164.110900 +1.2.276.0.7230010.3.1.4.8323329.6962.1517875202.129753 +1.2.276.0.7230010.3.1.4.8323329.6963.1517875202.125428 +1.2.276.0.7230010.3.1.4.8323329.6964.1517875202.125199 +1.2.276.0.7230010.3.1.4.8323329.6965.1517875202.135394 +1.2.276.0.7230010.3.1.4.8323329.6966.1517875202.134564 +1.2.276.0.7230010.3.1.4.8323329.6967.1517875202.154226 +1.2.276.0.7230010.3.1.4.8323329.6968.1517875202.154923 +1.2.276.0.7230010.3.1.4.8323329.6969.1517875202.158139 +1.2.276.0.7230010.3.1.4.8323329.6970.1517875202.164844 +1.2.276.0.7230010.3.1.4.8323329.6971.1517875202.167417 +1.2.276.0.7230010.3.1.4.8323329.697.1517875164.115252 +1.2.276.0.7230010.3.1.4.8323329.6972.1517875202.174315 +1.2.276.0.7230010.3.1.4.8323329.6973.1517875202.182782 +1.2.276.0.7230010.3.1.4.8323329.6974.1517875202.181748 +1.2.276.0.7230010.3.1.4.8323329.6975.1517875202.189609 +1.2.276.0.7230010.3.1.4.8323329.6976.1517875202.203544 +1.2.276.0.7230010.3.1.4.8323329.6977.1517875202.193349 +1.2.276.0.7230010.3.1.4.8323329.6978.1517875202.198017 +1.2.276.0.7230010.3.1.4.8323329.6979.1517875202.202861 +1.2.276.0.7230010.3.1.4.8323329.6980.1517875202.217290 +1.2.276.0.7230010.3.1.4.8323329.6981.1517875202.215520 +1.2.276.0.7230010.3.1.4.8323329.698.1517875164.123807 +1.2.276.0.7230010.3.1.4.8323329.6982.1517875202.223313 +1.2.276.0.7230010.3.1.4.8323329.6983.1517875202.227128 +1.2.276.0.7230010.3.1.4.8323329.6984.1517875202.223625 +1.2.276.0.7230010.3.1.4.8323329.6985.1517875202.229878 +1.2.276.0.7230010.3.1.4.8323329.6986.1517875202.225672 +1.2.276.0.7230010.3.1.4.8323329.6987.1517875202.239524 +1.2.276.0.7230010.3.1.4.8323329.6988.1517875202.239230 +1.2.276.0.7230010.3.1.4.8323329.6989.1517875202.239524 +1.2.276.0.7230010.3.1.4.8323329.6990.1517875202.244266 +1.2.276.0.7230010.3.1.4.8323329.6991.1517875202.256199 +1.2.276.0.7230010.3.1.4.8323329.699.1517875164.126785 +1.2.276.0.7230010.3.1.4.8323329.6992.1517875202.260362 +1.2.276.0.7230010.3.1.4.8323329.6993.1517875202.254440 +1.2.276.0.7230010.3.1.4.8323329.6994.1517875202.260918 +1.2.276.0.7230010.3.1.4.8323329.6995.1517875202.277687 +1.2.276.0.7230010.3.1.4.8323329.6996.1517875202.277447 +1.2.276.0.7230010.3.1.4.8323329.6997.1517875202.285157 +1.2.276.0.7230010.3.1.4.8323329.6998.1517875202.282944 +1.2.276.0.7230010.3.1.4.8323329.6999.1517875202.291639 
+1.2.276.0.7230010.3.1.4.8323329.7000.1517875202.296916 +1.2.276.0.7230010.3.1.4.8323329.7001.1517875202.297845 +1.2.276.0.7230010.3.1.4.8323329.700.1517875164.130498 +1.2.276.0.7230010.3.1.4.8323329.7002.1517875202.301297 +1.2.276.0.7230010.3.1.4.8323329.7003.1517875202.311738 +1.2.276.0.7230010.3.1.4.8323329.7004.1517875202.317493 +1.2.276.0.7230010.3.1.4.8323329.7005.1517875202.319691 +1.2.276.0.7230010.3.1.4.8323329.7006.1517875202.324929 +1.2.276.0.7230010.3.1.4.8323329.7007.1517875202.322827 +1.2.276.0.7230010.3.1.4.8323329.7008.1517875202.331329 +1.2.276.0.7230010.3.1.4.8323329.7009.1517875202.349776 +1.2.276.0.7230010.3.1.4.8323329.7010.1517875202.349776 +1.2.276.0.7230010.3.1.4.8323329.7011.1517875202.344418 +1.2.276.0.7230010.3.1.4.8323329.701.1517875164.133706 +1.2.276.0.7230010.3.1.4.8323329.7012.1517875202.340379 +1.2.276.0.7230010.3.1.4.8323329.7013.1517875202.343274 +1.2.276.0.7230010.3.1.4.8323329.7014.1517875202.364072 +1.2.276.0.7230010.3.1.4.8323329.7015.1517875202.356514 +1.2.276.0.7230010.3.1.4.8323329.7016.1517875202.366060 +1.2.276.0.7230010.3.1.4.8323329.7017.1517875202.371741 +1.2.276.0.7230010.3.1.4.8323329.7018.1517875202.374713 +1.2.276.0.7230010.3.1.4.8323329.7019.1517875202.380202 +1.2.276.0.7230010.3.1.4.8323329.7020.1517875202.386064 +1.2.276.0.7230010.3.1.4.8323329.7021.1517875202.389388 +1.2.276.0.7230010.3.1.4.8323329.702.1517875164.137686 +1.2.276.0.7230010.3.1.4.8323329.7022.1517875202.388047 +1.2.276.0.7230010.3.1.4.8323329.7023.1517875202.391467 +1.2.276.0.7230010.3.1.4.8323329.7024.1517875202.397477 +1.2.276.0.7230010.3.1.4.8323329.7025.1517875202.410790 +1.2.276.0.7230010.3.1.4.8323329.7026.1517875202.408536 +1.2.276.0.7230010.3.1.4.8323329.7027.1517875202.410965 +1.2.276.0.7230010.3.1.4.8323329.7028.1517875202.417326 +1.2.276.0.7230010.3.1.4.8323329.7029.1517875202.423404 +1.2.276.0.7230010.3.1.4.8323329.7030.1517875202.422511 +1.2.276.0.7230010.3.1.4.8323329.7031.1517875202.421958 +1.2.276.0.7230010.3.1.4.8323329.703.1517875164.145334 +1.2.276.0.7230010.3.1.4.8323329.7032.1517875202.435458 +1.2.276.0.7230010.3.1.4.8323329.7033.1517875202.436717 +1.2.276.0.7230010.3.1.4.8323329.7034.1517875202.438663 +1.2.276.0.7230010.3.1.4.8323329.7035.1517875202.444266 +1.2.276.0.7230010.3.1.4.8323329.7036.1517875202.452248 +1.2.276.0.7230010.3.1.4.8323329.7037.1517875202.457252 +1.2.276.0.7230010.3.1.4.8323329.7038.1517875202.456951 +1.2.276.0.7230010.3.1.4.8323329.7039.1517875202.465844 +1.2.276.0.7230010.3.1.4.8323329.7040.1517875202.469591 +1.2.276.0.7230010.3.1.4.8323329.7041.1517875202.485810 +1.2.276.0.7230010.3.1.4.8323329.704.1517875164.143532 +1.2.276.0.7230010.3.1.4.8323329.7042.1517875202.489196 +1.2.276.0.7230010.3.1.4.8323329.7043.1517875202.491800 +1.2.276.0.7230010.3.1.4.8323329.7044.1517875202.483852 +1.2.276.0.7230010.3.1.4.8323329.7045.1517875202.495044 +1.2.276.0.7230010.3.1.4.8323329.7046.1517875202.494784 +1.2.276.0.7230010.3.1.4.8323329.7047.1517875202.497314 +1.2.276.0.7230010.3.1.4.8323329.7048.1517875202.494683 +1.2.276.0.7230010.3.1.4.8323329.7049.1517875202.496144 +1.2.276.0.7230010.3.1.4.8323329.7050.1517875202.507168 +1.2.276.0.7230010.3.1.4.8323329.7051.1517875202.516087 +1.2.276.0.7230010.3.1.4.8323329.705.1517875164.152399 +1.2.276.0.7230010.3.1.4.8323329.7052.1517875202.524577 diff --git a/submit/.DS_Store b/submit/.DS_Store new file mode 100644 index 0000000..5008ddf Binary files /dev/null and b/submit/.DS_Store differ diff --git a/submit/create_submission.py b/submit/create_submission.py new file mode 100644 index 
0000000..89b6ab5 --- /dev/null +++ b/submit/create_submission.py @@ -0,0 +1,164 @@
+import pandas as pd
+import numpy as np
+import pickle
+import glob
+import os
+
+from scipy.ndimage.interpolation import zoom
+from skimage.morphology import remove_small_objects
+from skimage.measure import label
+from tqdm import tqdm
+
+def mask2rle(img, width, height):
+    # Official SIIM relative RLE: each run start is an offset from the end of
+    # the previous run, which is why currentPixel is reset after a run is written
+    rle = []
+    lastColor = 0
+    currentPixel = 0
+    runStart = -1
+    runLength = 0
+    for x in range(width):
+        for y in range(height):
+            currentColor = img[x][y]
+            if currentColor != lastColor:
+                if currentColor == 1:
+                    runStart = currentPixel
+                    runLength = 1
+                else:
+                    rle.append(str(runStart))
+                    rle.append(str(runLength))
+                    runStart = -1
+                    runLength = 0
+                    currentPixel = 0
+            elif runStart > -1:
+                runLength += 1
+            lastColor = currentColor
+            currentPixel += 1
+    return " ".join(rle)
+
+def extract_masks_from_dict(pred_dict, sop_order, real_size=1024):
+    # Stack predicted masks in a fixed SOP order; if the model predicted at a
+    # lower resolution, upsample to 1024 x 1024
+    p_list = []
+    for sop in sop_order:
+        p_list.append(pred_dict[sop]['pred_mask'])
+    rescale = float(real_size) / p_list[-1].shape[-1]
+    if rescale != 1:
+        # Nearest neighbor
+        return zoom(np.asarray(p_list), [1., 1., rescale, rescale], prefilter=False, order=0)
+    else:
+        return np.asarray(p_list)
+
+def filter_small_masks(masks, min_size=3.5*1024):
+    # Zero out any predicted mask whose total area is below min_size pixels
+    _masks = masks.copy()
+    for i, m in enumerate(masks):
+        if m.sum() < min_size:
+            _masks[i][:] = 0
+    return _masks
+
+def remove_small_mask_objects(masks, min_size=3.5*1024):
+    # Drop small connected components within each mask; renamed so it no longer
+    # shadows (and infinitely recurses into) skimage's remove_small_objects
+    _masks = masks.copy()
+    for i, m in enumerate(masks):
+        labels = label(m)
+        m = remove_small_objects(labels, min_size)
+        _masks[i][:] = (m > 0).astype('uint8')
+    return _masks
+
+# CLASSIFICATION
+predictions = np.sort(glob.glob('/users/ipan/scratch/siim-ptx/segment/stage2-predictions/TRAIN_V100Flip/o0/*.csv'))
+# SEGMENTATION
+segmentations = np.sort(glob.glob('/users/ipan/scratch/siim-ptx/segment/stage2-predictions/TRAIN_DEEPLABXYFlip/o0/*.pkl'))
+pure_segmentations = np.sort(glob.glob('/users/ipan/scratch/siim-ptx/segment/stage2-predictions/TRAIN_SEGMENTFlip/o0/*.pkl'))
+
+with open(segmentations[0], 'rb') as f:
+    pickled = pickle.load(f)
+
+dfs = []
+for pred in predictions:
+    dfs.append(pd.read_csv(pred))
+
+# Average classifier probabilities across models, indexed by SOP instance UID
+y_pred_mean = np.mean([_['Top0'] for _ in dfs], axis=0)
+ensemble_df = pd.DataFrame({'y_pred': y_pred_mean, 'sop': dfs[0]['sop']})
+sop_order = np.sort([*pickled])
+y_pred_mean_dict = {sop: df['y_pred'].iloc[0] for sop, df in ensemble_df.groupby('sop')}
+y_pred_mean = [y_pred_mean_dict[sop] for sop in sop_order]
+
+seg_list = []
+for seg in segmentations:
+    with open(seg, 'rb') as f:
+        pickled = pickle.load(f)
+    # Masks from the dual classifier-segmenter models are used as-is
+    p = extract_masks_from_dict(pickled, sop_order)
+    seg_list.append(p)
+
+for seg in pure_segmentations:
+    with open(seg, 'rb') as f:
+        pickled = pickle.load(f)
+    p = extract_masks_from_dict(pickled, sop_order)
+    # Masks from the pure segmentation models are gated by the ensembled
+    # classifier probabilities
+    p = np.asarray([p[i]*y_pred_mean[i] for i in range(len(p))])
+    seg_list.append(p)
+
+dice_segmentations = np.sort(glob.glob('/users/ipan/scratch/siim-ptx/segment/lb-predictions/TRAIN_KITAMURA2/o0/*'))
+dice_seg_list = []
+for seg in dice_segmentations:
+    with open(seg, 'rb') as f:
+        pickled = pickle.load(f)
+    p = extract_masks_from_dict(pickled, sop_order)
+    dice_seg_list.append(p)
+
+dice_y_seg_mean = np.mean(np.asarray(dice_seg_list), axis=0)
+seg_list.append(dice_y_seg_mean)
+
+# Keep only models above a validation Dice cutoff, then ensemble with uniform
+# weights (commented-out lines are alternative weightings that were tried)
+dscs = np.asarray([0.8579, 0.8615, 0.8647, 0.8593,
+                   0.8636, 0.8622, 0.8620, 0.8638])
+dsc_cutoff = 0.857
+models_to_include = [ind for ind in range(len(dscs)) if dscs[ind] > dsc_cutoff]
+#models_to_include = [4,5,6,7]
+dscs = dscs[models_to_include]
+#weights = np.asarray([_**2 for _ in dscs])
+weights = np.repeat(1., len(dscs))
+#weights[-1] = 0.01
+weights = weights / np.sum(weights)
+__seg_list = [_ for i, _ in enumerate(seg_list) if i in models_to_include]
+y_seg_mean = np.asarray([_.astype('uint8') for _ in __seg_list])
+y_seg_mean = [np.average(y_seg_mean[:, ind], axis=0, weights=weights) for ind in range(y_seg_mean.shape[1])]
+y_seg_mean = np.asarray(y_seg_mean)
+
+# Two-threshold binarization: an image counts as positive only if any pixel
+# clears the high threshold; the mask extent is taken at the low threshold
+p = (y_seg_mean > 70).astype('float32')
+s = (y_seg_mean > 40).astype('float32')
+#y_seg_binary = np.expand_dims(p.sum((-1,-2,-3))>0, axis=-1).astype('float32')*s
+y_seg_binary = np.asarray([(p[_].sum((-1,-2)) > 0).astype('float32') * s[_] for _ in range(len(p))])
+y_seg_binary = filter_small_masks(y_seg_binary, 2048.)
+print('{:.1f}% PTX'.format(np.mean([1 if _.sum() > 0 else 0 for _ in y_seg_binary])*100.))
+
+y_final_rle = [mask2rle(_.T, 1024, 1024) for _ in tqdm(y_seg_binary, total=len(y_seg_binary))]
+SAVE_FILE = '../submissions/stage2-submissions/ensemble_submission002.csv'
+if not os.path.exists(os.path.dirname(SAVE_FILE)): os.makedirs(os.path.dirname(SAVE_FILE))
+
+y_final_df = pd.DataFrame({'ImageId': sop_order, 'EncodedPixels': y_final_rle})
+y_final_df.loc[y_final_df['EncodedPixels'] == '', 'EncodedPixels'] = '-1'
+print('{:.1f}% PTX'.format(np.mean(y_final_df['EncodedPixels'] != '-1')*100.))
+
+y_final_df.to_csv(SAVE_FILE, index=False)
+
+# Stricter variant: additionally empty any prediction below 4096 pixels
+y_seg_binary = filter_small_masks(y_seg_binary, 4*1024.)
+print('{:.1f}% PTX'.format(np.mean([1 if _.sum() > 0 else 0 for _ in y_seg_binary])*100.))
+pos_sops = [sop_order[i] for i in range(len(y_seg_binary)) if y_seg_binary[i].sum() > 0]
+y_final_df.loc[~y_final_df['ImageId'].isin(pos_sops), 'EncodedPixels'] = '-1'
+print('{:.1f}% PTX'.format(np.mean(y_final_df['EncodedPixels'] != '-1')*100.))
+SAVE_FILE = '../submissions/stage2-submissions/ensemble_submission006.csv'
+if not os.path.exists(os.path.dirname(SAVE_FILE)): os.makedirs(os.path.dirname(SAVE_FILE))
+y_final_df.to_csv(SAVE_FILE, index=False)
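The two-threshold step above is easiest to see in isolation: the ensemble-averaged masks store probabilities scaled to 0-255, the high threshold (70) decides whether an image is called positive at all, and the low threshold (40) delineates the mask for images that pass. A minimal sketch with toy arrays (shapes and values are illustrative only, not taken from the pipeline):

```python
import numpy as np

# Toy ensemble-averaged masks for 2 images, 1 channel, 4x4 pixels,
# with probabilities scaled to 0-255 as in the scripts above
y_seg_mean = np.zeros((2, 1, 4, 4))
y_seg_mean[0, 0, 1:3, 1:3] = 80.  # image 0: a confident 2x2 region
y_seg_mean[1, 0, 0, 0] = 60.      # image 1: a weak blob only

p = (y_seg_mean > 70).astype('float32')  # high threshold: is the image positive?
s = (y_seg_mean > 40).astype('float32')  # low threshold: mask extent

# Keep the low-threshold mask only for images with at least one
# high-threshold pixel; otherwise the whole mask is zeroed
y_seg_binary = np.asarray([(p[i].sum((-1, -2)) > 0).astype('float32') * s[i]
                           for i in range(len(p))])

assert y_seg_binary[0].sum() == 4.0  # image 0 keeps its 2x2 region
assert y_seg_binary[1].sum() == 0.0  # image 1 is dropped entirely
```

Splitting the decision this way lets the mask keep a generous extent without forcing borderline images to be called positive.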
diff --git a/submit/create_submission_partitioned.py b/submit/create_submission_partitioned.py new file mode 100644 index 0000000..bb7b603 --- /dev/null +++ b/submit/create_submission_partitioned.py @@ -0,0 +1,163 @@
+import pandas as pd
+import numpy as np
+import pickle
+import glob
+import os
+
+from scipy.ndimage.interpolation import zoom
+from skimage.morphology import remove_small_objects
+from skimage.measure import label
+from tqdm import tqdm
+
+def mask2rle(img, width, height):
+    # Same relative RLE encoder as in create_submission.py above
+    rle = []
+    lastColor = 0
+    currentPixel = 0
+    runStart = -1
+    runLength = 0
+    for x in range(width):
+        for y in range(height):
+            currentColor = img[x][y]
+            if currentColor != lastColor:
+                if currentColor == 1:
+                    runStart = currentPixel
+                    runLength = 1
+                else:
+                    rle.append(str(runStart))
+                    rle.append(str(runLength))
+                    runStart = -1
+                    runLength = 0
+                    currentPixel = 0
+            elif runStart > -1:
+                runLength += 1
+            lastColor = currentColor
+            currentPixel += 1
+    return " ".join(rle)
+
+def extract_masks_from_dict(pred_dict, sop_order, real_size=1024):
+    p_list = []
+    for sop in sop_order:
+        p_list.append(pred_dict[sop]['pred_mask'])
+    rescale = float(real_size) / p_list[-1].shape[-1]
+    if rescale != 1:
+        # Nearest neighbor
+        return zoom(np.asarray(p_list), [1., 1., rescale, rescale], prefilter=False, order=0)
+    else:
+        return np.asarray(p_list)
+
+def filter_small_masks(masks, min_size=3.5*1024):
+    _masks = masks.copy()
+    for i, m in enumerate(masks):
+        if m.sum() < min_size:
+            _masks[i][:] = 0
+    return _masks
+
+def remove_small_mask_objects(masks, min_size=3.5*1024):
+    # Renamed so it does not shadow skimage's remove_small_objects
+    _masks = masks.copy()
+    for i, m in enumerate(masks):
+        labels = label(m)
+        m = remove_small_objects(labels, min_size)
+        _masks[i][:] = (m > 0).astype('uint8')
+    return _masks
+
+def get_final_df_predictions(preds, segs, pure_segs, cls_thres=70, seg_thres=40, size_thres=2048.):
+    with open(segs[0], 'rb') as f:
+        pickled = pickle.load(f)
+    dfs = []
+    for pred in preds:
+        dfs.append(pd.read_csv(pred))
+    y_pred_mean = np.mean([_['Top0'] for _ in dfs], axis=0)
+    ensemble_df = pd.DataFrame({'y_pred': y_pred_mean, 'sop': dfs[0]['sop']})
+    sop_order = np.sort([*pickled])
+    y_pred_mean_dict = {sop: df['y_pred'].iloc[0] for sop, df in ensemble_df.groupby('sop')}
+    y_pred_mean = [y_pred_mean_dict[sop] for sop in sop_order]
+    seg_list = []
+    for seg in segs:
+        with open(seg, 'rb') as f:
+            pickled = pickle.load(f)
+        # Masks from the dual classifier-segmenter models are used as-is
+        p = extract_masks_from_dict(pickled, sop_order)
+        seg_list.append(p)
+    for seg in pure_segs:
+        with open(seg, 'rb') as f:
+            pickled = pickle.load(f)
+        p = extract_masks_from_dict(pickled, sop_order)
+        # Gate pure segmentation masks with the ensembled classifier probabilities
+        p = np.asarray([p[i]*y_pred_mean[i] for i in range(len(p))])
+        seg_list.append(p)
+    weights = np.repeat(1., len(seg_list))
+    weights = weights / np.sum(weights)
+    y_seg_mean = np.asarray([_.astype('uint8') for _ in seg_list])
+    y_seg_mean = [np.average(y_seg_mean[:, ind], axis=0, weights=weights) for ind in range(y_seg_mean.shape[1])]
+    y_seg_mean = np.asarray(y_seg_mean)
+    p = (y_seg_mean > cls_thres).astype('float32')
+    s = (y_seg_mean > seg_thres).astype('float32')
+    y_seg_binary = np.asarray([(p[_].sum((-1,-2)) > 0).astype('float32') * s[_] for _ in range(len(p))])
+    y_seg_binary = filter_small_masks(y_seg_binary, size_thres)
+    print('{:.1f}% PTX'.format(np.mean([1 if _.sum() > 0 else 0 for _ in y_seg_binary])*100.))
+    return y_seg_binary, sop_order
+
+# Submission 0: remove masks <2048 pixels
+# Stage 2 predictions are partitioned into 3 chunks (file suffixes 0/1/2)
+predictions = np.sort(glob.glob('../segment/stage2-predictions/TRAIN_V100Flip/o0/*.csv0'))
+segmentations = np.sort(glob.glob('../segment/stage2-predictions/TRAIN_DEEPLABXYFlip/o0/*.pkl0'))
+pure_segmentations = np.sort(glob.glob('../segment/stage2-predictions/TRAIN_SEGMENTFlip/o0/*.pkl0'))
+
+group0, sop_order0 = get_final_df_predictions(predictions, segmentations, pure_segmentations, 70, 40, 2048.)
+
+predictions = np.sort(glob.glob('../segment/stage2-predictions/TRAIN_V100Flip/o0/*.csv1'))
+segmentations = np.sort(glob.glob('../segment/stage2-predictions/TRAIN_DEEPLABXYFlip/o0/*.pkl1'))
+pure_segmentations = np.sort(glob.glob('../segment/stage2-predictions/TRAIN_SEGMENTFlip/o0/*.pkl1'))
+
+group1, sop_order1 = get_final_df_predictions(predictions, segmentations, pure_segmentations, 70, 40, 2048.)
+
+predictions = np.sort(glob.glob('../segment/stage2-predictions/TRAIN_V100Flip/o0/*.csv2'))
+segmentations = np.sort(glob.glob('../segment/stage2-predictions/TRAIN_DEEPLABXYFlip/o0/*.pkl2'))
+pure_segmentations = np.sort(glob.glob('../segment/stage2-predictions/TRAIN_SEGMENTFlip/o0/*.pkl2'))
+
+group2, sop_order2 = get_final_df_predictions(predictions, segmentations, pure_segmentations, 70, 40, 2048.)
+
+y_seg_binary = np.vstack((group0, group1, group2))
+y_final_rle = [mask2rle(_.T, 1024, 1024) for _ in tqdm(y_seg_binary, total=len(y_seg_binary))]
+sop_order = np.concatenate((sop_order0, sop_order1, sop_order2))
+
+y_final_df = pd.DataFrame({'ImageId': sop_order, 'EncodedPixels': y_final_rle})
+y_final_df.loc[y_final_df['EncodedPixels'] == '', 'EncodedPixels'] = '-1'
+print('{:.1f}% PTX'.format(np.mean(y_final_df['EncodedPixels'] != '-1')*100.))
+
+SAVE_FILE = '../segment/stage2-submissions/submission0.csv'
+if not os.path.exists(os.path.dirname(SAVE_FILE)): os.makedirs(os.path.dirname(SAVE_FILE))
+y_final_df.to_csv(SAVE_FILE, index=False)
+
+# Submission 1: remove masks <4096 pixels
+predictions = np.sort(glob.glob('../segment/stage2-predictions/TRAIN_V100Flip/o0/*.csv0'))
+segmentations = np.sort(glob.glob('../segment/stage2-predictions/TRAIN_DEEPLABXYFlip/o0/*.pkl0'))
+pure_segmentations = np.sort(glob.glob('../segment/stage2-predictions/TRAIN_SEGMENTFlip/o0/*.pkl0'))
+
+group0, sop_order0 = get_final_df_predictions(predictions, segmentations, pure_segmentations, 70, 40, 4096.)
+
+predictions = np.sort(glob.glob('../segment/stage2-predictions/TRAIN_V100Flip/o0/*.csv1'))
+segmentations = np.sort(glob.glob('../segment/stage2-predictions/TRAIN_DEEPLABXYFlip/o0/*.pkl1'))
+pure_segmentations = np.sort(glob.glob('../segment/stage2-predictions/TRAIN_SEGMENTFlip/o0/*.pkl1'))
+
+group1, sop_order1 = get_final_df_predictions(predictions, segmentations, pure_segmentations, 70, 40, 4096.)
+
+predictions = np.sort(glob.glob('../segment/stage2-predictions/TRAIN_V100Flip/o0/*.csv2'))
+segmentations = np.sort(glob.glob('../segment/stage2-predictions/TRAIN_DEEPLABXYFlip/o0/*.pkl2'))
+pure_segmentations = np.sort(glob.glob('../segment/stage2-predictions/TRAIN_SEGMENTFlip/o0/*.pkl2'))
+
+group2, sop_order2 = get_final_df_predictions(predictions, segmentations, pure_segmentations, 70, 40, 4096.)
+
+y_seg_binary = np.vstack((group0, group1, group2))
+y_final_rle = [mask2rle(_.T, 1024, 1024) for _ in tqdm(y_seg_binary, total=len(y_seg_binary))]
+sop_order = np.concatenate((sop_order0, sop_order1, sop_order2))
+
+y_final_df = pd.DataFrame({'ImageId': sop_order, 'EncodedPixels': y_final_rle})
+y_final_df.loc[y_final_df['EncodedPixels'] == '', 'EncodedPixels'] = '-1'
+print('{:.1f}% PTX'.format(np.mean(y_final_df['EncodedPixels'] != '-1')*100.))

+SAVE_FILE = '../segment/stage2-submissions/submission1.csv'
+if not os.path.exists(os.path.dirname(SAVE_FILE)): os.makedirs(os.path.dirname(SAVE_FILE))
+y_final_df.to_csv(SAVE_FILE, index=False)
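One detail of `mask2rle` worth flagging: it emits the competition's relative RLE, in which each run start is an offset from the end of the previous run rather than an absolute pixel index; that is why `currentPixel` is reset to 0 whenever a run is written out. A hand-checkable example (toy mask, illustrative values only; assumes the `mask2rle` defined in the scripts above is in scope):

```python
import numpy as np

# 4x4 toy mask; mask2rle scans img[x][y] with x as the outer loop
toy = np.zeros((4, 4), dtype='uint8')
toy[0, 1:3] = 1   # scan-order pixels 1 and 2
toy[2, 0:2] = 1   # scan-order pixels 8 and 9

# First run: absolute start 1, length 2.
# Second run: absolute start 8, encoded as 5 = 8 - (1 + 2),
# i.e. the offset from the end of the first run.
assert mask2rle(toy, 4, 4) == '1 2 5 2'
```

A decoder therefore has to accumulate positions (start, then run length) as it walks the pairs, rather than indexing each start directly.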