Skip to content

Commit

Permalink
Merge pull request #921 from azavea/lf/unit-tests
Browse files Browse the repository at this point in the history
Add rv2 unit tests and other fixes
  • Loading branch information
lewfish committed May 19, 2020
2 parents 2766e72 + 43e4fec commit c617c49
Show file tree
Hide file tree
Showing 70 changed files with 4,976 additions and 60 deletions.
11 changes: 6 additions & 5 deletions docs/rv2/index.rst
Expand Up @@ -105,20 +105,21 @@ and maintain.
])
# Use the PyTorch backend for the SemanticSegmentation pipeline.
train_chip_sz = 300
chip_sz = 300
backend = PyTorchSemanticSegmentationConfig(
model=SemanticSegmentationModelConfig(backbone=Backbone.resnet50),
solver=SolverConfig(lr=1e-4, num_epochs=1, batch_sz=2))
chip_options = SemanticSegmentationChipOptions(
window_method=SemanticSegmentationWindowMethod.random_sample, chips_per_scene=10)
window_method=SemanticSegmentationWindowMethod.random_sample,
chips_per_scene=10)
return SemanticSegmentationConfig(
root_uri=root_uri,
dataset=dataset,
backend=backend,
train_chip_sz=train_chip_sz,
chip_options=chip_options,
debug=False)
train_chip_sz=chip_sz,
predict_chip_sz=chip_sz,
chip_options=chip_options)
Raster Vision uses a ``unittest``-like method for executing pipelines. For instance, if the
Expand Down
12 changes: 6 additions & 6 deletions docs/rv2/quickstart.rst
Expand Up @@ -108,21 +108,21 @@ Create a Python file in the ``${RV_QUICKSTART_CODE_DIR}`` named ``tiny_spacenet.
])
# Use the PyTorch backend for the SemanticSegmentation pipeline.
train_chip_sz = 300
chip_sz = 300
backend = PyTorchSemanticSegmentationConfig(
model=SemanticSegmentationModelConfig(backbone=Backbone.resnet50),
solver=SolverConfig(lr=1e-4, num_epochs=1, batch_sz=2))
chip_options = SemanticSegmentationChipOptions(
window_method=SemanticSegmentationWindowMethod.random_sample, chips_per_scene=10)
window_method=SemanticSegmentationWindowMethod.random_sample,
chips_per_scene=10)
return SemanticSegmentationConfig(
root_uri=root_uri,
dataset=dataset,
backend=backend,
train_chip_sz=train_chip_sz,
chip_options=chip_options,
debug=False)
train_chip_sz=chip_sz,
predict_chip_sz=chip_sz,
chip_options=chip_options)
Running the pipeline
---------------------
Expand Down
6 changes: 3 additions & 3 deletions rastervision2/core/analyzer/stats_analyzer_config.py
Expand Up @@ -12,11 +12,11 @@ class StatsAnalyzerConfig(AnalyzerConfig):
description=(
'URI for output. If None and this is part of an RVPipeline, this is '
'auto-generated.'))
sample_prob: float = Field(
sample_prob: Optional[float] = Field(
0.1,
description=(
'The probability of using a random window for computing statistics.'
))
'The probability of using a random window for computing statistics. '
'If None, will use a sliding window.'))

def update(self, pipeline=None):
if pipeline is not None and self.output_uri is None:
Expand Down
18 changes: 4 additions & 14 deletions rastervision2/core/data/label/semantic_segmentation_labels.py
Expand Up @@ -44,30 +44,20 @@ def set_label_arr(self, window, label_arr):
def get_label_arr(self, window):
return self.window_to_label_arr[window.tuple_format()]

def filter_by_aoi(self, aoi_polygons):
"""Get the label array for a window.
Args:
window: Box
extent: a Box representing the extent of the corresponding Scene
Returns:
np.ndarray of class_ids with zeros filled in outside the AOIs and
clipped to the clip_extent
"""
def filter_by_aoi(self, aoi_polygons, null_class_id):
new_labels = SemanticSegmentationLabels()

for window in self.get_windows():
window_geom = window.to_shapely()
label_arr = self.get_label_arr(window)

if not self.aoi_polygons:
if not aoi_polygons:
return self
else:
# For each aoi_polygon, intersect with window, and put in window frame of
# reference.
window_aois = []
for aoi in self.aoi_polygons:
for aoi in aoi_polygons:
window_aoi = aoi.intersection(window_geom)
if not window_aoi.is_empty:

Expand All @@ -87,7 +77,7 @@ def transform_shape(x, y, z=None):
out_shape=label_arr.shape,
fill=1,
dtype=np.uint8)
label_arr[mask.astype(np.bool)] = 0
label_arr[mask.astype(np.bool)] = null_class_id
new_labels.set_label_arr(window, label_arr)

return new_labels
Expand Up @@ -3,7 +3,7 @@


class ObjectDetectionLabelSource(LabelSource):
def __init__(self, vector_source, extent):
def __init__(self, vector_source, extent=None):
"""Constructor.
Args:
Expand Down
@@ -1,6 +1,6 @@
from rastervision2.core.data.raster_source import (RasterizedSource)
from rastervision2.core.data.vector_source import (VectorSourceConfig)
from rastervision2.pipeline.config import register_config, Config, Field
from rastervision2.pipeline.config import register_config, Config, Field, ConfigError


@register_config('rasterizer')
Expand Down Expand Up @@ -29,3 +29,9 @@ def build(self, class_config, crs_transformer, extent):

return RasterizedSource(vector_source, self.rasterizer_config, extent,
crs_transformer)

def validate_config(self):
    """Validate that no class in the vector_source uses a null buffer.

    A RasterizedSourceConfig cannot rasterize classes whose buffer is set
    to None, so this rejects such configurations up front.

    Raises:
        ConfigError: if ``self.vector_source.has_null_class_bufs()`` is True.
    """
    if self.vector_source.has_null_class_bufs():
        raise ConfigError(
            'Setting buffer to None for a class in the vector_source is '
            'not allowed for RasterizedSourceConfig.')
Expand Up @@ -138,7 +138,7 @@ def get_geoms(x):
gt_count = len(gt)
class_name = 'vector-{}-{}'.format(
mode,
self.class_map.get_by_id(class_id).name)
self.class_config.names[class_id])

evaluation_item = ClassEvaluationItem(precision, recall, f1,
count_error, gt_count,
Expand Down
15 changes: 9 additions & 6 deletions rastervision2/core/evaluation/semantic_segmentation_evaluator.py
Expand Up @@ -4,7 +4,7 @@
from shapely.strtree import STRtree

from rastervision2.core.data import ActivateMixin
from rastervision2.core.data.vector_source import GeoJSONVectorSource
from rastervision2.core.data.vector_source import GeoJSONVectorSourceConfig
from rastervision2.core.evaluation import (ClassificationEvaluator,
SemanticSegmentationEvaluation)

Expand Down Expand Up @@ -44,6 +44,7 @@ def create_evaluation(self):
def process(self, scenes, tmp_dir):
evaluation = self.create_evaluation()
vect_evaluation = self.create_evaluation()
null_class_id = self.class_config.get_null_class_id()

for scene in scenes:
log.info('Computing evaluation for scene {}...'.format(scene.id))
Expand All @@ -56,8 +57,9 @@ def process(self, scenes, tmp_dir):
if scene.aoi_polygons:
# Filter labels based on AOI.
ground_truth = ground_truth.filter_by_aoi(
scene.aoi_polygons)
predictions = predictions.filter_by_aoi(scene.aoi_polygons)
scene.aoi_polygons, null_class_id)
predictions = predictions.filter_by_aoi(
scene.aoi_polygons, null_class_id)
scene_evaluation = self.create_evaluation()
scene_evaluation.compute(ground_truth, predictions)
evaluation.merge(scene_evaluation, scene_id=scene.id)
Expand All @@ -69,10 +71,11 @@ def process(self, scenes, tmp_dir):
)
for vo in label_store.vector_output:
pred_geojson_uri = vo.uri
mode = vo.mode
mode = vo.get_mode()
class_id = vo.class_id
pred_geojson_source = GeoJSONVectorSource(
pred_geojson_uri,
pred_geojson_source = GeoJSONVectorSourceConfig(
uri=pred_geojson_uri, default_class_id=None).build(
self.class_config,
scene.raster_source.get_crs_transformer())
pred_geojson = pred_geojson_source.get_geojson()

Expand Down
5 changes: 2 additions & 3 deletions rastervision2/core/rv_pipeline/rv_pipeline_config.py
Expand Up @@ -34,11 +34,10 @@ class RVPipelineConfig(PipelineConfig):
('Analyzers to run during analyzer command. A StatsAnalyzer will be added '
'automatically if any scenes have a RasterTransformer.'))

debug: bool = Field(False, description='If True, use debug mode.')
train_chip_sz: int = Field(
200, description='Size of training chips in pixels.')
300, description='Size of training chips in pixels.')
predict_chip_sz: int = Field(
200, description='Size of predictions chips in pixels.')
300, description='Size of predictions chips in pixels.')
predict_batch_sz: int = Field(
8, description='Batch size to use during prediction.')

Expand Down
8 changes: 5 additions & 3 deletions rastervision2/examples/chip_classification.py
Expand Up @@ -78,6 +78,7 @@ def make_scene(scene_info):
label_source=label_source,
aoi_uris=[aoi_uri])

chip_sz = 200
train_scenes = [make_scene(info) for info in train_scene_info]
val_scenes = [make_scene(info) for info in val_scene_info]
dataset = DatasetConfig(
Expand All @@ -92,12 +93,13 @@ def make_scene(scene_info):
model=model,
solver=solver,
log_tensorboard=log_tensorboard,
run_tensorboard=run_tensorboard)
run_tensorboard=run_tensorboard,
test_mode=test)

config = ChipClassificationConfig(
root_uri=root_uri,
dataset=dataset,
backend=backend,
train_chip_sz=200,
debug=debug)
train_chip_sz=chip_sz,
predict_chip_sz=chip_sz)
return config
10 changes: 6 additions & 4 deletions rastervision2/examples/object_detection.py
Expand Up @@ -61,6 +61,7 @@ def make_scene(id):
id=id, raster_source=raster_source, label_source=label_source)

class_config = ClassConfig(names=['vehicle'])
chip_sz = 300
dataset = DatasetConfig(
class_config=class_config,
train_scenes=[make_scene(id) for id in train_ids],
Expand All @@ -78,13 +79,14 @@ def make_scene(id):
batch_sz=16,
one_cycle=True),
log_tensorboard=True,
run_tensorboard=False)
run_tensorboard=False,
test_mode=test)

return ObjectDetectionConfig(
root_uri=root_uri,
dataset=dataset,
backend=backend,
train_chip_sz=300,
train_chip_sz=chip_sz,
predict_chip_sz=chip_sz,
chip_options=chip_options,
predict_options=predict_options,
debug=test)
predict_options=predict_options)
13 changes: 7 additions & 6 deletions rastervision2/examples/semantic_segmentation.py
Expand Up @@ -94,9 +94,9 @@ def make_scene(id):
class_config=class_config,
train_scenes=[make_scene(id) for id in train_ids],
validation_scenes=[make_scene(id) for id in val_ids])
train_chip_sz = 300
chip_sz = 300
chip_options = SemanticSegmentationChipOptions(
window_method=SemanticSegmentationWindowMethod.sliding, stride=300)
window_method=SemanticSegmentationWindowMethod.sliding, stride=chip_sz)

backend = PyTorchSemanticSegmentationConfig(
model=SemanticSegmentationModelConfig(backbone=Backbone.resnet50),
Expand All @@ -107,12 +107,13 @@ def make_scene(id):
batch_sz=8,
one_cycle=True),
log_tensorboard=True,
run_tensorboard=False)
run_tensorboard=False,
test_mode=test)

return SemanticSegmentationConfig(
root_uri=root_uri,
dataset=dataset,
backend=backend,
train_chip_sz=train_chip_sz,
chip_options=chip_options,
debug=test)
train_chip_sz=chip_sz,
predict_chip_sz=chip_sz,
chip_options=chip_options)
8 changes: 4 additions & 4 deletions rastervision2/examples/tiny_spacenet.py
Expand Up @@ -58,7 +58,7 @@ def make_scene(scene_id, image_uri, label_uri):
])

# Use the PyTorch backend for the SemanticSegmentation pipeline.
train_chip_sz = 300
chip_sz = 300
backend = PyTorchSemanticSegmentationConfig(
model=SemanticSegmentationModelConfig(backbone=Backbone.resnet50),
solver=SolverConfig(lr=1e-4, num_epochs=1, batch_sz=2))
Expand All @@ -70,6 +70,6 @@ def make_scene(scene_id, image_uri, label_uri):
root_uri=root_uri,
dataset=dataset,
backend=backend,
train_chip_sz=train_chip_sz,
chip_options=chip_options,
debug=False)
train_chip_sz=chip_sz,
predict_chip_sz=chip_sz,
chip_options=chip_options)
Expand Up @@ -25,7 +25,7 @@ def get_learner_config(self, pipeline):
data=data,
model=self.model,
solver=self.solver,
test_mode=pipeline.debug,
test_mode=self.test_mode,
output_uri=pipeline.train_uri,
log_tensorboard=self.log_tensorboard,
run_tensorboard=self.run_tensorboard)
Expand Down
Expand Up @@ -21,6 +21,12 @@ class PyTorchLearnerBackendConfig(BackendConfig):
description=(
'Names of albumentations augmentors to use for training batches. '
'Choices include: ' + str(augmentor_list)))
test_mode: bool = Field(
False,
description=
('This field is passed along to the LearnerConfig which is returned by '
'get_learner_config(). For more info, see the docs for'
'pytorch_learner.learner_config.LearnerConfig.test_mode.'))

def get_bundle_filenames(self):
return ['model-bundle.zip']
Expand Down
Expand Up @@ -24,7 +24,7 @@ def get_learner_config(self, pipeline):
data=data,
model=self.model,
solver=self.solver,
test_mode=pipeline.debug,
test_mode=self.test_mode,
output_uri=pipeline.train_uri,
log_tensorboard=self.log_tensorboard,
run_tensorboard=self.run_tensorboard)
Expand Down
Expand Up @@ -24,7 +24,7 @@ def get_learner_config(self, pipeline):
data=data,
model=self.model,
solver=self.solver,
test_mode=pipeline.debug,
test_mode=self.test_mode,
output_uri=pipeline.train_uri,
log_tensorboard=self.log_tensorboard,
run_tensorboard=self.run_tensorboard)
Expand Down
6 changes: 6 additions & 0 deletions tests_v2/__init__.py
@@ -0,0 +1,6 @@
import os


def data_file_path(rel_path):
    """Return the path to a test data file.

    Args:
        rel_path: path relative to the tests' ``data_files`` directory.

    Returns:
        Full path to the file under ``data_files`` next to this module.
    """
    return os.path.join(os.path.dirname(__file__), 'data_files', rel_path)
File renamed without changes.
File renamed without changes.
Empty file.

0 comments on commit c617c49

Please sign in to comment.