Skip to content

Commit

Permalink
fixing build issue
Browse files Browse the repository at this point in the history
  • Loading branch information
taylor committed Aug 5, 2019
1 parent 8abf412 commit 002a671
Show file tree
Hide file tree
Showing 6 changed files with 44 additions and 7 deletions.
2 changes: 0 additions & 2 deletions .travis.yml
Expand Up @@ -7,10 +7,8 @@ python:
install:
- pip install --upgrade pip
- pip install -U pytest
- pip install -U pylint
- pip install .[tests,optional]

script:
- cd tests; pytest --disable-warnings --cov=dvt --cov-report term-missing .
- cd tests; codecov
- pylint dvt
2 changes: 1 addition & 1 deletion dvt/aggregate/core.py
Expand Up @@ -34,4 +34,4 @@ def aggregate(self, ldframe, **kwargs):
While not strictly enforced, subclasses should return a DictFrame
or dictionary of DictFrames from the aggregate method.
"""
return # pragma nocov
return # pragma: no cover
2 changes: 1 addition & 1 deletion dvt/aggregate/people.py
Expand Up @@ -134,7 +134,7 @@ def make_fprint_from_images(dinput):

fpobj.process(ImageInput(input_paths=join(dinput, "", "*")))

faces = fpobj.collect('face')
ss = fpobj.collect('face')
names = [splitext(basename(x))[0] for x in faces['frame']]

return faces['embed'], names
2 changes: 1 addition & 1 deletion dvt/annotate/core.py
Expand Up @@ -227,7 +227,7 @@ def annotate(self, batch):
(or first shape value, in the case of numpy array). Can also return
None, in which case nothing is added to the current output.
"""
return # pragma nocov
return # pragma: no cover


class FrameInput:
Expand Down
8 changes: 8 additions & 0 deletions dvt/cli.py
Expand Up @@ -32,6 +32,7 @@ def run_cli():
doutput=args.dirout,
diff_co=args.diff_cutoff,
cut_min_length=args.cut_min_length,
path_to_faces=args.path_to_faces
)
vpipe.make_breaks(freq=args.frequency)
vpipe.run(level=args.pipeline_level)
Expand Down Expand Up @@ -95,6 +96,13 @@ def _get_arg_parse():
action="store_true",
help="flag to indicate that the pipeline should run quietly",
)
parser.add_argument(
"--path-to-faces",
type=str,
default=None,
help="Path to directory containing prototype faces (optional). See "
"tutorial on the commandline interface for more details.",
)

return parser

Expand Down
35 changes: 33 additions & 2 deletions dvt/pipeline/video.py
Expand Up @@ -28,12 +28,13 @@

from ..annotate.cielab import CIElabAnnotator
from ..annotate.core import FrameProcessor, FrameInput
from ..annotate.face import FaceAnnotator, FaceDetectMtcnn
from ..annotate.face import FaceAnnotator, FaceDetectMtcnn, FaceEmbedVgg2
from ..annotate.meta import MetaAnnotator
from ..annotate.obj import ObjectAnnotator, ObjectDetectRetinaNet
from ..annotate.opticalflow import OpticalFlowAnnotator
from ..annotate.png import PngAnnotator
from ..aggregate.display import DisplayAggregator
from ..aggregate.people import PeopleAggregator, make_fprint_from_images
from ..aggregate.length import ShotLengthAggregator
from ..utils import setup_tensorflow, _format_time, DictFrame
from .utils import _get_cuts, _get_meta
Expand All @@ -52,9 +53,20 @@ class VideoPipeline:
values produce fewer cuts.
cut_min_length (int): minimum length of a detected cut in frames;
        higher values produce fewer cuts.
    path_to_faces (str): Path to directory containing prototype faces
(optional). See tutorial on the commandline interface for more
details.
"""

def __init__(self, finput, doutput=None, diff_co=10, cut_min_length=30):
def __init__(
self,
finput,
doutput=None,
diff_co=10,
cut_min_length=30,
path_to_faces=None
):

setup_tensorflow()

# find absolute path to the input and determine the output location
Expand All @@ -69,6 +81,8 @@ def __init__(self, finput, doutput=None, diff_co=10, cut_min_length=30):
self.doutput = os.path.join(doutput, fname)
self.diff_co = diff_co
self.cut_min_length = cut_min_length
self.path_to_faces = path_to_faces

self.cuts = None
self.pipeline_data = None

Expand Down Expand Up @@ -145,6 +159,20 @@ def _run_pipeline(self):
ObjectAnnotator(detector=ObjectDetectRetinaNet(), frames=frames)
)

if self.path_to_faces is not None:
fembed, fnames = make_fprint_from_images(self.path_to_faces)
fpobj.load_annotator(
FaceAnnotator(
detector=FaceDetectMtcnn(),
                    embedding=FaceEmbedVgg2(),
frames=frames
)
)
else:
fpobj.load_annotator(
FaceAnnotator(detector=FaceDetectMtcnn(), frames=frames)
)

fri = FrameInput(self.finput, bsize=128)
fpobj.process(fri)
self.pipeline_data = fpobj.collect_all()
Expand All @@ -169,6 +197,9 @@ def _make_json(self):
fps = self.pipeline_data["meta"]["fps"][0]
ldata = self.pipeline_data["length"]

if self.path_to_faces is not None:
people = PeopleAggregator(self.pipeline_data)

output = []
for fnum in range(nframes):

Expand Down

0 comments on commit 002a671

Please sign in to comment.