Skip to content
Permalink
Browse files

Project import generated by Copybara.

GitOrigin-RevId: 796203faee20d7aae2876aac8ca5a1827dee4fe3
  • Loading branch information...
MediaPipe Team jqtang
MediaPipe Team authored and jqtang committed Sep 30, 2019
1 parent 412ab42 commit a2a63e387680c090f76072864a107647e874d390
Showing with 7,340 additions and 2,026 deletions.
  1. +6 −0 WORKSPACE
  2. +6 −2 mediapipe/calculators/audio/audio_decoder_calculator.cc
  3. +6 −1 mediapipe/calculators/audio/stabilized_log_calculator.cc
  4. +12 −2 mediapipe/calculators/audio/stabilized_log_calculator_test.cc
  5. +40 −6 mediapipe/calculators/audio/time_series_framer_calculator.cc
  6. +7 −0 mediapipe/calculators/audio/time_series_framer_calculator.proto
  7. +91 −1 mediapipe/calculators/audio/time_series_framer_calculator_test.cc
  8. +7 −1 mediapipe/calculators/core/BUILD
  9. +11 −0 mediapipe/calculators/core/concatenate_vector_calculator.cc
  10. +45 −2 mediapipe/calculators/core/concatenate_vector_calculator.h
  11. +163 −0 mediapipe/calculators/core/concatenate_vector_calculator_test.cc
  12. +19 −31 mediapipe/calculators/image/BUILD
  13. +35 −28 mediapipe/calculators/image/bilateral_filter_calculator.cc
  14. +92 −61 mediapipe/calculators/image/image_cropping_calculator.cc
  15. +6 −6 mediapipe/calculators/image/image_properties_calculator.cc
  16. +29 −22 mediapipe/calculators/image/image_transformation_calculator.cc
  17. +32 −26 mediapipe/calculators/image/recolor_calculator.cc
  18. +35 −29 mediapipe/calculators/image/set_alpha_calculator.cc
  19. +3 −1 mediapipe/calculators/tensorflow/BUILD
  20. +24 −4 mediapipe/calculators/tensorflow/pack_media_sequence_calculator.cc
  21. +142 −0 mediapipe/calculators/tensorflow/pack_media_sequence_calculator_test.cc
  22. +34 −4 mediapipe/calculators/tensorflow/unpack_media_sequence_calculator.cc
  23. +7 −0 mediapipe/calculators/tensorflow/unpack_media_sequence_calculator.proto
  24. +57 −0 mediapipe/calculators/tensorflow/unpack_media_sequence_calculator_test.cc
  25. +58 −22 mediapipe/calculators/tflite/BUILD
  26. +99 −104 mediapipe/calculators/tflite/tflite_converter_calculator.cc
  27. +1 −2 mediapipe/calculators/tflite/tflite_converter_calculator.proto
  28. +144 −98 mediapipe/calculators/tflite/tflite_inference_calculator.cc
  29. +499 −162 mediapipe/calculators/tflite/tflite_tensors_to_detections_calculator.cc
  30. +8 −2 mediapipe/calculators/tflite/tflite_tensors_to_landmarks_calculator.cc
  31. +6 −0 mediapipe/calculators/tflite/tflite_tensors_to_landmarks_calculator.proto
  32. +118 −121 mediapipe/calculators/tflite/tflite_tensors_to_segmentation_calculator.cc
  33. +25 −0 mediapipe/calculators/tflite/util.h
  34. +64 −8 mediapipe/calculators/util/BUILD
  35. +41 −34 mediapipe/calculators/util/annotation_overlay_calculator.cc
  36. +60 −20 mediapipe/calculators/util/rect_to_render_data_calculator.cc
  37. +194 −0 mediapipe/calculators/util/top_k_scores_calculator.cc
  38. +33 −0 mediapipe/calculators/util/top_k_scores_calculator.proto
  39. +150 −0 mediapipe/calculators/util/top_k_scores_calculator_test.cc
  40. +35 −0 mediapipe/docs/examples.md
  41. +265 −0 mediapipe/docs/face_detection_desktop.md
  42. +244 −0 mediapipe/docs/face_detection_mobile_cpu.md
  43. +209 −0 mediapipe/docs/hair_segmentation_desktop.md
  44. +2 −2 mediapipe/docs/hand_detection_mobile_gpu.md
  45. +184 −0 mediapipe/docs/hand_tracking_desktop.md
  46. +3 −3 mediapipe/docs/hand_tracking_mobile_gpu.md
  47. BIN mediapipe/docs/images/face_detection_desktop.png
  48. BIN mediapipe/docs/images/hand_tracking_desktop.png
  49. BIN mediapipe/docs/images/mobile/face_detection_mobile_cpu.png
  50. BIN mediapipe/docs/images/mobile/hand_tracking_mobile.png
  51. +23 −3 mediapipe/docs/install.md
  52. +26 −2 mediapipe/docs/object_detection_desktop.md
  53. +1 −1 mediapipe/docs/visualizer.md
  54. +44 −1 mediapipe/examples/desktop/BUILD
  55. +146 −0 mediapipe/examples/desktop/demo_run_graph_main.cc
  56. +186 −0 mediapipe/examples/desktop/demo_run_graph_main_gpu.cc
  57. +34 −0 mediapipe/examples/desktop/face_detection/BUILD
  58. +26 −0 mediapipe/examples/desktop/hair_segmentation/BUILD
  59. +42 −0 mediapipe/examples/desktop/hand_tracking/BUILD
  60. +8 −0 mediapipe/examples/desktop/object_detection/BUILD
  61. +1 −1 mediapipe/examples/desktop/youtube8m/BUILD
  62. +3 −1 mediapipe/examples/desktop/youtube8m/README.md
  63. +12 −3 mediapipe/examples/desktop/youtube8m/generate_input_sequence_example.py
  64. +35 −0 mediapipe/framework/BUILD
  65. +776 −1 mediapipe/framework/calculator_graph_bounds_test.cc
  66. +747 −0 mediapipe/framework/calculator_graph_side_packet_test.cc
  67. +1 −1,046 mediapipe/framework/calculator_graph_test.cc
  68. +3 −3 mediapipe/framework/formats/matrix_data.proto
  69. +7 −5 mediapipe/framework/graph_validation_test.cc
  70. +2 −2 mediapipe/framework/subgraph.cc
  71. +8 −1 mediapipe/framework/subgraph.h
  72. +37 −5 mediapipe/framework/test_calculators.cc
  73. +8 −0 mediapipe/framework/testdata/BUILD
  74. +3 −3 mediapipe/framework/tool/subgraph_expansion.cc
  75. +2 −2 mediapipe/framework/tool/subgraph_expansion_test.cc
  76. +21 −2 mediapipe/gpu/BUILD
  77. +49 −0 mediapipe/gpu/MPPMetalUtil.h
  78. +51 −0 mediapipe/gpu/MPPMetalUtil.mm
  79. +1 −1 mediapipe/gpu/gl_calculator_helper_impl.h
  80. +22 −5 mediapipe/gpu/gl_calculator_helper_impl_common.cc
  81. +2 −2 mediapipe/gpu/gl_calculator_helper_impl_ios.mm
  82. +22 −8 mediapipe/gpu/gl_context_egl.cc
  83. +2 −1 mediapipe/gpu/gl_texture_buffer.cc
  84. +50 −0 mediapipe/gpu/gpu_buffer_test.cc
  85. +1 −1 mediapipe/gpu/gpu_shared_data_internal.h
  86. +39 −0 mediapipe/gpu/gpu_test_base.h
  87. +17 −0 mediapipe/graphs/face_detection/BUILD
  88. +184 −0 mediapipe/graphs/face_detection/face_detection_desktop_live.pbtxt
  89. +4 −4 mediapipe/graphs/face_detection/face_detection_mobile_cpu.pbtxt
  90. +5 −5 mediapipe/graphs/face_detection/face_detection_mobile_gpu.pbtxt
  91. +1 −1 mediapipe/graphs/hair_segmentation/hair_segmentation_mobile_gpu.pbtxt
  92. +18 −56 mediapipe/graphs/hand_tracking/BUILD
  93. +62 −0 mediapipe/graphs/hand_tracking/hand_detection_desktop.pbtxt
  94. +38 −0 mediapipe/graphs/hand_tracking/hand_detection_desktop_live.pbtxt
  95. +126 −0 mediapipe/graphs/hand_tracking/hand_tracking_desktop.pbtxt
  96. +103 −0 mediapipe/graphs/hand_tracking/hand_tracking_desktop_live.pbtxt
  97. +132 −0 mediapipe/graphs/hand_tracking/subgraphs/BUILD
  98. +193 −0 mediapipe/graphs/hand_tracking/subgraphs/hand_detection_cpu.pbtxt
  99. +4 −4 mediapipe/graphs/hand_tracking/{ → subgraphs}/hand_detection_gpu.pbtxt
  100. +185 −0 mediapipe/graphs/hand_tracking/subgraphs/hand_landmark_cpu.pbtxt
  101. +6 −1 mediapipe/graphs/hand_tracking/{ → subgraphs}/hand_landmark_gpu.pbtxt
  102. +102 −0 mediapipe/graphs/hand_tracking/subgraphs/renderer_cpu.pbtxt
  103. 0 mediapipe/graphs/hand_tracking/{ → subgraphs}/renderer_gpu.pbtxt
  104. +4 −0 mediapipe/graphs/object_detection/BUILD
  105. +174 −0 mediapipe/graphs/object_detection/object_detection_desktop_live.pbtxt
  106. +2 −2 mediapipe/graphs/object_detection/object_detection_mobile_cpu.pbtxt
  107. +4 −4 mediapipe/graphs/object_detection/object_detection_mobile_gpu.pbtxt
  108. +1 −1 mediapipe/graphs/youtube8m/BUILD
  109. +5 −5 mediapipe/graphs/youtube8m/feature_extraction.pbtxt
  110. +2 −8 mediapipe/java/com/google/mediapipe/components/FrameProcessor.java
  111. +1 −2 mediapipe/java/com/google/mediapipe/framework/Graph.java
  112. +3 −4 mediapipe/java/com/google/mediapipe/framework/jni/BUILD
  113. +1 −1 mediapipe/java/com/google/mediapipe/framework/jni/graph.h
  114. +7 −0 mediapipe/objc/MPPGraph.h
  115. +33 −8 mediapipe/objc/MPPGraph.mm
  116. +3 −0 mediapipe/objc/MPPGraphTestBase.h
  117. +6 −2 mediapipe/objc/MPPGraphTestBase.mm
  118. +6 −4 mediapipe/util/android/file/base/helpers.cc
  119. +2 −1 mediapipe/util/annotation_renderer.cc
  120. +26 −1 mediapipe/util/resource_util_android.cc
  121. +30 −7 mediapipe/util/resource_util_apple.cc
  122. +58 −0 third_party/com_github_glog_glog_9779e5ea6ef59562b030248947f787d1256132ae.diff
@@ -64,6 +64,12 @@ http_archive(
sha256 = "267103f8a1e9578978aa1dc256001e6529ef593e5aea38193d31c2872ee025e8",
strip_prefix = "glog-0.3.5",
build_file = "@//third_party:glog.BUILD",
patches = [
"@//third_party:com_github_glog_glog_9779e5ea6ef59562b030248947f787d1256132ae.diff"
],
patch_args = [
"-p1",
],
)

# libyuv
@@ -61,7 +61,9 @@ class AudioDecoderCalculator : public CalculatorBase {
::mediapipe::Status AudioDecoderCalculator::GetContract(
CalculatorContract* cc) {
cc->InputSidePackets().Tag("INPUT_FILE_PATH").Set<std::string>();

if (cc->InputSidePackets().HasTag("OPTIONS")) {
cc->InputSidePackets().Tag("OPTIONS").Set<mediapipe::AudioDecoderOptions>();
}
cc->Outputs().Tag("AUDIO").Set<Matrix>();
if (cc->Outputs().HasTag("AUDIO_HEADER")) {
cc->Outputs().Tag("AUDIO_HEADER").SetNone();
@@ -72,7 +74,9 @@ ::mediapipe::Status AudioDecoderCalculator::GetContract(
::mediapipe::Status AudioDecoderCalculator::Open(CalculatorContext* cc) {
const std::string& input_file_path =
cc->InputSidePackets().Tag("INPUT_FILE_PATH").Get<std::string>();
const auto& decoder_options = cc->Options<mediapipe::AudioDecoderOptions>();
const auto& decoder_options =
tool::RetrieveOptions(cc->Options<mediapipe::AudioDecoderOptions>(),
cc->InputSidePackets(), "OPTIONS");
decoder_ = absl::make_unique<AudioDecoder>();
MP_RETURN_IF_ERROR(decoder_->Initialize(input_file_path, decoder_options));
std::unique_ptr<mediapipe::TimeSeriesHeader> header =
@@ -75,8 +75,13 @@ class StabilizedLogCalculator : public CalculatorBase {

::mediapipe::Status Process(CalculatorContext* cc) override {
auto input_matrix = cc->Inputs().Index(0).Get<Matrix>();
if (input_matrix.array().isNaN().any()) {
return ::mediapipe::InvalidArgumentError("NaN input to log operation.");
}
if (check_nonnegativity_) {
CHECK_GE(input_matrix.minCoeff(), 0);
if (input_matrix.minCoeff() < 0.0) {
return ::mediapipe::OutOfRangeError("Negative input to log operation.");
}
}
std::unique_ptr<Matrix> output_frame(new Matrix(
output_scale_ * (input_matrix.array() + stabilizer_).log().matrix()));
@@ -11,6 +11,7 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cmath>

#include "Eigen/Core"
#include "mediapipe/calculators/audio/stabilized_log_calculator.pb.h"
@@ -108,13 +109,22 @@ TEST_F(StabilizedLogCalculatorTest, ZerosAreStabilized) {
runner_->Outputs().Index(0).packets[0].Get<Matrix>());
}

TEST_F(StabilizedLogCalculatorTest, NegativeValuesCheckFail) {
TEST_F(StabilizedLogCalculatorTest, NanValuesReturnError) {
InitializeGraph();
FillInputHeader();
AppendInputPacket(
new Matrix(Matrix::Constant(kNumChannels, kNumSamples, std::nanf(""))),
0 /* timestamp */);
ASSERT_FALSE(RunGraph().ok());
}

TEST_F(StabilizedLogCalculatorTest, NegativeValuesReturnError) {
InitializeGraph();
FillInputHeader();
AppendInputPacket(
new Matrix(Matrix::Constant(kNumChannels, kNumSamples, -1.0)),
0 /* timestamp */);
ASSERT_DEATH(RunGraphNoReturn(), "");
ASSERT_FALSE(RunGraph().ok());
}

TEST_F(StabilizedLogCalculatorTest, NegativeValuesDoNotCheckFailIfCheckIsOff) {
@@ -56,6 +56,14 @@ namespace mediapipe {
// If pad_final_packet is true, all input samples will be emitted and the final
// packet will be zero padded as necessary. If pad_final_packet is false, some
// samples may be dropped at the end of the stream.
//
// If use_local_timestamp is true, the output packet's timestamp is based on the
// last sample of the packet. The timestamp of this sample is inferred by
// input_packet_timestamp + local_sample_index / sample_rate_. If false, the
// output packet's timestamp is based on the cumulative timestamping, which is
// done by adopting the timestamp of the first sample of the packet and this
// sample's timestamp is inferred by initial_input_timestamp_ +
// cumulative_completed_samples / sample_rate_.
class TimeSeriesFramerCalculator : public CalculatorBase {
public:
static ::mediapipe::Status GetContract(CalculatorContract* cc) {
@@ -86,11 +94,26 @@ class TimeSeriesFramerCalculator : public CalculatorBase {
void FrameOutput(CalculatorContext* cc);

Timestamp CurrentOutputTimestamp() {
if (use_local_timestamp_) {
return current_timestamp_;
}
return CumulativeOutputTimestamp();
}

Timestamp CumulativeOutputTimestamp() {
return initial_input_timestamp_ +
round(cumulative_completed_samples_ / sample_rate_ *
Timestamp::kTimestampUnitsPerSecond);
}

// Returns the timestamp of a sample on a base, which is usually the time
// stamp of a packet.
Timestamp CurrentSampleTimestamp(const Timestamp& timestamp_base,
int64 number_of_samples) {
return timestamp_base + round(number_of_samples / sample_rate_ *
Timestamp::kTimestampUnitsPerSecond);
}

// The number of input samples to advance after the current output frame is
// emitted.
int next_frame_step_samples() const {
@@ -118,22 +141,27 @@ class TimeSeriesFramerCalculator : public CalculatorBase {
// any overlap).
int64 cumulative_completed_samples_;
Timestamp initial_input_timestamp_;
// The current timestamp is updated along with the incoming packets.
Timestamp current_timestamp_;
int num_channels_;

// Each entry in this deque consists of a single sample, i.e. a
// single column vector.
std::deque<Matrix> sample_buffer_;
// single column vector, and its timestamp.
std::deque<std::pair<Matrix, Timestamp>> sample_buffer_;

bool use_window_;
Matrix window_;

bool use_local_timestamp_;
};
REGISTER_CALCULATOR(TimeSeriesFramerCalculator);

void TimeSeriesFramerCalculator::EnqueueInput(CalculatorContext* cc) {
const Matrix& input_frame = cc->Inputs().Index(0).Get<Matrix>();

for (int i = 0; i < input_frame.cols(); ++i) {
sample_buffer_.emplace_back(input_frame.col(i));
sample_buffer_.emplace_back(std::make_pair(
input_frame.col(i), CurrentSampleTimestamp(cc->InputTimestamp(), i)));
}

cumulative_input_samples_ += input_frame.cols();
@@ -151,14 +179,16 @@ void TimeSeriesFramerCalculator::FrameOutput(CalculatorContext* cc) {
new Matrix(num_channels_, frame_duration_samples_));
for (int i = 0; i < std::min(frame_step_samples, frame_duration_samples_);
++i) {
output_frame->col(i) = sample_buffer_.front();
output_frame->col(i) = sample_buffer_.front().first;
current_timestamp_ = sample_buffer_.front().second;
sample_buffer_.pop_front();
}
const int frame_overlap_samples =
frame_duration_samples_ - frame_step_samples;
if (frame_overlap_samples > 0) {
for (int i = 0; i < frame_overlap_samples; ++i) {
output_frame->col(i + frame_step_samples) = sample_buffer_[i];
output_frame->col(i + frame_step_samples) = sample_buffer_[i].first;
current_timestamp_ = sample_buffer_[i].second;
}
} else {
samples_still_to_drop_ = -frame_overlap_samples;
@@ -178,6 +208,7 @@ void TimeSeriesFramerCalculator::FrameOutput(CalculatorContext* cc) {
::mediapipe::Status TimeSeriesFramerCalculator::Process(CalculatorContext* cc) {
if (initial_input_timestamp_ == Timestamp::Unstarted()) {
initial_input_timestamp_ = cc->InputTimestamp();
current_timestamp_ = initial_input_timestamp_;
}

EnqueueInput(cc);
@@ -195,7 +226,8 @@ ::mediapipe::Status TimeSeriesFramerCalculator::Close(CalculatorContext* cc) {
std::unique_ptr<Matrix> output_frame(new Matrix);
output_frame->setZero(num_channels_, frame_duration_samples_);
for (int i = 0; i < sample_buffer_.size(); ++i) {
output_frame->col(i) = sample_buffer_[i];
output_frame->col(i) = sample_buffer_[i].first;
current_timestamp_ = sample_buffer_[i].second;
}

cc->Outputs().Index(0).Add(output_frame.release(),
@@ -258,6 +290,7 @@ ::mediapipe::Status TimeSeriesFramerCalculator::Open(CalculatorContext* cc) {
cumulative_output_frames_ = 0;
samples_still_to_drop_ = 0;
initial_input_timestamp_ = Timestamp::Unstarted();
current_timestamp_ = Timestamp::Unstarted();

std::vector<double> window_vector;
use_window_ = false;
@@ -282,6 +315,7 @@ ::mediapipe::Status TimeSeriesFramerCalculator::Open(CalculatorContext* cc) {
frame_duration_samples_)
.cast<float>();
}
use_local_timestamp_ = framer_options.use_local_timestamp();

return ::mediapipe::OkStatus();
}
@@ -62,4 +62,11 @@ message TimeSeriesFramerCalculatorOptions {
HANN = 2;
}
optional WindowFunction window_function = 4 [default = NONE];

// If use_local_timestamp is true, the output packet's timestamp is based on
// the last sample of the packet and it's inferred from the latest input
// packet's timestamp. If false, the output packet's timestamp is based on
// the cumulative timestamping, which is inferred from the initial input
// timestamp and the cumulative number of samples.
optional bool use_local_timestamp = 6 [default = false];
}
@@ -35,6 +35,8 @@ namespace mediapipe {
namespace {

const int kInitialTimestampOffsetMicroseconds = 4;
const int kGapBetweenPacketsInSeconds = 1;
const int kUniversalInputPacketSize = 50;

class TimeSeriesFramerCalculatorTest
: public TimeSeriesCalculatorTest<TimeSeriesFramerCalculatorOptions> {
@@ -391,5 +393,93 @@ TEST_F(TimeSeriesFramerCalculatorWindowingSanityTest, HannWindowSanityCheck) {
RunAndTestSinglePacketAverage(0.5f);
}

} // anonymous namespace
// A simple test class that checks the local packet timestamp. This class
// generates a series of packets with and without gaps between packets and
// tests the behavior with cumulative timestamping and local packet
// timestamping.
class TimeSeriesFramerCalculatorTimestampingTest
: public TimeSeriesFramerCalculatorTest {
protected:
// Creates test input and saves a reference copy.
void InitializeInputForTimeStampingTest() {
concatenated_input_samples_.resize(0, num_input_channels_);
num_input_samples_ = 0;
for (int i = 0; i < 10; ++i) {
// This range of packet sizes was chosen such that some input
// packets will be smaller than the output packet size and other
// input packets will be larger.
int packet_size = kUniversalInputPacketSize;
double timestamp_seconds = kInitialTimestampOffsetMicroseconds * 1.0e-6 +
num_input_samples_ / input_sample_rate_;
if (options_.use_local_timestamp()) {
timestamp_seconds += kGapBetweenPacketsInSeconds * i;
}

Matrix* data_frame =
NewTestFrame(num_input_channels_, packet_size, timestamp_seconds);

AppendInputPacket(data_frame, round(timestamp_seconds *
Timestamp::kTimestampUnitsPerSecond));
num_input_samples_ += packet_size;
}
}

void CheckOutputTimestamps() {
int num_full_packets = output().packets.size();
if (options_.pad_final_packet()) {
num_full_packets -= 1;
}

int64 num_samples = 0;
for (int packet_num = 0; packet_num < num_full_packets; ++packet_num) {
const Packet& packet = output().packets[packet_num];
num_samples += FrameDurationSamples();
double expected_timestamp =
options_.use_local_timestamp()
? GetExpectedLocalTimestampForSample(num_samples - 1)
: GetExpectedCumulativeTimestamp(num_samples - 1);
ASSERT_NEAR(packet.Timestamp().Seconds(), expected_timestamp, 1e-10);
}
}

::mediapipe::Status RunTimestampTest() {
InitializeGraph();
InitializeInputForTimeStampingTest();
FillInputHeader();
return RunGraph();
}

private:
// Returns the timestamp in seconds based on local timestamping.
double GetExpectedLocalTimestampForSample(int sample_index) {
return kInitialTimestampOffsetMicroseconds * 1.0e-6 +
sample_index / input_sample_rate_ +
(sample_index / kUniversalInputPacketSize) *
kGapBetweenPacketsInSeconds;
}

// Returns the timestamp in seconds based on cumulative timestamping.
double GetExpectedCumulativeTimestamp(int sample_index) {
return kInitialTimestampOffsetMicroseconds * 1.0e-6 +
sample_index / FrameDurationSamples() * FrameDurationSamples() /
input_sample_rate_;
}
};

TEST_F(TimeSeriesFramerCalculatorTimestampingTest, UseLocalTimeStamp) {
options_.set_frame_duration_seconds(100.0 / input_sample_rate_);
options_.set_use_local_timestamp(true);

MP_ASSERT_OK(RunTimestampTest());
CheckOutputTimestamps();
}

TEST_F(TimeSeriesFramerCalculatorTimestampingTest, UseCumulativeTimeStamp) {
options_.set_frame_duration_seconds(100.0 / input_sample_rate_);
options_.set_use_local_timestamp(false);

MP_ASSERT_OK(RunTimestampTest());
CheckOutputTimestamps();
}

} // namespace
} // namespace mediapipe
@@ -166,7 +166,13 @@ cc_library(
"//mediapipe/framework/port:ret_check",
"//mediapipe/framework/port:status",
"@org_tensorflow//tensorflow/lite:framework",
],
] + select({
"//mediapipe/gpu:disable_gpu": [],
"//mediapipe:ios": [],
"//conditions:default": [
"@org_tensorflow//tensorflow/lite/delegates/gpu/gl:gl_buffer",
],
}),
alwayslink = 1,
)

@@ -19,6 +19,10 @@
#include "mediapipe/framework/formats/landmark.pb.h"
#include "tensorflow/lite/interpreter.h"

#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__APPLE__)
#include "tensorflow/lite/delegates/gpu/gl/gl_buffer.h"
#endif  // !MEDIAPIPE_DISABLE_GPU && !__APPLE__

namespace mediapipe {

// Example config:
@@ -45,4 +49,11 @@ REGISTER_CALCULATOR(ConcatenateTfLiteTensorVectorCalculator);
typedef ConcatenateVectorCalculator<::mediapipe::NormalizedLandmark>
ConcatenateLandmarkVectorCalculator;
REGISTER_CALCULATOR(ConcatenateLandmarkVectorCalculator);

#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__APPLE__)
typedef ConcatenateVectorCalculator<::tflite::gpu::gl::GlBuffer>
ConcatenateGlBufferVectorCalculator;
REGISTER_CALCULATOR(ConcatenateGlBufferVectorCalculator);
#endif

} // namespace mediapipe

0 comments on commit a2a63e3

Please sign in to comment.
You can’t perform that action at this time.