This repository was archived by the owner on Aug 28, 2024. It is now read-only.
Merged
16 changes: 8 additions & 8 deletions D2Go/D2Go.xcodeproj/project.pbxproj
@@ -8,13 +8,13 @@

/* Begin PBXBuildFile section */
260F86432627C445008F2567 /* classes.txt in Resources */ = {isa = PBXBuildFile; fileRef = 260F86422627C445008F2567 /* classes.txt */; };
26A8C10A26DFEC4700F4A58D /* d2go_optimized.ptl in Resources */ = {isa = PBXBuildFile; fileRef = 26A8C10926DFEC4700F4A58D /* d2go_optimized.ptl */; };
26C3345C26210C3E00C37AD3 /* AppDelegate.swift in Sources */ = {isa = PBXBuildFile; fileRef = 26C3345B26210C3E00C37AD3 /* AppDelegate.swift */; };
26C3345E26210C3E00C37AD3 /* SceneDelegate.swift in Sources */ = {isa = PBXBuildFile; fileRef = 26C3345D26210C3E00C37AD3 /* SceneDelegate.swift */; };
26C3346026210C3E00C37AD3 /* ViewController.swift in Sources */ = {isa = PBXBuildFile; fileRef = 26C3345F26210C3E00C37AD3 /* ViewController.swift */; };
26C3346326210C3E00C37AD3 /* Main.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = 26C3346126210C3E00C37AD3 /* Main.storyboard */; };
26C3346526210C3F00C37AD3 /* Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 26C3346426210C3F00C37AD3 /* Assets.xcassets */; };
26C3346826210C3F00C37AD3 /* LaunchScreen.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = 26C3346626210C3F00C37AD3 /* LaunchScreen.storyboard */; };
26C3347626211FC900C37AD3 /* d2go_optimized.pt in Resources */ = {isa = PBXBuildFile; fileRef = 26C3347526211FC900C37AD3 /* d2go_optimized.pt */; };
26C3348C262120CB00C37AD3 /* PrePostProcessor.swift in Sources */ = {isa = PBXBuildFile; fileRef = 26C3347D262120CB00C37AD3 /* PrePostProcessor.swift */; };
26C3348D262120CB00C37AD3 /* UIImage+Helper.swift in Sources */ = {isa = PBXBuildFile; fileRef = 26C3347E262120CB00C37AD3 /* UIImage+Helper.swift */; };
26C3348F262120CB00C37AD3 /* ObjectDetector.swift in Sources */ = {isa = PBXBuildFile; fileRef = 26C33483262120CB00C37AD3 /* ObjectDetector.swift */; };
@@ -30,6 +30,7 @@

/* Begin PBXFileReference section */
260F86422627C445008F2567 /* classes.txt */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = classes.txt; sourceTree = "<group>"; };
26A8C10926DFEC4700F4A58D /* d2go_optimized.ptl */ = {isa = PBXFileReference; lastKnownFileType = file; path = d2go_optimized.ptl; sourceTree = "<group>"; };
26C3345826210C3E00C37AD3 /* D2Go.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = D2Go.app; sourceTree = BUILT_PRODUCTS_DIR; };
26C3345B26210C3E00C37AD3 /* AppDelegate.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = AppDelegate.swift; sourceTree = "<group>"; };
26C3345D26210C3E00C37AD3 /* SceneDelegate.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = SceneDelegate.swift; sourceTree = "<group>"; };
@@ -38,7 +39,6 @@
26C3346426210C3F00C37AD3 /* Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Assets.xcassets; sourceTree = "<group>"; };
26C3346726210C3F00C37AD3 /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.storyboard; name = Base; path = Base.lproj/LaunchScreen.storyboard; sourceTree = "<group>"; };
26C3346926210C3F00C37AD3 /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = "<group>"; };
26C3347526211FC900C37AD3 /* d2go_optimized.pt */ = {isa = PBXFileReference; lastKnownFileType = file; path = d2go_optimized.pt; sourceTree = "<group>"; };
26C3347D262120CB00C37AD3 /* PrePostProcessor.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = PrePostProcessor.swift; sourceTree = "<group>"; };
26C3347E262120CB00C37AD3 /* UIImage+Helper.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = "UIImage+Helper.swift"; sourceTree = "<group>"; };
26C33483262120CB00C37AD3 /* ObjectDetector.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ObjectDetector.swift; sourceTree = "<group>"; };
@@ -105,7 +105,7 @@
26C3348A262120CB00C37AD3 /* test1.png */,
26C33484262120CB00C37AD3 /* test2.jpg */,
26C3348B262120CB00C37AD3 /* test3.png */,
26C3347526211FC900C37AD3 /* d2go_optimized.pt */,
26A8C10926DFEC4700F4A58D /* d2go_optimized.ptl */,
260F86422627C445008F2567 /* classes.txt */,
);
path = D2Go;
@@ -204,10 +204,10 @@
26C33490262120CB00C37AD3 /* test2.jpg in Resources */,
26C3346526210C3F00C37AD3 /* Assets.xcassets in Resources */,
26C3346326210C3E00C37AD3 /* Main.storyboard in Resources */,
26C3347626211FC900C37AD3 /* d2go_optimized.pt in Resources */,
26C33495262120CB00C37AD3 /* test1.png in Resources */,
26C33496262120CB00C37AD3 /* test3.png in Resources */,
260F86432627C445008F2567 /* classes.txt in Resources */,
26A8C10A26DFEC4700F4A58D /* d2go_optimized.ptl in Resources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
@@ -401,9 +401,9 @@
"-l\"torch_cpu\"",
"-l\"torchvision_ops\"",
"-force_load",
"$(PODS_ROOT)/LibTorch/install/lib/libtorch.a",
"$(PODS_ROOT)/LibTorch-Lite/install/lib/libtorch.a",
"-force_load",
"$(PODS_ROOT)/LibTorch/install/lib/libtorch_cpu.a",
"$(PODS_ROOT)/LibTorch-Lite/install/lib/libtorch_cpu.a",
"-force_load",
"$(PODS_ROOT)/LibTorchvision/install/lib/libtorchvision_ops.a",
);
@@ -445,9 +445,9 @@
"-l\"torch_cpu\"",
"-l\"torchvision_ops\"",
"-force_load",
"$(PODS_ROOT)/LibTorch/install/lib/libtorch.a",
"$(PODS_ROOT)/LibTorch-Lite/install/lib/libtorch.a",
"-force_load",
"$(PODS_ROOT)/LibTorch/install/lib/libtorch_cpu.a",
"$(PODS_ROOT)/LibTorch-Lite/install/lib/libtorch_cpu.a",
"-force_load",
"$(PODS_ROOT)/LibTorchvision/install/lib/libtorchvision_ops.a",
);
1 change: 1 addition & 0 deletions D2Go/D2Go/Inference/InferenceModule.h
@@ -5,6 +5,7 @@
// LICENSE file in the root directory of this source tree.

#import <Foundation/Foundation.h>
#import <UIKit/UIKit.h>

NS_ASSUME_NONNULL_BEGIN

27 changes: 15 additions & 12 deletions D2Go/D2Go/Inference/InferenceModule.mm
@@ -5,23 +5,22 @@
// LICENSE file in the root directory of this source tree.

#import "InferenceModule.h"
#import <LibTorch.h>
#import <Libtorch-Lite/Libtorch-Lite.h>

const int input_width = 640;
const int input_height = 640;
const int threshold = 0.5;


@implementation InferenceModule {
@protected torch::jit::script::Module _impl;
@protected torch::jit::mobile::Module _impl;
}

- (nullable instancetype)initWithFileAtPath:(NSString*)filePath {
self = [super init];
if (self) {
try {
_impl = torch::jit::load(filePath.UTF8String);
_impl.eval();
_impl = torch::jit::_load_for_mobile(filePath.UTF8String);
} catch (const std::exception& exception) {
NSLog(@"%s", exception.what());
return nil;
@@ -33,14 +32,18 @@ - (nullable instancetype)initWithFileAtPath:(NSString*)filePath {
- (NSArray<NSNumber*>*)detectImage:(void*)imageBuffer {
try {
at::Tensor tensor = torch::from_blob(imageBuffer, { 3, input_width, input_height }, at::kFloat);
torch::autograd::AutoGradMode guard(false);
at::AutoNonVariableTypeMode non_var_type_mode(true);

c10::InferenceMode guard;

std::vector<torch::Tensor> v;
v.push_back(tensor);



CFTimeInterval startTime = CACurrentMediaTime();
auto outputTuple = _impl.forward({ at::TensorList(v) }).toTuple();

CFTimeInterval elapsedTime = CACurrentMediaTime() - startTime;
NSLog(@"inference time:%f", elapsedTime);


auto outputDict = outputTuple->elements()[1].toList().get(0).toGenericDict();
auto boxesTensor = outputDict.at("boxes").toTensor();
auto scoresTensor = outputDict.at("scores").toTensor();
@@ -58,7 +61,7 @@ - (nullable instancetype)initWithFileAtPath:(NSString*)filePath {
if (!labelsBuffer) {
return nil;
}

NSMutableArray* results = [[NSMutableArray alloc] init];
long num = scoresTensor.numel();
for (int i = 0; i < num; i++) {
Expand All @@ -72,9 +75,9 @@ - (nullable instancetype)initWithFileAtPath:(NSString*)filePath {
[results addObject:@(scoresBuffer[i])];
[results addObject:@(labelsBuffer[i])];
}

return [results copy];

} catch (const std::exception& exception) {
NSLog(@"%s", exception.what());
}
2 changes: 1 addition & 1 deletion D2Go/D2Go/Inference/ObjectDetector.swift
@@ -8,7 +8,7 @@ import UIKit

class ObjectDetector {
lazy var module: InferenceModule = {
if let filePath = Bundle.main.path(forResource: "d2go_optimized", ofType: "pt"),
if let filePath = Bundle.main.path(forResource: "d2go_optimized", ofType: "ptl"),
let module = InferenceModule(fileAtPath: filePath) {
return module
} else {
Binary file modified D2Go/D2Go/d2go_optimized.pt
Binary file added D2Go/D2Go/d2go_optimized.ptl
2 changes: 1 addition & 1 deletion D2Go/Podfile
@@ -4,7 +4,7 @@ target 'D2Go' do
# Comment the next line if you don't want to use dynamic frameworks
use_frameworks!

pod 'LibTorch', '1.9.0'
pod 'LibTorch-Lite', '~>1.9.0'
pod 'LibTorchvision', '0.10.0'

end
7 changes: 4 additions & 3 deletions D2Go/README.md
@@ -2,22 +2,22 @@

## Introduction

[Detectron2](https://github.com/facebookresearch/detectron2) is one of the most widely adopted open source projects and implements state-of-the-art object detection, semantic segmentation, panoptic segmentation, and human pose prediction. [D2Go](https://github.com/facebookresearch/d2go) is powered by PyTorch 1.9.0, torchvision 0.10.0, and Detectron2 with built-in SOTA networks for mobile - the D2Go model is very small (only 2.15MB) and runs very fast on iOS.
[Detectron2](https://github.com/facebookresearch/detectron2) is one of the most widely adopted open source projects and implements state-of-the-art object detection, semantic segmentation, panoptic segmentation, and human pose prediction. [D2Go](https://github.com/facebookresearch/d2go) is powered by PyTorch 1.9, torchvision 0.10, and Detectron2 with built-in SOTA networks for mobile - the D2Go model is very small (only 2.15MB) and runs very fast on iOS.

This D2Go iOS demo app shows how to prepare and use the D2Go model on iOS with the newly released LibTorchvision Cocoapods. The code is based on a previous PyTorch iOS [Object Detection demo app](https://github.com/pytorch/ios-demo-app/tree/master/ObjectDetection) that uses a pre-trained YOLOv5 model, with modified pre-processing and post-processing code required by the D2Go model.

## Prerequisites

* PyTorch 1.9 and torchvision 0.10 (Optional)
* Python 3.8 or above (Optional)
* iOS Cocoapods LibTorch 1.9.0 and LibTorchvision 0.10.0
* iOS Cocoapods LibTorch-Lite 1.9.0 and LibTorchvision 0.10.0
* Xcode 12.4 or later

## Quick Start

This section shows how to create and use the D2Go model in an iOS app. To just build and run the app without creating the D2Go model yourself, go directly to Step 4.

1. Install PyTorch 1.9.0 and torchvision 0.10.0, for example:
1. Install PyTorch 1.9 and torchvision 0.10, for example:

```
conda create -n d2go python=3.8.5
@@ -50,6 +50,7 @@ Run the following command to create the optimized D2Go model `d2go_optimized.pt`
```
python create_d2go.py
```
Both the optimized JIT model and the Lite Interpreter model will be created and saved in the project folder.
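
For orientation, the export step inside `create_d2go.py` (shown in the script's diff further down) amounts to roughly the following sketch; `wrapped_model` stands for the D2Go wrapper module the script builds beforehand:

```
import torch
from torch.utils.mobile_optimizer import optimize_for_mobile

# wrapped_model is the D2Go wrapper module assembled earlier in create_d2go.py
scripted_model = torch.jit.script(wrapped_model)
optimized_model = optimize_for_mobile(scripted_model)

# Full-JIT artifact, loadable with torch::jit::load (LibTorch)
optimized_model.save("D2Go/d2go_optimized.pt")
# Lite Interpreter artifact, loadable with _load_for_mobile (LibTorch-Lite)
optimized_model._save_for_lite_interpreter("D2Go/d2go_optimized.ptl")
```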

4. Build and run the D2Go iOS app

1 change: 1 addition & 0 deletions D2Go/create_d2go.py
@@ -70,6 +70,7 @@ def forward(self, inputs: List[torch.Tensor]):
scripted_model = torch.jit.script(wrapped_model)
optimized_model = optimize_for_mobile(scripted_model)
optimized_model.save("D2Go/d2go_optimized.pt")
optimized_model._save_for_lite_interpreter("D2Go/d2go_optimized.ptl")

if __name__ == '__main__':
test_export_torchvision_format()
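
A quick way to confirm the exported `.ptl` file is usable before bundling it into Xcode is to load it back with the Lite Interpreter from Python. This is an optional sanity check, not part of the script; it assumes the `_load_for_lite_interpreter` helper in `torch.jit.mobile` (available in PyTorch 1.9) and a random 3 x 640 x 640 input tensor, matching the input size the iOS app uses:

```
import torch
from torch.jit.mobile import _load_for_lite_interpreter

# Load the Lite Interpreter artifact produced by create_d2go.py
lite_model = _load_for_lite_interpreter("D2Go/d2go_optimized.ptl")

# The wrapper's forward takes a List[Tensor]; a random 3 x 640 x 640 float
# tensor mirrors the shape the iOS app passes in (assumed smoke-test input).
out = lite_model([torch.rand(3, 640, 640)])
print(type(out))
```
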
11 changes: 6 additions & 5 deletions ImageSegmentation/ImageSegmentation/TorchModule.mm
@@ -20,7 +20,6 @@ - (nullable instancetype)initWithFileAtPath:(NSString*)filePath {
if (self) {
try {
_impl = torch::jit::_load_for_mobile(filePath.UTF8String);
_impl.eval();
} catch (const std::exception& exception) {
NSLog(@"%s", exception.what());
return nil;
@@ -40,9 +39,6 @@ - (unsigned char*)segmentImage:(void *)imageBuffer withWidth:(int)width withHeig

at::Tensor tensor = torch::from_blob(imageBuffer, { 1, 3, width, height }, at::kFloat);

torch::autograd::AutoGradMode guard(false);
at::AutoNonVariableTypeMode non_var_type_mode(true);

float* floatInput = tensor.data_ptr<float>();
if (!floatInput) {
return nil;
@@ -52,8 +48,13 @@ - (unsigned char*)segmentImage:(void *)imageBuffer withWidth:(int)width withHeig
[inputs addObject:@(floatInput[i])];
}

c10::InferenceMode guard;

CFTimeInterval startTime = CACurrentMediaTime();
auto outputDict = _impl.forward({ tensor }).toGenericDict();

CFTimeInterval elapsedTime = CACurrentMediaTime() - startTime;
NSLog(@"inference time:%f", elapsedTime);

auto outputTensor = outputDict.at("out").toTensor();

float* floatBuffer = outputTensor.data_ptr<float>();
12 changes: 5 additions & 7 deletions ImageSegmentation/README.md
@@ -6,9 +6,9 @@ This repo offers a Python script that converts the [PyTorch DeepLabV3 model](htt

## Prerequisites

* PyTorch 1.9.0 and torchvision 0.10.0 (Optional)
* PyTorch 1.9 and torchvision 0.10 (Optional)
* Python 3.8 or above (Optional)
* iOS Cocoapods LibTorch-Lite 1.9.0
* iOS Cocoapods LibTorch-Lite 1.9.0 and LibTorchvision 0.10.0
* Xcode 12.4 or later

## Quick Start
@@ -17,11 +17,9 @@ To Test Run the Image Segmentation iOS App, follow the steps below:

### 1. Prepare the Model

If you don't have the PyTorch environment set up to run the script below to generate the model file, you can download it to the `ios-demo-app/ImageSegmentation` folder using the link [here](https://drive.google.com/file/d/1FHV9tN6-e3EWUgM_K3YvDoRLPBj7NHXO/view?usp=sharing).
If you don't have the PyTorch environment set up to run the script below to generate the model file, you can download it to the `ios-demo-app/ImageSegmentation` folder using the link [here](https://pytorch-mobile-demo-apps.s3.us-east-2.amazonaws.com/deeplabv3_scripted.ptl).

Be aware that the downloadable model file was created with PyTorch 1.7.0, matching the iOS LibTorch library 1.7.0 specified in the `Podfile`. If you use a different version of PyTorch to create your model by following the instructions below, make sure you specify the same iOS LibTorch version in the `Podfile` to avoid possible errors caused by the version mismatch. Furthermore, if you want to use the latest prototype features in the PyTorch master branch to create the model, follow the steps at [Building PyTorch iOS Libraries from Source](https://pytorch.org/mobile/ios/#build-pytorch-ios-libraries-from-source) on how to use the model in iOS.
Review comment: Do we need to update all the recipes as well, or add a new set of lite-interpreter-specific recipes?

Reply from the contributor (PR author): I think so, as all PT Mobile recipes have this:

Pre-requisites
PyTorch 1.6.0 or 1.7.0
torchvision 0.6.0 or 0.7.0

So do the tutorials https://pytorch.org/tutorials/beginner/deeplabv3_on_ios.html and https://pytorch.org/tutorials/beginner/deeplabv3_on_android.html


Open a Mac Terminal, first install PyTorch 1.9.0 and torchvision 0.10.0 using command like `pip install torch torchvision`, then run the following commands:
Open a Mac Terminal, first install PyTorch 1.9 and torchvision 0.10 using command like `pip install torch torchvision`, then run the following commands:

```
git clone https://github.com/pytorch/ios-demo-app
@@ -31,7 +29,7 @@ python deeplabv3.py

The Python script `deeplabv3.py` is used to generate the Lite Interpreter model file `deeplabv3_scripted.ptl` to be used in iOS.
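
The full script appears in the `deeplabv3.py` diff below; after this change its core reduces to roughly:

```
import torch
from torch.utils.mobile_optimizer import optimize_for_mobile

model = torch.hub.load('pytorch/vision:v0.9.0', 'deeplabv3_resnet50', pretrained=True)
model.eval()

scripted_module = torch.jit.script(model)
optimized_model = optimize_for_mobile(scripted_module)
optimized_model._save_for_lite_interpreter("ImageSegmentation/deeplabv3_scripted.ptl")
```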

### 2. Use LibTorch
### 2. Use LibTorch-Lite

Run the commands below (note the `Podfile` uses `pod 'LibTorch-Lite', '~>1.9.0'`):

4 changes: 3 additions & 1 deletion ImageSegmentation/deeplabv3.py
@@ -1,7 +1,9 @@
import torch
from torch.utils.mobile_optimizer import optimize_for_mobile

model = torch.hub.load('pytorch/vision:v0.9.0', 'deeplabv3_resnet50', pretrained=True)
model.eval()

scripted_module = torch.jit.script(model)
scripted_module._save_for_lite_interpreter("ImageSegmentation/deeplabv3_scripted.ptl")
optimized_model = optimize_for_mobile(scripted_module)
optimized_model._save_for_lite_interpreter("ImageSegmentation/deeplabv3_scripted.ptl")
8 changes: 4 additions & 4 deletions ObjectDetection/ObjectDetection.xcodeproj/project.pbxproj
@@ -7,7 +7,6 @@
objects = {

/* Begin PBXBuildFile section */
266E871F2563077D00CF5151 /* yolov5s.torchscript.pt in Resources */ = {isa = PBXBuildFile; fileRef = 266E871E2563077D00CF5151 /* yolov5s.torchscript.pt */; };
266E87232563120D00CF5151 /* classes.txt in Resources */ = {isa = PBXBuildFile; fileRef = 266E87222563120D00CF5151 /* classes.txt */; };
266E8746256350C000CF5151 /* CameraController.swift in Sources */ = {isa = PBXBuildFile; fileRef = 266E8742256350C000CF5151 /* CameraController.swift */; };
266E8747256350C000CF5151 /* CVPixelBuffer+Helper.swift in Sources */ = {isa = PBXBuildFile; fileRef = 266E8743256350C000CF5151 /* CVPixelBuffer+Helper.swift */; };
@@ -25,11 +24,11 @@
269E748D255CC6D100B1D6CA /* test2.jpg in Resources */ = {isa = PBXBuildFile; fileRef = 269E748B255CC6D100B1D6CA /* test2.jpg */; };
269E7498255CCBA000B1D6CA /* InferenceModule.mm in Sources */ = {isa = PBXBuildFile; fileRef = 269E7496255CCBA000B1D6CA /* InferenceModule.mm */; };
269E749B255CCBC300B1D6CA /* UIImage+Helper.swift in Sources */ = {isa = PBXBuildFile; fileRef = 269E749A255CCBC300B1D6CA /* UIImage+Helper.swift */; };
26A8C11426E17F8100F4A58D /* yolov5s.torchscript.ptl in Resources */ = {isa = PBXBuildFile; fileRef = 26A8C11326E17F8100F4A58D /* yolov5s.torchscript.ptl */; };
26FAB5692565E8FC00EA6ED6 /* PrePostProcessor.swift in Sources */ = {isa = PBXBuildFile; fileRef = 26FAB5682565E8FC00EA6ED6 /* PrePostProcessor.swift */; };
/* End PBXBuildFile section */

/* Begin PBXFileReference section */
266E871E2563077D00CF5151 /* yolov5s.torchscript.pt */ = {isa = PBXFileReference; lastKnownFileType = file; path = yolov5s.torchscript.pt; sourceTree = "<group>"; };
266E87222563120D00CF5151 /* classes.txt */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = classes.txt; sourceTree = "<group>"; };
266E8742256350C000CF5151 /* CameraController.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = CameraController.swift; sourceTree = "<group>"; };
266E8743256350C000CF5151 /* CVPixelBuffer+Helper.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = "CVPixelBuffer+Helper.swift"; sourceTree = "<group>"; };
@@ -51,6 +50,7 @@
269E7496255CCBA000B1D6CA /* InferenceModule.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = InferenceModule.mm; sourceTree = "<group>"; };
269E7497255CCBA000B1D6CA /* InferenceModule.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = InferenceModule.h; sourceTree = "<group>"; };
269E749A255CCBC300B1D6CA /* UIImage+Helper.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = "UIImage+Helper.swift"; sourceTree = "<group>"; };
26A8C11326E17F8100F4A58D /* yolov5s.torchscript.ptl */ = {isa = PBXFileReference; lastKnownFileType = file; path = yolov5s.torchscript.ptl; sourceTree = "<group>"; };
26FAB5682565E8FC00EA6ED6 /* PrePostProcessor.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = PrePostProcessor.swift; sourceTree = "<group>"; };
/* End PBXFileReference section */

@@ -110,7 +110,7 @@
269E748B255CC6D100B1D6CA /* test2.jpg */,
269E748A255CC6D100B1D6CA /* test3.png */,
266E87222563120D00CF5151 /* classes.txt */,
266E871E2563077D00CF5151 /* yolov5s.torchscript.pt */,
26A8C11326E17F8100F4A58D /* yolov5s.torchscript.ptl */,
);
path = ObjectDetection;
sourceTree = "<group>";
@@ -200,8 +200,8 @@
isa = PBXResourcesBuildPhase;
buildActionMask = 2147483647;
files = (
266E871F2563077D00CF5151 /* yolov5s.torchscript.pt in Resources */,
269E747E255CC56400B1D6CA /* LaunchScreen.storyboard in Resources */,
26A8C11426E17F8100F4A58D /* yolov5s.torchscript.ptl in Resources */,
266E87232563120D00CF5151 /* classes.txt in Resources */,
269E747B255CC56400B1D6CA /* Assets.xcassets in Resources */,
269E748D255CC6D100B1D6CA /* test2.jpg in Resources */,
Expand Up @@ -5,6 +5,7 @@
// LICENSE file in the root directory of this source tree.

#import <Foundation/Foundation.h>
#import <UIKit/UIKit.h>

NS_ASSUME_NONNULL_BEGIN
