8 changes: 4 additions & 4 deletions ImageSegmentation/ImageSegmentation.xcodeproj/project.pbxproj
@@ -8,6 +8,7 @@

/* Begin PBXBuildFile section */
2655CA6425454E6E006AD893 /* UIImageHelper.mm in Sources */ = {isa = PBXBuildFile; fileRef = 2655CA6225454E6E006AD893 /* UIImageHelper.mm */; };
+2658DBFB26EC059F00AA0F61 /* deeplabv3_scripted.pt in Resources */ = {isa = PBXBuildFile; fileRef = 2658DBFA26EC059F00AA0F61 /* deeplabv3_scripted.pt */; };
265BAFE8253A6A6800467AC4 /* AppDelegate.swift in Sources */ = {isa = PBXBuildFile; fileRef = 265BAFE7253A6A6800467AC4 /* AppDelegate.swift */; };
265BAFEA253A6A6800467AC4 /* SceneDelegate.swift in Sources */ = {isa = PBXBuildFile; fileRef = 265BAFE9253A6A6800467AC4 /* SceneDelegate.swift */; };
265BAFEC253A6A6800467AC4 /* ViewController.swift in Sources */ = {isa = PBXBuildFile; fileRef = 265BAFEB253A6A6800467AC4 /* ViewController.swift */; };
@@ -18,12 +19,12 @@
265BB00E253A6E0E00467AC4 /* UIImage+Helper.swift in Sources */ = {isa = PBXBuildFile; fileRef = 265BB00D253A6E0E00467AC4 /* UIImage+Helper.swift */; };
265BB017253A7F0500467AC4 /* TorchModule.mm in Sources */ = {isa = PBXBuildFile; fileRef = 265BB015253A7F0500467AC4 /* TorchModule.mm */; };
265F9A6F2551CB3700B8F2EC /* dog.jpg in Resources */ = {isa = PBXBuildFile; fileRef = 265F9A6E2551CB3700B8F2EC /* dog.jpg */; };
-266A451D267974C300548578 /* deeplabv3_scripted.ptl in Resources */ = {isa = PBXBuildFile; fileRef = 266A451C267974C300548578 /* deeplabv3_scripted.ptl */; };
/* End PBXBuildFile section */

/* Begin PBXFileReference section */
2655CA6225454E6E006AD893 /* UIImageHelper.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = UIImageHelper.mm; sourceTree = "<group>"; };
2655CA6325454E6E006AD893 /* UIImageHelper.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = UIImageHelper.h; sourceTree = "<group>"; };
+2658DBFA26EC059F00AA0F61 /* deeplabv3_scripted.pt */ = {isa = PBXFileReference; lastKnownFileType = file; path = deeplabv3_scripted.pt; sourceTree = "<group>"; };
265BAFE4253A6A6800467AC4 /* ImageSegmentation.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = ImageSegmentation.app; sourceTree = BUILT_PRODUCTS_DIR; };
265BAFE7253A6A6800467AC4 /* AppDelegate.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = AppDelegate.swift; sourceTree = "<group>"; };
265BAFE9253A6A6800467AC4 /* SceneDelegate.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = SceneDelegate.swift; sourceTree = "<group>"; };
@@ -38,7 +39,6 @@
265BB015253A7F0500467AC4 /* TorchModule.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = TorchModule.mm; sourceTree = "<group>"; };
265BB016253A7F0500467AC4 /* TorchModule.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TorchModule.h; sourceTree = "<group>"; };
265F9A6E2551CB3700B8F2EC /* dog.jpg */ = {isa = PBXFileReference; lastKnownFileType = image.jpeg; path = dog.jpg; sourceTree = "<group>"; };
-266A451C267974C300548578 /* deeplabv3_scripted.ptl */ = {isa = PBXFileReference; lastKnownFileType = file; path = deeplabv3_scripted.ptl; sourceTree = "<group>"; };
/* End PBXFileReference section */

/* Begin PBXFrameworksBuildPhase section */
@@ -92,7 +92,7 @@
265BB015253A7F0500467AC4 /* TorchModule.mm */,
265BB00D253A6E0E00467AC4 /* UIImage+Helper.swift */,
265BAFFF253A6B1200467AC4 /* ImageSegmentation-Bridging-Header.h */,
-266A451C267974C300548578 /* deeplabv3_scripted.ptl */,
+2658DBFA26EC059F00AA0F61 /* deeplabv3_scripted.pt */,
265BB007253A6B9600467AC4 /* deeplab.jpg */,
265F9A6E2551CB3700B8F2EC /* dog.jpg */,
);
@@ -161,8 +161,8 @@
265BAFF4253A6A6900467AC4 /* LaunchScreen.storyboard in Resources */,
265BB008253A6B9600467AC4 /* deeplab.jpg in Resources */,
265BAFF1253A6A6900467AC4 /* Assets.xcassets in Resources */,
+2658DBFB26EC059F00AA0F61 /* deeplabv3_scripted.pt in Resources */,
265BAFEF253A6A6800467AC4 /* Main.storyboard in Resources */,
-266A451D267974C300548578 /* deeplabv3_scripted.ptl in Resources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
7 changes: 4 additions & 3 deletions ImageSegmentation/ImageSegmentation/TorchModule.mm
@@ -8,18 +8,19 @@
#import "UIImageHelper.h"
#import <CoreImage/CoreImage.h>
#import <ImageIO/ImageIO.h>
-#import <Libtorch-Lite/Libtorch-Lite.h>
+#import <Libtorch/Libtorch.h>

@implementation TorchModule {
@protected
-torch::jit::mobile::Module _impl;
+torch::jit::script::Module _impl;
}

- (nullable instancetype)initWithFileAtPath:(NSString*)filePath {
self = [super init];
if (self) {
try {
-_impl = torch::jit::_load_for_mobile(filePath.UTF8String);
+_impl = torch::jit::load(filePath.UTF8String);
+_impl.eval();
} catch (const std::exception& exception) {
NSLog(@"%s", exception.what());
return nil;
2 changes: 1 addition & 1 deletion ImageSegmentation/ImageSegmentation/ViewController.swift
@@ -10,7 +10,7 @@ class ViewController: UIViewController {

private lazy var module: TorchModule = {
if let filePath = Bundle.main.path(forResource:
"deeplabv3_scripted", ofType: "ptl"),
"deeplabv3_scripted", ofType: "pt"),
let module = TorchModule(fileAtPath: filePath) {
return module
} else {
2 changes: 1 addition & 1 deletion ImageSegmentation/Podfile
@@ -6,5 +6,5 @@ target 'ImageSegmentation' do
use_frameworks!

# Pods for ImageSegmentation
-pod 'LibTorch-Lite', '~>1.9.0'
+pod 'LibTorch', '~>1.9.0'
end
46 changes: 38 additions & 8 deletions ImageSegmentation/README.md
@@ -2,13 +2,13 @@

## Introduction

-This repo offers a Python script that converts the [PyTorch DeepLabV3 model](https://pytorch.org/hub/pytorch_vision_deeplabv3_resnet101) to the Mobile Interpreter version and an iOS app that uses the model to segment images.
+This repo offers a Python script that converts the [PyTorch DeepLabV3 model](https://pytorch.org/hub/pytorch_vision_deeplabv3_resnet101) to both the Full JIT and the Lite/Mobile Interpreter versions, and an iOS app that uses the Full JIT model to segment images. Steps for preparing the Lite model and making the code changes in the Xcode project to use it are also provided.

## Prerequisites

* PyTorch 1.9 and torchvision 0.10 (Optional)
* Python 3.8 or above (Optional)
-* iOS Cocoapods LibTorch-Lite 1.9.0 and LibTorchvision 0.10.0
+* iOS CocoaPods LibTorch 1.9.0 or LibTorch-Lite 1.9.0
* Xcode 12.4 or later

## Quick Start
@@ -17,25 +17,25 @@ To Test Run the Image Segmentation iOS App, follow the steps below:

### 1. Prepare the Model

-If you don't have the PyTorch environment set up to run the script below to generate the model file, you can download it to the `ios-demo-app/ImageSegmentation` folder using the link [here](https://pytorch-mobile-demo-apps.s3.us-east-2.amazonaws.com/deeplabv3_scripted.ptl).
+If you don't have the PyTorch environment set up to run the script below to generate the full JIT model file, you can download it to the `ios-demo-app/ImageSegmentation` folder using the link [here](https://pytorch-mobile-demo-apps.s3.us-east-2.amazonaws.com/deeplabv3_scripted.pt).

-Open a Mac Terminal, first install PyTorch 1.9 and torchvision 0.10 using command like `pip install torch torchvision`, then run the following commands:
+Open a Terminal, first install PyTorch 1.9 and torchvision 0.10 with a command like `pip install torch torchvision`, then run the following commands:

```
git clone https://github.com/pytorch/ios-demo-app
cd ios-demo-app/ImageSegmentation
python deeplabv3.py
```

-The Python script `deeplabv3.py` is used to generate the Lite Interpreter model file `deeplabv3_scripted.ptl` to be used in iOS.
+The Python script `deeplabv3.py` generates both the full JIT and the Lite Interpreter model files, `deeplabv3_scripted.pt` and `deeplabv3_scripted.ptl`, for use in iOS (a sketch of the full script appears after the `deeplabv3.py` diff at the end of this page).

-### 2. Use LibTorch-Lite
+### 2. Use LibTorch

-Run the commands below (note the `Podfile` uses `pod 'LibTorch-Lite', '~>1.9.0'`):
+Run the commands below (note the `Podfile` uses `pod 'LibTorch', '~>1.9.0'`):

```
pod install
-open ImageSegmentation.xcworkspace/
+open ImageSegmentation.xcworkspace
```

### 3. Run the app
@@ -46,6 +46,36 @@ Select an iOS simulator or device on Xcode to run the app. The example image and

Note that the `resized` method in `UIImage+Helper.swift` is used to speed up the model inference, but a smaller size may cause the result to be less accurate.
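
To get a feel for this tradeoff before touching the app, you can time the model at two input sizes on the desktop. This is a minimal sketch, assuming the full JIT `deeplabv3_scripted.pt` from step 1; absolute times will vary by machine, and the sizes are just examples:

```
import time

import torch

# Load the full JIT model exported by deeplabv3.py; it is saved in eval
# mode, so no further preparation is needed
model = torch.jit.load("ImageSegmentation/deeplabv3_scripted.pt")

# DeepLabV3 is fully convolutional and accepts arbitrary input sizes;
# smaller inputs run faster but yield coarser segmentation masks
for size in (250, 500):
    x = torch.rand(1, 3, size, size)  # dummy image tensor
    start = time.time()
    with torch.no_grad():
        model(x)
    print(f"{size}x{size}: {time.time() - start:.2f}s")
```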

## Using the Lite/Mobile Interpreter Model

All the other iOS demo apps have been converted to use the new Mobile Interpreter model. This Image Segmentation demo is the exception: it illustrates how to convert a demo that uses a full JIT model into one that uses the mobile interpreter model, in three simple steps.

### 1. Prepare the Lite model

If you don't have the PyTorch environment set up to run the script `deeplabv3.py` to generate the mobile interpreter model file, you can download it to the `ios-demo-app/ImageSegmentation` folder using the link [here](https://pytorch-mobile-demo-apps.s3.us-east-2.amazonaws.com/deeplabv3_scripted.ptl). If you prefer to generate the model yourself, just run `python deeplabv3.py`.

Note that to save a model in the mobile interpreter format, simply call `_save_for_lite_interpreter`, as shown at the end of `deeplabv3.py`:
```
optimized_model.save("ImageSegmentation/deeplabv3_scripted.pt")
optimized_model._save_for_lite_interpreter("ImageSegmentation/deeplabv3_scripted.ptl")
```
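
Before bundling the `.ptl` into the app, you can sanity-check it from Python with the lite interpreter runtime. A minimal sketch, assuming PyTorch 1.9 (`_load_for_lite_interpreter` is the Python-side loader it ships with); the 250x250 input simply mirrors the app's resize:

```
import torch
from torch.jit.mobile import _load_for_lite_interpreter

# Load the exported model with the lite/mobile interpreter runtime
lite_model = _load_for_lite_interpreter("ImageSegmentation/deeplabv3_scripted.ptl")

x = torch.rand(1, 3, 250, 250)  # dummy image tensor
out = lite_model(x)["out"]      # DeepLabV3 returns a dict with an "out" tensor
print(out.shape)                # expect torch.Size([1, 21, 250, 250]), one channel per class
```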

### 2. Modify the Podfile

If you already went through the previous section and have the demo using the full JIT model up and running, close Xcode, go to the `ios-demo-app/ImageSegmentation` directory and run `pod deintegrate` first.

In `Podfile`, change `pod 'LibTorch', '~>1.9.0'` to `pod 'LibTorch-Lite', '~>1.9.0'`.

Then run `pod install` and `open ImageSegmentation.xcworkspace`. Don't forget to drag and drop the `deeplabv3_scripted.ptl` file from step 1 into the project.

### 3. Change the iOS code

In `TorchModule.mm`, first change `#import <Libtorch/Libtorch.h>` to `#import <Libtorch-Lite/Libtorch-Lite.h>`, then change `@protected torch::jit::script::Module _impl;` to `@protected torch::jit::mobile::Module _impl;` and `_impl = torch::jit::load(filePath.UTF8String);` to `_impl = torch::jit::_load_for_mobile(filePath.UTF8String);` (also drop the `_impl.eval();` call that the full JIT version added, as shown in the `TorchModule.mm` diff above).
**@xta0** (Contributor) commented on Sep 13, 2021:

> Why `@protected`? What would be the problem if we made it a private ivar?

**@jeffxtang** (Contributor, Author) replied on Sep 13, 2021:

> I got it from the HelloWorld example https://github.com/pytorch/ios-demo-app/blob/master/HelloWorld/HelloWorld/HelloWorld/TorchBridge/TorchModule.mm#L5 last year, I think, and never really thought about it. Even tutorials like https://pytorch.org/tutorials/recipes/mobile_interpreter.html refer to the `@protected`. Should we change it or leave it for consistency?
Finally, in `ViewController.swift`, change `ofType: "pt"` in `Bundle.main.path(forResource: "deeplabv3_scripted", ofType: "pt")` to `ofType: "ptl"`.

Now you can build and run the app using the Lite/Mobile interpreter model.
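
As a final check, you can confirm on the desktop that the two exported files behave identically, since both are saved from the same optimized module. A hedged sketch, reusing the file names from `deeplabv3.py`:

```
import torch
from torch.jit.mobile import _load_for_lite_interpreter

full_model = torch.jit.load("ImageSegmentation/deeplabv3_scripted.pt")
lite_model = _load_for_lite_interpreter("ImageSegmentation/deeplabv3_scripted.ptl")

x = torch.rand(1, 3, 250, 250)  # dummy image tensor
with torch.no_grad():
    out_full = full_model(x)["out"]
    out_lite = lite_model(x)["out"]

# Both files come from the same module, so outputs should match exactly
print(torch.allclose(out_full, out_lite))
```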

## Tutorial

Read the tutorial [here](https://pytorch.org/tutorials/beginner/deeplabv3_on_ios.html) for detailed step-by-step instructions of how to prepare and run the [PyTorch DeepLabV3 model](https://pytorch.org/hub/pytorch_vision_deeplabv3_resnet101) on iOS, as well as practical tips on how to successfully use a pre-trained PyTorch model on iOS and avoid common pitfalls.
1 change: 1 addition & 0 deletions ImageSegmentation/deeplabv3.py
@@ -6,4 +6,5 @@

scripted_module = torch.jit.script(model)
optimized_model = optimize_for_mobile(scripted_module)
+optimized_model.save("ImageSegmentation/deeplabv3_scripted.pt")
optimized_model._save_for_lite_interpreter("ImageSegmentation/deeplabv3_scripted.ptl")
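
For reference, the diff above shows only the tail of `deeplabv3.py`. A minimal sketch of the full script: the collapsed top presumably loads the model from Torch Hub, so the `deeplabv3_resnet50` variant and hub tag are assumptions, while the scripting and save calls match the diff:

```
import torch
from torch.utils.mobile_optimizer import optimize_for_mobile

# Load the pretrained DeepLabV3 model (variant and hub tag assumed;
# the top of the script is collapsed in the diff above)
model = torch.hub.load('pytorch/vision:v0.10.0', 'deeplabv3_resnet50', pretrained=True)
model.eval()

# Script the model and apply mobile-specific optimizations
scripted_module = torch.jit.script(model)
optimized_model = optimize_for_mobile(scripted_module)

# Save both formats: full JIT (.pt) and lite/mobile interpreter (.ptl)
optimized_model.save("ImageSegmentation/deeplabv3_scripted.pt")
optimized_model._save_for_lite_interpreter("ImageSegmentation/deeplabv3_scripted.ptl")
```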