From 6fe5d12ee052e7a76b933d351fcb45588fdd0ec6 Mon Sep 17 00:00:00 2001
From: Jeff Tang
Date: Fri, 10 Sep 2021 15:21:01 -0700
Subject: [PATCH] modified ImageSegmentation to use full jit and added steps to
 convert it to use lite

---
 .../project.pbxproj                        |  8 ++--
 .../ImageSegmentation/TorchModule.mm       |  7 +--
 .../ImageSegmentation/ViewController.swift |  2 +-
 ImageSegmentation/Podfile                  |  2 +-
 ImageSegmentation/README.md                | 46 +++++++++++++++----
 ImageSegmentation/deeplabv3.py             |  1 +
 6 files changed, 49 insertions(+), 17 deletions(-)

diff --git a/ImageSegmentation/ImageSegmentation.xcodeproj/project.pbxproj b/ImageSegmentation/ImageSegmentation.xcodeproj/project.pbxproj
index e7645f5..e42d5d9 100644
--- a/ImageSegmentation/ImageSegmentation.xcodeproj/project.pbxproj
+++ b/ImageSegmentation/ImageSegmentation.xcodeproj/project.pbxproj
@@ -8,6 +8,7 @@

 /* Begin PBXBuildFile section */
 2655CA6425454E6E006AD893 /* UIImageHelper.mm in Sources */ = {isa = PBXBuildFile; fileRef = 2655CA6225454E6E006AD893 /* UIImageHelper.mm */; };
+ 2658DBFB26EC059F00AA0F61 /* deeplabv3_scripted.pt in Resources */ = {isa = PBXBuildFile; fileRef = 2658DBFA26EC059F00AA0F61 /* deeplabv3_scripted.pt */; };
 265BAFE8253A6A6800467AC4 /* AppDelegate.swift in Sources */ = {isa = PBXBuildFile; fileRef = 265BAFE7253A6A6800467AC4 /* AppDelegate.swift */; };
 265BAFEA253A6A6800467AC4 /* SceneDelegate.swift in Sources */ = {isa = PBXBuildFile; fileRef = 265BAFE9253A6A6800467AC4 /* SceneDelegate.swift */; };
 265BAFEC253A6A6800467AC4 /* ViewController.swift in Sources */ = {isa = PBXBuildFile; fileRef = 265BAFEB253A6A6800467AC4 /* ViewController.swift */; };
@@ -18,12 +19,12 @@
 265BB00E253A6E0E00467AC4 /* UIImage+Helper.swift in Sources */ = {isa = PBXBuildFile; fileRef = 265BB00D253A6E0E00467AC4 /* UIImage+Helper.swift */; };
 265BB017253A7F0500467AC4 /* TorchModule.mm in Sources */ = {isa = PBXBuildFile; fileRef = 265BB015253A7F0500467AC4 /* TorchModule.mm */; };
 265F9A6F2551CB3700B8F2EC /* dog.jpg in Resources */ = {isa = PBXBuildFile; fileRef = 265F9A6E2551CB3700B8F2EC /* dog.jpg */; };
- 266A451D267974C300548578 /* deeplabv3_scripted.ptl in Resources */ = {isa = PBXBuildFile; fileRef = 266A451C267974C300548578 /* deeplabv3_scripted.ptl */; };
 /* End PBXBuildFile section */

 /* Begin PBXFileReference section */
 2655CA6225454E6E006AD893 /* UIImageHelper.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = UIImageHelper.mm; sourceTree = "<group>"; };
 2655CA6325454E6E006AD893 /* UIImageHelper.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = UIImageHelper.h; sourceTree = "<group>"; };
+ 2658DBFA26EC059F00AA0F61 /* deeplabv3_scripted.pt */ = {isa = PBXFileReference; lastKnownFileType = file; path = deeplabv3_scripted.pt; sourceTree = "<group>"; };
 265BAFE4253A6A6800467AC4 /* ImageSegmentation.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = ImageSegmentation.app; sourceTree = BUILT_PRODUCTS_DIR; };
 265BAFE7253A6A6800467AC4 /* AppDelegate.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = AppDelegate.swift; sourceTree = "<group>"; };
 265BAFE9253A6A6800467AC4 /* SceneDelegate.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = SceneDelegate.swift; sourceTree = "<group>"; };
@@ -38,7 +39,6 @@
 265BB015253A7F0500467AC4 /* TorchModule.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = TorchModule.mm; sourceTree = "<group>"; };
 265BB016253A7F0500467AC4 /* TorchModule.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TorchModule.h; sourceTree = "<group>"; };
 265F9A6E2551CB3700B8F2EC /* dog.jpg */ = {isa = PBXFileReference; lastKnownFileType = image.jpeg; path = dog.jpg; sourceTree = "<group>"; };
- 266A451C267974C300548578 /* deeplabv3_scripted.ptl */ = {isa = PBXFileReference; lastKnownFileType = file; path = deeplabv3_scripted.ptl; sourceTree = "<group>"; };
 /* End PBXFileReference section */

 /* Begin PBXFrameworksBuildPhase section */
@@ -92,7 +92,7 @@
 265BB015253A7F0500467AC4 /* TorchModule.mm */,
 265BB00D253A6E0E00467AC4 /* UIImage+Helper.swift */,
 265BAFFF253A6B1200467AC4 /* ImageSegmentation-Bridging-Header.h */,
- 266A451C267974C300548578 /* deeplabv3_scripted.ptl */,
+ 2658DBFA26EC059F00AA0F61 /* deeplabv3_scripted.pt */,
 265BB007253A6B9600467AC4 /* deeplab.jpg */,
 265F9A6E2551CB3700B8F2EC /* dog.jpg */,
 );
@@ -161,8 +161,8 @@
 265BAFF4253A6A6900467AC4 /* LaunchScreen.storyboard in Resources */,
 265BB008253A6B9600467AC4 /* deeplab.jpg in Resources */,
 265BAFF1253A6A6900467AC4 /* Assets.xcassets in Resources */,
+ 2658DBFB26EC059F00AA0F61 /* deeplabv3_scripted.pt in Resources */,
 265BAFEF253A6A6800467AC4 /* Main.storyboard in Resources */,
- 266A451D267974C300548578 /* deeplabv3_scripted.ptl in Resources */,
 );
 runOnlyForDeploymentPostprocessing = 0;
 };
diff --git a/ImageSegmentation/ImageSegmentation/TorchModule.mm b/ImageSegmentation/ImageSegmentation/TorchModule.mm
index 66e6d86..6edfac4 100644
--- a/ImageSegmentation/ImageSegmentation/TorchModule.mm
+++ b/ImageSegmentation/ImageSegmentation/TorchModule.mm
@@ -8,18 +8,19 @@
 #import "UIImageHelper.h"
 #import
 #import
-#import <Libtorch-Lite/Libtorch-Lite.h>
+#import <LibTorch/LibTorch.h>

 @implementation TorchModule {
 @protected
- torch::jit::mobile::Module _impl;
+ torch::jit::script::Module _impl;
 }

 - (nullable instancetype)initWithFileAtPath:(NSString*)filePath {
 self = [super init];
 if (self) {
 try {
- _impl = torch::jit::_load_for_mobile(filePath.UTF8String);
+ _impl = torch::jit::load(filePath.UTF8String);
+ _impl.eval();
 } catch (const std::exception& exception) {
 NSLog(@"%s", exception.what());
 return nil;
diff --git a/ImageSegmentation/ImageSegmentation/ViewController.swift b/ImageSegmentation/ImageSegmentation/ViewController.swift
index 4bd8617..d2c4678 100644
--- a/ImageSegmentation/ImageSegmentation/ViewController.swift
+++ b/ImageSegmentation/ImageSegmentation/ViewController.swift
@@ -10,7 +10,7 @@ class ViewController: UIViewController {
 private lazy var module: TorchModule = {
 if let filePath = Bundle.main.path(forResource:
- "deeplabv3_scripted", ofType: "ptl"),
+ "deeplabv3_scripted", ofType: "pt"),
 let module = TorchModule(fileAtPath: filePath) {
 return module
 } else {
diff --git a/ImageSegmentation/Podfile b/ImageSegmentation/Podfile
index 80b3b6b..4383143 100644
--- a/ImageSegmentation/Podfile
+++ b/ImageSegmentation/Podfile
@@ -6,5 +6,5 @@ target 'ImageSegmentation' do
 use_frameworks!

 # Pods for ImageSegmentation
- pod 'LibTorch-Lite', '~>1.9.0'
+ pod 'LibTorch', '~>1.9.0'
 end
diff --git a/ImageSegmentation/README.md b/ImageSegmentation/README.md
index a40e136..e4d9bd5 100644
--- a/ImageSegmentation/README.md
+++ b/ImageSegmentation/README.md
@@ -2,13 +2,13 @@

 ## Introduction

-This repo offers a Python script that converts the [PyTorch DeepLabV3 model](https://pytorch.org/hub/pytorch_vision_deeplabv3_resnet101) to the Mobile Interpreter version and an iOS app that uses the model to segment images.
+This repo offers a Python script that converts the [PyTorch DeepLabV3 model](https://pytorch.org/hub/pytorch_vision_deeplabv3_resnet101) to both the Full JIT and the Lite/Mobile Interpreter formats, and an iOS app that uses the Full JIT model to segment images. Steps for preparing the Lite model and making the code changes needed in the Xcode project to use it are also provided.

 ## Prerequisites

 * PyTorch 1.9 and torchvision 0.10 (Optional)
 * Python 3.8 or above (Optional)
-* iOS Cocoapods LibTorch-Lite 1.9.0 and LibTorchvision 0.10.0
+* iOS CocoaPods LibTorch 1.9.0 or LibTorch-Lite 1.9.0
 * Xcode 12.4 or later

 ## Quick Start
@@ -17,9 +17,9 @@ To Test Run the Image Segmentation iOS App, follow the steps below:

 ### 1. Prepare the Model

-If you don't have the PyTorch environment set up to run the script below to generate the model file, you can download it to the `ios-demo-app/ImageSegmentation` folder using the link [here](https://pytorch-mobile-demo-apps.s3.us-east-2.amazonaws.com/deeplabv3_scripted.ptl).
+If you don't have the PyTorch environment set up to run the script below and generate the full JIT model file, you can download it to the `ios-demo-app/ImageSegmentation` folder using the link [here](https://pytorch-mobile-demo-apps.s3.us-east-2.amazonaws.com/deeplabv3_scripted.pt).

-Open a Mac Terminal, first install PyTorch 1.9 and torchvision 0.10 using command like `pip install torch torchvision`, then run the following commands:
+Open a Terminal, first install PyTorch 1.9 and torchvision 0.10 using a command like `pip install torch torchvision`, then run the following commands:

 ```
 git clone https://github.com/pytorch/ios-demo-app
@@ -27,15 +27,15 @@ cd ios-demo-app/ImageSegmentation
 python deeplabv3.py
 ```

-The Python script `deeplabv3.py` is used to generate the Lite Interpreter model file `deeplabv3_scripted.ptl` to be used in iOS.
+The Python script `deeplabv3.py` generates both the full JIT model file `deeplabv3_scripted.pt` and the Lite Interpreter model file `deeplabv3_scripted.ptl` for use in the iOS app.

-### 2. Use LibTorch-Lite
+### 2. Use LibTorch

-Run the commands below (note the `Podfile` uses `pod 'LibTorch-Lite', '~>1.9.0'`):
+Run the commands below (note the `Podfile` uses `pod 'LibTorch', '~>1.9.0'`):

 ```
 pod install
-open ImageSegmentation.xcworkspace/
+open ImageSegmentation.xcworkspace
 ```

 ### 3. Run the app

@@ -46,6 +46,36 @@ Select an iOS simulator or device on Xcode to run the app. The example image and

 Note that the `resized` method in `UIImage+Helper.swift` is used to speed up the model inference, but a smaller size may cause the result to be less accurate.

+## Using the Lite/Mobile Interpreter Model
+
+All the other iOS demo apps have been converted to use the new Mobile Interpreter model. This Image Segmentation demo app keeps using the full JIT model so it can illustrate how to convert a demo that uses a full JIT model into one that uses the Mobile Interpreter model, in three simple steps.
+
+### 1. Prepare the Lite model
+
+If you don't have the PyTorch environment set up to run the script `deeplabv3.py` and generate the mobile interpreter model file, you can download it to the `ios-demo-app/ImageSegmentation` folder using the link [here](https://pytorch-mobile-demo-apps.s3.us-east-2.amazonaws.com/deeplabv3_scripted.ptl). If you prefer to generate the model yourself, just run `python deeplabv3.py`.
+
+Note that to save a model in the mobile interpreter format, simply call `_save_for_lite_interpreter`, as shown at the end of `deeplabv3.py`:
+```
+optimized_model.save("ImageSegmentation/deeplabv3_scripted.pt")
+optimized_model._save_for_lite_interpreter("ImageSegmentation/deeplabv3_scripted.ptl")
+```
+
+### 2. Modify the Podfile
+
+If you already went through the previous section and have the demo using the full JIT model up and running, close Xcode, go to the `ios-demo-app/ImageSegmentation` directory, and run `pod deintegrate` first.
+
+In `Podfile`, change `pod 'LibTorch', '~>1.9.0'` to `pod 'LibTorch-Lite', '~>1.9.0'`.
+
+Then run `pod install` and `open ImageSegmentation.xcworkspace`. Don't forget to drag and drop the `deeplabv3_scripted.ptl` file from step 1 into the project.
+
+### 3. Change the iOS code
+
+In `TorchModule.mm`, first change `#import <LibTorch/LibTorch.h>` to `#import <Libtorch-Lite/Libtorch-Lite.h>`, then change `@protected torch::jit::script::Module _impl;` to `@protected torch::jit::mobile::Module _impl;` and `_impl = torch::jit::load(filePath.UTF8String);` to `_impl = torch::jit::_load_for_mobile(filePath.UTF8String);`.
+
+Finally, in `ViewController.swift`, change `ofType: "pt"` to `ofType: "ptl"` in `Bundle.main.path(forResource: "deeplabv3_scripted", ofType: "pt")`.
+
+Now you can build and run the app using the Lite/Mobile Interpreter model.
+
 ## Tutorial

 Read the tutorial [here](https://pytorch.org/tutorials/beginner/deeplabv3_on_ios.html) for detailed step-by-step instructions of how to prepare and run the [PyTorch DeepLabV3 model](https://pytorch.org/hub/pytorch_vision_deeplabv3_resnet101) on iOS, as well as practical tips on how to successfully use a pre-trained PyTorch model on iOS and avoid common pitfalls.
diff --git a/ImageSegmentation/deeplabv3.py b/ImageSegmentation/deeplabv3.py
index fc8f26c..ce9abc4 100644
--- a/ImageSegmentation/deeplabv3.py
+++ b/ImageSegmentation/deeplabv3.py
@@ -6,4 +6,5 @@
 scripted_module = torch.jit.script(model)

 optimized_model = optimize_for_mobile(scripted_module)
+optimized_model.save("ImageSegmentation/deeplabv3_scripted.pt")
 optimized_model._save_for_lite_interpreter("ImageSegmentation/deeplabv3_scripted.ptl")
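
For reference, below is a minimal sketch of what the complete `deeplabv3.py` export flow could look like after this patch. Only the two save calls at the end are taken from the diff above; the model-loading lines are an assumption (the pretrained `deeplabv3_resnet50` variant from torchvision is assumed here and may not match the actual script).

```
# Sketch of the export script; only the last two save calls appear in the patch above.
# The deeplabv3_resnet50 variant is an assumption, not confirmed by the patch.
import torch
import torchvision
from torch.utils.mobile_optimizer import optimize_for_mobile

model = torchvision.models.segmentation.deeplabv3_resnet50(pretrained=True)
model.eval()  # switch to inference mode before scripting

scripted_module = torch.jit.script(model)
optimized_model = optimize_for_mobile(scripted_module)

# Full JIT model, loaded on iOS with torch::jit::load and the LibTorch pod.
optimized_model.save("ImageSegmentation/deeplabv3_scripted.pt")
# Lite/Mobile Interpreter model, loaded with torch::jit::_load_for_mobile and the LibTorch-Lite pod.
optimized_model._save_for_lite_interpreter("ImageSegmentation/deeplabv3_scripted.ptl")
```

Calling `eval()` before scripting matters because it switches batch norm to its running statistics and disables dropout, which matches the `_impl.eval()` call the patch adds on the iOS side.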
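As an optional sanity check before dragging the model files into Xcode, the two exports can be compared in Python. This is not part of the patch; it assumes a PyTorch 1.9 desktop build that exposes `torch.jit.mobile._load_for_lite_interpreter` and includes the XNNPACK ops inserted by `optimize_for_mobile`.

```
# Optional check (not part of the patch): load both exports and compare their outputs.
import torch
from torch.jit.mobile import _load_for_lite_interpreter  # assumed available in PyTorch 1.9

full_jit = torch.jit.load("ImageSegmentation/deeplabv3_scripted.pt")
full_jit.eval()
lite = _load_for_lite_interpreter("ImageSegmentation/deeplabv3_scripted.ptl")

# DeepLabV3 takes an NCHW float batch and returns a dict with an "out" tensor.
x = torch.rand(1, 3, 250, 250)
with torch.no_grad():
    out_full = full_jit(x)["out"]
    out_lite = lite(x)["out"]

print("max abs difference:", (out_full - out_lite).abs().max().item())  # expect a value near 0
```

The 250x250 input size is arbitrary; any reasonable image size works for this check.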