Fboemer/grappler (#285)
* Reduced usage of command-line flags (a configuration sketch follows this list):
  - NGRAPH_COMPLEX_PACK is now specified as part of the encryption parameters
  - NGRAPH_ENCRYPT_DATA has been replaced by `encrypt` in a `server_config.parameter_map`
  - NGRAPH_ENCRYPT_MODEL has been removed
  - NGRAPH_TF_BACKEND has been replaced by `ngraph_backend` in a `server_config.parameter_map`
  - NGRAPH_HE_SEAL_CONFIG is still used for debugging, but encryption parameters should be specified via `encryption_parameters` in a `server_config.parameter_map`
  - NGRAPH_ENABLE_CLIENT has been replaced by `enable_client` in a `server_config.parameter_map`
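
As a minimal sketch of what this looks like in practice (the optimizer name `ngraph-optimizer` and any `parameter_map` keys beyond those listed above are assumptions, not confirmed API), a `server_config` can be assembled through TensorFlow's grappler custom-optimizer configuration:

```python
import tensorflow as tf
from tensorflow.core.protobuf import rewriter_config_pb2

# Build a session config whose grappler custom optimizer carries the
# options that used to be environment variables.
config = tf.compat.v1.ConfigProto()
rewriter_options = config.graph_options.rewrite_options
rewriter_options.meta_optimizer_iterations = rewriter_config_pb2.RewriterConfig.ONE
server_config = rewriter_options.custom_optimizers.add()
server_config.name = 'ngraph-optimizer'  # assumed registered optimizer name

server_config.parameter_map['ngraph_backend'].s = b'HE_SEAL'  # was NGRAPH_TF_BACKEND
server_config.parameter_map['enable_client'].s = b'True'      # was NGRAPH_ENABLE_CLIENT
server_config.parameter_map['encrypt'].s = b'True'            # was NGRAPH_ENCRYPT_DATA
# Path to a JSON file such as configs/he_seal_ckks_config_N11_L1.json
server_config.parameter_map['encryption_parameters'].s = b'config.json'

sess = tf.compat.v1.Session(config=config)
```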

* Use `HEOpAnnotations` and a `PropagateHEAnnotations` pass to label each node as encrypted vs. plaintext, packed vs. unpacked, and from_client vs. from_server (a toy sketch follows)
  - Not every Node/Op has an annotation (pending NervanaSystems/ngraph#3752 and NervanaSystems/ngraph#3738), so some logic is redundant / incomplete
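
For intuition, a toy Python sketch of what an annotation-propagation pass does; the real pass is a C++ graph pass inside he-transformer, and all names below are illustrative only:

```python
def propagate_he_annotations(nodes):
    """Toy worklist pass: a node becomes `encrypted` if any of its inputs is.

    `nodes` is a list of objects with `.inputs` (producer nodes) and
    `.annotation` (a dict); the same idea applies to the packed and
    from_client labels.
    """
    changed = True
    while changed:
        changed = False
        for node in nodes:
            if node.annotation.get('encrypted', False):
                continue
            if any(inp.annotation.get('encrypted', False)
                   for inp in node.inputs):
                node.annotation['encrypted'] = True
                changed = True
```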

* Add plaintexts to protobuf messages

* Fixed clang client build
* Preliminary support for `max` / `divide` / `exp` / `softmax` ops

* MobileNetV2 example is broken due to Power() not being constant-folded (fixed in NervanaSystems/ngraph#3725, but not yet picked up in ngraph-bridge)

* Expanded unit-tests for better coverage of plaintext packing / complex packing

* `tf.Keras` remains unsupported: the function is split across several ngraph-tf clusters, and each cluster's result op is a plain tensor (created via create_tensor). Each cluster's result is therefore plaintext, where we want ciphertext; one solution would be to have a single HETensor class
fboemer committed Oct 16, 2019
1 parent e786c01 commit 862e300
Showing 125 changed files with 11,168 additions and 8,662 deletions.
30 changes: 13 additions & 17 deletions CMakeLists.txt
@@ -20,25 +20,28 @@ cmake_minimum_required(VERSION 3.10)
 # Global project name
 project(he_transformer LANGUAGES CXX)
 
-# CXX flags
-if(DEFINED CMAKE_CXX_FLAGS)
-  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++1z -std=gnu++1z -Wall")
-else()
-  set(CMAKE_CXX_FLAGS "-std=c++1z -std=gnu++1z -Wall")
-endif()
-
+# Cmake flags. SEAL requires C++17
+set(CMAKE_CXX_STANDARD 17)
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
+set(CMAKE_CXX_EXTENSIONS OFF)
+set(CMAKE_INSTALL_MESSAGE LAZY)
 set(CMAKE_CXX_FLAGS
-    "${CMAKE_CXX_FLAGS} -march=native -Wno-deprecated-declarations")
+    "${CMAKE_CXX_FLAGS} -Wall -march=native -Wno-deprecated-declarations")
 set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS} -g")
 set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS} -O0 -g")
 
+# This allows libhe_seal_backend.so to find libraries in the same directory
+set(CMAKE_INSTALL_RPATH "\$ORIGIN")
+
+# These variables are undocumented but useful.
+set(CMAKE_DISABLE_SOURCE_CHANGES ON)
+set(CMAKE_DISABLE_IN_SOURCE_BUILD ON)
+
 if("${CMAKE_CXX_COMPILER_ID}" MATCHES "^(Apple)?Clang$")
   message(STATUS "Setting clang flags...")
   include(cmake/clang_flags.cmake)
 endif()
 
-set(CMAKE_INSTALL_MESSAGE LAZY)
-
 if(CMAKE_BUILD_TYPE)
   set(RELEASE_TYPES
       Debug
@@ -56,17 +56,10 @@ endif()
 
 include(cmake/sdl.cmake)
 
-# These variables are undocumented but useful.
-set(CMAKE_DISABLE_SOURCE_CHANGES ON)
-set(CMAKE_DISABLE_IN_SOURCE_BUILD ON)
-
 # For ngraph/log
 set(PROJECT_ROOT_DIR "${CMAKE_CURRENT_SOURCE_DIR}")
 add_definitions(-DPROJECT_ROOT_DIR="${PROJECT_ROOT_DIR}")
 
-# This allows libhe_seal_backend.so to find libraries in the same directory
-set(CMAKE_INSTALL_RPATH "\$ORIGIN")
-
 # he-transformer headers
 set(HE_TRANSFORMER_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/src)
7 changes: 3 additions & 4 deletions README.md
@@ -28,8 +28,8 @@ The [examples](https://github.com/NervanaSystems/he-transformer/tree/master/exam
 - virtualenv v16.1.0
 - bazel v0.25.2
 #### The following dependencies are built automatically
-- [nGraph](https://github.com/NervanaSystems/ngraph) - v0.25.0
-- [nGraph-tf](https://github.com/tensorflow/ngraph-bridge) - v0.18.1
+- [nGraph](https://github.com/NervanaSystems/ngraph) - v0.25.1-rc.7
+- [nGraph-tf](https://github.com/tensorflow/ngraph-bridge) - v0.19.0-rc4
 - [SEAL](https://github.com/Microsoft/SEAL) - v3.3.1
 - [TensorFlow](https://github.com/tensorflow/tensorflow) - v1.14.0
 - [Boost](https://github.com/boostorg) v1.69
@@ -77,8 +77,7 @@ make doc
 to create doxygen documentation in `$HE_TRANSFORMER/build/doc/doxygen`.
 
 #### 1b. Python bindings for client
-To build an experimental client-server model with python bindings, see the [python](https://github.com/NervanaSystems/he-transformer/tree/master/python) folder.
-***Note***: This feature is experimental. For best experience, you should skip this step.
+To build a client-server model with python bindings, see the [python](https://github.com/NervanaSystems/he-transformer/tree/master/python) folder.
 
 ### 2. Run C++ unit-tests
 Ensure the virtual environment is active, i.e. run `source $HE_TRANSFORMER/build/external/venv-tf-py3/bin/activate`
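
For orientation, a hypothetical client-side sketch of the Python bindings referenced above; the `HESealClient` constructor arguments and the `('encrypt', data)` input format are assumptions that may differ from the version in this commit:

```python
import time
import numpy as np
from pyhe_client import HESealClient

batch_size = 1
data = np.random.rand(batch_size, 784).flatten('C')

# Hostname and port must match the running server (values assumed here).
client = HESealClient('localhost', 34000, batch_size,
                      {'input': ('encrypt', data)})
while not client.is_done():
    time.sleep(1)
results = np.array(client.get_results())
print(results)
```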
5 changes: 4 additions & 1 deletion cmake/ngraph-tf.cmake
@@ -20,7 +20,7 @@ set(EXTERNAL_NGRAPH_INSTALL_DIR ${EXTERNAL_INSTALL_DIR})
 set(NGRAPH_TF_CMAKE_PREFIX ext_ngraph_tf)
 
 set(NGRAPH_TF_REPO_URL https://github.com/tensorflow/ngraph-bridge.git)
-set(NGRAPH_TF_GIT_LABEL v0.18.1)
+set(NGRAPH_TF_GIT_LABEL cad093d84cc3a1ce212d8a96c67217321b44309b)
 
 set(NGRAPH_TF_SRC_DIR
     ${CMAKE_BINARY_DIR}/${NGRAPH_TF_CMAKE_PREFIX}/src/${NGRAPH_TF_CMAKE_PREFIX})
@@ -56,6 +56,9 @@ if(${USE_PREBUILT_TF})
   )
 endif()
 
+# TODO: enable other options
+set(ng_tf_build_flags "--use_grappler_optimizer")
+
 ExternalProject_Add(ext_ngraph_tf
   GIT_REPOSITORY ${NGRAPH_TF_REPO_URL}
   GIT_TAG ${NGRAPH_TF_GIT_LABEL}
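
ngraph-bridge's own build script consumes these flags. Assuming the pinned revision's `build_ngtf.py` accepts the flag (not verified against commit cad093d), the equivalent manual build would be:

```bash
python3 build_ngtf.py --use_grappler_optimizer
```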
3 changes: 2 additions & 1 deletion configs/he_seal_ckks_config_N11_L1.json
@@ -4,5 +4,6 @@
   "security_level": 128,
   "coeff_modulus": [
     54
-  ]
+  ],
+  "complex_packing": true
 }
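
For reference, a sketch of the full file after this change; the keys not visible in the hunk (`scheme_name`, and `poly_modulus_degree` inferred from the `N11` in the filename) are assumptions:

```json
{
  "scheme_name": "HE_SEAL",
  "poly_modulus_degree": 2048,
  "security_level": 128,
  "coeff_modulus": [
    54
  ],
  "complex_packing": true
}
```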
@@ -1,18 +1,23 @@
 # MobileNet V2 example
 
 This folder demonstrates an example of inference on MobileNetV2.
-Note: this is a work in progress, and requires ~150GB memory.
+Note: this is a work in progress, and requires ~50GB memory.
 Runtime will be very slow without many cores.
 
-See here: https://github.com/tensorflow/models/tree/master/research/slim/nets/mobilenet
+See https://github.com/tensorflow/models/tree/master/research/slim/nets/mobilenet
 for a description.
 
 # Setup
 1. Make sure python env is active, i.e. run
 ```bash
 source $HE_TRANSFORMER/build/external/venv-tf-py3/bin/activate
 ```
-Also be sure the `pyhe_client` wheel has been installed
+Also ensure the `pyhe_client` wheel has been installed (see `python` folder for instructions).
+
+The examples rely on numpy and pillow, so run
+```bash
+pip install numpy pillow
+```
 
 2. Build Tensorflow graph transforms and add them to your path:
 
@@ -32,11 +37,6 @@ export PATH=$HE_TRANSFORMER/build/ext_ngraph_tf/src/ext_ngraph_tf/build_cmake/te
 python get_models.py
 ```
 
-4. To enable image processing, run
-```bash
-pip install pillow
-```
-
 # Image-Net evaluation
 1. First, sign up for an account at image-net.org
 2. Download the 2012 test_images (all tasks), 13GB, MD5 `e64ceb247e473635708aed23ab6d839`, from image-net.org
@@ -61,21 +61,23 @@
 For the remaining instructions, run
 ```bash
 export DATA_DIR=path_to_your_data_dir
 ```
-4. To run inference using TensorFlow on unencrypted data, call
+## CPU backend
+To run inference using the CPU backend on unencrypted data, call
 ```bash
 python test.py \
 --data_dir=$DATA_DIR \
---batch_size=300
+--batch_size=300 \
+--backend=CPU
 ```
 
+##
 5. To call inference using HE_SEAL's plaintext operations (for debugging), call
 ```bash
-NGRAPH_TF_BACKEND=HE_SEAL \
 STOP_CONST_FOLD=1 \
 python test.py \
 --data_dir=$DATA_DIR \
---ngraph=true \
---batch_size=300
+--batch_size=300 \
+--backend=HE_SEAL
 ```
 Note: the `STOP_CONST_FOLD` flag will prevent the constant-folding graph optimization.
 For large batch sizes, const folding incurs significant overhead during graph compilation and doesn't result in much runtime speedup.
@@ -36,8 +36,6 @@
 import util
 import numpy as np
 
-FLAGS = None
-
 
 def print_nodes(filename):
     graph_def = read_pb_file(filename)
@@ -68,8 +66,6 @@ def get_imagenet_labels():
 
 
 def main(FLAGS):
-    util.VAL_IMAGE_FLAGS = FLAGS
-
     imagenet_inference_labels = get_imagenet_inference_labels()
     imagenet_training_labels = get_imagenet_training_labels()
     assert (
@@ -19,21 +19,21 @@
 import numpy as np
 import json
 import argparse
-import os
 import time
 import PIL
 from PIL import Image
 import multiprocessing as mp
 import util
+import ngraph_bridge
 
 from util import get_imagenet_inference_labels, \
     get_imagenet_training_labels, \
     get_validation_image, \
     get_validation_images, \
     get_validation_labels, \
-    str2bool
-
-FLAGS = None
+    str2bool, \
+    server_argument_parser, \
+    server_config_from_flags
 
 
 def print_nodes(filename):
@@ -56,9 +56,8 @@ def load_model(filename):
 
 
 def main(FLAGS):
-    using_client = (os.environ.get('NGRAPH_ENABLE_CLIENT') is not None)
 
-    if using_client:
+    if FLAGS.enable_client:
         print('Using client')
     else:
         print('Not using client')
@@ -71,25 +70,27 @@ def main(FLAGS):
     assert (
         sorted(imagenet_training_labels) == sorted(imagenet_inference_labels))
 
-    if not using_client:
-        validation_nums = get_validation_labels(FLAGS)
-        x_test = get_validation_images(FLAGS)
-        validation_labels = imagenet_inference_labels[validation_nums]
-    else:
-        validation_nums = get_validation_labels(FLAGS)
-        validation_labels = imagenet_inference_labels[validation_nums]
+    validation_nums = get_validation_labels(FLAGS)
+    validation_labels = imagenet_inference_labels[validation_nums]
+
+    if FLAGS.enable_client:
+        # Server input is dummy
+        x_test = np.random.rand(FLAGS.batch_size, FLAGS.image_size,
+                                FLAGS.image_size, 3)
+    else:
+        x_test = get_validation_images(FLAGS)
 
-    if FLAGS.ngraph:
-        import ngraph_bridge
-        print(ngraph_bridge.__version__)
+    config = server_config_from_flags(FLAGS, 'input')
 
-    config = tf.compat.v1.ConfigProto()
-    config.intra_op_parallelism_threads = 44
-    config.inter_op_parallelism_threads = 44
-    if FLAGS.ngraph:
-        config = ngraph_bridge.update_config(config)
     sess = tf.compat.v1.Session(config=config)
     graph_def = load_model(FLAGS.model)
 
+    for node in graph_def.node:
+        if 'FusedBatchNorm' in node.name or 'Pow' in node.name:
+            print(node)
+
+    #print('node names', [n.name for n in graph_def.node])
+
     tf.import_graph_def(graph_def, name='')
 
     input_tensor = sess.graph.get_tensor_by_name('input:0')
@@ -111,7 +112,7 @@
     else:
         top5 = np.flip(y_pred.argsort()[:, -5:], axis=1)
 
-    if not using_client:
+    if not FLAGS.enable_client:
         preds = imagenet_training_labels[top5]
 
     if FLAGS.batch_size < 10:
@@ -124,7 +125,7 @@
 
 
 if __name__ == '__main__':
-    parser = argparse.ArgumentParser()
+    parser = server_argument_parser()
     parser.add_argument(
         '--data_dir',
        type=str,
@@ -161,7 +162,6 @@ def main(FLAGS):
         help='crop to this size before resizing to image_size')
     parser.add_argument(
         '--ngraph', type=str2bool, default=False, help='use ngraph backend')
-    parser.add_argument('--batch_size', type=int, default=1, help='Batch size')
     parser.add_argument(
         '--start_batch', type=int, default=0, help='Test data start index')
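
Since `server_argument_parser` and `server_config_from_flags` carry the new configuration flow, here is a hedged sketch of what these helpers in the examples' `util.py` might look like; flag names beyond those visible in this commit, the optimizer name, and the handling of `input_node_name` are assumptions:

```python
import argparse
import tensorflow as tf


def str2bool(v):
    # Minimal helper so boolean flags accept 'true'/'false'-style strings.
    return str(v).lower() in ('yes', 'true', 't', '1')


def server_argument_parser():
    """Parser pre-populated with the server-side flags used in this commit."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--backend', type=str, default='HE_SEAL',
                        help='backend to use (replaces NGRAPH_TF_BACKEND)')
    parser.add_argument('--enable_client', type=str2bool, default=False,
                        help='enable the client (replaces NGRAPH_ENABLE_CLIENT)')
    parser.add_argument('--encrypt_server_data', type=str2bool, default=False,
                        help='encrypt server data (replaces NGRAPH_ENCRYPT_DATA)')
    parser.add_argument('--encryption_parameters', type=str, default='',
                        help='filename or JSON string of encryption parameters')
    parser.add_argument('--batch_size', type=int, default=1, help='Batch size')
    return parser


def server_config_from_flags(flags, input_node_name):
    """Map parsed flags onto a grappler custom-optimizer parameter_map."""
    config = tf.compat.v1.ConfigProto()
    server_config = config.graph_options.rewrite_options.custom_optimizers.add()
    server_config.name = 'ngraph-optimizer'  # assumed optimizer name
    server_config.parameter_map['ngraph_backend'].s = flags.backend.encode()
    server_config.parameter_map['enable_client'].s = \
        str(flags.enable_client).encode()
    server_config.parameter_map['encrypt'].s = \
        str(flags.encrypt_server_data).encode()
    server_config.parameter_map['encryption_parameters'].s = \
        flags.encryption_parameters.encode()
    # Assumption: mark which graph input the client supplies.
    server_config.parameter_map[input_node_name].s = b'from_client'
    return config
```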
