Tensorflow conversion #47

Merged (46 commits) on Jul 27, 2019

Commits
5655c0e
Add pyredner Tensorflow front-end
SuperShinyEyes Jul 15, 2019
3e08bb7
Add auto build script for pyredner Tensorflow
SuperShinyEyes Jul 15, 2019
7ba7300
Add pyrednertensorflow to setup.py
SuperShinyEyes Jul 15, 2019
ef70d5b
Add tests-tensorflow/
SuperShinyEyes Jul 15, 2019
32260d1
tensorflow cmake
BachiLi Jul 15, 2019
04e4edd
remove temp files
BachiLi Jul 16, 2019
68754a2
minor style thing
BachiLi Jul 16, 2019
1533a82
remove tensorflow scatter add custom op
BachiLi Jul 16, 2019
a1d68e6
Fix tensorflow cmake
BachiLi Jul 16, 2019
0c82829
tensorflow gpu
Jul 25, 2019
f1eadea
don't use tfe.Variable, remove test_data_ptr
Jul 25, 2019
8fc517e
Tensor conversion in camera.py
Jul 25, 2019
4809b75
remove unused import
Jul 25, 2019
28b575b
better comments
Jul 25, 2019
17e0373
remove unnecessary device specification
Jul 25, 2019
94725ad
Fix device memory & minor cleanup
Jul 25, 2019
3392dd8
Fix tensorflow scalar cache corruption
Jul 26, 2019
96e3414
rewrite test_envmap
Jul 26, 2019
53cb552
cleanup
Jul 26, 2019
f0d24a5
suppress scikit-image warning
Jul 26, 2019
db8a327
rewrite test g buffer
Jul 26, 2019
0633979
cleanup
Jul 26, 2019
8ffb62a
rewrite shadow blocker
Jul 26, 2019
e604201
use gpu
Jul 26, 2019
d679caa
rewrite shadow camera
Jul 26, 2019
8dd52e6
rewrite shadow glossy
Jul 26, 2019
a8b6052
rewrite test_shadow_light
Jul 26, 2019
6e7ddd5
rewrite test_shadow_receiver
Jul 26, 2019
b3f1249
rewrite test_single_triangle_background
Jul 26, 2019
da9735d
rewrite test_single_triangle_camera
Jul 26, 2019
1b32866
use gpu
Jul 26, 2019
396f59b
rewrite test_single_triangle_camera_fisheye
Jul 26, 2019
899ea22
rewrite test_single_triangle_clipped.py
Jul 26, 2019
0f11dd0
cleanup
Jul 26, 2019
9ab1a5a
rewrite test_svbrdf
Jul 26, 2019
ea7e64c
rewrite test_teapot_reflectance
Jul 27, 2019
4b549b7
todo comments
Jul 27, 2019
35418fe
force on-demand gpu memory allocation for tensorflow
Jul 27, 2019
dab4775
show warning only when using gpu
Jul 27, 2019
4d9efd6
rewrite test teapot specular
Jul 27, 2019
406d8ef
rewrite test texture
Jul 27, 2019
51cbacb
rewrite test two triangles
Jul 27, 2019
2b56ff0
cleanup
Jul 27, 2019
99ef270
cleanup
Jul 27, 2019
7543e39
cleanup
Jul 27, 2019
8783691
readme
Jul 27, 2019
1 change: 1 addition & 0 deletions .gitignore
@@ -4,6 +4,7 @@ __pycache__
pydelta/__pycache__
.cache
tests/results
tests_tensorflow/results
tutorials/results
examples/results
debug.exr
17 changes: 13 additions & 4 deletions CMakeLists.txt
@@ -10,12 +10,14 @@ if(WIN32)
else()
find_package(Python 3.6 COMPONENTS Development REQUIRED)
endif()

find_package(CUDA 10)
if(CUDA_FOUND)
find_package(OptiX REQUIRED)
else()
find_package(Thrust REQUIRED)
endif()

find_package(Embree)
if(NOT EMBREE_FOUND)
add_subdirectory(embree)
@@ -112,7 +114,7 @@ if(APPLE)
set(DYNAMIC_LOOKUP "-undefined dynamic_lookup")
endif()
if (WIN32)
pybind11_add_module(redner SHARED ${SRCS} )
pybind11_add_module(redner SHARED ${SRCS})
endif()

if(CUDA_FOUND)
@@ -163,9 +165,8 @@ if (NOT WIN32)
${DYNAMIC_LOOKUP})
else()
target_link_libraries(redner
PRIVATE
${EMBREE_LIBRARY}
)
PRIVATE
${EMBREE_LIBRARY})
endif()

endif()
@@ -188,11 +189,19 @@ find_package(OpenEXR REQUIRED)
if(NOT OPENEXR_FOUND)
add_subdirectory(openexr)
endif()

# Install openexrpython
if(APPLE)
set(ENV{MACOSX_DEPLOYMENT_TARGET} 10.9)
endif()
install(CODE "execute_process(COMMAND python ${CMAKE_CURRENT_SOURCE_DIR}/openexrpython/setup.py install
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/openexrpython)")

# Install pyredner & pyredner_tensorflow
install(CODE "execute_process(COMMAND python ${CMAKE_CURRENT_SOURCE_DIR}/setup.py install
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})")

find_package(TensorFlow)
if(TensorFlow_FOUND)
add_subdirectory(pyredner_tensorflow/custom_ops)
endif()
2 changes: 2 additions & 0 deletions README.md
@@ -2,6 +2,7 @@

News

07/27/2019 - Tensorflow 1.14 support! See tests_tensorflow for examples. Tutorials are a work in progress. Thanks to [Seyoung Park](https://github.com/SuperShinyEyes) for the contribution.
06/25/2019 - Added orthographic cameras (see examples/two_d_mesh.py).
05/13/2019 - Fixed quite a few bugs related to camera derivatives. If something didn't work for you before, maybe try again.
04/28/2019 - Added QMC support (see tests/test_qmc.py and the documentation in pyredner.serialize_scene()).
@@ -30,6 +31,7 @@ redner depends on a few libraries/systems:
- [Python 3.6 or above](https://www.python.org) (required)
- [pybind11](https://github.com/pybind/pybind11) (required)
- [PyTorch 0.4.1 or 1.0](https://pytorch.org) (required)
- [Tensorflow 1.14](https://www.tensorflow.org/) (optional, required if PyTorch is not installed)
- [OpenEXR](https://github.com/openexr/openexr) (required)
- [Embree](https://embree.github.io) (required)
- [CUDA 10](https://developer.nvidia.com/cuda-downloads) (optional, need GPU at Kepler class or newer)
24 changes: 24 additions & 0 deletions cmake/FindTensorFlow.cmake
@@ -0,0 +1,24 @@
# https://github.com/PatWie/tensorflow-cmake/blob/master/cmake/modules/FindTensorFlow.cmake

execute_process(
COMMAND python -c "import tensorflow as tf; print(tf.__version__); print(tf.__cxx11_abi_flag__); print(tf.sysconfig.get_include()); print(tf.sysconfig.get_lib())"
OUTPUT_VARIABLE TF_INFORMATION_STRING
OUTPUT_STRIP_TRAILING_WHITESPACE
RESULT_VARIABLE retcode)

if("${retcode}" STREQUAL "0")
string(REPLACE "\n" ";" TF_INFORMATION_LIST ${TF_INFORMATION_STRING})
list(GET TF_INFORMATION_LIST 0 TF_DETECTED_VERSION)
list(GET TF_INFORMATION_LIST 1 TF_DETECTED_ABI)
list(GET TF_INFORMATION_LIST 2 TF_DETECTED_INCLUDE_DIR)
list(GET TF_INFORMATION_LIST 3 TF_DETECTED_LIBRARY_DIR)
# Some TensorFlow installs only ship libtensorflow_framework.so.1 (no plain .so), so also search that suffix
list(APPEND CMAKE_FIND_LIBRARY_SUFFIXES .so.1)
find_library(TF_DETECTED_LIBRARY NAMES tensorflow_framework PATHS
${TF_DETECTED_LIBRARY_DIR})
set(TensorFlow_VERSION ${TF_DETECTED_VERSION})
set(TensorFlow_ABI ${TF_DETECTED_ABI})
set(TensorFlow_INCLUDE_DIR ${TF_DETECTED_INCLUDE_DIR})
set(TensorFlow_LIBRARY ${TF_DETECTED_LIBRARY})
set(TensorFlow_FOUND TRUE)
endif()
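
For reference, the probe that this module shells out to is equivalent to running the short snippet below directly. This is a sketch, assuming TensorFlow 1.14 is importable by the same `python` the build invokes; it prints the four fields that the module splits on newlines above.

```python
# Equivalent of the probe in FindTensorFlow.cmake: one field per line,
# in the order the CMake module parses them.
import tensorflow as tf

print(tf.__version__)              # -> TF_DETECTED_VERSION
print(tf.__cxx11_abi_flag__)       # -> TF_DETECTED_ABI (C++11 ABI flag the library was built with)
print(tf.sysconfig.get_include())  # -> TF_DETECTED_INCLUDE_DIR
print(tf.sysconfig.get_lib())      # -> TF_DETECTED_LIBRARY_DIR
```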
56 changes: 56 additions & 0 deletions pyredner_tensorflow/__init__.py
@@ -0,0 +1,56 @@
import numpy as np
import tensorflow as tf
from .device import *
from .camera import *
from .shape import *
from .material import *
from .texture import *
from .area_light import *
from .envmap import *
from .scene import *
from .render_tensorflow import *
from .image import *
from .load_obj import load_obj
from .load_mitsuba import load_mitsuba
from .transform import gen_rotate_matrix
from .utils import *
from .channels import *

import os.path
import redner
from tensorflow.python.framework import ops

__data_ptr_module = tf.load_op_library(os.path.join(os.path.dirname(redner.__file__), 'libredner_tf_data_ptr.so'))

DEBUG = False
IS_UNIT_TEST = False

def data_ptr(tensor):
    addr_as_uint64 = __data_ptr_module.data_ptr(tensor)
    return int(addr_as_uint64)

def write_tensor(path, tensor, height, width):
    with open(path, 'w') as f:
        for i in range(height):
            for j in range(width):
                f.write(f'{tensor[i,j]} ')
            f.write('\n')

def pretty_debug_print(grads, vars, iter_num=-1):
    from pprint import pprint
    print("/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\")
    if iter_num > -1:
        print("Iteration: ", iter_num)
    print(">>> GRADIENTS:")
    if (isinstance(grads, dict)):
        for k, v in grads.items():
            print(k, v.shape, v.numpy())
    elif (isinstance(grads, list)):
        for k in grads:
            print(k)
    print("\n>>> VARIABLES:")
    for v in vars:
        print(v.name, v.shape, v.numpy())

def get_render_args(seed, scene_args):
    return [tf.constant(seed)] + scene_args
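
As a rough usage sketch (not from the PR: the tensor value and seed are made up, and `scene_args` is left as an empty placeholder for whatever the scene serialization in render_tensorflow produces), the two small helpers above are meant to be called like this in eager mode:

```python
import tensorflow as tf
import pyredner_tensorflow as pyredner

tf.compat.v1.enable_eager_execution()  # the module assumes TF 1.14 eager mode

t = tf.constant([1.0, 2.0, 3.0])
addr = pyredner.data_ptr(t)   # raw buffer address of the tensor, as a Python int
print(hex(addr))

# get_render_args simply prepends the sampler seed (as a tf constant) to the
# serialized scene argument list; an empty list is used here purely for illustration.
scene_args = []
args = pyredner.get_render_args(seed = 42, scene_args = scene_args)
```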
22 changes: 22 additions & 0 deletions pyredner_tensorflow/area_light.py
@@ -0,0 +1,22 @@
import tensorflow as tf

class AreaLight:
    def __init__(self, shape_id, intensity, two_sided = False):
        assert(tf.executing_eagerly())
        self.shape_id = shape_id
        self.intensity = tf.identity(intensity).cpu()
        self.two_sided = two_sided

    def state_dict(self):
        return {
            'shape_id': self.shape_id,
            'intensity': self.intensity,
            'two_sided': self.two_sided
        }

    @classmethod
    def load_state_dict(cls, state_dict):
        return cls(
            state_dict['shape_id'],
            state_dict['intensity'],
            state_dict['two_sided'])
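
A minimal sketch of how the class above would be used (the shape_id and intensity values are illustrative, not from the PR); note that the constructor copies the intensity tensor to the CPU:

```python
import tensorflow as tf
import pyredner_tensorflow as pyredner

tf.compat.v1.enable_eager_execution()  # AreaLight asserts eager mode

# shape_id is an index into the scene's shape list; intensity is an RGB radiance value.
light = pyredner.AreaLight(shape_id = 0,
                           intensity = tf.constant([20.0, 20.0, 20.0]))

# state_dict / load_state_dict round-trip, e.g. for checkpointing a scene.
state = light.state_dict()
restored = pyredner.AreaLight.load_state_dict(state)
assert restored.shape_id == light.shape_id
```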
124 changes: 124 additions & 0 deletions pyredner_tensorflow/camera.py
@@ -0,0 +1,124 @@
from typing import Tuple
import numpy as np
import tensorflow as tf
import pyredner_tensorflow.transform as transform
import redner
import pyredner_tensorflow as pyredner

class Camera:
    """
    redner supports a perspective camera and a fisheye camera.
    Both of them employ a look-at transform.

    Note:
        The Camera constructor converts all variables into a CPU device,
        no matter where they are originally.

    Args:
        position (length 3 float tensor): the origin of the camera
        look_at (length 3 float tensor): the point the camera is looking at
        up (length 3 float tensor): the up vector of the camera
        fov (length 1 float tensor): the field of view of the camera in degrees;
            has no effect if the camera is a fisheye camera
        clip_near (float): the near clipping plane of the camera; must be > 0
        resolution (length 2 tuple): the size of the output image in (height, width)
        cam_to_ndc (3x3 matrix): a matrix that transforms
            [-1, 1/aspect_ratio] x [1, -1/aspect_ratio] to [0, 1] x [0, 1],
            where aspect_ratio = width / height
        camera_type (redner.CameraType): the type of the camera (perspective, orthographic, or fisheye)
        fisheye (bool): whether the camera is a fisheye camera (legacy parameter kept for compatibility)
    """
    def __init__(self,
                 position: tf.Tensor,
                 look_at: tf.Tensor,
                 up: tf.Tensor,
                 fov: tf.Tensor,
                 clip_near: float,
                 resolution: Tuple[int],
                 cam_to_ndc: tf.Tensor = None,
                 camera_type = redner.CameraType.perspective,
                 fisheye: bool = False):
        assert(tf.executing_eagerly())
        assert(position.dtype == tf.float32)
        assert(len(position.shape) == 1 and position.shape[0] == 3)
        assert(look_at.dtype == tf.float32)
        assert(len(look_at.shape) == 1 and look_at.shape[0] == 3)
        assert(up.dtype == tf.float32)
        assert(len(up.shape) == 1 and up.shape[0] == 3)
        if fov is not None:
            assert(fov.dtype == tf.float32)
            assert(len(fov.shape) == 1 and fov.shape[0] == 1)
        assert(isinstance(clip_near, float))

        with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
            self.position = tf.identity(position).cpu()
            self.look_at = tf.identity(look_at).cpu()
            self.up = tf.identity(up).cpu()
            self.fov = tf.identity(fov).cpu()
            if cam_to_ndc is None:
                if camera_type == redner.CameraType.perspective:
                    fov_factor = 1.0 / tf.tan(transform.radians(0.5 * fov))
                    o = tf.convert_to_tensor(np.ones([1], dtype=np.float32), dtype=tf.float32)
                    diag = tf.concat([fov_factor, fov_factor, o], 0)
                    self._cam_to_ndc = tf.linalg.tensor_diag(diag)
                else:
                    self._cam_to_ndc = tf.eye(3, dtype=tf.float32)
            else:
                self._cam_to_ndc = tf.identity(cam_to_ndc).cpu()
            self.ndc_to_cam = tf.linalg.inv(self.cam_to_ndc)
        self.clip_near = clip_near
        self.resolution = resolution
        self.camera_type = camera_type
        if fisheye:
            self.camera_type = redner.CameraType.fisheye

    @property
    def fov(self):
        return self._fov

    @fov.setter
    def fov(self, value):
        with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
            self._fov = tf.identity(value).cpu()
            fov_factor = 1.0 / tf.tan(transform.radians(0.5 * self._fov))
            o = tf.convert_to_tensor(np.ones([1], dtype=np.float32), dtype=tf.float32)
            diag = tf.concat([fov_factor, fov_factor, o], 0)
            self._cam_to_ndc = tf.linalg.tensor_diag(diag)
            self.ndc_to_cam = tf.linalg.inv(self._cam_to_ndc)

    @property
    def cam_to_ndc(self):
        return self._cam_to_ndc

    @cam_to_ndc.setter
    def cam_to_ndc(self, value):
        with tf.device('/device:cpu:' + str(pyredner.get_cpu_device_id())):
            self._cam_to_ndc = tf.identity(value).cpu()
            self.ndc_to_cam = tf.linalg.inv(self._cam_to_ndc)

    def state_dict(self):
        return {
            'position': self.position,
            'look_at': self.look_at,
            'up': self.up,
            'fov': self.fov,
            'cam_to_ndc': self._cam_to_ndc,
            'ndc_to_cam': self.ndc_to_cam,
            'clip_near': self.clip_near,
            'resolution': self.resolution,
            'camera_type': self.camera_type
        }

    @classmethod
    def load_state_dict(cls, state_dict):
        out = cls.__new__(Camera)
        out.position = state_dict['position']
        out.look_at = state_dict['look_at']
        out.up = state_dict['up']
        out.fov = state_dict['fov']
        out._cam_to_ndc = state_dict['cam_to_ndc']
        out.ndc_to_cam = state_dict['ndc_to_cam']
        out.clip_near = state_dict['clip_near']
        out.resolution = state_dict['resolution']
        out.camera_type = state_dict['camera_type']
        return out
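
A minimal sketch of constructing a perspective camera through the interface above (all values are illustrative, not from the PR; `redner.CameraType` comes from the compiled redner extension):

```python
import tensorflow as tf
import redner
import pyredner_tensorflow as pyredner

tf.compat.v1.enable_eager_execution()  # Camera asserts eager mode

cam = pyredner.Camera(position = tf.constant([0.0, 0.0, -5.0]),
                      look_at = tf.constant([0.0, 0.0, 0.0]),
                      up = tf.constant([0.0, 1.0, 0.0]),
                      fov = tf.constant([45.0]),  # degrees, length-1 float32 tensor
                      clip_near = 1e-2,           # Python float, must be > 0
                      resolution = (256, 256),
                      camera_type = redner.CameraType.perspective)

# Everything is stored on the CPU regardless of where the inputs lived,
# and cam_to_ndc is derived from fov since none was passed in.
print(cam.cam_to_ndc)
```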