Commit

Merge branch 'master' of https://github.com/unifyai/memory
juliagsy committed Jun 15, 2023
2 parents 3fcae00 + 9c5b336 commit 0b981a9
Showing 6 changed files with 28 additions and 25 deletions.
2 changes: 2 additions & 0 deletions install_dependencies.sh
@@ -0,0 +1,2 @@
+pip install -r requirements.txt || exit 1
+pip install -r optional.txt || exit 1
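
Note: the `|| exit 1` suffixes make the script abort as soon as either install fails, instead of continuing with a half-installed environment. A minimal Python equivalent of the same fail-fast behaviour (a sketch; only the two requirements file names come from the diff):

    import subprocess
    import sys

    for req_file in ("requirements.txt", "optional.txt"):
        # mirrors `pip install -r <file> || exit 1`: stop at the first failure
        result = subprocess.run([sys.executable, "-m", "pip", "install", "-r", req_file])
        if result.returncode != 0:
            sys.exit(1)
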
14 changes: 6 additions & 8 deletions ivy_memory/geometric/esm.py
@@ -226,7 +226,8 @@ def _frame_to_omni_frame_projection(
         cam_rel_poses
             Relative pose of camera to agent *[batch_size, n, c, 6]*
         cam_rel_mats
-            Relative transformation matrix from camera to agent *[batch_size, n, c, 3, 4]*
+            Relative transformation matrix from camera to agent
+            *[batch_size, n, c, 3, 4]*
         uniform_sphere_pixel_coords
             Pixel coords *[batch_size, n, h, w, 3]*
         cam_coords_f1
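
Note: the *[..., 3, 4]* matrices documented above are extrinsic transforms [R | t] applied to homogeneous coordinates. A minimal sketch of one such matrix acting on a single point (shapes follow the docstring; the identity values and the numpy backend are illustrative assumptions):

    import ivy

    ivy.set_backend("numpy")

    cam_rel_mat = ivy.eye(3, 4)                        # [R | t]: identity rotation, zero translation
    point_h = ivy.array([1.0, 2.0, 3.0, 1.0])          # homogeneous point [x, y, z, 1]
    point_in_agent = ivy.matmul(cam_rel_mat, point_h)  # -> shape [3]
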
@@ -260,7 +261,6 @@ def _frame_to_omni_frame_projection(
             *[batch_size, 1, h, w, 3+f]*
         """
 
-
         # cam 1 to cam 2 coords
 
         if ivy.is_bool_dtype(cam_coords_f1):
@@ -383,7 +383,6 @@ def _omni_frame_to_omni_frame_projection(
             *[batch_size, h, w, 3+f]*
         """
 
-
         # Frame 1 #
         # --------#
 
@@ -500,7 +499,6 @@ def _convert_images_to_omni_observations(
             batch_size, n, oh, ow, 3+f]* *[batch_size, n, oh, ow, 3+f]*
         """
 
-
         # coords from all scene cameras wrt world
 
         images_list = list()
@@ -632,7 +630,8 @@ def _kalman_filter_on_measurement_sequence(
         uniform_sphere_pixel_coords
             Uniform sphere pixel co-ordinates *[batch_size, oh, ow, 3]*
         agent_rel_poses
-            Relative poses of agents to the previous step *[batch_size, num_timesteps, 6]*
+            Relative poses of agents to the previous step
+            *[batch_size, num_timesteps, 6]*
         agent_rel_pose_covs
             Agent relative pose covariances *[batch_size, num_timesteps, 6, 6]*
         agent_rel_mats
@@ -649,7 +648,6 @@ def _kalman_filter_on_measurement_sequence(
             list of *[batch_size, oh, ow, (3+f)]*, list of *[batch_size, oh, ow, (3+f)]*
         """
 
-
         fused_list = list()
         fused_variances_list = list()
 
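
Note: for readers unfamiliar with the filter named above, the textbook Kalman predict step on a 6-dof pose state looks as follows. This is a generic sketch only, not the module's actual filter; all matrices here are illustrative placeholders:

    import ivy

    ivy.set_backend("numpy")

    x = ivy.zeros((6, 1))   # state mean (6-dof pose)
    P = ivy.eye(6)          # state covariance
    F = ivy.eye(6)          # transition model
    Q = ivy.eye(6) * 1e-3   # process noise

    # predict: x' = F x,  P' = F P F^T + Q
    x_pred = ivy.matmul(F, x)
    P_pred = ivy.matmul(ivy.matmul(F, P), ivy.matrix_transpose(F)) + Q
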
@@ -933,7 +931,8 @@ def smooth(
         )
 
         # replace temporary zeros with their prior values
-        # This ensures that the smoothing operation only changes the values for regions of high variance
+        # This ensures that the smoothing operation only
+        # changes the values for regions of high variance
         if fix_low_var_pixels:
             fused_val = ivy.where(low_var_mask, fused_val, smoothed_fused_val)
         else:
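
Note: the `ivy.where` call above keeps low-variance pixels at their prior values, so smoothing only alters regions of high variance. A standalone sketch of that selective update (variable names follow the diff; the shapes, threshold, and stand-in smoothing are illustrative assumptions):

    import ivy

    ivy.set_backend("numpy")

    fused_val = ivy.random_uniform(shape=(1, 8, 8, 4))   # prior fused values
    variance = ivy.random_uniform(shape=(1, 8, 8, 4))    # per-pixel variance
    smoothed_fused_val = fused_val * 0.5                 # stand-in for a smoothed result

    low_var_mask = variance < 0.1                        # pixels that are already confident
    # keep confident pixels as-is; smooth only the high-variance regions
    fused_val = ivy.where(low_var_mask, fused_val, smoothed_fused_val)
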
@@ -977,7 +976,6 @@ def _forward(
             New memory of type ESMMemory
         """
 
-
         # get shapes
         img_meas = (next(iter(obs.img_meas.values()))).img_mean
         if batch_size is None:
3 changes: 0 additions & 3 deletions ivy_memory_demos/interactive/learning_to_copy_with_ntm.py
@@ -105,9 +105,6 @@ def main(
             lambda v, ttl_sq, trgt_sq, sq_ln: loss_fn(ntm, v, ttl_sq, trgt_sq, sq_ln),
             return_backend_compiled_fn=True,
         )
-        # loss_fn_maybe_compiled = lambda v, ttl_sq, trgt_sq, sq_ln: loss_fn(
-        #     ntm, v, ttl_sq, trgt_sq, sq_ln
-        # )
     else:
         loss_fn_maybe_compiled = lambda v, ttl_sq, trgt_sq, sq_ln: loss_fn(
             ntm, v, ttl_sq, trgt_sq, sq_ln
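
Note: with the commented-out duplicate removed, the surviving branches implement a "maybe compiled" loss: compile when the backend supports it, otherwise fall back to the plain closure. The shape of that pattern, reduced to a sketch (`compile_fn` is a hypothetical stand-in, not the demo's actual compiler API):

    def maybe_compile(fn, do_compile, compile_fn):
        # return a compiled version of fn when requested, else fn unchanged
        return compile_fn(fn) if do_compile else fn

    loss = maybe_compile(lambda x: x * x, do_compile=False, compile_fn=lambda f: f)
    assert loss(3) == 9
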
23 changes: 13 additions & 10 deletions ivy_memory_demos/interactive/mapping_a_room_with_esm.py
@@ -160,32 +160,35 @@ def __init__(self, interactive, try_use_sim):
         self._default_camera.set_orientation(np.array([2.642, 0.596, -0.800]))
 
         # make vision sensor child of drone
-        vision_sensor = self._vision_sensors[0]
+        self._vision_sensors[0]
         vision_sensor_body = self._vision_sensor_bodies[0]
         vision_sensor_body.set_quaternion([0.5, 0.5, 0.5, 0.5])
         vision_sensor_body.set_position(self._drone.get_position())
         vision_sensor_body.set_position([0.0, 0.0, 0.15], vision_sensor_body)
         vision_sensor_body.set_parent(self._drone)
 
         # public drone
-        cam_rel_to_mat = ivy.array(
-            vision_sensor.get_matrix(self._drone)[0:3].tolist()
-        )
         self.drone = Drone(self._drone, DroneCam(self._vision_sensors[0]))
 
         # wait for user input
         self._user_prompt(
             "\nInitialized scene with a drone in the centre.\n\n"
             "You can click on the drone,"
-            "then select the box icon with four arrows in the top panel of the simulator, "
+            "then select the box icon with four arrows in the top panel "
+            "of the simulator, "
             "and then drag the drone around dynamically.\n"
-            "Starting to drag and then holding ctrl allows you to also drag the camera up and down. \n\n"
-            "This demo enables you to capture 10 different images from the drone forward facing camera, "
-            "and render the first 10 point cloud representations of the ESM memory in an open3D visualizer.\n\n"
-            "Both visualizers can be translated and rotated by clicking either the left mouse button or the wheel, "
+            "Starting to drag and then holding ctrl allows you to also "
+            "drag the camera up and down. \n\n"
+            "This demo enables you to capture 10 different images "
+            "from the drone forward facing camera, "
+            "and render the first 10 point cloud representations of the "
+            "ESM memory in an open3D visualizer.\n\n"
+            "Both visualizers can be translated and rotated by clicking "
+            "either the left mouse button or the wheel, "
             "and then dragging the mouse.\n"
             "Scrolling the mouse wheel zooms the view in and out.\n\n"
-            "Both visualizers can be rotated and zoomed by clicking either the left mouse button or the wheel, "
+            "Both visualizers can be rotated and zoomed by clicking "
+            "either the left mouse button or the wheel, "
             "and then dragging with the mouse.\n\n"
             "Press enter in the terminal to start the demo.\n\n"
         )
3 changes: 2 additions & 1 deletion ivy_memory_tests/test_ivy_memory_demos.py
@@ -20,7 +20,8 @@ def test_demo_ntm_copy(compile_flag, dev_str, f, fw):
     from ivy_memory_demos.interactive.learning_to_copy_with_ntm import main
 
     if fw in ["numpy", "tensorflow_graph"]:
-        # numpy does not support gradients, and demo compiles already, so no need to use tf_graph_call
+        # numpy does not support gradients, and demo compiles already,
+        # so no need to use tf_graph_call
         pytest.skip()
     if fw in ["torch", "jax"] and compile_flag:
         # PyTorch Dictionary inputs to traced functions must have consistent type
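
Note: the guard above is the usual pytest pattern for gating a test on backend capabilities. A self-contained sketch of the same idea (the framework names come from the test; the parametrization and assertion are illustrative):

    import pytest

    @pytest.mark.parametrize("fw", ["numpy", "torch", "jax", "tensorflow_graph"])
    def test_framework_gate(fw):
        if fw in ["numpy", "tensorflow_graph"]:
            # no gradient support (numpy), or compilation is already covered
            pytest.skip()
        assert fw in ["torch", "jax"]  # only gradient-capable backends reach here
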
8 changes: 5 additions & 3 deletions setup.py
@@ -30,7 +30,7 @@ def _replace_logos_html(txt):
     backends_chunk = chunks[2]
     bc = backends_chunk.split("\n\n")
     img_str = (
-        ".. image:: https://github.com/unifyai/unifyai.github.io/blob/master/img/externally_linked/logos/supported/frameworks.png?raw=true\n"
+        ".. image:: https://github.com/unifyai/unifyai.github.io/blob/master/img/externally_linked/logos/supported/frameworks.png?raw=true\n"  # noqa
         " :width: 100%"
     )
     backends_chunk = "\n\n".join(bc[0:1] + [img_str] + bc[2:])
@@ -39,7 +39,7 @@ def _replace_logos_html(txt):
     libraries_chunk = chunks[3]
     lc = libraries_chunk.split("\n\n")
     img_str = (
-        ".. image:: https://github.com/unifyai/unifyai.github.io/blob/master/img/externally_linked/ivy_libraries.png?raw=true\n"
+        ".. image:: https://github.com/unifyai/unifyai.github.io/blob/master/img/externally_linked/ivy_libraries.png?raw=true\n"  # noqa
         " :width: 100%"
     )
     libraries_chunk = "\n\n".join(lc[0:1] + [img_str] + lc[2:])
@@ -117,7 +117,9 @@ def _is_raw_block(line):
     version="1.1.9",
     author="Ivy Team",
     author_email="ivydl.team@gmail.com",
-    description="End-to-end memory modules for machine learning developers, written in Ivy.",
+    description=(
+        "End-to-end memory modules for machine learning developers, " "written in Ivy."
+    ),
     long_description=long_description,
     long_description_content_type="text/x-rst",
     url="https://lets-unify.ai/memory",
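
Note: the reformatted `description` relies on Python's implicit concatenation of adjacent string literals, so the wrapped source still yields a single string:

    description = (
        "End-to-end memory modules for machine learning developers, " "written in Ivy."
    )
    assert description == (
        "End-to-end memory modules for machine learning developers, written in Ivy."
    )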
