Skip to content

Commit

Permalink
Merge bba0d74 into d1cda00
Browse files Browse the repository at this point in the history
  • Loading branch information
fegorsch committed Mar 18, 2020
2 parents d1cda00 + bba0d74 commit 352101d
Show file tree
Hide file tree
Showing 3 changed files with 131 additions and 32 deletions.
18 changes: 18 additions & 0 deletions models/points_ascii_with_lists.ply
@@ -0,0 +1,18 @@
ply
format ascii 1.0
element vertex 5
property float x
property float y
property float z
element point_list 2
property list uchar uint point_indices1
property list uchar uint point_indices2
property float some_float
end_header
-0.06325 0.0359793 0.0420873
-0.06275 0.0360343 0.0425949
-0.0645 0.0365101 0.0404362
-0.064 0.0366195 0.0414512
-0.0635 0.0367289 0.0424662
3 10 11 12 2 13 14 1.1
2 10 11 3 12 13 14 2.2
21 changes: 21 additions & 0 deletions tests/test_ply.py
Expand Up @@ -57,6 +57,27 @@ def test_points(self):
assert m.vertices.shape == (1024, 3)
assert isinstance(m, g.trimesh.PointCloud)

def test_list_properties(self):
    """
    Test reading point clouds with the following metadata:
    - lists of differing length
    - multiple list properties
    - single-element properties that come after list properties
    """
    mesh = g.get_mesh('points_ascii_with_lists.ply')
    point_list = mesh.metadata['ply_raw']['point_list']['data']

    # expected per-row integer lists for each list property
    expected_indices = {
        'point_indices1': [[10, 11, 12], [10, 11]],
        'point_indices2': [[13, 14], [12, 13, 14]]}
    for prop_name, rows in expected_indices.items():
        for row_index, values in enumerate(rows):
            assert g.np.array_equal(
                point_list[prop_name][row_index],
                g.np.array(values, dtype=g.np.uint32))

    # the trailing single-element float property
    assert g.np.array_equal(
        point_list['some_float'],
        g.np.array([1.1, 2.2], dtype=g.np.float32))


if __name__ == '__main__':
g.trimesh.util.attach_to_log()
Expand Down
124 changes: 92 additions & 32 deletions trimesh/exchange/ply.py
Expand Up @@ -477,6 +477,69 @@ def element_colors(element):
return None, 0.0


def load_element_with_differing_length_lists(properties, data):
    """
    Load element data based on the element's property-definitions,
    iterating row by row so list properties whose lengths differ
    between rows are read correctly.

    Parameters
    ------------
    properties : dict
      Property definitions where the key is the property name and
      the value is the data type string; list properties carry a
      '($LIST,)' prefix before the element data type.
    data : sequence
      Rows of raw values for this element; each row is indexable
      and supports `.astype`.

    Returns
    ------------
    element_data : dict
      Property name mapped to a numpy array of its values; ragged
      list properties become object arrays of per-row arrays.
    """
    element_data = {k: [] for k in properties.keys()}
    for row in data:
        start = 0
        for name, dt in properties.items():
            length = 1
            if '$LIST' in dt:
                dt = dt.split('($LIST,)')[-1]
                # the first entry in a list-property is the number
                # of elements in the list
                length = int(row[start])
                # skip the first entry (the length) when reading the data
                start += 1
            end = start + length
            element_data[name].append(row[start:end].astype(dt))
            # start next property at the end of this one
            start = end

    # convert all property lists to numpy arrays
    for name in element_data.keys():
        try:
            element_data[name] = np.array(element_data[name]).squeeze()
        except ValueError:
            # rows have differing list lengths: NumPy >= 1.24 refuses to
            # build a ragged array implicitly, so request an object array
            # explicitly (matches the older implicit behavior)
            element_data[name] = np.array(element_data[name], dtype=object)

    return element_data


def load_element_with_single_length_lists(properties, data):
    """
    Load element data based on the element's property-definitions
    using fast column-wise numpy slicing.

    Parameters
    ------------
    properties : dict
      Property definitions where the key is the property name and
      the value is the data type string; list properties carry a
      '($LIST,)' prefix before the element data type.
    data : (n, m) array
      Rows of raw values for this element. If the data contains
      list-properties, all lists belonging to one property must
      have the same length.

    Returns
    ------------
    element_data : dict
      Property name mapped to the column slice cast to its type.
    """
    # map each property name to its (start, stop) column span,
    # measured on the first row since all rows are uniform here
    spans = {}
    first_row = data[0]
    offset = 0
    for prop_name, prop_type in properties.items():
        width = 1
        if '$LIST' in prop_type:
            # the first entry in a list-property is the number
            # of elements in the list
            width = int(first_row[offset])
            # skip the first entry (the length) when reading the data
            offset += 1
        spans[prop_name] = (offset, offset + width)
        # start next property at the end of this one
        offset += width

    element_data = {}
    for prop_name, prop_type in properties.items():
        begin, stop = spans[prop_name]
        element_data[prop_name] = data[:, begin:stop].astype(
            prop_type.split('($LIST,)')[-1])
    return element_data


def ply_ascii(elements, file_obj):
"""
Load data from an ASCII PLY file into an existing elements data structure.
Expand All @@ -501,44 +564,41 @@ def ply_ascii(elements, file_obj):
for i in lines])

# store the line position in the file
position = 0
row_pos = 0

# loop through data we need
for key, values in elements.items():
# if the element is empty ignore it
if 'length' not in values or values['length'] == 0:
continue
# will store (start, end) column index of data
columns = collections.deque()
# will store the total number of rows
rows = 0

for name, dtype in values['properties'].items():
# we need to know how many elements are in this dtype
if '$LIST' in dtype:
# if an element contains a list property handle it here
row = array[position]
list_count = int(row[rows])
# ignore the count and take the data
columns.append([rows + 1,
rows + 1 + list_count])
rows += list_count + 1
# change the datatype to just the dtype for data
values['properties'][name] = dtype.split('($LIST,)')[-1]
else:
# a single column data field
columns.append([rows, rows + 1])
rows += 1
# get the lines as a 2D numpy array
data = np.vstack(array[position:position + values['length']])
# offset position in file
position += values['length']
# store columns we care about by name and convert to data type
elements[key]['data'] = {n: data[:, c[0]:c[1]].astype(dt)
for n, dt, c in zip(
values['properties'].keys(), # field name
values['properties'].values(), # data type of field
columns)} # list of (start, end) column indexes

data = array[row_pos:row_pos + values['length']]
row_pos += values['length']

# try stacking the data, which simplifies column-wise access. this is only
# possible, if all rows have the same length.
try:
data = np.vstack(data)
col_count_equal = True
except ValueError:
col_count_equal = False

# number of list properties in this element
list_count = sum(1 for dt in values['properties'].values() if '$LIST' in dt)
if col_count_equal and list_count <= 1:
# all rows have the same length and we only have at most one list
# property where all entries have the same length. this means we can
# use the quick numpy-based loading.
element_data = load_element_with_single_length_lists(
values['properties'], data)
else:
# there are lists of differing lengths. we need to fall back to loading
# the data by iterating all rows and checking for list-lengths. this is
# slower than the variant above.
element_data = load_element_with_differing_length_lists(
values['properties'], data)

elements[key]['data'] = element_data


def ply_binary(elements, file_obj):
Expand Down

0 comments on commit 352101d

Please sign in to comment.