
Take point record size into account when decoding

wonder-sk authored and nyalldawson committed Oct 26, 2020
1 parent da969e8 commit 63ecfb9bcfe8b483d0437492ed39c1193d32fdb7
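
The gist of the change: the "binary" and "zstandard" decoders previously hard-coded a 46-byte record stride; the point record size is now summed from the EPT `schema` array by `QgsPointCloudIndex` and passed to the decoders, with 12 extra bytes added to work around Entwine writing X,Y,Z as 64-bit doubles. A minimal sketch of that computation, assuming Qt's JSON API (the function name and variables are illustrative, not from the commit):

#include <QJsonArray>
#include <QJsonObject>

// Sketch: sum the attribute sizes from the EPT "schema" array, then add the
// Entwine workaround (https://github.com/connormanning/entwine/issues/240):
// X,Y,Z are stored as 64-bit doubles even when declared as 32-bit ints,
// i.e. 3 * (8 - 4) = 12 extra bytes per record.
int pointRecordSizeFromSchema( const QJsonArray &schemaArray )
{
  int recordSize = 0;
  for ( const QJsonValue &schemaItem : schemaArray )
    recordSize += schemaItem.toObject()["size"].toInt();
  recordSize += 3 * 4;
  return recordSize;
}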
qgspointclouddecoder.cpp
@@ -26,23 +26,22 @@
 #include "laz-perf/io.hpp"
 #include "laz-perf/common/common.hpp"

-QVector<qint32> QgsPointCloudDecoder::decompressBinary( const QString &filename )
+QVector<qint32> QgsPointCloudDecoder::decompressBinary( const QString &filename, int pointRecordSize )
 {
   Q_ASSERT( QFile::exists( filename ) );

   QFile f( filename );
   bool r = f.open( QIODevice::ReadOnly );
   Q_ASSERT( r );

-  // WHY??? per-record should be 18 based on schema, not 46
-  int stride = 46; //18;
-  int count = f.size() / stride;
+  int count = f.size() / pointRecordSize;
   QVector<qint32> data( count * 3 );
   for ( int i = 0; i < count; ++i )
   {
-    QByteArray bytes = f.read( stride );
+    QByteArray bytes = f.read( pointRecordSize );
     // WHY??? X,Y,Z are int32 values stored as doubles
     double *bytesD = ( double * ) bytes.constData();
     // TODO: we should respect the schema (offset and data type of X,Y,Z)
     data[i * 3 + 0] = ( bytesD[0] );
     data[i * 3 + 1] = ( bytesD[1] );
     data[i * 3 + 2] = ( bytesD[2] );
@@ -80,7 +79,7 @@ QByteArray decompressZtdStream( const QByteArray &dataCompressed )
   return dataUncompressed;
 }

-QVector<qint32> QgsPointCloudDecoder::decompressZStandard( const QString &filename )
+QVector<qint32> QgsPointCloudDecoder::decompressZStandard( const QString &filename, int pointRecordSize )
 {
   Q_ASSERT( QFile::exists( filename ) );

@@ -93,17 +92,15 @@ QVector<qint32> QgsPointCloudDecoder::decompressZStandard( const QString &filena

   // from here it's the same as "binary"

-  // WHY??? per-record should be 18 based on schema, not 46
-  int stride = 46; //18;
-  int count = dataUncompressed.size() / stride;
-
+  int count = dataUncompressed.size() / pointRecordSize;
+
   QVector<qint32> data( count * 3 );
   const char *ptr = dataUncompressed.constData();
   for ( int i = 0; i < count; ++i )
   {
     // WHY??? X,Y,Z are int32 values stored as doubles
-    double *bytesD = ( double * )( ptr + stride * i );
+    double *bytesD = ( double * )( ptr + pointRecordSize * i );
     // TODO: we should respect the schema (offset and data type of X,Y,Z)
     data[i * 3 + 0] = ( bytesD[0] );
     data[i * 3 + 1] = ( bytesD[1] );
     data[i * 3 + 2] = ( bytesD[2] );
qgspointclouddecoder.h
@@ -29,8 +29,8 @@

 namespace QgsPointCloudDecoder
 {
-  QVector<qint32> decompressBinary( const QString &filename );
-  QVector<qint32> decompressZStandard( const QString &filename );
+  QVector<qint32> decompressBinary( const QString &filename, int pointRecordSize );
+  QVector<qint32> decompressZStandard( const QString &filename, int pointRecordSize );
   QVector<qint32> decompressLaz( const QString &filename );
 };

qgspointcloudindex.cpp
@@ -161,12 +161,15 @@ bool QgsPointCloudIndex::load( const QString &fileName )

   QJsonArray schemaArray = doc["schema"].toArray();

+  mPointRecordSize = 0;
   for ( QJsonValue schemaItem : schemaArray )
   {
     QJsonObject schemaObj = schemaItem.toObject();
     QString name = schemaObj["name"].toString();
     QString type = schemaObj["type"].toString();

+    int size = schemaObj["size"].toInt();
+    mPointRecordSize += size;

     float scale = 1.f;
     if ( schemaObj.contains( "scale" ) )
@@ -195,6 +198,10 @@ bool QgsPointCloudIndex::load( const QString &fileName )
     // TODO: can parse also stats: "count", "minimum", "maximum", "mean", "stddev", "variance"
   }

+  // There seems to be a bug in Entwine: https://github.com/connormanning/entwine/issues/240
+  // point records for X,Y,Z seem to be written as 64-bit doubles even if schema says they are 32-bit ints
+  mPointRecordSize += 3 * 4;
+
   // save mRootBounds

   // bounds (cube - octree volume)
@@ -273,13 +280,13 @@ QVector<qint32> QgsPointCloudIndex::nodePositionDataAsInt32( const IndexedPointC
   {
     QString filename = QString( "%1/ept-data/%2.bin" ).arg( mDirectory ).arg( n.toString() );
     Q_ASSERT( QFile::exists( filename ) );
-    return QgsPointCloudDecoder::decompressBinary( filename );
+    return QgsPointCloudDecoder::decompressBinary( filename, mPointRecordSize );
   }
   else if ( mDataType == "zstandard" )
   {
     QString filename = QString( "%1/ept-data/%2.zst" ).arg( mDirectory ).arg( n.toString() );
     Q_ASSERT( QFile::exists( filename ) );
-    return QgsPointCloudDecoder::decompressZStandard( filename );
+    return QgsPointCloudDecoder::decompressZStandard( filename, mPointRecordSize );
   }
   else if ( mDataType == "laszip" )
   {
qgspointcloudindex.h
@@ -130,6 +130,7 @@ class CORE_EXPORT QgsPointCloudIndex: public QObject

     QgsRectangle mExtent; //!< 2D extent of data
     double mZMin = 0, mZMax = 0; //!< Vertical extent of data
+    int mPointRecordSize = 0; //!< Size of one point record in bytes (only relevant for "binary" and "zstandard" data type)
     QHash<IndexedPointCloudNode, int> mHierarchy;
     QgsVector3D mScale; //!< Scale of our int32 coordinates compared to CRS coords
     QgsVector3D mOffset; //!< Offset of our int32 coordinates compared to CRS coords
qgspointcloudrenderer.cpp
@@ -33,8 +33,12 @@ QgsPointCloudRenderer::QgsPointCloudRenderer( QgsPointCloudLayer *layer, QgsRend

   // TODO: use config from layer
   mConfig.penWidth = context.convertToPainterUnits( 1, QgsUnitTypes::RenderUnit::RenderMillimeters );
+  // good range for 26850_12580.laz
   mConfig.zMin = 400;
   mConfig.zMax = 600;
+  // good range for Trencin castle
+  //mConfig.zMin = 150;
+  //mConfig.zMax = 350;
   mConfig.colorRamp.reset( QgsStyle::defaultStyle()->colorRamp( "Viridis" ) );
 }
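
As the remaining TODO notes, the decoders still assume X,Y,Z are the first three values of each record; respecting the schema would mean reading them at their declared byte offset. A possible shape for that, sketched under the assumption that the caller supplies the offset (the `extractXyz` helper and the aliasing-safe `memcpy` are illustrative, not part of this commit):

#include <QVector>
#include <QtGlobal>
#include <cstring>

// Sketch: pull X,Y,Z out of each record at a schema-provided byte offset,
// copying via memcpy instead of casting the raw pointer to double*
// (the values are still the 64-bit doubles Entwine writes, see issue 240).
QVector<qint32> extractXyz( const char *records, int count, int pointRecordSize, int xyzOffset )
{
  QVector<qint32> data( count * 3 );
  for ( int i = 0; i < count; ++i )
  {
    double xyz[3];
    std::memcpy( xyz, records + static_cast<qint64>( i ) * pointRecordSize + xyzOffset, sizeof xyz );
    data[i * 3 + 0] = static_cast<qint32>( xyz[0] );
    data[i * 3 + 1] = static_cast<qint32>( xyz[1] );
    data[i * 3 + 2] = static_cast<qint32>( xyz[2] );
  }
  return data;
}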
