#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#----------------------------------------------------------------------------
# Created By : Rene HJ Heim
# Created Date: 2022/06/22
# version ='1.0'
# ---------------------------------------------------------------------------
"""
This script retrieves pixel-wise reflectance values from Metashape orthophotos. For each pixel, the
observation/illumination angles will be calculated based on Roosjen, 2017. Orthophotos are used instead of
ortho mosaics as the camera positions are available for each ortho photo but not for the mosaic. The pixel resolution
is determined by the orthophotos and dem.
The following input variables are required to build a oblique-reflectance data frame:
- camera positions as omega, phi, kappa txt file (Agisoft Metashape)
- digital elevation model (dem)
- list of ortho photos (same crs as dem)
- ...
This codes implements parallelization to speed up the process using the multiprocessing module
"""
# Load libraries
import pandas as pd
import glob
import os
from smac_functions import *
from config_object import config
from functools import reduce, partial
import numpy as np
from joblib import Parallel, delayed
# Each dictionary holds six elements:
# the output directory
# the camera position text file
# the path to the DEM
# the directory of the original images that still contain their EXIF data
# the name under which the output data will be saved
# the directory of the orthophotos
sources = [
{'out': config.main_extract_out,
'cam_path': config.main_extract_cam_path,
'dem_path': config.main_extract_dem_path,
'ori': config.main_extract_ori,
'name': config.main_extract_name,
'path_list_tag': config.main_extract_path_list_tag}
]
# Loop through the list of input sources
for source in sources:
    # Define the worker function that will be run in parallel.
    # It takes a tuple of 2 elements: an index and a chunk of images (1/15th of all orthophotos).
    def build_database(tuple_chunk):
        iteration = tuple_chunk[0]
        chunk = tuple_chunk[1]
df_list = []
        # Import the modules needed inside the worker function (each parallel process imports its own copies)
        from tqdm import tqdm
        import pandas as pd
        import glob
        import os
        import rasterio as rio
        from functools import reduce, partial
        import math
        import numpy as np
        import exiftool as exif
        from pathlib import Path, PureWindowsPath
        from timeit import default_timer as timer
        import pyarrow  # required by DataFrame.to_feather
        # Assign values from the source dictionary to variables
out = source['out']
cam_path = source['cam_path']
dem_path = source['dem_path']
ori = source['ori']
        # Turn the DEM into a table with pixels in rows and coordinates in columns
start = timer()
with rio.open(dem_path) as dem:
d1 = dem.read(1)
arr_dem = np.array(d1)
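            # xyval (from smac_functions) is assumed to return the row indices, column
            # indices, and values of all cells of a 2-D array, roughly equivalent to:
            #   rows, cols = np.indices(arr_dem.shape)
            #   rows.ravel(), cols.ravel(), arr_dem.ravel()
            # (an illustrative sketch, not the actual implementation)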
Xp_dem, Yp_dem, val_dem = xyval(arr_dem)
res_dem = []
with rio.open(dem_path) as dem_layer:
for i, j in zip(Xp_dem, Yp_dem):
res_dem.append(dem_layer.xy(i, j))
df = pd.DataFrame(res_dem)
df_dem = pd.concat([pd.Series(val_dem), df], axis=1)
df_dem.columns = ['elev', 'Xw', 'Yw']
df_dem = df_dem.round({'elev': 2, 'Xw': 1, 'Yw': 1})
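        # Rounding Xw/Yw to one decimal is what later lets this DEM table join the
        # orthophoto pixel tables, which are rounded the same way.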
end = timer()
        print('Break DEM into pixels: ', end - start, 'seconds')
        # List the paths of all images containing EXIF data from different flights and put them into one single list
ori_list = []
for item in ori:
ori_list.append(glob.glob(item + "\\*.tif")) # paths from all non-calibrated images as these have relevant EXIF
def flatten(t):
return [item for sublist in t for item in sublist]
path_flat = flatten(ori_list)
path_norm = []
for i in path_flat:
path_norm.append(str(PureWindowsPath(i)))
        # Loop through the images in the chunk
        for each_ortho in tqdm(chunk):
            # Import camera positions, then look up the camera coordinates for the associated orthophoto
start2 = timer()
campos = pd.read_csv(cam_path, sep='\t', skiprows=2, header=None)
campos.columns = ['PhotoID', 'X', 'Y', 'Z', 'Omega', 'Phi', 'Kappa', 'r11', 'r12', 'r13', 'r21',
'r22', 'r23', 'r31', 'r32', 'r33']
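            # The file is assumed to be a Metashape export: two header lines, then one
            # tab-separated row per photo, e.g. (illustrative values only):
            #   IMG_0001  605123.4  5741234.5  87.6  <Omega Phi Kappa>  <r11 ... r33>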
path, file = os.path.split(each_ortho)
name, ext = os.path.splitext(file)
reduced_string = file[5:]
reduced_name = name[5:]
            campos1 = campos[campos['PhotoID'].str.match(name)]  # match on the full name; reduced_name was producing errors
xcam = campos1['X'].values # easting
ycam = campos1['Y'].values # northing
zcam = campos1['Z'].values
del campos, campos1 # trying to reduce memory load
end2 = timer()
            print('Finding camera position for ortho: ', end2 - start2, 'seconds')
# Get sun elevation and sun azimuth angle
start3 = timer()
filter_object = filter(lambda a: reduced_string in a, path_norm)
# Convert the filter object to list
exifobj = list(filter_object)
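            # The XMP SolarElevation/SolarAzimuth values are treated as radians and
            # converted to degrees below; this assumes the original (non-calibrated)
            # images carry these tags in their metadata.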
with exif.ExifToolHelper() as et:
sunelev = float(et.get_tags(exifobj[0], 'XMP:SolarElevation')[0]['XMP:SolarElevation'])* (180/math.pi)
saa = float(et.get_tags(exifobj[0], 'XMP:SolarAzimuth')[0]['XMP:SolarAzimuth'])* (180/math.pi)
end3 = timer()
print('Getting SAA and Sun Elevation from ortho EXIF data: ', end3 - start3, 'seconds')
start4 = timer()
# Get XY + value for each ortho photo
bands = {}
with rio.open(each_ortho) as rst:
                # Loop through bands 1-10
                for counter in range(1, 11):
res = []
b1 = rst.read(counter)
arr = np.array(b1)
Xp, Yp, val = xyval(arr) # this function is loaded from smac_functions.py
                    # Loop through pixels
                    for i, j in zip(Xp, Yp):
                        res.append(rst.xy(i, j))  # input row, col -> x, y coords (see rasterio documentation)
df = pd.DataFrame(res)
df_ortho = pd.concat([pd.Series(val), df], axis=1)
df_ortho.columns = ['band'+str(counter), 'Xw', 'Yw']
                    df_ortho = df_ortho.round({'Xw': 1, 'Yw': 1})  # round coords so pixels match across layers; be careful when aligning these outputs with the original rasters
bands[f"band{counter}"] = df_ortho
            my_reduce = partial(pd.merge, on=["Xw", "Yw"], how='outer')  # merge function to join band tables on coordinates
            df_allbands = reduce(my_reduce, bands.values())  # iteratively merges all 10 bands
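            # The outer merge keeps a pixel even when it is missing from some bands,
            # leaving NaN for the absent band values.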
end4 = timer()
print('Break all ortho bands into pixel: ', end4 - start4, 'seconds')
            # Combine ortho and DEM values, compute VZA and VAA, and insert some important columns
start5 = timer()
dfs = [df_dem, df_allbands] #, df_slp, df_asp] TODO: slp and asp not needed without correcting vza
df_merged = reduce(lambda left, right: pd.merge(left, right, on=["Xw", "Yw"]), dfs) # combine all dfs above
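            # VZA geometry (a sketch of the computation below): arctan(dz / horizontal
            # distance) is the elevation angle of the camera as seen from the pixel, and
            # 90° minus that angle is the view zenith angle.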
df_merged['vza'] = df_merged.apply(lambda x: np.arctan((zcam[0] - x.elev)/math.sqrt(math.pow(xcam[0] - x.Xw, 2)+math.pow(ycam[0] - x.Yw, 2))), axis=1)
            df_merged['vza'] = round(90 - (df_merged['vza'] * (180/math.pi)), 2)  # subtract from 90° to get the zenith angle
            df_merged['vza'] = np.where((df_merged['band1'] == 65535), np.nan, df_merged['vza'])  # set NaN where band1 is nodata (65535)
            # Calculate VAA according to Roosjen (2018)
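            # A reading of the formula below: vaa_rad is the azimuth of the camera as seen
            # from the pixel, measured from north and signed positive when the camera lies
            # east of the pixel; vaa is then taken relative to the solar azimuth and
            # wrapped into (-180, 180].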
df_merged['vaa_rad'] = df_merged.apply(lambda x: math.acos((ycam[0] - x.Yw)/(math.sqrt((x.Xw - xcam[0])**2 + (x.Yw - ycam[0])**2))) if x.Xw - xcam[0] < 0 else - math.acos((ycam[0] - x.Yw)/(math.sqrt((x.Xw - xcam[0])**2 + (x.Yw - ycam[0])**2))), axis=1)
            vaa_deg = round(df_merged['vaa_rad'] * (180/math.pi), 2)
            df_merged['vaa'] = np.where(vaa_deg - saa > -180, vaa_deg - saa, vaa_deg - saa + 360)
            df_merged['vaa'] = np.where((df_merged['band1'] == 65535), np.nan, df_merged['vaa'])  # set NaN where band1 is nodata
            df_merged['vaa_rad'] = np.where((df_merged['band1'] == 65535), np.nan, df_merged['vaa_rad'])
df_merged.insert(0, 'path', file)
            df_merged.insert(1, 'xcam', xcam[0])
            df_merged.insert(2, 'ycam', ycam[0])
df_merged.insert(3, 'sunelev', round(sunelev, 2))
df_merged.insert(4, 'saa', round(saa, 2))
df_list.append(df_merged)
end5 = timer()
            print('Combine DEM and ortho values + calculate VZA and VAA: ', end5 - start5, 'seconds')
# Save table as a feather file and free memory
result = pd.concat(df_list)
        result = result.reset_index(drop=True)
if not os.path.isdir(source['out']):
os.makedirs(source['out'])
result.to_feather(f"{source['out']}\\{source['name']}_{iteration}.feather")
del result, df_merged, df_allbands, df_list
    # Split the list of orthophoto paths into chunks
    path_list = glob.glob(source['path_list_tag'])  # list all orthophoto paths
chunks = np.array_split(path_list, 15)
# Parallelize using the defined function and chunks as inputs
    Parallel(n_jobs=8)(delayed(build_database)(i) for i in enumerate(chunks))
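    # With 15 chunks and n_jobs=8, up to 8 chunks are processed concurrently;
    # each chunk writes its own numbered .feather file.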